How to create a confusion matrix for VGG16 image calssification (2 options) when using preprocessing.image_dataset_from_directory

150
August 01, 2021, at 7:20 PM

I have been trying to create a confusion matrix to test my data on from my VGG16 classification model (python 3.8, using Keras).

I found a boiler plate based off of datagenerator. I tried leaving the test data as datagenerator, however then it always picks the first option. I tried converting my data coming in to data gen and ran into all sorts of problems.

Now I am trying to fit the confusion matrix to my preprocessing.image_dataset_from_directory and I get

AttributeError: 'PrefetchDataset' object has no attribute 'class_names'

Here is my code (the directory has been changed as I don't want it on the internet)

import tensorflow as tf
import sys
import os
import numpy as np
import pandas as pd
import seaborn as sns
import itertools
import sklearn
from tensorflow import keras
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
from tensorflow.keras import layers

#Importing images
train_ds = keras.preprocessing.image_dataset_from_directory(
    'directory\\classification', 
    labels="inferred", 
    label_mode="binary", 
    class_names=("Negative", "Positive"), 
    color_mode="rgb", 
    batch_size=32, 
    image_size=(227, 227), 
    shuffle=True, 
    seed=50, 
    validation_split=0.2, 
    subset="training", 
    interpolation="bilinear", 
    follow_links=False,  
)
val_ds = keras.preprocessing.image_dataset_from_directory(
    'directory\\classification', 
    labels="inferred", 
    label_mode="binary", 
    class_names=("Negative", "Positive"), 
    color_mode="rgb", 
    batch_size=32, 
    image_size=(227, 227), 
    shuffle=True, 
    seed=50, 
    validation_split=0.2, 
    subset="validation", 
    interpolation="bilinear", 
    follow_links=False,  
)
test_ds = keras.preprocessing.image_dataset_from_directory(
    'directory\\test', 
    labels="inferred", 
    label_mode="binary", 
    class_names=("Negative", "Positive"), 
    color_mode="rgb", 
    batch_size=32, 
    image_size=(227, 227), 
    shuffle=True, 
    seed=50, 
    validation_split=None, 
    subset=None, 
    interpolation="bilinear", 
    follow_links=False,  
)
print("Number of training samples: %d" % tf.data.experimental.cardinality(train_ds))
print("Number of validation samples: %d" % tf.data.experimental.cardinality(val_ds))
print("Number of test samples: %d" % tf.data.experimental.cardinality(test_ds))
# Visualise the data
import matplotlib.pyplot as plt 
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
    for i in range(9): 
        ax = plt.subplot(3, 3, i + 1) 
        plt.imshow(images[i].numpy().astype("uint8")) 
        plt.title(int(labels[i])) 
        plt.axis("off")
# data augmentation
data_augmentation = keras.Sequential(
    [layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical"), 
     layers.experimental.preprocessing.RandomRotation(0.2),])
# Visualise
plt.figure(figsize=(10, 10))
for images, _ in train_ds.take(1):
    for i in range(9):
        augmented_images = data_augmentation(images)
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(augmented_images[0].numpy().astype("uint8"))
        plt.axis("off")
        
# buffered prefetching - last op on dataset, suggested 2xbatch, default batch =32
train_ds = train_ds.prefetch(buffer_size=64)
val_ds = val_ds.prefetch(buffer_size=64)
test_ds = test_ds.prefetch(buffer_size=64)
# build the model
base_model = keras.applications.VGG16(
    include_top=False,  # Do not include the ImageNet classifier at the top.
    weights="imagenet",   # Load weights pre-trained on ImageNet.
    input_tensor=None,
    input_shape=(227, 227, 3),
    pooling=max,
    classes=2,
    classifier_activation="softmax") 
# Freeze the base_model
base_model.trainable = False
# Create new model on top
inputs = keras.Input(shape=(227, 227, 3))
x = data_augmentation(inputs)  # Apply random data augmentation (GPU method)
# Pre-trained Imagenet weights requires that input be normalized
# from (0, 255) to a range (-1., +1.), the normalization layer
# does the following, outputs = (inputs - mean) / sqrt(var)
norm_layer = keras.layers.experimental.preprocessing.Normalization()
mean = np.array([127.5] * 3)
var = mean ** 2
# Scale inputs to [-1, +1]
x = norm_layer(x)
norm_layer.set_weights([mean, var])
# The base model contains batchnorm layers. We want to keep them in inference mode
# when we unfreeze the base model for fine-tuning, so we make sure that the
# base_model is running in inference mode here.
x = base_model(x, training=False)
x = keras.layers.GlobalMaxPooling2D()(x)
x = keras.layers.Dropout(0.2)(x)  # Regularize with dropout
outputs = keras.layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
model.summary()
#train the top layer
model.compile(
    optimizer=keras.optimizers.Adam(),
    loss=keras.losses.BinaryCrossentropy(from_logits=True),
    metrics=[keras.metrics.BinaryAccuracy()],
)
history = model.fit(train_ds, epochs=1, validation_data=val_ds)
#Evaluate model
validation_steps = 20
loss0,accuracy0 = model.evaluate(val_ds, steps = validation_steps)
print("loss: {:.2f}".format(loss0))
print("accuracy: {:.2f}".format(accuracy0))
#learning curves
# accuracy
plt.plot(history.history['binary_accuracy'])
plt.plot(history.history['val_binary_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
#loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
#confusion matrix
#1.Predict classes
predictions = model.predict(test_ds, verbose=1)
# Get most likely class
predicted_classes = np.argmax(predictions, axis=1)
# 2.Get ground-truth classes and class-labels
true_classes = test_ds.labels
class_labels = list(test_ds.labels.keys()) 
# 3. Use scikit-learn to get statistics
from sklearn.metrics import confusion_matrix,classification_report
print(class_labels)
print(confusion_matrix(test_ds.labels, predicted_classes))
report = sklearn.metrics.classification_report(true_classes, predicted_classes, target_names=class_labels)
print(report) 

I am struggling. I know that there is an issue with the prefect dataset, but I don't know how to fix. I tried classes, class_names, labels. I can't find things to help.

Any help would be greatly appreciated.

READ ALSO
I can't link the information from a database into my php document

I can't link the information from a database into my php document

I'm trying to make an online shop for my school canteen (this is a school assignment) and I'm really struggling with linking items from the database I've created into my PHP document

84
How do I turn my website into an app when adding to homescreen on an ios device?

How do I turn my website into an app when adding to homescreen on an ios device?

I have a website and I would like for it to be an app, take as an example the samsung itest which prompts you to add to the home screen on an ios device, how would I implement such a thing and then make it work as a fullscreen app?

63
Align Images To The Right Within a Table Data (td) Cell

Align Images To The Right Within a Table Data (td) Cell

I have a <td> img element within my react-bootstrap table that I want to align to the rightCurrently, it's set to have a marginLeft of 10px after the text to the left (see picture) but I would like all the imgs to be consistent in a single "column"...

115
Unable to attach to nodemon: Could not find any debuggable target at Object.retryGetNodeEndpoint

Unable to attach to nodemon: Could not find any debuggable target at Object.retryGetNodeEndpoint

First I start my node application with command nodemon indexjs and then I use the launch configuration provided below to connect the debugger

148