Classification with Keras: prediction and multiclass - python

I have implemented a multiclass classifier with Keras.
My problem now is making predictions, because I get an error. I believe it is related to the prediction part of the code.
The code is the following:
import numpy as np
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
from keras import applications
from keras.utils.np_utils import to_categorical
from PIL import Image
import matplotlib.pyplot as plt
import math
%matplotlib inline
# dimensions of our images.
img_width, img_height = 150, 150
top_model_weights_path = 'bottleneck_fc_model.h5'
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
epochs = 30
batch_size = 16
def save_bottleneck_features():
    model = applications.VGG16(include_top=False, weights='imagenet')
    datagen = ImageDataGenerator(rescale=1. / 255)
    generator = datagen.flow_from_directory(train_data_dir, target_size=(img_width, img_height),
                                            batch_size=batch_size, class_mode=None, shuffle=False)
    n_train_samples = len(generator.filenames)
    n_classes = len(generator.class_indices)
    print("Number of train files = {}".format(n_train_samples))
    print("Number of classes = {}".format(n_classes))
    predict_size_train = int(math.ceil(n_train_samples / batch_size))
    bottleneck_features_train = model.predict_generator(generator, predict_size_train)
    np.save('bottleneck_features_train.npy', bottleneck_features_train)
    generator = datagen.flow_from_directory(validation_data_dir, target_size=(img_width, img_height),
                                            batch_size=batch_size, class_mode=None, shuffle=False)
    n_validation_samples = len(generator.filenames)
    predict_size_validation = int(math.ceil(n_validation_samples / batch_size))
    bottleneck_features_validation = model.predict_generator(generator, predict_size_validation)
    np.save('bottleneck_features_validation.npy', bottleneck_features_validation)
def train_top_model():
    datagen_top = ImageDataGenerator(rescale=1. / 255)
    generator_top = datagen_top.flow_from_directory(train_data_dir, target_size=(img_width, img_height),
                                                    batch_size=batch_size, class_mode='categorical',
                                                    shuffle=False)
    n_train_samples = len(generator_top.filenames)
    n_classes = len(generator_top.class_indices)
    # load the bottleneck features saved earlier
    train_data = np.load('bottleneck_features_train.npy')
    # get the class labels for the training data, in the original order
    train_labels = generator_top.classes
    # convert the training labels to categorical vectors
    train_labels = to_categorical(train_labels, num_classes=n_classes)
    generator_top = datagen_top.flow_from_directory(validation_data_dir, target_size=(img_width, img_height),
                                                    batch_size=batch_size, class_mode=None, shuffle=False)
    n_validation_samples = len(generator_top.filenames)
    validation_data = np.load('bottleneck_features_validation.npy')
    validation_labels = generator_top.classes
    validation_labels = to_categorical(validation_labels, num_classes=n_classes)
    model = Sequential()
    model.add(Flatten(input_shape=train_data.shape[1:]))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='sigmoid'))
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy', metrics=['accuracy'])
    history = model.fit(train_data, train_labels, epochs=epochs, batch_size=batch_size,
                        validation_data=(validation_data, validation_labels))
    model.save_weights(top_model_weights_path)
    (eval_loss, eval_accuracy) = model.evaluate(validation_data, validation_labels,
                                                batch_size=batch_size, verbose=1)
    print("[INFO] accuracy: {:.2f}%".format(eval_accuracy * 100))
    print("[INFO] Loss: {}".format(eval_loss))
    return model
To execute the program, we run:
save_bottleneck_features()
model = train_top_model()
When I try to make a prediction using the following code:
img_path = 'image_test/bird.jpg'
# predicting images
img = load_img(img_path, target_size=(img_width, img_height))
x = img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = model.predict_classes(images, batch_size=10)
print (classes)
It gives me the following error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-44-c3652addeabc> in <module>()
8
9 images = np.vstack([x])
---> 10 classes = model.predict_classes(images, batch_size=10)
11 print (classes)
~/anaconda/lib/python3.6/site-packages/keras/models.py in predict_classes(self, x, batch_size, verbose)
1016 A numpy array of class predictions.
1017 """
-> 1018 proba = self.predict(x, batch_size=batch_size, verbose=verbose)
1019 if proba.shape[-1] > 1:
1020 return proba.argmax(axis=-1)
~/anaconda/lib/python3.6/site-packages/keras/models.py in predict(self, x, batch_size, verbose)
911 if not self.built:
912 self.build()
--> 913 return self.model.predict(x, batch_size=batch_size, verbose=verbose)
914
915 def predict_on_batch(self, x):
~/anaconda/lib/python3.6/site-packages/keras/engine/training.py in predict(self, x, batch_size, verbose, steps)
1693 x = _standardize_input_data(x, self._feed_input_names,
1694 self._feed_input_shapes,
-> 1695 check_batch_axis=False)
1696 if self.stateful:
1697 if x[0].shape[0] > batch_size and x[0].shape[0] % batch_size != 0:
~/anaconda/lib/python3.6/site-packages/keras/engine/training.py in _standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix)
142 ' to have shape ' + str(shapes[i]) +
143 ' but got array with shape ' +
--> 144 str(array.shape))
145 return arrays
146
ValueError: Error when checking : expected flatten_8_input to have shape (None, 7, 7, 512) but got array with shape (1, 150, 150, 3)

I finally found the answer.
The top model was trained on VGG16 bottleneck features of shape (7, 7, 512), so it cannot accept a raw (150, 150, 3) image directly. In order to predict the class of an image, we need to run it through the same pipeline as before: preprocess the image, pass it through VGG16 to get the bottleneck features, and feed those features to the top model.
The prediction code must be:
import cv2

image_path = 'image_test/bird.jpg'
orig = cv2.imread(image_path)
print("[INFO] loading and preprocessing image...")
image = load_img(image_path, target_size=(img_width, img_height))
image = img_to_array(image)
# important! otherwise the predictions will be '0'
image = image / 255
image = np.expand_dims(image, axis=0)
# build the VGG16 network
model = applications.VGG16(include_top=False, weights='imagenet')
# get the bottleneck prediction from the pre-trained VGG16 model
bottleneck_prediction = model.predict(image)
# build top model (n_classes and generator_top must still be available from the training step)
model = Sequential()
model.add(Flatten(input_shape=bottleneck_prediction.shape[1:]))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))
model.load_weights(top_model_weights_path)
# use the bottleneck prediction on the top model to get the final classification
class_predicted = model.predict_classes(bottleneck_prediction)
inID = class_predicted[0]
class_dictionary = generator_top.class_indices
inv_map = {v: k for k, v in class_dictionary.items()}
label = inv_map[inID]
# get the prediction label
print("Image ID: {}, Label: {}".format(inID, label))

Related

Model.predict IndexError: tuple index out of range

Hello, I am trying to use Tkinter as a GUI for prediction with a binary model, but I keep getting IndexError: tuple index out of range. I believe there is a problem with the input sizes, but I am lost as to where to start and what to modify so that the output of the model creation fits the predictor's desired input.
This is the code I used to create the model:
# dimensions of our images.
img_width, img_height = 480, 289
train_data_dir = r'C:\Users\felix\OneDrive\Desktop\Datasets\TRAIN'
validation_data_dir = r'C:\Users\felix\OneDrive\Desktop\Datasets\TEST'
nb_train_samples = 23
nb_validation_samples = 6
epochs = 1
batch_size = 1
if K.image_data_format() == 'channels_first':
    input_shape = (1, img_width, img_height)
else:
    input_shape = (img_width, img_height, 1)
from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.layers import Dense
vgg = VGG16(include_top=False, weights='imagenet', input_shape=[], pooling='avg')
x = vgg.output
x = Dense(1, activation='sigmoid')(x)
model = Model(vgg.input, x)
model.summary()
model.compile(keras.optimizers.Adam(lr=1e-5), 'binary_crossentropy', metrics=['accuracy'])
model.save('working.h5')
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size)
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import seaborn as sns
test_steps_per_epoch = numpy.math.ceil(validation_generator.samples / validation_generator.batch_size)
predictions = model.predict_generator(validation_generator, steps=test_steps_per_epoch)
# Get most likely class
predicted_classes = numpy.argmax(predictions, axis=1)
true_classes = validation_generator.classes
class_labels = list(validation_generator.class_indices.keys())
report = classification_report(true_classes, predicted_classes, target_names=class_labels)
print(report)
cm=confusion_matrix(true_classes,predicted_classes)
sns.heatmap(cm, annot=True)
print(cm)
plt.show()
And this is the Tkinter code I used for the model prediction:
import tensorflow as tf
model = tf.keras.models.load_model(r'C:\Users\felix\PycharmProjects\CatsDogs\working.h5')
classes = ["Health", "Unhealthy"]
top=tk.Tk()
top.geometry('800x600')
top.title('Health Detector')
top.configure(background='white')
label=Label(top,background='white', font=('arial',20,'bold'))
sign_image = Label(top)
def prepare(filepath):
    IMG_SIZE = 50  # 50 in txt-based
    img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)  # read in the image, convert to grayscale
    new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))  # resize image to match model's expected sizing
    return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1)  # return the image with shaping that TF wants.
def classify(file_path):
    global label_packed
    pred = (model.predict(file_path) > 0.5).astype("int32")
    sign = classes[pred[0, 0]]
    print(sign)
    label.configure(foreground='black', text=sign)
def show_classify_button(file_path):
    classify_b = Button(top, text="Authenticate",
                        command=lambda: classify(file_path), padx=10, pady=5)
    classify_b.configure(background='black', foreground='white',
                         font=('arial', 10, 'bold'))
    classify_b.place(relx=0.43, rely=0.93)
def upload_image():
    try:
        file_path = filedialog.askopenfilename()
        uploaded = Image.open(file_path)
        uploaded.thumbnail(((top.winfo_width() / 2.25),
                            (top.winfo_height() / 2.25)))
        im = ImageTk.PhotoImage(uploaded)
        sign_image.configure(image=im)
        sign_image.image = im
        label.configure(text='')
        show_classify_button(file_path)
    except:
        pass
upload=Button(top,text="Upload the Findings",command=upload_image,
padx=10,pady=5)
upload.configure(background='black', foreground='white',
font=('arial',10,'bold'))
upload.pack(side=BOTTOM,pady=50)
sign_image.pack(side=BOTTOM,expand=True)
label.pack(side=BOTTOM,expand=True)
heading = Label(top, text="Health Detector",pady=20, font=('arial',20,'bold'))
heading.configure(background='white',foreground='black')
heading.pack()
top.mainloop()
I tried to change IMG_SIZE to img_width, img_height, and I have also tried changing the input shapes, but I still get the same problem; maybe I did it wrong or that was not the right fix. I was expecting the Tkinter app to output either Healthy or Unhealthy.
Here is the traceback:
Traceback (most recent call last):
File "C:\Users\felix\anaconda3\envs\pythonProject\lib\tkinter\__init__.py", line 1921, in __call__
return self.func(*args)
File "C:\Users\felix\Dropbox\PC\Downloads\64x300-CNN.model-20230115T145146Z-001\64x300-CNN.model\fly.py", line 35, in <lambda>
command=lambda: classify(file_path),padx=10,pady=5)
File "C:\Users\felix\Dropbox\PC\Downloads\64x300-CNN.model-20230115T145146Z-001\64x300-CNN.model\fly.py", line 28, in classify
pred = (model.predict(file_path) > 0.5).astype("int32")
File "C:\Users\felix\anaconda3\envs\pythonProject\lib\site-packages\keras\utils\traceback_utils.py", line 70, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\felix\anaconda3\envs\pythonProject\lib\site-packages\tensorflow\python\framework\tensor_shape.py", line 906, in __getitem__
return self._dims[key]
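No accepted fix is shown here, but as a hedged sketch: the classify callback passes the raw file path string to model.predict, while the model expects a batch of image arrays, which is consistent with the tuple index error inside tensor_shape. Something along these lines (the resize dimensions are an assumption and must match the shape reported by model.summary()) would at least hand the model a properly shaped tensor:

import cv2
import numpy as np

def classify(file_path):
    img = cv2.imread(file_path)                     # BGR uint8 array, shape (H, W, 3)
    img = cv2.resize(img, (img_height, img_width))  # cv2.resize takes (width, height); reproduces target_size=(img_width, img_height)
    img = img.astype("float32") / 255.0             # same rescaling as the training generators
    batch = np.expand_dims(img, axis=0)             # add the batch dimension
    pred = (model.predict(batch) > 0.5).astype("int32")
    sign = classes[pred[0, 0]]
    print(sign)
    label.configure(foreground='black', text=sign)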

Augmentation error, tensorflow.python.keras.preprocessing.image.ImageDataGenerator

Failed to find data adapter that can handle input: <class 'tensorflow.python.keras.preprocessing.image.ImageDataGenerator'>, <class 'NoneType'>
I am trying to implement a deep learning model and I have run the code more than 10 times; sometimes at epoch 41, 72, 88, or 100 I get this error. Can anybody help me?
def Tuning_Model():
    for k in range(FOLDS):
        timestamp = datetime.fromtimestamp(time()).strftime('%Y%m%d-%H%M%S')
        output_directory = model_path + "\\" + timestamp
        if not os.path.exists(output_directory):
            os.makedirs(output_directory)
        # Training image augmentation
        train_data_generator = tf.keras.preprocessing.image.ImageDataGenerator(
            rescale=1. / 255,
            fill_mode="constant",
            shear_range=0.2,
            zoom_range=(0.5, 1),
            horizontal_flip=True,
            rotation_range=360,
            channel_shift_range=25,
            brightness_range=(0.75, 1.25))
        # Validation image augmentation
        val_data_generator = tf.keras.preprocessing.image.ImageDataGenerator(
            rescale=1. / 255,
            fill_mode="constant",
            shear_range=0.2,
            zoom_range=(0.5, 1),
            horizontal_flip=True,
            rotation_range=360,
            channel_shift_range=25,
            brightness_range=(0.75, 1.25))
        test_data_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)
        train_data_generator = train_data_generator.flow_from_directory(
            train_data_path,
            target_size=RAW_IMG_SIZE,
            batch_size=BATCH_SIZE,
            class_mode='categorical')
        val_data_generator = val_data_generator.flow_from_directory(
            val_data_path,
            target_size=RAW_IMG_SIZE,
            batch_size=BATCH_SIZE,
            class_mode='categorical')
        test_data = test_data_generator.flow_from_directory(
            test_data_path,
            target_size=IMG_SIZE,
            batch_size=BATCH_SIZE,
            class_mode='categorical',
            shuffle=False)
        train_data_generator = crop_generator(train_data_generator, IMG_SIZE)
        val_data_generator = crop_generator(val_data_generator, IMG_SIZE)
        model = Build_Model(model_name)
        model_checkpoint = ModelCheckpoint(output_directory + "\\best_model.hdf5", verbose=1, save_best_only=True)
        early_stopping = EarlyStopping(patience=STOPPING_PATIENCE, restore_best_weights=True)
        reduce_lr = ReduceLROnPlateau('val_loss', factor=0.5, patience=LR_PATIENCE, min_lr=0.000003125)
        model.compile(loss='binary_crossentropy', optimizer=Adam(lr=INITIAL_LR), metrics=['categorical_accuracy'])
        history = model.fit_generator(
            generator=train_data_generator,
            steps_per_epoch=train_image_count // BATCH_SIZE,
            epochs=epochs,
            validation_data=val_data_generator,
            validation_steps=val_image_count // BATCH_SIZE,
            callbacks=[model_checkpoint, early_stopping, reduce_lr],
            shuffle=False)
        # Load the last best model
        model = load_model(output_directory + "\\best_model.hdf5")
        # Evaluate model on test set
        predictions = model.predict_generator(test_data_generator, test_image_count // BATCH_SIZE + 1)
        y_true = test_data_generator.classes
        y_pred = np.argmax(predictions, axis=1)
        print(classification_report(y_true, y_pred, labels=CLASSES, target_names=CLASS_NAMES))
        report = classification_report(y_true, y_pred, labels=CLASSES, target_names=CLASS_NAMES, output_dict=True)
        with open(output_directory + '\\classification_report.csv', 'w') as f:
            for key in report.keys():
                f.write("%s,%s\n" % (key, report[key]))
        conf_arr = confusion_matrix(y_true, y_pred, labels=CLASSES)
        print(conf_arr)
        np.savetxt(output_directory + "\\confusion_matrix.csv", conf_arr, delimiter=",")
        # Clear model from GPU after each iteration
        print("Finished testing fold {}\n".format(k + 1))
        K.clear_session()
        k = k + 1

if __name__ == '__main__':
    Tuning_Model()
ValueError: Failed to find data adapter that can handle input: <class 'tensorflow.python.keras.preprocessing.image.ImageDataGenerator'>, <class 'NoneType'>
Have you checked whether the training/testing data are in the form of NumPy arrays before fitting the model?
It might also be the case that you are importing the model from Keras, like keras.Sequential(), but the generator from TensorFlow, like tf.keras.preprocessing.image.ImageDataGenerator().
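As an illustration of that second point, here is a minimal sketch (not the poster's code; the directory, architecture, and sizes are placeholders) that keeps the model and the generator in the same tf.keras namespace and passes the iterator returned by flow_from_directory rather than the ImageDataGenerator itself:

import tensorflow as tf

# model and generator both come from tf.keras, not a mix of keras and tf.keras
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(150, 150, 3)),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(3, activation='softmax')])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)
train_gen = datagen.flow_from_directory('data/train', target_size=(150, 150),
                                        batch_size=16, class_mode='categorical')

# fit/predict take the iterator (train_gen), never the ImageDataGenerator (datagen)
model.fit(train_gen, epochs=5)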

model.predict() function always returns 1 for binary image classification model

I was working on a binary image classification deep learning model using transfer learning in Google colab.
!wget https://storage.googleapis.com/mledu-datasets/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5 -O /tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5
local_weights_file = '/tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
pre_trained_model = InceptionV3(input_shape = (300, 300, 3),
include_top = False,
weights = None)
pre_trained_model.load_weights(local_weights_file)
for layer in pre_trained_model.layers:
    layer.trainable = False
last_layer = pre_trained_model.get_layer('mixed7')
last_output = last_layer.output
x = layers.Flatten()(last_output)
x = layers.Dense(512, activation='relu')(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(1, activation='sigmoid')(x)
model = Model(pre_trained_model.input, x)
from tensorflow.keras.optimizers import RMSprop
model.compile(optimizer=RMSprop(lr=0.0001),
loss='binary_crossentropy',
metrics=['accuracy'])
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
rescale=1/255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode="nearest")
validation_datagen = ImageDataGenerator(rescale=1/255)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(300, 300),
batch_size=100,
class_mode='binary')
validation_generator = validation_datagen.flow_from_directory(
validation_dir,
target_size=(300, 300),
batch_size=100,
class_mode='binary')
history = model.fit(
train_generator,
steps_per_epoch=20,
epochs=30,
verbose=1,
validation_data=validation_generator,
validation_steps=10,
callbacks=[callbacks])
import numpy as np
from google.colab import files
from tensorflow.keras.preprocessing import image
uploaded=files.upload()
for fn in uploaded.keys():
    path = '/content/' + fn
    img = image.load_img(path, target_size=(300, 300))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    images = np.vstack([x])
    classes = model.predict(images, batch_size=10)
    print(classes)
Even though the model reaches quite good accuracy on the training and validation data, it always predicts 1 for any new image. I have tried changing the batch size, epochs, learning rate, etc., but no luck.
Can anyone explain what the problem is here?
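One thing worth checking, offered as a hedged suggestion rather than a confirmed diagnosis: the training generators rescale pixels by 1/255, but the uploaded image goes into predict with raw 0-255 values, which tends to push the sigmoid output to one extreme. A sketch of the prediction loop with matching preprocessing:

for fn in uploaded.keys():
    path = '/content/' + fn
    img = image.load_img(path, target_size=(300, 300))
    x = image.img_to_array(img) / 255.0   # same rescaling as ImageDataGenerator(rescale=1/255)
    x = np.expand_dims(x, axis=0)
    prob = model.predict(x)               # sigmoid output in [0, 1]
    print(fn, prob[0][0], 'class 1' if prob[0][0] > 0.5 else 'class 0')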

ValueError: Error when checking target: expected avg_pool to have 4 dimensions, but got array with shape (100, 2)

I am trying to run this code (binary classification) but I am still stuck with this error: ValueError: Error when checking target: expected avg_pool to have 4 dimensions, but got array with shape (100, 2)
NUM_CLASSES = 2
CHANNELS = 3
IMAGE_RESIZE = 224
RESNET50_POOLING_AVERAGE = 'avg'
DENSE_LAYER_ACTIVATION = 'softmax'
OBJECTIVE_FUNCTION = 'binary_crossentropy'
NUM_EPOCHS = 10
EARLY_STOP_PATIENCE = 3
STEPS_PER_EPOCH_VALIDATION = 10
BATCH_SIZE_TRAINING = 100
BATCH_SIZE_VALIDATION = 100
resnet_weights_path = '../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
train_data_dir = "C:\\Users\\Desktop\\RESNET"
model = ResNet50(include_top=True, weights='imagenet')
x = model.get_layer('avg_pool').output
predictions = Dense(1, activation='sigmoid')(x)
model = Model(input = model.input, output = predictions)
print(model.summary())
model.layers.pop()
model = Model(input=model.input,output=model.layers[-1].output)
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.01, momentum=0.9), metrics= ['binary_accuracy'])
data_dir = "C:\\Users\\Desktop\\RESNET"
batch_size = 32
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
image_size = IMAGE_RESIZE
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
def append_ext(fn):
    return fn + ".jpg"
dir_path = os.path.dirname(os.path.realpath(__file__))
train_dir_path = dir_path + '\data'
onlyfiles = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]
NUM_CLASSES = 2
data_labels = [0, 1]
t = []
maxi = 25145
LieOffset = 15799
i = 0
while i < maxi:  # t = tuple
    if i <= LieOffset:
        t.append(label['Lie'])
    else:
        t.append(label['Truth'])
    i = i + 1
train_datagenerator = ImageDataGenerator(rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
validation_split=0.2)
train_generator = train_datagenerator.flow_from_directory(
train_data_dir,
target_size=(image_size, image_size),
batch_size=BATCH_SIZE_TRAINING,
class_mode='categorical', shuffle=False, subset='training') # set as training data
validation_generator = train_datagenerator.flow_from_directory(
train_data_dir, # same directory as training data
target_size=(image_size, image_size),
batch_size=BATCH_SIZE_TRAINING,
class_mode='categorical', shuffle=False, subset='validation')
(BATCH_SIZE_TRAINING, len(train_generator), BATCH_SIZE_VALIDATION, len(validation_generator))
from sklearn.grid_search import ParameterGrid
param_grid = {'epochs': [5, 10, 15], 'steps_per_epoch' : [10, 20, 50]}
grid = ParameterGrid(param_grid)
# Accumulate history of all permutations (may be for viewing trend) and keep watching for lowest val_loss as final model
for params in grid:
    fit_history = model.fit_generator(
        train_generator,
        steps_per_epoch=STEPS_PER_EPOCH_TRAINING,
        epochs=NUM_EPOCHS,
        validation_data=validation_generator,
        validation_steps=STEPS_PER_EPOCH_VALIDATION,
        callbacks=[cb_checkpointer, cb_early_stopper]
    )
model.load_weights("../working/best.hdf5")
Remove these lines and it'll work:
model.layers.pop()
model = Model(input=model.input, output=model.layers[-1].output)
The former removes the last (Dense) layer, and the latter creates a model without the last (Flatten, as the Dense is already popped) layer.
It doesn't make sense, as your target data is (100, 2). Why did you put them there in the first place?
Also, I think this line
predictions = Dense(1, activation='sigmoid')(x)
will error, as your target data has 2 channels. If it does error, then change it to
predictions = Dense(2, activation='sigmoid')(x)
Update
The output of avg_pool has 4 dimensions, (batch_size, height, width, channel). You need to apply Flatten first or use GlobalAveragePooling2D instead of AveragePooling2D.
Like
x = model.get_layer('avg_pool').output
x = keras.layers.Flatten()(x)
predictions = Dense(1, activation='sigmoid')(x)
Or
model = ResNet50(include_top=False, pooling='avg', weights='imagenet') # `pooling='avg'` makes the `ResNet50` include a `GlobalAveragePoiling` layer and `include_top=False` means that you don't include the imagenet's output layer
x = model.output # as I use `include_top=False`, you don't need to care the layer name, just use the model's output right away
predictions = Dense(1, activation='sigmoid')(x)
Also just as #bit01 said, change class_mode='categorical' to class_mode='binary'.
Change class_mode='categorical' to class_mode='binary' in both train_generator and validation_generator.
Additionally, delete the following lines, as you have already created the model:
model.layers.pop()
model = Model(input=model.input,output=model.layers[-1].output)
So, your model would be like:
model = ResNet50(include_top=True, weights='imagenet')
x = model.get_layer('avg_pool').output
x = Flatten()(x)
predictions = Dense(1, activation='sigmoid')(x)
model = Model(input = model.input, output = predictions)
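To make the 'binary' change concrete, here is a minimal sketch of the two generators from the question with class_mode='binary'; this yields a single 0/1 label per image, which matches a Dense(1, activation='sigmoid') output and the binary_crossentropy loss:

train_generator = train_datagenerator.flow_from_directory(
    train_data_dir,
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_TRAINING,
    class_mode='binary', shuffle=False, subset='training')
validation_generator = train_datagenerator.flow_from_directory(
    train_data_dir,
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_VALIDATION,
    class_mode='binary', shuffle=False, subset='validation')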

Keras model.predict always 0

I am using Keras applications for transfer learning with ResNet50 and InceptionV3, but when predicting I always get [[ 0.]].
The code below is for a binary classification problem. I have also tried VGG19 and VGG16 and they work fine; it's just ResNet and Inception. The dataset is a 50/50 split, and I am only changing the model = applications.resnet50.ResNet50 line of code for each model.
Below is the code:
from keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='val_loss', patience=2)
img_width, img_height = 256, 256
train_data_dir = xxx
validation_data_dir = xxx
nb_train_samples = 14000
nb_validation_samples = 6000
batch_size = 16
epochs = 50
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
model = applications.resnet50.ResNet50(weights = "imagenet", include_top=False, input_shape = (img_width, img_height, 3))
#Freeze the layers which you don't want to train. Here I am freezing the first 5 layers.
for layer in model.layers[:5]:
    layer.trainable = False
#Adding custom Layers
x = model.output
x = Flatten()(x)
x = Dense(1024, activation="relu")(x)
x = Dropout(0.5)(x)
#x = Dense(1024, activation="relu")(x)
predictions = Dense(1, activation="sigmoid")(x)
# creating the final model
model_final = Model(input = model.input, output = predictions)
# compile the model
model_final.compile(loss = "binary_crossentropy", optimizer = optimizers.SGD(lr=0.0001, momentum=0.9), metrics=["accuracy"])
# Initiate the train and test generators with data Augumentation
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
# Save the model according to the conditions
#checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
#early = EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=1, mode='auto')
model_final.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size,
callbacks=[early_stopping])
from keras.models import load_model
import numpy as np
from keras.preprocessing.image import img_to_array, load_img
#test_model = load_model('vgg16_1.h5')
img = load_img('testn7.jpg',False,target_size=(img_width,img_height))
x = img_to_array(img)
x = np.expand_dims(x, axis=0)
#preds = model_final.predict_classes(x)
prob = model_final.predict(x, verbose=0)
#print(preds)
print(prob)
Note that model_final.evaluate_generator(validation_generator, nb_validation_samples) gives the expected accuracy of around 80%; it is just predict that always returns 0.
I just find it strange that VGG19 and VGG16 work fine but not ResNet50 and Inception. Do these models require something else to work?
Any insight would be great.
Thanks in advance.
I was running into a similar problem. You are scaling all the RGB values from 0-255 down to 0-1 during training.
The same should be done at prediction time.
Try:
x = img_to_array(img)
x = x / 255
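Putting that together with the prediction snippet from the question, a minimal sketch of predicting on testn7.jpg with the same 1/255 rescaling that the training generators apply:

from keras.preprocessing.image import img_to_array, load_img
import numpy as np

img = load_img('testn7.jpg', target_size=(img_width, img_height))
x = img_to_array(img) / 255.0              # match ImageDataGenerator(rescale=1. / 255)
x = np.expand_dims(x, axis=0)              # shape (1, img_width, img_height, 3)
prob = model_final.predict(x, verbose=0)   # sigmoid probability of the positive class
print(prob)                                # a value in (0, 1) instead of always [[ 0.]]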
