Get indices of incorrect predictions keras - python

The CNN used here has already been shown in Image prediction using Keras, but now I have a different goal. I am using the following CNN from the Keras tutorial "Building powerful image classification models using very little data" (Francois Chollet). I would like to get the false predictions as an additional output, to see whether the images are too difficult to classify or whether the CNN has problems - is there any easy way to include that in the code?
from keras.preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
datagen = ImageDataGenerator(
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
img = load_img('data/train/cats/cat.0.jpg')  # this is a PIL image
x = img_to_array(img)  # this is a Numpy array with shape (3, 150, 150)
x = x.reshape((1,) + x.shape)  # this is a Numpy array with shape (1, 3, 150, 150)
i = 0
for batch in datagen.flow(x, batch_size=1,
                          save_to_dir='preview', save_prefix='cat', save_format='jpeg'):
    i += 1
    if i > 20:
        break  # otherwise the generator would loop indefinitely
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(3, 150, 150)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
batch_size = 16
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train', # this is the target directory
target_size=(150, 150), # all images will be resized to 150x150
batch_size=batch_size,
class_mode='binary') # since we use binary_crossentropy loss, we need binary labels
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=batch_size,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000 // batch_size,
epochs=50,
validation_data=validation_generator,
validation_steps=800 // batch_size)
model.save_weights('first_try.h5')
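A minimal sketch of one way to get the false predictions asked about above, assuming the model has been trained as in the code: run the validation directory through the model with shuffle=False, threshold the sigmoid outputs, and compare them with the generator's true labels; generator.filenames then tells you which images were misclassified. The 0.5 threshold and the reuse of test_datagen are assumptions, not part of the original post.
import numpy as np

# Sketch: collect misclassified validation images.
# shuffle=False keeps predictions aligned with generator.classes / .filenames.
check_generator = test_datagen.flow_from_directory(
    'data/validation',
    target_size=(150, 150),
    batch_size=batch_size,
    class_mode='binary',
    shuffle=False)

steps = int(np.ceil(check_generator.samples / float(batch_size)))
probs = model.predict_generator(check_generator, steps=steps)
pred_labels = (probs.ravel() > 0.5).astype(int)   # sigmoid output -> 0 / 1
true_labels = check_generator.classes

wrong_indices = np.where(pred_labels != true_labels)[0]
for i in wrong_indices:
    print(check_generator.filenames[i], 'predicted as', pred_labels[i])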

Related

Keras multiclass images classification and prediction

I am doing image classification with ImageDataGenerator. My data has this structure:
Train
    101
    102
    103
    104
Test
    101
    102
    103
    104
So, if I understood correctly, the ImageDataGenerator automatically does what is needed for labeling.
I train the model, and I get some kind of accuracy. Now I want to do the prediction.
- model.predict
- model.predict_proba
- model.predict_classes
All these give me the same value. Can you quickly explain or point me to a reference (I cannot find anything concerning my problem) on how I should proceed, or maybe I did something terrible in the code? The biggest problem is that I don't understand how the output should differ for 4 different classes. Since predict_classes gives me the output [[1]], shouldn't it give me the predicted class?
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, MaxPool2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.regularizers import l1, l2, l1_l2
model = Sequential()
model.add(Conv2D(60, (3, 3), input_shape=(480, 640,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(60, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(100, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(100, activation='relu', activity_regularizer=l1(0.001)))
#model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('softmax'))
model.compile(loss='binary_crossentropy',
optimizer='Adam',
metrics=['accuracy'])
batch_size = 32
# augmentation configuration for train
train_datagen = ImageDataGenerator(
rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=False,
vertical_flip=True,
fill_mode = 'nearest')
# augmentation configuration for testing, only rescale
test_datagen = ImageDataGenerator(rescale=1./255)
# reading pictures and generating batches of augmented image data
train_generator = train_datagen.flow_from_directory(
'/media/data/working_dir/categories/readytotest/train',
target_size=(480, 640),
batch_size=batch_size,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'/media/data/working_dir/categories/readytotest/test',
target_size=(480, 640),
batch_size=batch_size,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=800 // batch_size,
epochs=15,
validation_data=validation_generator,
validation_steps=800 // batch_size)
Your model and the generators are set up for binary classification, not multi-class. First you need to fix your model's last layer so it outputs one value per class. Second you need to fix the generators for multi-class use.
(...)
model.add(Dense(CLS_SZ))
model.add(Activation('softmax'))
(...)
# I am not sure about this; read the docs for the generator you used.
train_generator = train_datagen.flow_from_directory(
'/media/data/working_dir/categories/readytotest/train',
target_size=(480, 640),
batch_size=batch_size,
class_mode=None)
validation_generator = test_datagen.flow_from_directory(
'/media/data/working_dir/categories/readytotest/test',
target_size=(480, 640),
batch_size=batch_size,
class_mode=None)
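For reference, a common multi-class setup with flow_from_directory uses class_mode='categorical' and a categorical_crossentropy loss rather than class_mode=None; a minimal sketch, assuming the four class folders shown in the question (this is an assumption, not part of the original answer):
NUM_CLASSES = 4  # assumption: the folders 101-104 shown in the question

# replace the final Dense(1)/softmax pair with one output node per class
model.add(Dense(NUM_CLASSES))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

train_generator = train_datagen.flow_from_directory(
    '/media/data/working_dir/categories/readytotest/train',
    target_size=(480, 640),
    batch_size=batch_size,
    class_mode='categorical')  # yields one-hot labels matching the softmax output
# np.argmax over model.predict(...) then gives the predicted class index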

Keras Predictions in Python

I have this code for my CNN:
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
# dimensions of our images.
img_width, img_height = 64, 64
train_data_dir = "path_trainning"
validation_data_dir = "path_validation"
nb_train_samples = 2000
nb_validation_samples = 800
epochs = 10
batch_size = 16
if K.image_data_format() == 'channels_first':
input_shape = (3, img_width, img_height)
else:
input_shape = (img_width, img_height, 3)
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size)
model.save('my_cnn.h5')
And this is the code for my predictions:
import os
import numpy as np
from keras.preprocessing import image

for file in os.listdir(targets_path):
    filef = '\\' + file
    test_image = image.load_img(targets_path + filef, target_size=(64, 64))
    test_image = image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = model.predict(test_image)
    print("\nOriginal: " + file)
    print("Prediction: " + str(result[0][0]))
    if result[0][0] == 1:
        prediction = 'dog'
    else:
        prediction = 'cat'
    print(prediction)
My question is:
With this code as the "Prediction" part, I am realising that unless the CNN outputs exactly 1, the image is never classified as a dog. I am getting results like 0.99999 labelled as cat, even though that value is much closer to being a dog.
I think I am not understanding it properly.
Could someone explain it to me, please?
That is due to the output layer being a node with sigmoid activation, which returns values between 0 and 1. Therefore, the result will never be 1 (or 0), so that code will always return 'cat'.
This might be the issue with your CNN.
You are using a ReLU activation in the hidden layers, which has an output range from 0 to infinity. When those values flow through the final sigmoid output activation, a large input like 25 produces an output close to 1, and a very small input produces an output close to 0.
You should use a softmax function at the output layer if you are using ReLU in the hidden layers. Softmax converts the logits to class probabilities.
Also, with softmax you would use categorical classes rather than binary, so you would have 2 classes and hence 2 output nodes.
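A minimal sketch of the thresholding fix implied by the first answer, applied to the prediction loop above (the 0.5 cut-off and the cat=0 / dog=1 mapping are assumptions that depend on flow_from_directory's class_indices):
result = model.predict(test_image)
prob = result[0][0]                         # sigmoid output in (0, 1)
# check train_generator.class_indices to confirm which class maps to 1
prediction = 'dog' if prob > 0.5 else 'cat'
print(prediction, prob)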

Python Keras - How to input custom image?

from keras import *
import os
import numpy as np
from keras.models import Sequential
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras import optimizers
# Collecting data:
img_width, img_height = 150, 150
training_data_dir = "train"
testing_data_dir = "test"
batch_size = 16
# prepare data augmentation configuration
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
training_data_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
testing_data_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary')
# Building model:
model = Sequential()
model.add(Convolution2D(32, (3, 3), input_shape=(img_width, img_height,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss="binary_crossentropy",
optimizer="rmsprop",
metrics=["accuracy"])
# Training model:
nb_epoch = 1
nb_train_samples = 2048
nb_validation_samples = 832
model.fit_generator(
train_generator,
samples_per_epoch=nb_train_samples,
epochs=nb_epoch,
validation_data=validation_generator,
validation_steps=nb_validation_samples,
steps_per_epoch=64)
My code here creates a neural network for image classification based on the pictures it is trained on. I have searched all over the internet, but one thing I can't understand is how to input my own image file to test against the network and have it print out the result. For example, if the net were for classifying dogs and cats, I'm not sure how to code the part where I input a jpg/png file of a dog or a cat and the program outputs which class it is. Help please?
You call the model's predict method. https://keras.io/models/model/#predict
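A minimal sketch of what that looks like for the model above ('my_dog.jpg' is a hypothetical file name; whether class 1 means 'dog' depends on train_generator.class_indices):
import numpy as np
from keras.preprocessing.image import load_img, img_to_array

img = load_img('my_dog.jpg', target_size=(img_width, img_height))
x = img_to_array(img) / 255.        # same rescaling as the training generator
x = np.expand_dims(x, axis=0)       # the model expects a batch: (1, 150, 150, 3)

prob = model.predict(x)[0][0]       # sigmoid output in (0, 1)
print('dog' if prob > 0.5 else 'cat', prob)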

Python Keras Image Recognition using Tensorflow model.predict returns [[0.]]

I've been following the tutorial here to process images of cats, and see if a specific picture contains a cat. The data set I use is here. Is there something missing in the way I read in an image for testing? In my results from model.predict(filePath), I always get the value '[[0.]]' when reading an image containing a cat. The train and validation sets seem to work correctly. I am only having issues reading in an image. (Source code is copied from here)
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
import numpy as np
from keras.preprocessing import image
# dimensions of our images.
img_width, img_height = 150, 150
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 2000
nb_validation_samples = 800
epochs = 50
batch_size = 16
if K.image_data_format() == 'channels_first':
input_shape = (3, img_width, img_height)
else:
input_shape = (img_width, img_height, 3)
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size)
model.save_weights('first_try.h5')
def _LoadImage(filePath):
test_image = image.load_img(filePath, target_size = (150,150))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
return test_image
test_this = _LoadImage('test.jpg')
result = model.predict(test_this)
print(result)
Looks like "0" is the label of cat ("The training archive contains 25,000 images of dogs and cats. Train your algorithm on these files and predict the labels for test1.zip (1 = dog, 0 = cat)."), so your model predictions seem to be correct. Remember that the model is predicting (cat and dog) labels and not what class string you might be corresponding with the labels yourself. Try feeding an image of a dog and you should get "1" in return.

What is the meaning of rank 4 data in the flow method of ImageDataGenerator (Keras), which has argument x

In ImageDataGenerator of Keras, the flow method has an argument x which takes data with rank 4. Why?
I have a test image which is RGB with shape (150, 150, 3).
The model has been trained on images of shape (150, 150, 3), where 150, 150 are width and height and 3 is for RGB. But I am getting the error
ValueError: ('Input data in NumpyArrayIterator should have rank 4. You passed an array with shape', (3, 150, 150))
How can an image have a shape with rank 4, and how do I make the test input image rank 4?
For reference, my code is as follows:
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
# dimensions of our images.
img_width, img_height = 150, 150
train_data_dir = 'C:/Users/imageNetToyDataset/train'
validation_data_dir = 'C:/Users/imageNetToyDataset/validation'
epochs = 5
nb_train_samples = 2000
nb_validation_samples = 50
batch_size = 16
input_shape = (img_width, img_height, 3)
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size)
import numpy as np
import cv2
import csv
import os
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from scipy.misc import imresize
import scipy
def predict_labels(model):
    """writes test image labels and predictions to csv"""
    test_data_dir = "C:/Users/imageNetToyDataset/test"
    test_datagen = ImageDataGenerator(rescale=1./255)
    test_generator = test_datagen.flow_from_directory(
        test_data_dir,
        target_size=(img_width, img_height),
        batch_size=32,
        shuffle=False,
        class_mode="binary")
    with open("prediction.csv", "w") as f:
        p_writer = csv.writer(f, delimiter=',', lineterminator='\n')
        for _, _, imgs in os.walk(test_data_dir):
            print("number of images: {}".format(len(imgs)))
            for im in imgs:
                print("image:\n{}".format(im))
                pic_id = im.split(".")[0]
                imgPath = os.path.join(test_data_dir, im)
                print(imgPath)
                img = load_img(imgPath)
                img = imresize(img, size=(img_width, img_height))
                print("img shape = {}".format(img.shape))
                test_x = img_to_array(img).reshape(3, img_width, img_height)
                print("test_x shape = {}".format(test_x.shape))
                test_generator = test_datagen.flow(test_x,
                                                   batch_size=1,
                                                   shuffle=False)
                prediction = model.predict_generator(test_generator, 1, epochs)
                p_writer.writerow([pic_id, prediction])

prediction = predict_labels(model)
The fourth dimension is the number of samples in a batch.
Look at the data_format explanation at https://keras.io/preprocessing/image/
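A minimal sketch of the fix, assuming the default channels_last data format: keep the image as (height, width, channels) and add a leading batch axis instead of reshaping to (3, 150, 150):
import numpy as np

test_x = img_to_array(img)               # (150, 150, 3) with channels_last
test_x = np.expand_dims(test_x, axis=0)  # (1, 150, 150, 3) -> rank 4
test_generator = test_datagen.flow(test_x, batch_size=1, shuffle=False)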
