Image prediction using Keras - python

I am following this guide as a start to train a model on some cat and dog images:
https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html
This is the code:
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
# dimensions of our images.
img_width, img_height = 150, 150
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 2000
nb_validation_samples = 800
epochs = 1
batch_size = 16
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
# this is a generator that will read pictures found in
# subfolders of 'data/train', and indefinitely generate
# batches of augmented image data
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
# this is a similar generator, for validation data
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)
model.save_weights('first_try.h5')
with open('model.json', 'w') as f:
    f.write(model.to_json())
So I get two files: first_try.h5 and model.json.
Now I want to try to do a simple image prediction using a sample dog.jpg and a cat.jpg. This is what I tried:
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
from PIL import Image
import cv2, numpy as np
# load json and create model
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("first_try.h5")
print("Loaded model from disk")
#attempt 1
img = cv2.resize(cv2.imread('cat.jpg'), (150, 150))
mean_pixel = [103.939, 116.779, 123.68]
img = img.astype(np.float32, copy=False)
for c in range(3):
    img[:, :, c] = img[:, :, c] - mean_pixel[c]
img = img.transpose((2,0,1))
img = np.expand_dims(img, axis=0)
out1 = loaded_model.predict(img)
print(np.argmax(out1))
#attempt 2
loaded_model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
img = Image.open('dog.jpg')
img = img.convert('RGB')
x = np.asarray(img, dtype='float32')
x = x.transpose(2, 0, 1)
x = np.expand_dims(x, axis=0)
out1 = loaded_model.predict(x)
print(np.argmax(out1))
I get this output:
Using Theano backend.
Loaded model from disk
0
0
Can someone guide me? How do I call model.predict correctly?

I would suggest you use (https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model):
from keras.models import load_model
model.save('model.hdf5')
model = load_model('model.hdf5')
Anyway, what makes you think this is not the correct output? You take the argmax over a single value, so it is naturally index 0. If you want the final output of the last layer, remove the argmax and you get a probability.
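For completeness, here is a minimal prediction sketch, assuming the model was saved with model.save('model.hdf5') as above and that the test image is preprocessed the same way as during training (resized to 150x150 and rescaled by 1/255, with no mean subtraction); cat.jpg is the sample file from the question:
from keras.models import load_model
from keras.preprocessing import image
import numpy as np

# Load the full model (architecture + weights) saved with model.save().
model = load_model('model.hdf5')

# Preprocess exactly like the training generator: same size, same 1/255 rescaling.
img = image.load_img('cat.jpg', target_size=(150, 150))
x = image.img_to_array(img) / 255.0
x = np.expand_dims(x, axis=0)            # predict expects a batch dimension

prob = model.predict(x)[0][0]            # single sigmoid output in [0, 1]
print('class 1' if prob > 0.5 else 'class 0')   # see train_generator.class_indices for the mapping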

Related

ValueError: Input 0 of layer sequential is incompatible with the layer: expected axis -1 of input shape to have value 1 but received input with shape [None, None, None, 3]

from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten, Activation
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.models import Sequential
from tensorflow.keras import backend as K
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import pandas as pd
import cv2
import os
img_width, img_height = 28,28
input_depth = 1
train_data_dir = r'C:\Users\Shreya\Desktop\Project\basedata\training'
validation_data_dir = r'C:\Users\Shreya\Desktop\Project\basedata\validation'
testing1_data_dir = r'C:\Users\Shreya\Desktop\Project\basedata\testing\tomato'
testing2_data_dir = r'C:\Users\Shreya\Desktop\Project\basedata\testing\not tomato'
epochs = 2
batch_size = 5
train_datagen = ImageDataGenerator(rescale=1/255)
validation_datagen = ImageDataGenerator(rescale=1/255)
test_datagen = ImageDataGenerator(rescale=1/255)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    color_mode='grayscale',
    target_size=(img_width,img_height),
    batch_size=batch_size,
    class_mode='categorical')
validation_generator = validation_datagen.flow_from_directory(
    validation_data_dir,
    color_mode='grayscale',
    target_size=(img_width,img_height),
    batch_size=batch_size,
    class_mode='categorical')
testing1_generator = test_datagen.flow_from_directory(
    testing1_data_dir,
    color_mode='grayscale',
    target_size=(img_width,img_height),
    batch_size=batch_size,
    class_mode='categorical')
testing2_generator = test_datagen.flow_from_directory(
    testing2_data_dir,
    color_mode='grayscale',
    target_size=(img_width,img_height),
    batch_size=batch_size,
    class_mode='categorical')
NUMB_FILTER_L1 = 20
NUMB_FILTER_L2 = 20
NUMB_FILTER_L3 = 20
NUMB_NODE_FC_LAYER = 10
if K.image_data_format() == 'channels_first':
    input_shape_val = (input_depth, img_width, img_height)
else:
    input_shape_val = (img_width, img_height, input_depth)
model = Sequential()
model.add(Conv2D(NUMB_FILTER_L1, (5, 5),
                 input_shape=input_shape_val,
                 padding='same', name='input_tensor'))
model.add(Activation('relu'))
model.add(MaxPool2D((2, 2)))
model.add(Conv2D(NUMB_FILTER_L2, (5, 5), padding='same'))
model.add(Activation('relu'))
model.add(MaxPool2D((2, 2)))
model.add(Conv2D(NUMB_FILTER_L3, (5, 5), padding='same'))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(NUMB_NODE_FC_LAYER, activation='relu'))
model.add(Dense(train_generator.num_classes,
                activation='softmax', name='output_tensor'))
model.compile(loss='categorical_crossentropy',
              optimizer='sgd', metrics=['accuracy'])
model.summary()
model.fit(
    train_generator,
    steps_per_epoch=np.floor(train_generator.n/batch_size),
    epochs=23,
    validation_data=validation_generator,
    validation_steps=np.floor(validation_generator.n / batch_size))
train_generator.class_indices
for i in os.listdir(testing1_data_dir):
    img = image.load_img(testing1_data_dir + '//' + i, target_size=(200,200))
    plt.imshow(img)
    plt.show()
    X = image.img_to_array(img)
    X = np.expand_dims(X,axis=0)
    images = np.vstack([X])
    val = model.predict(images)
    if val==1:
        print("Tomato")
    else:
        print("Not Tomato")
When the last part was run, the following error was displayed:
ValueError: Input 0 of layer sequential is incompatible with the layer: expected axis -1 of input shape to have value 1 but received input with shape [None, None, None, 3]
The issue is with this line:
model.add(Conv2D(NUMB_FILTER_L1, (5, 5),
                 input_shape=input_shape_val,
                 padding='same', name='input_tensor'))
If your dataset consists of RGB images, input_shape should have 3 channels, something like
input_shape = (img_width, img_height, 3)
If the images are grayscale, use a channel value of 1.
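If the grayscale 28x28 model is kept, the test images also have to be loaded with one channel and at the training resolution. A minimal sketch of the loop body under that assumption, reusing testing1_data_dir, i and train_generator from the question:
# Load the test image so that it matches input_shape_val = (28, 28, 1).
img = image.load_img(testing1_data_dir + '//' + i,
                     color_mode='grayscale',              # one channel, matching input_depth = 1
                     target_size=(img_width, img_height)) # 28x28, as in the training generators
X = image.img_to_array(img)            # shape (28, 28, 1)
X = np.expand_dims(X, axis=0) / 255.0  # shape (1, 28, 28, 1), rescaled like training
val = model.predict(X)
print(train_generator.class_indices)   # which index corresponds to which folder
print(np.argmax(val[0]))               # predicted class index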

Python Keras - How to input custom image?

from keras import *
import os
import numpy as np
from keras.models import Sequential
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras import optimizers
# Collecting data:
img_width, img_height = 150, 150
training_data_dir = "train"
testing_data_dir = "test"
batch_size = 16
# prepare data augmentation configuration
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
    training_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    testing_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='binary')
# Building model:
model = Sequential()
model.add(Convolution2D(32, (3, 3), input_shape=(img_width, img_height,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss="binary_crossentropy",
              optimizer="rmsprop",
              metrics=["accuracy"])
# Training model:
nb_epoch = 1
nb_train_samples = 2048
nb_validation_samples = 832
model.fit_generator(
    train_generator,
    samples_per_epoch=nb_train_samples,
    epochs=nb_epoch,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples,
    steps_per_epoch=64)
My code here creates a neural network for image classification based on the pictures it is trained on. I have searched all over the internet, but the one thing I can't understand is how to feed my own image file to the trained network and have it print the predicted class. For example, if the network classifies dogs and cats, how do I write the part where I pass in a jpg/png file of a dog or a cat and the program outputs which class it is? Help please?
You call the model's predict method. https://keras.io/models/model/#predict
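A minimal sketch of what that looks like for the binary model above, assuming the image is preprocessed the same way as the generators (resize plus 1/255 rescaling); the file name my_dog.jpg is only a placeholder:
import numpy as np
from keras.preprocessing import image

img = image.load_img('my_dog.jpg', target_size=(img_width, img_height))
x = image.img_to_array(img) / 255.0    # same rescaling as the generators
x = np.expand_dims(x, axis=0)          # predict expects a batch
prob = model.predict(x)[0][0]          # sigmoid output in [0, 1]
print(train_generator.class_indices)   # which folder maps to 0 and which to 1
print('class 1' if prob > 0.5 else 'class 0')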

Python Keras Image Recognition using Tensorflow model.predict returns [[0.]]

I've been following the tutorial here to process images of cats, and see if a specific picture contains a cat. The data set I use is here. Is there something missing in the way I read in an image for testing? In my results from model.predict(filePath), I always get the value '[[0.]]' when reading an image containing a cat. The train and validation sets seem to work correctly. I am only having issues reading in an image. (Source code is copied from here)
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
import numpy as np
from keras.preprocessing import image
# dimensions of our images.
img_width, img_height = 150, 150
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 2000
nb_validation_samples = 800
epochs = 50
batch_size = 16
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)
model.save_weights('first_try.h5')
def _LoadImage(filePath):
    test_image = image.load_img(filePath, target_size = (150,150))
    test_image = image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    return test_image
test_this = _LoadImage('test.jpg')
result = model.predict(test_this)
print(result)
Looks like "0" is the label for cat ("The training archive contains 25,000 images of dogs and cats. Train your algorithm on these files and predict the labels for test1.zip (1 = dog, 0 = cat)."), so your model predictions seem to be correct. Remember that the model predicts the numeric labels (cat and dog), not whatever class string you may be associating with those labels yourself. Try feeding it an image of a dog and you should get "1" in return.
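If you want to double-check the mapping and turn the raw output into a readable label, a small sketch using the generator and test image from the question:
print(train_generator.class_indices)   # e.g. {'cats': 0, 'dogs': 1}
prob = model.predict(test_this)[0][0]  # single sigmoid output
print('dog' if prob > 0.5 else 'cat')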

Keras CNN dimension problems

I am trying to build a CNN using Keras for an image segmentation task, based on this article. Because my dataset is small, I wanted to use Keras ImageDataGenerator and feed it to fit_generator(). So, I followed the example on the Keras website. But, since zipping the image and mask generators didn't work, I followed this answer and created my own generator.
My input data is of size (701,256,1) and my problem is binary (foreground, background). For each image I have a label of the same shape.
Now, I am facing a dimensionality problem. This was also mentioned in the answer, but I am unsure of how to solve it.
The error:
ValueError: Error when checking target: expected dense_3 to have 2 dimensions, but got array with shape (2, 704, 256, 1)
I am pasting the entire code I have here:
import numpy
import pygpu
import theano
import keras
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, Reshape
from keras.layers import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras import backend as K
def superGenerator(image_gen, label_gen):
    while True:
        x = image_gen.next()
        y = label_gen.next()
        yield x[0], y[0]
img_height = 704
img_width = 256
train_data_dir = 'Dataset/Train/Images'
train_label_dir = 'Dataset/Train/Labels'
validation_data_dir = 'Dataset/Validation/Images'
validation_label_dir = 'Dataset/Validation/Labels'
n_train_samples = 1000
n_validation_samples = 500
epochs = 50
batch_size = 2
input_shape = (img_height, img_width,1)
target_shape = (img_height, img_width)
model = Sequential()
model.add(Conv2D(80,(28,28), input_shape=input_shape))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
model.add(Conv2D(96,(18,18)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
model.add(Conv2D(128,(13,13)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(160,(8,8)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(2, activation='softmax'))
model.summary()
model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy'])
data_gen_args = dict(
    rescale=1./255,
    horizontal_flip=True,
    vertical_flip=True
)
train_datagen = ImageDataGenerator(**data_gen_args)
train_label_datagen = ImageDataGenerator(**data_gen_args)
test_datagen = ImageDataGenerator(**data_gen_args)
test_label_datagen = ImageDataGenerator(**data_gen_args)
seed = 1
train_image_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=target_shape,
    color_mode='grayscale',
    batch_size=batch_size,
    class_mode='binary',
    seed=seed)
train_label_generator = train_label_datagen.flow_from_directory(
    train_label_dir,
    target_size=target_shape,
    color_mode='grayscale',
    batch_size=batch_size,
    class_mode='binary',
    seed=seed)
validation_image_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=target_shape,
    color_mode='grayscale',
    batch_size=batch_size,
    class_mode='binary',
    seed=seed)
validation_label_generator = test_label_datagen.flow_from_directory(
    validation_label_dir,
    target_size=target_shape,
    color_mode='grayscale',
    batch_size=batch_size,
    class_mode='binary',
    seed=seed)
train_generator = superGenerator(train_image_generator, train_label_generator)
test_generator = superGenerator(validation_image_generator, validation_label_generator)
model.fit_generator(
    train_generator,
    steps_per_epoch=n_train_samples // batch_size,
    epochs=50,
    validation_data=test_generator,
    validation_steps=n_validation_samples // batch_size)
model.save_weights('first_try.h5')
I am new to Keras (and CNNs), so any help would be very much appreciated.
OK. I did some rubber-duck debugging and read a few more articles. Of course the dimensionality was the problem, and this simple answer did it for me.
My labels have the same shape as the input image, so the output of the model should have that shape as well. I used Conv2DTranspose to solve this issue.
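For reference, a minimal sketch of what such a per-pixel output head can look like; this is not the exact architecture from the question, and the 704x256 input assumes the images are padded to a multiple of 4:
from keras.models import Sequential
from keras.layers import Conv2D, Conv2DTranspose, MaxPooling2D

seg_model = Sequential()
seg_model.add(Conv2D(32, (3, 3), padding='same', activation='relu',
                     input_shape=(704, 256, 1)))
seg_model.add(MaxPooling2D(pool_size=(2, 2)))                        # 352 x 128
seg_model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
seg_model.add(MaxPooling2D(pool_size=(2, 2)))                        # 176 x 64
seg_model.add(Conv2DTranspose(64, (3, 3), strides=(2, 2),
                              padding='same', activation='relu'))    # back to 352 x 128
seg_model.add(Conv2DTranspose(32, (3, 3), strides=(2, 2),
                              padding='same', activation='relu'))    # back to 704 x 256
seg_model.add(Conv2D(1, (1, 1), activation='sigmoid'))               # one probability per pixel
seg_model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy'])
seg_model.summary()
The target for each image then keeps its (704, 256, 1) shape instead of being collapsed to a single class label.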

What is the meaning of rank 4 data in the flow method of ImageDataGenerator (Keras), which has argument x

In Keras's ImageDataGenerator, the flow method has an argument x which takes data of rank 4. Why?
I have a test RGB image of shape (150, 150, 3).
The model has been trained on images of shape (150, 150, 3), where 150, 150 are width and height and 3 is for RGB. But I am getting this error:
ValueError: ('Input data in NumpyArrayIterator should have rank 4. You passed an array with shape', (3, 150, 150))
How can an image have a shape with rank 4, and how do I make the test input image rank 4?
For reference, my code is as follows:
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
# dimensions of our images.
img_width, img_height = 150, 150
train_data_dir = 'C:/Users/imageNetToyDataset/train'
validation_data_dir = 'C:/Users/imageNetToyDataset/validation'
epochs = 5
nb_train_samples = 2000
nb_validation_samples = 50
batch_size = 16
input_shape = (img_width, img_height, 3)
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)
import numpy as np
import cv2
import csv
import os
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from scipy.misc import imresize
import scipy
def predict_labels(model):
    """writes test image labels and predictions to csv"""
    test_data_dir = "C:/Users/imageNetToyDataset/test"
    test_datagen = ImageDataGenerator(rescale=1./255)
    test_generator = test_datagen.flow_from_directory(
        test_data_dir,
        target_size=(img_width, img_height),
        batch_size=32,
        shuffle=False,
        class_mode="binary")
    with open("prediction.csv", "w") as f:
        p_writer = csv.writer(f, delimiter=',', lineterminator='\n')
        for _, _, imgs in os.walk(test_data_dir):
            print("number of images: {}".format(len(imgs)))
            for im in imgs:
                print("image:\n{}".format(im))
                pic_id = im.split(".")[0]
                imgPath = os.path.join(test_data_dir, im)
                print(imgPath)
                img = load_img(imgPath)
                img = imresize(img, size=(img_width, img_height))
                print("img shape = {}".format(img.shape))
                test_x = img_to_array(img).reshape(3, img_width, img_height)
                print("test_x shape = {}".format(test_x.shape))
                test_generator = test_datagen.flow(test_x,
                                                   batch_size=1,
                                                   shuffle=False)
                prediction = model.predict_generator(test_generator, 1, epochs)
                p_writer.writerow([pic_id, prediction])
prediction=predict_labels(model)
The fourth dimension is the number of samples in a batch.
See the data_format explanation at https://keras.io/preprocessing/image/
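In other words, a single image has to become a batch of one before it is passed to flow() or predict. A minimal sketch assuming channels-last, reusing img, test_datagen and model from the question:
import numpy as np

test_x = img_to_array(img)               # shape (150, 150, 3), rank 3
test_x = np.expand_dims(test_x, axis=0)  # shape (1, 150, 150, 3), rank 4

test_generator = test_datagen.flow(test_x, batch_size=1, shuffle=False)
prediction = model.predict_generator(test_generator, steps=1)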
