How to prepare multiple RGB images as numpy array for CNN - python

I want to use the code below with my own input images instead of the MNIST images. However, I am having a hard time putting several color .jpg images into a numpy array similar to the X_train used in the code below. I have a folder called data with another folder called train that includes several images that I would like to use as my X_train. I can generate the labels for them and one-hot encode them. I just don't know how to make all my images go into a nice array like X_train. Help? I did look here, but I got a ValueError: setting an array element with a sequence when I just copied and pasted the summarized code.
import numpy as np
np.random.seed(123) # for reproducibility
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
# 4. Load pre-shuffled MNIST data into train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# 5. Preprocess input data
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28)
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# 6. Preprocess class labels
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)
# 7. Define model architecture
model = Sequential()
model.add(Convolution2D(32, 3, 3, activation='relu', input_shape=(1,28,28)))
model.add(Convolution2D(32, 3, 3, activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
# 8. Compile model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# 9. Fit model on training data
model.fit(X_train, Y_train,
          batch_size=32, nb_epoch=10, verbose=1)
# 10. Evaluate model on test data
score = model.evaluate(X_test, Y_test, verbose=0)

I am assuming that you are using the Theano backend and that your jpgs have 3 bands. In addition, your jpgs should have the same spatial size as the input shape you indicate in the first convolutional layer (28x28 pixels). In that case you can reshape all your jpgs with the following lines:
#create random data with the same layout as loaded jpgs: (height, width, bands)
no_of_jpgs = 10
jpgs = [np.random.randint(0, 255, (28, 28, 3)) for i in range(no_of_jpgs)]
jpgs = np.array(jpgs)               # shape: (no_of_jpgs, 28, 28, 3)
#move the bands to the front for Theano ("channels_first") ordering
jpgs = jpgs.transpose(0, 3, 1, 2)   # shape: (no_of_jpgs, 3, 28, 28)
Now you have an array of shape (n_samples, n_bands, x, y).
In addition you should change your input_shape so that it supports 3 bands:
model.add(Convolution2D(32, 3, 3, activation='relu', input_shape=(3, 28,28)))
If you have jpgs with a different shape or more bands, just change the input_shape values in the first convolutional layer.
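Since the question is about loading actual .jpg files from the data/train folder rather than random data, here is a minimal sketch of how that could look, assuming Pillow is installed (the folder path is taken from the question, and every image is resized to 28x28 to match the model):
import os
import numpy as np
from PIL import Image  # assumes Pillow is available

train_dir = 'data/train'  # folder described in the question
images = []
for fname in sorted(os.listdir(train_dir)):
    if fname.lower().endswith('.jpg'):
        img = Image.open(os.path.join(train_dir, fname)).convert('RGB')
        img = img.resize((28, 28))          # match the size the model expects
        images.append(np.asarray(img))      # (28, 28, 3) uint8 array
X_train = np.array(images, dtype='float32') / 255.0   # (n_samples, 28, 28, 3)
X_train = X_train.transpose(0, 3, 1, 2)                # (n_samples, 3, 28, 28) for Theano ordering
Resizing every image to the same shape before stacking is also what avoids the "setting an array element with a sequence" ValueError, which typically appears when images of different sizes are put into one array.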

Related

How to give file or image to model.predict as a parameter in a Keras model?

I've watched a tutorial about image recognition in Python and used the code written there for training a network. It compiles and trains fine, but how do I use it for prediction on new images? Maybe something like model.predict(y)?
Here is the code:
import numpy
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Dense, Flatten, Activation
from keras.layers import Dropout
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.utils import np_utils
from keras.optimizers import SGD
numpy.random.seed(42)
#Loading data
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
batch_size = 32
nb_classes = 10
#Number of epochs
epochNumber = 25
#Image size
img_rows, img_cols = 32, 32
#RGB
img_channels = 3
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
#To categories
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
#Creating a model
model = Sequential()
#Adding layers
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=(32, 32, 3), activation='relu'))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes, activation='softmax'))
#Optimization
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
#Training model
model.fit(X_train, Y_train,
          batch_size=batch_size,
          epochs=epochNumber,
          validation_split=0.1,
          shuffle=True,
          verbose=2)
scores = model.evaluate(X_test, Y_test, verbose=0)
print("Accuracy on test data: %.2f%%" % (scores[1]*100))
Then, what do I do to predict?
target = "C://Users//Target.png"
print(model.predict(target))
How to correctly use model.predict and how to convert result to user-friendly output?
Note: if you are using keras package instead of tf.keras, replace tf.keras with keras in all the following code snippets.
To load a single image, you can use tf.keras.preprocessing.image.load_img:
image = tf.keras.preprocessing.image.load_img(image_path, target_size=(img_rows, img_cols))
This would load the image into PIL format; therefore, we need to convert it to numpy array before feeding it to our model:
import numpy as np
input_arr = tf.keras.preprocessing.image.img_to_array(image)
input_arr = np.array([input_arr]) # Convert single image to a batch.
Now, you might make the mistake of rushing into using the predict method on input_arr. However, you should first apply the same preprocessing steps used in the training phase to the prediction input as well:
input_arr = input_arr.astype('float32') / 255. # This is VERY important
Now, it's ready to be given to the model for prediction:
predictions = model.predict(input_arr)
Bonus: Since your model is a classifier and it's using Softmax activation at the top, the predictions variable would contain the probabilities for each class. To find out the predicted class, we use argmax from Numpy to find the index of the class with the highest probability:
predicted_class = np.argmax(predictions, axis=-1)
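Putting these steps together for the model in the question, a complete prediction helper could look like the sketch below; the 32x32 target size is taken from the question, and the class-name list is the standard CIFAR-10 label order:
import numpy as np
import tensorflow as tf

class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']

def predict_image(model, image_path, img_rows=32, img_cols=32):
    # Load the image at the size the model was trained on.
    image = tf.keras.preprocessing.image.load_img(image_path, target_size=(img_rows, img_cols))
    input_arr = tf.keras.preprocessing.image.img_to_array(image)
    input_arr = np.array([input_arr]).astype('float32') / 255.  # batch of one, same scaling as training
    predictions = model.predict(input_arr)
    predicted_class = int(np.argmax(predictions, axis=-1)[0])
    return class_names[predicted_class], float(predictions[0][predicted_class])

label, probability = predict_image(model, "C://Users//Target.png")  # path from the question
print(label, probability)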
You can use cv2 to read in the image. You want to make sure that whatever processing you did on the input images during training you also do on the image you read in with cv2. Be careful: cv2 reads images in BGR format. If you trained your model on RGB images, you need to convert the cv2 image to RGB as shown in the code below. Then you want to make the image 32 x 32 x 3, so if it is not that size, use cv2 to resize it. I assume you rescaled your training images, so you need to rescale the cv2 image as well. The code is below:
import cv2
import numpy as np
img = cv2.imread(f_path)  # where f_path is the path to the image file
img = cv2.resize(img, (32, 32), interpolation=cv2.INTER_AREA)
# cv2 reads images in BGR order. If you trained your model on RGB images,
# uncomment the line below to convert the image to RGB.
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img / 255.0                     # same rescaling as in training
img = np.expand_dims(img, axis=0)     # add the batch dimension: (1, 32, 32, 3)
predictions = model.predict(img)
pre_class = predictions.argmax()      # this will give you an integer class index

Keras CNN Incompatible with Convolution2D

I am getting into Convolutional Neural Networks and want to create one for MNIST data. Whenever I add a convolutional layer to my CNN, I get an error:
Input 0 is incompatible with layer conv2d_4: expected ndim=4, found ndim=5
I have attempted to reshape the X_train data set but was not successful.
I tried to add a Flatten layer first, but that returns this error:
Input 0 is incompatible with layer conv2d_5: expected ndim=4, found ndim=2
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import Flatten, Dense, Dropout
img_width, img_height = 28, 28
mnist = keras.datasets.mnist
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = keras.utils.normalize(X_train, axis=1) #Normalizes from 0-1 (originally each pixel is valued 0-255)
X_test = keras.utils.normalize(X_test, axis=1) #Normalizes from 0-1 (originally each pixel is valued 0-255)
Y_train = keras.utils.to_categorical(Y_train) #Reshapes to allow ytrain to work with x train
Y_test = keras.utils.to_categorical(Y_test)
from sklearn import preprocessing
lb = preprocessing.LabelBinarizer()
Y_train = lb.fit_transform(Y_train)
Y_test = lb.fit_transform(Y_test)
#Model
model = Sequential()
model.add(Flatten())
model.add(Convolution2D(16, 5, 5, activation='relu', input_shape=(1,img_width, img_height, 1)))
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dropout(.2))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X_train, Y_train, epochs=3, verbose=2)
val_loss, val_acc = model.evaluate(X_test, Y_test) #Check to see if model fits test
print(val_loss, val_acc)
If I comment out the Convolutional layer, it works very well (accuracy>95%), but I am planning on making a more complex neural network that requires Convolution in the future and this is my starting point
Keras is looking for a tensor with 4 dimensions, but it is getting one with ndim (number of dimensions) equal to 2.
First, make sure the kernel size in the Conv2D layer is in parentheses:
model.add(Convolution2D(32, (3, 3), activation='relu', input_shape=(img_height, img_height, 1)))
Second, you need to reshape the X_train and X_test variables, as the Conv2D layer is expecting a 4D tensor input:
X_train = X_train.reshape(-1,28, 28, 1) #Reshape for CNN - should work!!
X_test = X_test.reshape(-1,28, 28, 1)
model.fit(X_train, Y_train, epochs=3, verbose=2)
For more information about Conv2D you can look at the Keras documentation.
Hope this helps.
There are two issues in your code.
1. You are encoding your labels twice, once using to_categorical and again using LabelBinarizer. The latter is not needed here, so just encode your labels once, using to_categorical.
2. Your input shape is incorrect; it should be (28, 28, 1).
Also, you should add a Flatten layer after the convolutional layers so the Dense layer works properly.
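As a minimal sketch of how those fixes could fit together (layer sizes roughly following the question; this assumes a Keras version where Conv2D takes the kernel size as a tuple):
import keras
from keras.models import Sequential
from keras.layers import Conv2D, Flatten, Dense, Dropout

(X_train, Y_train), (X_test, Y_test) = keras.datasets.mnist.load_data()
# Add the channel dimension Conv2D expects and scale pixels to 0-1.
X_train = X_train.reshape(-1, 28, 28, 1).astype('float32') / 255
X_test = X_test.reshape(-1, 28, 28, 1).astype('float32') / 255
# One-hot encode the labels exactly once.
Y_train = keras.utils.to_categorical(Y_train, 10)
Y_test = keras.utils.to_categorical(Y_test, 10)

model = Sequential()
model.add(Conv2D(16, (5, 5), activation='relu', input_shape=(28, 28, 1)))
model.add(Flatten())                       # flatten after the convolution, not before
model.add(Dense(128, activation='relu'))
model.add(Dropout(.2))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, Y_train, epochs=3, verbose=2)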

What do I have to modify to load the SVHN dataset in my code?

I work with the Keras MNIST dataset and now I want to use the Google Street View House Numbers (SVHN) dataset to train my program. I don't know what I have to modify to load the SVHN dataset (.mat file) in my code.
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras import backend as K
import tensorflow as tf
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D
img_rows, img_cols = 28, 28
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
image_index = 7777
print(y_train[image_index])
plt.imshow(x_train[image_index], cmap='Greys')
x_train.shape
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
input_shape = (28, 28, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print('Number of images in x_train', x_train.shape[0])
print('Number of images in x_test', x_test.shape[0])
model = Sequential()
model.add(Conv2D(28, kernel_size=(3,3), input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation=tf.nn.relu))
model.add(Dropout(0.2))
model.add(Dense(10,activation=tf.nn.softmax))
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x=x_train,y=y_train, epochs=2)
model.evaluate(x_test, y_test)
image_index = 9999
plt.imshow(x_test[image_index].reshape(28, 28),cmap='Greys')
pred = model.predict(x_test[image_index].reshape(1, img_rows, img_cols, 1))
print(pred.argmax())
You can read .mat files using scipy.io.loadmat, which returns a dict with the values as numpy arrays. See the scipy documentation. You may need to reshape the data according to your requirements.
Notice that they mention
You will need an HDF5 python library to read MATLAB 7.3 format mat files.
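A minimal sketch of what that could look like for the SVHN cropped-digits files (train_32x32.mat and test_32x32.mat). This assumes the usual layout of those files, where 'X' is stored as (32, 32, 3, N) and 'y' uses the label 10 for the digit 0; check the shapes on your own copy:
import numpy as np
from scipy.io import loadmat

def load_svhn(path):
    data = loadmat(path)               # dict of numpy arrays
    x = data['X']                      # assumed shape: (32, 32, 3, N)
    y = data['y'].flatten()            # assumed shape: (N, 1) -> (N,)
    x = np.transpose(x, (3, 0, 1, 2))  # samples first: (N, 32, 32, 3)
    y[y == 10] = 0                     # SVHN stores the digit 0 as label 10
    return x.astype('float32') / 255, y

x_train, y_train = load_svhn('train_32x32.mat')
x_test, y_test = load_svhn('test_32x32.mat')
input_shape = (32, 32, 3)  # replaces the (28, 28, 1) MNIST shape in the model
Since the model is compiled with sparse_categorical_crossentropy, the integer labels can be used directly; apart from the new input_shape, the rest of the training code stays largely the same.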

How to convert 1D flattened MNIST Keras to LSTM model without unflattening?

I want to change my model architecture a bit, so that the LSTM accepts the exact same flattened inputs that the fully connected approach does.
Working DNN model from the Keras examples:
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.utils import to_categorical
# import the data
from keras.datasets import mnist
# read the data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
num_pixels = x_train.shape[1] * x_train.shape[2] # find size of one-dimensional vector
x_train = x_train.reshape(x_train.shape[0], num_pixels).astype('float32') # flatten training images
x_test = x_test.reshape(x_test.shape[0], num_pixels).astype('float32') # flatten test images
# normalize inputs from 0-255 to 0-1
x_train = x_train / 255
x_test = x_test / 255
# one hot encode outputs
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
num_classes = y_test.shape[1]
print(num_classes)
# define classification model
def classification_model():
    # create model
    model = Sequential()
    model.add(Dense(num_pixels, activation='relu', input_shape=(num_pixels,)))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    # compile model
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
# build the model
model = classification_model()
# fit the model
model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=10, verbose=2)
# evaluate the model
scores = model.evaluate(x_test, y_test, verbose=0)
Same problem but trying LSTM (syntax error still)
def kaggle_LSTM_model():
    model = Sequential()
    model.add(LSTM(128, input_shape=(x_train.shape[1:]), activation='relu', return_sequences=True))
    # What does return_sequences=True do?
    model.add(Dropout(0.2))
    model.add(Dense(32, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(10, activation='softmax'))
    opt = tf.keras.optimizers.Adam(lr=1e-3, decay=1e-5)
    model.compile(loss='sparse_categorical_crossentropy', optimizer=opt,
                  metrics=['accuracy'])
    return model
model_kaggle_LSTM = kaggle_LSTM_model()
# fit the model
model_kaggle_LSTM.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=10, verbose=2)
# evaluate the model
scores = model_kaggle_LSTM.evaluate(x_test, y_test, verbose=0)
Problem is here:
model.add(LSTM(128, input_shape=(x_train.shape[1:]), activation='relu', return_sequences=True))
ValueError: Input 0 is incompatible with layer lstm_17: expected
ndim=3, found ndim=2
If I go back and don't flatten x_train and y_train, it works. However, I'd like this to be "just another model choice" that feeds off the same pre-processed input. I thought passing shape[1:] would work, as that is the real flattened input_shape. I'm sure it's something easy I'm missing about the dimensionality, but I couldn't get it after an hour of twiddling and debugging. I did figure out that not flattening the 28x28 into 784 works, but I don't understand why. Thanks a lot!
For bonus points, an example of how to do either DNN or LSTM in either 1D (784,) or 2D (28, 28) would be the best.
RNN layers such as LSTM are meant for sequence processing, i.e. a series of vectors whose order of appearance matters. You can look at an image from top to bottom and consider each row of pixels as a vector, so the image becomes a sequence of vectors that can be fed to the RNN layer. According to this description, the RNN layer expects an input of shape (sequence_length, number_of_features). That's why when you feed the images to the LSTM network in their original shape, i.e. (28, 28), it works.
Now if you insist on feeding the LSTM model the flattened image, i.e. with shape (784,), you have at least two options: either you can treat it as a sequence of length one, i.e. (1, 784), which does not make much sense; or you can add a Reshape layer to your model to reshape the input back to its original shape, suitable for the input shape of an LSTM layer, like this:
from keras.layers import Reshape

def kaggle_LSTM_model():
    model = Sequential()
    model.add(Reshape((28, 28), input_shape=x_train.shape[1:]))
    # the rest is the same...
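For the bonus part, a minimal sketch of both variants, assuming the same preprocessing as the DNN example above (flattened (784,) inputs and one-hot labels); the layer sizes are illustrative:
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM, Reshape

def lstm_from_flat_input():
    # Accepts the flattened (784,) vectors and reshapes them back to 28 rows of 28 features.
    model = Sequential()
    model.add(Reshape((28, 28), input_shape=(784,)))
    model.add(LSTM(128, activation='relu'))   # default return_sequences=False: only the last step's output
    model.add(Dropout(0.2))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

def lstm_from_2d_input():
    # Accepts the original (28, 28) images directly as a sequence of 28 row vectors.
    model = Sequential()
    model.add(LSTM(128, activation='relu', input_shape=(28, 28)))
    model.add(Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Either model can then be fit with model.fit(...) using the x_train of the matching shape.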

Why does Keras produce a dimension error with Dense layers in my code?

Hello, I am currently making a simple NN, but there are some problems and I don't know why.
The code looks like this
import csv
import numpy as np
np.random.seed(123) # for reproducibility
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
f = open('training.csv', 'r', encoding='utf-8')
rdr = csv.reader(f)
X_train = list()
Y_train = list()
print(type(X_train))
for ele in rdr:
    # print(type(ele))
    # print(type(ele[0]))
    X_train.append([float(ele[0])])
    Y_train.append([float(ele[1])])
# reshaping the data
X_train = np.asarray(X_train)
Y_train = np.asarray(Y_train)
print(X_train.shape)
X_train_1 = X_train.reshape(X_train.shape[0], 1,1)
print(X_train_1.shape)
# print(X_train.shape)
# defining models
model = Sequential()
# model.add(Flatten())
model.add(Dense(4, activation='relu',input_shape=((1, 1))))
print ("model.output_shape1: ", model.output_shape)
# model.add(Dropout(0.5))
# print("Hello")
# print(model.input_shape)
# model.add(Dense(4, activation='softmax'))
# model.add(Dense(1, activation='softmax'))
# print(model.input_shape)
# 8. Compile model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# 9. Fit model on training data
model.fit(X_train, Y_train,
          batch_size=32, nb_epoch=10, verbose=0)
# 10. Evaluate model on test data
score = model.evaluate(X_train_1, Y_train, verbose=0)
# print (problem)
# print (answer)
f.close()
The error message was:
expected dense_1_input to have 3 dimensions, but got array with shape
(863, 1)
The problem is, I think I put my X_train_1 array into a suitable shape before feeding it to the NN, so what did I do wrong?
Also, if I increase the dimension one more time with
X_train_1 = X_train.reshape(X_train.shape[0], 1,1, 1)
then there are errors like
expected dense_1_input to have 3 dimensions, but got array with shape
(863, 1,1, 1)
It seems that I can't make a 3-dimensional array. What did I do wrong?
Your input dim is 1. The first number in (863, 1) is the number of samples.
The error message
expected dense_1_input to have 3 dimensions, but got array with shape
(863, 1)
suggests that your input is a list of 863 float numbers, each shaped (1,). Please try to change the input shape to input_shape=(1,) or input_dim=1.
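A minimal sketch of that simpler option, keeping the (863, 1) arrays from the question as they are (no extra reshape). The single linear output and mse loss here are illustrative assumptions, since the question does not say whether the target is a class label or a continuous value:
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

# stand-in for the (863, 1) data read from training.csv in the question
X_train = np.random.rand(863, 1).astype('float32')
Y_train = np.random.rand(863, 1).astype('float32')

model = Sequential()
model.add(Dense(4, activation='relu', input_shape=(1,)))  # or: input_dim=1
model.add(Dense(1))  # illustrative output layer; choose the activation/loss that fits your task
model.compile(loss='mse', optimizer='adam')
model.fit(X_train, Y_train, batch_size=32, epochs=10, verbose=1)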
If you would like to reshape your data, please take a look at this code:
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Flatten
X_train = list()
Y_train = list()
for ele in range(0, 10):
    X_train.append([float(ele)])
    Y_train.append([float(ele)])
# reshaping the data
X_train = np.asarray(X_train)
Y_train = np.asarray(Y_train)
X_train = X_train.reshape(X_train.shape[0], 1, 1)
print(X_train.shape)
model = Sequential()
model.add(Dense(4, activation='relu', input_shape=(1, 1)))
model.add(Flatten())
model.add(Dense(1, activation='softmax'))
# 8. Compile model
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# 9. Fit model on training data
model.fit(X_train, Y_train,
          batch_size=32, epochs=10, verbose=1)
I had to change it a bit but it shows the way you should reshape your data.
Please notice that I had to flatten the data in order to match the output shape. I also changed the loss to binary_crossentropy, as my output is binary data. If you would like to classify into multiple classes, you'll have to one-hot encode your output.
