ValueError while running Keras model in Python

I am trying to run the Keras tutorial shown below in Python:
#Import Libraries
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D , Flatten
from keras.optimizers import SGD
#model details
vgg19 = Sequential()
vgg19.add(Conv2D(input_shape=(224,224,3),filters=64,kernel_size=(3,3),padding="same", activation="relu"))
vgg19.add(Conv2D(filters=64,kernel_size=(3,3),padding="same", activation="relu"))
vgg19.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
vgg19.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
vgg19.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
vgg19.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
vgg19.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
vgg19.add(Flatten())
vgg19.add(Dense(units=4096,activation="relu"))
vgg19.add(Dense(units=4096,activation="relu"))
vgg19.add(Dense(units=10, activation="softmax"))
#Preparing Dataset
from keras.datasets import cifar10
from keras.utils import to_categorical
(X, Y), (tsX, tsY) = cifar10.load_data()
# Use a one-hot-encoding
Y = to_categorical(Y)
tsY = to_categorical(tsY)
# Change datatype to float
X = X.astype('float32')
tsX = tsX.astype('float32')
# Scale X and tsX so each entry is between 0 and 1
X = X / 255.0
tsX = tsX / 255.0
#training
optimizer = SGD(lr=0.001, momentum=0.9)
vgg19.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
history = vgg19.fit(X, Y, epochs=100, batch_size=64, validation_data=(tsX, tsY), verbose=0)
Upon training the model, I get the following ValueError:
ValueError: Input 0 of layer dense_9 is incompatible with the layer: expected axis -1 of input shape to have value 25088 but received input with shape (None, 512)
Please suggest how to fix the input shape, and it would be great if someone could also give a brief explanation of the issue.
Thanks in advance!

You can check the shape of X with X.shape.
It shows that X has shape (50000, 32, 32, 3), i.e. CIFAR-10 images are 32x32, not the 224x224 the model declares.
So your first layer should be like this:
vgg19 = Sequential()
vgg19.add(Conv2D(input_shape=(32,32,3),filters=64,kernel_size=(3,3),padding="same", activation="relu"))
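To see where the two numbers in the error come from (a rough sanity check, using only the layer sizes already in the model): the five MaxPool2D(2,2) layers divide the spatial size by 2**5 = 32, and the last conv block has 512 filters, so Flatten produces 7*7*512 = 25088 features for a 224x224 input but only 1*1*512 = 512 for a 32x32 input. The Dense layers were built for the former while the CIFAR-10 data delivers the latter:
# Rough check of the flattened feature size after five 2x2 poolings
for side in (224, 32):
    flat = (side // 32) * (side // 32) * 512
    print(side, "->", flat)   # 224 -> 25088, 32 -> 512
With input_shape=(32, 32, 3) the Dense layers get built for 512 inputs and the error goes away.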

Related

I am getting a shape-related error when running the fit function

valueError: A target array with shape (90, 300, 300, 1) was passed for an output of shape (None, 1) while using as loss binary_crossentropy. This loss expects targets to have the same shape as the output.
I am getting the above error when I run the code below. Can anyone please help me rectify it?
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
import pickle
import numpy as np
# X = np.array(pickle.load(open("X.pickle","rb")))
# Y = np.array(pickle.load(open("Y.pickle","rb")))
X = np.array(pickle.load(open("x_train.pickle","rb")))
Y = np.array(pickle.load(open("y_train.pickle","rb")))
#scaling our image data
X = X/255.0
model = Sequential()
model.add(Conv2D(64 ,(3,3), input_shape = X.shape[1:]))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Conv2D(128 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Conv2D(256 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Conv2D(512 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Flatten())
model.add(Dense(2048))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss="binary_crossentropy",
optimizer = "adam",
metrics = ['accuracy'])
model.summary()
model.fit(X, Y, batch_size=32, epochs = 1, validation_split=0.1)
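One thing worth checking before fitting (a guess based only on the error text, which says the targets must match the (None, 1) output of the final Dense(1)/sigmoid layer): a target array of shape (90, 300, 300, 1) has the same shape as the images, which usually means the array loaded for Y is not actually a vector of 0/1 labels.
# Quick shape check: for a single sigmoid unit trained with binary_crossentropy,
# the targets should be (num_samples,) or (num_samples, 1)
print(X.shape, Y.shape)
assert Y.ndim <= 2 and Y.shape[0] == X.shape[0], "Y does not look like a label vector"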

Input 0 of layer lstm_9 is incompatible with the layer: expected ndim=3, found ndim=4. Full shape received: [None, 300, 300, 1]

The code is executed over the image dataset, and on execution of the code below I am getting the above ValueError. Help me figure out how to fix this error.
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, LSTM
import pickle
import numpy as np
X = np.array(pickle.load(open("X.pickle","rb")))
Y = np.array(pickle.load(open("Y.pickle","rb")))
#scaling our image data
X = X/255.0
model = Sequential()
print(X.shape)
print(Y.shape)
#model.add(Conv2D(64 ,(3,3), input_shape = X.shape[1:]))
model.add(Conv2D(64 ,(3,3), input_shape = X.shape[1:]))
# print(X.shape)
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
print(X.shape)
print(Y.shape)
model.add(Conv2D(128 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
print(X.shape)
print(Y.shape)
model.add(Conv2D(256 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
print(X.shape)
print(Y.shape)
model.add(Conv2D(512 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
print(X.shape)
print(Y.shape)
model.add(Flatten())
print(X.shape)
print(Y.shape)
model = Sequential()
model.add(LSTM(128, input_shape=(X.shape[1:]), activation='relu', return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(128, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
opt = tf.keras.optimizers.Adam(lr=1e-3, decay=1e-5)
model.compile(loss='binary_crossentropy', optimizer=opt,
              metrics=['accuracy'])
model.fit(X, Y, batch_size=32, epochs = 2, validation_split=0.1)
The above code produces the following output. I was printing the shapes again and again just to see the resulting matrices.
(90, 300, 300, 1)
(90,)
(90, 300, 300, 1)
(90,)
(90, 300, 300, 1)
(90,)
(90, 300, 300, 1)
(90,)
(90, 300, 300, 1)
(90,)
(90, 300, 300, 1)
(90,)
As per the error, only a very small change is needed to the input dimensions going into the first LSTM. Can you please make that change in the same code?
from tensorflow.keras.layers import Reshape  # Reshape is used below but was not imported above
model = Sequential()
model.add(Conv2D(64 ,(3,3), input_shape = (300,300,1)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Conv2D(128 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Conv2D(256 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Conv2D(512 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
# Reshape the 16x16x512 feature map to (timesteps=16, features=16*512) so the LSTM gets a 3-D input
model.add(Reshape((16, 16*512)))
model.add(LSTM(128, activation='relu', return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(128, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
opt = tf.keras.optimizers.Adam(lr=1e-3, decay=1e-5)
model.compile(loss='binary_crossentropy', optimizer=opt,
metrics=['accuracy'])
model.summary()
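The Reshape values can be double-checked by tracing the 300x300 input through the four unpadded 3x3 convolutions and 2x2 poolings (a quick back-of-the-envelope script, using only the layer parameters above):
# Each Conv2D with a 3x3 kernel and default 'valid' padding shrinks each spatial dimension by 2;
# each MaxPooling2D(2x2) then halves it
side = 300
for _ in range(4):
    side = (side - 2) // 2
print(side)   # 16, so the last feature map is 16 x 16 x 512
So Reshape((16, 16 * 512)) hands the first LSTM a (batch, 16, 8192) tensor, which satisfies the expected ndim=3.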

How can I extract predictions results after multiclass training?

I am trying to do some multiclass image classification with a VGG-style network, but unfortunately I cannot reach my goal, and I think I have a silly mistake in my code.
I have almost 16K images in 4 categories (folders 101, 102, 104 and 105). With the following code I import all the images; to keep the post short I won't repeat it for every category, but there are 4 blocks of lines like this:
path_101 = ("/media/data/working_dir/categories/101/")
train_101 = []
for png in os.listdir(path_101):
imageread = img.imread(path_101+png)
imageread = cv2.resize(imageread, (320,240)) #resizing
train_101.append(imageread)
Afterwards I concatenate them into one x_data variable.
x_data = np.concatenate((train_101, train_102, train_104, train_105), axis=0)
Following this, I create my label data and one-hot encode it:
# We create our classify data.
one = np.ones(len(train_101))
four = np.ones(len(train_104)) +3
two = np.ones(len(train_102)) + 1
five = np.ones(len(train_105)) + 4
y = np.concatenate((one, two, four, five), axis= 0).reshape(-1,1)
from sklearn.preprocessing import OneHotEncoder
labels=OneHotEncoder(categories = "auto", handle_unknown = "ignore")
y=labels.fit_transform(y).toarray()
After all this I do the training part, as you can see below:
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x_data, y, test_size = 0.2)
# import Keras and layers libraries
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D , Flatten
model = Sequential()
model.add(Conv2D(input_shape=(240,320,3),filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(Conv2D(filters=64,kernel_size=(3,3),padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Flatten())
model.add(Dense(units=512,activation="relu"))
model.add(Dense(units=64, activation="relu"))
model.add(Dense(units=4, activation="softmax"))
model.add(Dense(units=4, activation="softmax"))
from keras.optimizers import Adam
opt = Adam(lr=0.001)
model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=['accuracy'])
model.fit(x_train, y_train, verbose=1, epochs=5, validation_split=0.2)
results = model.evaluate(x_test, y_test, batch_size=64)
Afterwards I was getting terrible results, around 25% accuracy, and it was not changing from epoch to epoch. I thought that maybe my training output was not right, so I tried to predict every 100th image of my initial dataset with this code:
prs = []
for k in np.arange(1, 15000, 100):
    imgg = x_data[k]
    imgg = imgg[np.newaxis, ...]
    pr = model.predict_classes(imgg)
    prs.append(pr[0])
print(prs)
And I was getting only 0s or 2s, depending on which class has more input images, so something was wrong.
I am new to neural networks and maybe I did something amateurish.
How can I get class 0 if my y data was one-hot encoded from the labels 1, 2, 4 and 5?
I was thinking about the y variable: if I trained with one-hot encoding, maybe I should decode the predictions, or what?
Thanks in advance! Do not hesitate to ask for details.
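On the decoding part specifically (a small sketch, assuming the fitted OneHotEncoder named labels above): predict_classes returns the column index 0-3 of the one-hot matrix, and the columns follow the encoder's sorted categories, so the indices can be mapped back to the original 1/2/4/5 labels like this:
import numpy as np
# Map predict_classes() column indices back to the original label values
pred_idx = np.array(prs)                       # indices in 0..3
pred_labels = labels.categories_[0][pred_idx]  # values from {1., 2., 4., 5.}
print(pred_labels[:10])
Getting only the majority class back is a separate problem; the two stacked softmax Dense(4) layers at the end of the model are one thing worth simplifying to a single softmax output before digging further.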

Accuracy Equals 0 CNN Python Keras

I'm working on a binary classification problem. I was getting 69% accuracy at first, but I kept running out of memory, so I shrank certain parameters, and now the accuracy comes out as 0. Any idea what's going on?
model = Sequential()
from keras.layers import Dropout
model.add(Conv2D(96, kernel_size=11, padding="same", input_shape=(300, 300, 1), activation = 'relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(Conv2D(128, kernel_size=3, padding="same", activation = 'relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(128, kernel_size=3, padding="same", activation = 'relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
from keras.layers.core import Activation
model.add(Flatten())
# model.add(Dense(units=1000, activation='relu' ))
model.add(Dense(units= 300, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1))
model.add(Activation("softmax"))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
from keras.preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(
    featurewise_center=True,
    rotation_range=90,
    fill_mode='nearest',
    validation_split=0.2
)
datagen.fit(train)
train_generator = datagen.flow(train, train_labels, batch_size=8)
# # fits the model on batches with real-time data augmentation:
history = model.fit_generator(generator=train_generator,
                              use_multiprocessing=True,
                              steps_per_epoch=len(train_generator) / 8,
                              epochs=5,
                              workers=20)
Softmax should only be used if you have a multiclass classification problem. You have a single output from your Dense layer, so you should use sigmoid.
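A minimal sketch of that change, keeping everything else in the posted model as it is: a softmax over a single unit always outputs 1.0, so the network cannot express both classes, while a sigmoid gives a probability in (0, 1) that binary_crossentropy expects. The final layers should read:
# Single-unit sigmoid output for binary classification
model.add(Dense(1))
model.add(Activation("sigmoid"))   # was Activation("softmax")
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])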

"The first layer in a Sequential model must get an `inputShape` or `batchInputShape` argument." when loading Keras model with TensorFlow.js

I trained the following model using Keras (Version 2.2.4):
# imports ...
model = Sequential()
model.add(Conv2D(filters=64, kernel_size=5, data_format="channels_last", activation="relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(data_format="channels_last"))
model.add(Conv2D(filters=32, kernel_size=3, data_format="channels_last", activation="relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(data_format="channels_last"))
model.add(Flatten(data_format="channels_last"))
model.add(Dense(units=256, activation="relu"))
model.add(Dense(units=128, activation="relu"))
model.add(Dense(units=32, activation="relu"))
model.add(Dense(units=8, activation="softmax"))
# training ...
model.save("model.h5")
The inputs are 28 x 28 grayscale images of shape (28, 28, 1).
I converted the model with tensorflowjs_converter and now I want to load it in my website using TensorFlow.js (Version 1.1.0):
tf.loadLayersModel('./model/model.json')
This produces the following error:
The first layer in a Sequential model must get an `inputShape` or `batchInputShape` argument.
at new e (errors.ts:48)
at e.add (models.ts:440)
at e.fromConfig (models.ts:1020)
at vp (generic_utils.ts:277)
at nd (serialization.ts:31)
at models.ts:299
at common.ts:14
at Object.next (common.ts:14)
at o (common.ts:14)
How can I fix this error without having to retrain the model?
Try to adjust your neural net to this format:
input_img = Input(batch_shape=(None, 28,28,1))
layer1=Conv2D(filters=64, kernel_size=5, data_format="channels_last", activation="relu")(input_img)
layer2=BatchNormalization()(layer1)
.
.
.
final_layer=Dense(units=8, activation="softmax")(previous_layer)
... and so forth. At the end:
model = Model(inputs = input_img, outputs = final_layer)
You have to specify the input shape in the first Conv2D layer of your Keras model.
# imports ...
model = Sequential()
model.add(Conv2D(input_shape=(28, 28, 1), filters=64, kernel_size=5, data_format="channels_last", activation="relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(data_format="channels_last"))
model.add(Conv2D(filters=32, kernel_size=3, data_format="channels_last", activation="relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(data_format="channels_last"))
model.add(Flatten(data_format="channels_last"))
model.add(Dense(units=256, activation="relu"))
model.add(Dense(units=128, activation="relu"))
model.add(Dense(units=32, activation="relu"))
model.add(Dense(units=8, activation="softmax"))
# training ...
model.save("model.h5")
The best approach is to change your Keras model and retrain.
However, if you cannot retrain your network, you can manually edit your model.json file.
You need to find the input layer's config in your model.json file and add:
"config": {
...
"batch_input_shape": [
null,
28,
28,
1
]
...
}
