I am getting a shape-related error when running the fit function - python

ValueError: A target array with shape (90, 300, 300, 1) was passed for an output of shape (None, 1) while using as loss binary_crossentropy. This loss expects targets to have the same shape as the output.
I am getting the above error when I run the code below. Can anyone please help me rectify it?
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
import pickle
import numpy as np
# X = np.array(pickle.load(open("X.pickle","rb")))
# Y = np.array(pickle.load(open("Y.pickle","rb")))
X = np.array(pickle.load(open("x_train.pickle","rb")))
Y = np.array(pickle.load(open("y_train.pickle","rb")))
#scaling our image data
X = X/255.0
model = Sequential()
model.add(Conv2D(64 ,(3,3), input_shape = X.shape[1:]))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Conv2D(128 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Conv2D(256 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Conv2D(512 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Flatten())
model.add(Dense(2048))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss="binary_crossentropy",
              optimizer="adam",
              metrics=['accuracy'])
model.summary()
model.fit(X, Y, batch_size=32, epochs = 1, validation_split=0.1)
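One way to narrow this down (a diagnostic sketch, not part of the original post): binary_crossentropy with a Dense(1) output expects one 0/1 label per image, so Y should have shape (num_samples,) or (num_samples, 1). A target of shape (90, 300, 300, 1) suggests that y_train.pickle holds the images rather than the labels, so it is worth checking the arrays before fitting:
# Diagnostic sketch: confirm the arrays hold what you expect before calling fit().
print(X.shape)  # expected: (num_samples, height, width, channels)
print(Y.shape)  # expected: (num_samples,) or (num_samples, 1) with 0/1 entries
# If Y has the same 4-D shape as X, the pickles were likely swapped or y_train
# was built from the image array; the targets need to be one label per sample,
# e.g. Y = np.array([0, 1, 1, ...])  # hypothetical labels, one per image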

Related

Conv2D ValueError: logits and labels must have the same shape ((None, 2) vs (None, 1))

So I'm trying to make a FaceID with TensorFlow using a CNN. I only gave my data 15 images of my face (it is a simple project), and I want it to predict whether it is my face or not, like true or false. The problem is that I don't understand what the error in my code is. This is the error:
ValueError: logits and labels must have the same shape ((None, 2) vs (None, 1))
and here is my code
# Face ID project, using CNN tensorflow
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization, Activation
from tensorflow.keras import backend as K
import numpy as np
import cv2
import glob
# Preparing the data and parameters
epochs = 10
lr = 1e-3
batch_size = 64
img_dims = (96,96,3)
data = []
labels = []
image_files = glob.glob("C:/Users/berna/Desktop/Programming/AI_ML_DL/Projects/FaceID/Data/*")
for img in image_files:
    image = cv2.imread(img)
    image = cv2.resize(image, (img_dims[0], img_dims[1]))
    image = img_to_array(image)
    data.append(image)
    if img == img:  # note: this condition is always True, so every label becomes 1
        label = 1
    else:
        label = 0
    labels.append([label])
# Preproccesing the data (convert arrays)
data = np.array(data, dtype="float32") / 255.0
labels = np.array(labels)
X = data
y = labels
def build(width, height, depth, classes):
    model = Sequential()
    inputShape = height, width, depth
    chanDim = -1
    if K.image_data_format() == "channels_first":
        inputShape = depth, height, width
        chanDim = 1
    # Creating the model
    model.add(Conv2D(32, (3,3), padding="same", input_shape=inputShape))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chanDim))
    model.add(MaxPooling2D(pool_size=(3,3)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, (3,3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chanDim))
    model.add(Conv2D(64, (3,3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chanDim))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(128, (3,3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chanDim))
    model.add(Conv2D(128, (3,3), padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(axis=chanDim))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation("relu"))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(classes))
    model.add(Activation("sigmoid"))
    return model
# Build the model call
model = build(width=img_dims[0], height=img_dims[1], depth=img_dims[2], classes=2)
# compile the model
opt = Adam(lr=lr, decay=lr/epochs)
model.compile(loss="binary_crossentropy",
              optimizer=opt,
              metrics=['accuracy'])
# fitting the model
H = model.fit(X, y, batch_size=batch_size,
              epochs=epochs, verbose=1)
model.save('faceid.model')
As stated by @Frightera, replacing model.add(Dense(classes)) with model.add(Dense(1)) should work.
Your labels are integers, but your last layer outputs a 2-D array of shape (None, 2).
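For reference, a minimal sketch (not the poster's full code) of the two consistent head/label pairings; either one removes the (None, 2) vs (None, 1) mismatch:
# Option 1 (the suggested fix): keep the integer 0/1 labels and use a single sigmoid unit.
model.add(Dense(1))
model.add(Activation("sigmoid"))
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

# Option 2: keep Dense(classes) with classes=2, switch the activation to softmax and
# use sparse_categorical_crossentropy with the same integer labels
# (or one-hot encode them and use categorical_crossentropy):
# model.add(Dense(2))
# model.add(Activation("softmax"))
# model.compile(loss="sparse_categorical_crossentropy", optimizer=opt, metrics=["accuracy"])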

ValueError while running keras model in Python

I am trying to run the Keras tutorial mentioned below in python:
#Import Libraries
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D , Flatten
from keras.optimizers import SGD
#model details
vgg19 = Sequential()
vgg19.add(Conv2D(input_shape=(224,224,3),filters=64,kernel_size=(3,3),padding="same", activation="relu"))
vgg19.add(Conv2D(filters=64,kernel_size=(3,3),padding="same", activation="relu"))
vgg19.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
vgg19.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
vgg19.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
vgg19.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
vgg19.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
vgg19.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
vgg19.add(Flatten())
vgg19.add(Dense(units=4096,activation="relu"))
vgg19.add(Dense(units=4096,activation="relu"))
vgg19.add(Dense(units=10, activation="softmax"))
#Preparing Dataset
from keras.datasets import cifar10
from keras.utils import to_categorical
(X, Y), (tsX, tsY) = cifar10.load_data()
# Use a one-hot-encoding
Y = to_categorical(Y)
tsY = to_categorical(tsY)
# Change datatype to float
X = X.astype('float32')
tsX = tsX.astype('float32')
# Scale X and tsX so each entry is between 0 and 1
X = X / 255.0
tsX = tsX / 255.0
#training
optimizer = SGD(lr=0.001, momentum=0.9)
vgg19.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
history = vgg19.fit(X, Y, epochs=100, batch_size=64, validation_data=(tsX, tsY), verbose=0)
Upon training the model, I get the ValueError mentioned below:
ValueError: Input 0 of layer dense_9 is incompatible with the layer: expected axis -1 of input shape to have value 25088 but received input with shape (None, 512)
Please suggest how to fix the input shape; it would also be helpful if someone could provide a brief explanation of the issue.
Thanks in advance!
You can check the shape of X using X.shape.
It clearly shows that the shape of X is (50000, 32, 32, 3).
So your first layer should be like this:
vgg19 = Sequential()
vgg19.add(Conv2D(input_shape=(32,32,3),filters=64,kernel_size=(3,3),padding="same", activation="relu"))
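A brief explanation of the numbers in the error (my reading of the shapes, not part of the original answer): with a 224x224 input, the five 2x2 poolings leave a 7x7x512 feature map, so the first Dense layer is built for 7*7*512 = 25088 inputs; feeding 32x32 CIFAR-10 images through the same stack leaves only 1x1x512 = 512 features, hence the mismatch. A quick check after changing input_shape:
print(X.shape)   # (50000, 32, 32, 3) for CIFAR-10
vgg19.summary()  # the Flatten layer should now report an output shape of (None, 512)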

CNN model not learning

I'm building a CNN model to classify images; however, I suspect that my model is not learning because the accuracy and the loss stay constant. See my code below:
Building the train, test and validation image datasets
import pandas as pd
from keras_preprocessing.image import ImageDataGenerator
import numpy as np
#Creating three datasets from the the 3 .txt files.
trainingfile = pd.read_table('data/training.txt', delim_whitespace=True, names=('class', 'image'))
testingfile = pd.read_table('data/testing.txt', delim_whitespace=True, names=('class', 'image'))
validationfile = pd.read_table('data/validation.txt', delim_whitespace=True, names=('class', 'image'))
#Changing target variable type
trainingfile = trainingfile.replace([0, 1, 2], ['class0', 'class1', 'class2'])
testingfile = testingfile.replace([0, 1, 2], ['class0', 'class1', 'class2'])
validationfile = validationfile.replace([0, 1, 2], ['class0', 'class1', 'class2'])
#Data augmentation
datagen=ImageDataGenerator()
train_datagen = ImageDataGenerator(
    # Apply only a small rotation, since the photographs will generally be centered
    rotation_range=5,
    zoom_range=0.1)
#Final datasets containing the images
train=train_datagen.flow_from_dataframe(dataframe=trainingfile, directory="data/", x_col="image", y_col="class", class_mode="categorical", target_size=(256,256),color_mode='rgb',batch_size=32)
test=datagen.flow_from_dataframe(dataframe=testingfile, directory="data/", x_col="image", y_col="class", class_mode="categorical", target_size=(256,256),color_mode='rgb',batch_size=32)
#No data augmentation to the validation dataset.
validation=datagen.flow_from_dataframe(dataframe=validationfile, directory="data/", x_col="image", y_col="class", class_mode="categorical", target_size=(256,256),color_mode='rgb', batch_size=32)
CNN model
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Flatten, Activation, Dropout, MaxPooling2D, BatchNormalization
from keras.constraints import maxnorm
#Creating the model
model = Sequential()
#1st convolution block
model.add(Conv2D(32, kernel_size = (3, 3), activation='relu', input_shape=(256, 256,3)))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
#2nd convolution block
model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
#3rd convolution block
model.add(Conv2D(64, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
#4th convolution block
model.add(Conv2D(96, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
#5th convolution block
model.add(Conv2D(32, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
#Dropout
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
#model.add(Dropout(0.3))
model.add(Dense(3, activation = 'softmax'))
from keras import regularizers, optimizers
from keras.optimizers import RMSprop
from keras.callbacks import EarlyStopping
# Compile model
model.compile(optimizer='adam',loss="categorical_crossentropy",metrics=["accuracy"])
# Early stopping
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1,patience=10)
Training the model
h = model.fit_generator(generator=train,
                        validation_data=validation,
                        epochs=50,
                        callbacks=[es])
Results (the output screenshots are omitted here; accuracy and loss stay constant)
It is the first time that I have used fit_generator, so perhaps I'm not using it properly?
As I can see from the results images, you are training for just 1 epoch. This could be because the EarlyStopping is too strict. Try adding patience=3 to the EarlyStopping callback.
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=3)
EDIT
Overfitting example (training curves omitted):
Check this post to know more about how to deal with overfitting.
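If overfitting is the issue, here is a hedged sketch of common adjustments (standard options, not taken from the answer above):
from keras.callbacks import EarlyStopping

# Stop when validation loss stops improving and keep the best weights seen so far.
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1,
                   patience=3, restore_best_weights=True)

# Other standard levers: a stronger Dropout before the final Dense layer,
# heavier augmentation in train_datagen (rotation_range, zoom_range, horizontal_flip),
# or L2 weight decay via keras.regularizers on the Conv2D/Dense layers.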

Input 0 of layer lstm_9 is incompatible with the layer: expected ndim=3, found ndim=4. Full shape received: [None, 300, 300, 1]

The code is executed over the image dataset, and on running the code below I am getting the ValueError above. Help me figure out how to fix this error.
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, LSTM
import pickle
import numpy as np
X = np.array(pickle.load(open("X.pickle","rb")))
Y = np.array(pickle.load(open("Y.pickle","rb")))
#scaling our image data
X = X/255.0
model = Sequential()
print(X.shape)
print(Y.shape)
#model.add(Conv2D(64 ,(3,3), input_shape = X.shape[1:]))
model.add(Conv2D(64 ,(3,3), input_shape = X.shape[1:]))
# print(X.shape)
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
print(X.shape)
print(Y.shape)
model.add(Conv2D(128 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
print(X.shape)
print(Y.shape)
model.add(Conv2D(256 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
print(X.shape)
print(Y.shape)
model.add(Conv2D(512 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
print(X.shape)
print(Y.shape)
model.add(Flatten())
print(X.shape)
print(Y.shape)
model = Sequential()
model.add(LSTM(128, input_shape=(X.shape[1:]), activation='relu', return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(128, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
opt = tf.keras.optimizers.Adam(lr=1e-3, decay=1e-5)
model.compile(loss='binary_crossentropy', optimizer=opt,
              metrics=['accuracy'])
model.fit(X, Y, batch_size=32, epochs = 2, validation_split=0.1)
The above code produces the following output. I was printing the shapes again and again just to see the resulting matrices.
(90, 300, 300, 1)
(90,)
(90, 300, 300, 1)
(90,)
(90, 300, 300, 1)
(90,)
(90, 300, 300, 1)
(90,)
(90, 300, 300, 1)
(90,)
(90, 300, 300, 1)
(90,)
Here a very small change is needed in the input dimension of the image for the first LSTM, as indicated by the error. Can you please make the changes to the same code?
from tensorflow.keras.layers import Reshape  # Reshape is used below but was not among the original imports

model = Sequential()
model.add(Conv2D(64 ,(3,3), input_shape = (300,300,1)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Conv2D(128 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Conv2D(256 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Conv2D(512 ,(3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(Reshape((16, 16*512)))
model.add(LSTM(128, activation='relu', return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(128, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
opt = tf.keras.optimizers.Adam(lr=1e-3, decay=1e-5)
model.compile(loss='binary_crossentropy', optimizer=opt,
              metrics=['accuracy'])
model.summary()
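For context (not stated in the answer itself): with a 300x300x1 input, the four Conv2D/MaxPooling2D blocks end in a 16x16x512 feature map, which is why the Reshape turns it into a sequence of 16 timesteps of 16*512 features for the LSTM. A quick way to confirm that shape, reusing the imports from the question:
# Hypothetical check: build only the convolutional stack and inspect its output shape.
conv_only = Sequential()
conv_only.add(Conv2D(64, (3, 3), activation="relu", input_shape=(300, 300, 1)))
conv_only.add(MaxPooling2D(pool_size=(2, 2)))
for n_filters in (128, 256, 512):
    conv_only.add(Conv2D(n_filters, (3, 3), activation="relu"))
    conv_only.add(MaxPooling2D(pool_size=(2, 2)))
print(conv_only.output_shape)  # (None, 16, 16, 512) -> Reshape((16, 16 * 512))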

AttributeError: 'ImageDataGenerator' object has no attribute 'shape'

I am new to coding. I am trying to get the score, but unfortunately I am getting errors. I was first using import keras until, when I wanted to evaluate the score and predict, it gave me an error. The training of the model went well; I had no problem there. It was after that, when I was about to get the score of my model, that I got the error: ImageDataGenerator object has no attribute 'ndim'.
Then I looked for help and someone told me to use tensorflow.keras instead, and I did.
After training the model again and reaching the part where I get the score and predict, I got another error saying that the ImageDataGenerator object has no attribute 'shape', plus a warning saying:
WARNING:tensorflow: Falling back from v2 loop because of error: Failed to find data adapter that can handle input: <class 'tensorflow.python.keras.preprocessing.image.ImageDataGenerator'>, <class 'NoneType'>
Here is some of the code:
import numpy as np
import tensorflow as tf
import cv2
import sys
import os
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.optimizers import Adam
image_width, image_height = 150,150
Epochs =10
batch_size=45
Steps_per_epoch=190
Validation_data=20
num_classes = len(map_characters)
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', input_shape= (image_height,image_width ,3)))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(256, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(1024))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
opt = Adam(lr=0.01, decay=1e-6, )
model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer='Adam',
              metrics=['accuracy'])
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(image_height, image_width),
    batch_size=batch_size,
    class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(image_height, image_width),
    batch_size=batch_size,
    class_mode='categorical')
result = model.fit_generator(training_generator,
                             steps_per_epoch=Steps_per_epoch,
                             epochs=Epochs,
                             validation_data=validation_generator,
                             validation_steps=Validation_data)
score = model.evaluate(test_datagen,
                       validation_generator,
                       batch_size=batch_size)
To evaluate on a generator, you need to use evaluate_generator, not evaluate.
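A minimal sketch of how that could look with the generators defined above (assuming the same Keras version; in recent TF 2.x releases model.evaluate also accepts a generator directly):
# Evaluate on the validation generator; steps is the number of batches to draw.
score = model.evaluate_generator(validation_generator, steps=Validation_data)
print("Validation loss: %.4f  accuracy: %.4f" % (score[0], score[1]))

# Newer TF 2.x (where evaluate_generator is deprecated):
# score = model.evaluate(validation_generator, steps=Validation_data)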
