Face recognition Keras dimensionality problem - python

We are trying to do image recognition with Keras, but we get the following error: ValueError: Error when checking input: expected conv2d_93_input to have 4 dimensions, but got array with shape (4999, 40). We read the image data with imread and put it into an array, but for some reason Keras wants a fourth dimension.
This is how we read the files:
# imports implied by the snippets (the exact source of imread is an assumption)
import numpy
from matplotlib.pyplot import imread
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

def generator(BatchSize):
    text_file = open("/content/list_attr_celeba.txt", "r")
    lines = text_file.readlines()
    lines = lines[2:]
    prew = 1
    e = []
    while True:
        for i in range(prew, prew + BatchSize):
            #print(i)
            lines[i] = lines[i].split()
            name = lines[i][0]
            lines[i] = lines[i][1:]
            a = imread('/content/img_align_celeba/' + name)
            #b = numpy.zeros(4,1)
            #print(a)
            e.append(numpy.array(a))
            if i % BatchSize == 0 and i != 0:
                yield (numpy.array(lines[prew:i]), e)
                e = []
                prew = i + 1
This is how we define the generator and the model:
gen = generator(5000)
model = Sequential()
model.add(Conv2D(32, kernel_size=(5, 5), strides=(1, 1),
                 activation='relu',
                 input_shape=(170, 140, 3)))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(64, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dense(40, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy'])
This is how we fit the model:
model.fit_generator(gen, epochs=2, verbose=1, max_queue_size=10,
                    workers=1, use_multiprocessing=False, shuffle=False,
                    initial_epoch=0, steps_per_epoch=4)

Reshape your data to (4999, 40, 1), i.e. add a trailing dimension of size 1; Conv2D expects input of shape (batch_size, height, width, channels).
a = numpy.array(a)
e.append(a.reshape(a.shape + (1,)))
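For instance, the suggested reshape applied to a 2-D array looks like this (a minimal sketch; the zeros array is only a stand-in for the actual data):
import numpy as np

a = np.zeros((4999, 40))       # stand-in for the offending 2-D array
a = a.reshape(a.shape + (1,))  # now has shape (4999, 40, 1)
# equivalently: a = np.expand_dims(a, axis=-1)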

Related

CNN - Detecting Handwritten Smilies: ValueError: could not broadcast input array from shape (26,26,3) into shape (26)

In my root folder images I have three folders called 0, 1 and 2. Folder 0 contains no smilies, folder 1 contains happy handwritten smilies, and folder 2 contains sad handwritten smilies.
The images are JPG color images with dimensions 26x26.
This is my code:
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
import numpy as np
import os
import cv2
from sklearn.model_selection import train_test_split

def getImages(path, classes):
    folder = os.listdir(path)
    classes_counter = 0
    images = []
    images_classes = []
    for x in range(0, len(folder)):
        myPicList = os.listdir(path + "/" + str(classes[classes_counter]))
        for pic in myPicList:
            img_path = path + "/" + str(classes[classes_counter]) + "/" + pic
            img = cv2.imread(img_path)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            images.append(img)
            images_classes.append(classes_counter)
        classes_counter += 1
    images = np.array(images, dtype="float") / 255
    return images, images_classes

def createModel(classes, images_dimension):
    classes_amount = len(np.unique(classes))
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', input_shape=images_dimension))
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(classes_amount, activation='softmax'))
    return model

labels = [0, 1, 2]
images, images_classes = getImages('training-images', labels)
images_dimension = (26, 26, 3)
X_train, X_test, Y_train, Y_test = train_test_split(images, images_classes, test_size=0.2)  # if 1000 images, the split leaves 200 for testing
X_train, X_validation, Y_train, Y_validation = train_test_split(X_train, Y_train, test_size=0.2)  # if 1000 images, 20% of the remaining 800 gives 160 for validation
model = createModel(labels, images_dimension)
batch_size = 20
epochs = 100
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(X_validation, Y_validation))
model.evaluate(X_test, Y_test, verbose=0)
On the line images = np.array(images, dtype="float") / 255 I get this error:
Traceback (most recent call last):
  File "train-nn.py", line 54, in <module>
    images, images_classes = getImages('training-images', labels)
  File "train-nn.py", line 24, in getImages
    images = np.array(images, dtype="float") / 255
ValueError: could not broadcast input array from shape (26,26) into shape (26)
I think something is wrong with the data structure or with the array structure, but I have no clue what I did wrong. Maybe someone knows this problem and can give me a hint!
Here you can download the whole project as a zip file:
http://fileshare.mynotiz.de/cnn-handwritten-smilies.zip
I found the problem. One of my test-data images was not 26x26; it was 26x23.
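A quick way to catch such outliers is to scan the image folders for files whose shape deviates from the expected 26x26 (a minimal sketch, assuming the training-images layout described above):
import os
import cv2

for root, _, files in os.walk("training-images"):
    for name in files:
        img = cv2.imread(os.path.join(root, name))
        # flag unreadable files and any image that is not 26x26
        if img is None or img.shape[:2] != (26, 26):
            print(os.path.join(root, name), None if img is None else img.shape)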

Keras Face Recognition model bad accuracy

I have been struggling for some days to create my own model for face recognition with Keras in Python. I will walk you through my setup and give you my code; I'm sure there is something I'm not doing right, but I can't tell what.
My dataset is composed of 97 people with an average of 10 photos per person, 1106 photos in total. The dataset is in a folder "faces" and the pictures for each person are in folders named from 0 to 96.
Here is the code I am using to process the data, split it into train, validation and test sets, and create my CNN with Keras. Any help or suggestions are much appreciated, thank you!
# imports implied by the snippet (paths is assumed to come from imutils)
import os, cv2
import numpy as np
from imutils import paths
from keras.models import Sequential
from keras.layers import (Conv2D, BatchNormalization, Activation,
                          MaxPooling2D, Dropout, Flatten, Dense)
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import RMSprop
from keras import regularizers
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer

channels = 3
rows = 50
cols = 50
classes = 97
data = []
labels = []
images = sorted(list(paths.list_images("faces")))
for image in images:
    img = cv2.imread(image)
    img = cv2.resize(img, (rows, cols)).flatten()
    data.append(img)
    label = image.split(os.path.sep)[-2]
    labels.append(label)
data = np.array(data, dtype="float32") / 255.0
labels = [int(i) for i in labels]
labels = np.array(labels)
xtrain, testX, ytrain, testY = train_test_split(data, labels, test_size = 0.3, random_state = 13)
trainX, validX, trainY, validY = train_test_split(xtrain, ytrain, test_size = 0.2, random_state = 14)
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
validY = lb.transform(validY)
testY = lb.transform(testY)
trainX = trainX.astype("float32")
validX = validX.astype("float32")
testX = testX.astype("float32")
trainY = trainY.astype("float32")
validY = validY.astype("float32")
testY = testY.astype("float32")
trainX = trainX.reshape([trainX.shape[0], cols, rows, channels])
validX = validX.reshape([validX.shape[0], cols, rows, channels])
testX = testX.reshape([testX.shape[0], cols, rows, channels])
weight_decay = 1e-4
model = Sequential()
model.add(Conv2D(32, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay), input_shape=trainX.shape[1:]))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(32, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(64, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.3))
model.add(Conv2D(128, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(128, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(classes))
model.add(Activation('softmax'))
datagen = ImageDataGenerator(horizontal_flip=True,
                             featurewise_center=False,
                             samplewise_center=False,
                             featurewise_std_normalization=False,
                             samplewise_std_normalization=False,
                             zca_whitening=False,
                             rotation_range=20,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             vertical_flip=False)
datagen.fit(trainX)
optim = RMSprop(lr=0.001, decay=1e-6)
#sgd = SGD(lr=0.01, momentum=0.9)
model.compile(loss='categorical_crossentropy', optimizer=optim, metrics=['accuracy'])
model.fit_generator(datagen.flow(trainX, trainY, batch_size=64), steps_per_epoch = trainX.shape[0], epochs = 50, validation_data=(validX, validY), verbose = 1)
score = model.evaluate(testX, testY, batch_size = 64, verbose = 1)
print("Test score: ", score[0])
print("Test accuracy: ", score[1])
model_json = model.to_json()
open('face_architecture.json', 'w').write(model_json)
model.save_weights('face_weights.h5', overwrite=True)

ValueError: Error when checking target: expected dense_9 to have shape (7,) but got array with shape (1,)

Please help. I'm getting this error when running the training code on Colab. I want to do multi-label classification (7 distinct output labels):
ValueError: Error when checking target: expected dense_9 to have shape (7,) but got array with shape (1,)
My code is as follows:
with open("data/fer2013/fer2013.csv") as f:
x_train = np.array(x_train, 'float32')
y_train = np.array(y_train, 'float32')
x_test = np.array(x_test, 'float32')
y_test = np.array(y_test, 'float32')
x_train /= 255 #normalize inputs between [0, 1]
x_test /= 255
x_train = x_train.reshape(x_train.shape[0], 48, 48, 1)
x_train = x_train.astype('float32')
x_test = x_test.reshape(x_test.shape[0], 48, 48, 1)
x_test = x_test.astype('float32')
model.add(BatchNormalization(input_shape=(48,48,1)))
#1st convolution layer
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2, 2)))
#2nd convolution layer
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2, 2)))
#3rd convolution layer
model.add(Conv2D(256, (2, 2), activation='relu'))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2, 2)))
#4th convolution layer
model.add(Conv2D(512, (2, 2), activation='relu'))
model.add(Conv2D(512, (2, 2), activation='relu'))
model.add(MaxPooling2D(pool_size=(1,1), strides=(2, 2)))
model.add(Flatten())
#fully connected neural networks
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(7, activation='softmax'))
## Start the training
#model.fit(x_train, y_train, epochs=epochs, validation_split=0.0, shuffle=True) #train for all trainset
model.fit_generator(train_generator, steps_per_epoch=batch_size, epochs=epochs, validation_data=(x_test,y_test)) #train for randomly selected one
score = model.evaluate(x_test, y_test, verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], score[1]*100))
##########################------------------------############################
# serialize model to JSON
model_json = model.to_json()
with open("model/model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model/weights.h5")
print("Saved model to disk")
############################---------------------###############################
I want to do multi-label classification (7 distinct output labels). I'm expecting a JSON file model.json, but instead I get the error:
ValueError: Error when checking target: expected dense_9 to have shape (7,) but got array with shape (1,)

Create 5-dimension input shape to 3d-CNN in python [duplicate]

This question already has answers here:
Multiple images input to the same CNN using Conv3d in keras
(2 answers)
Closed 3 years ago.
I have a dataset of 15 classes with 460 images in total. I want to feed every sequence of 8 images into the same CNN structure at the same time. I use Conv3D to do that, but I'm confused about the input shape, and it returns an error.
This is my model:
IMAGE_DIMS = (8, 460, 60, 60, 3)
data = []
labels = []
# loading images...
imagePaths = "dataset\\path"
listing = os.listdir(imagePaths)
for imagePath in listing:
    image_fold = os.listdir(imagePaths + "\\" + imagePath)
    for file in image_fold:
        im = (imagePaths + "\\" + imagePath + "\\" + file)
        image = cv2.imread(im)
        image = cv2.resize(image, (IMAGE_DIMS[2], IMAGE_DIMS[3]))
        image = img_to_array(image)
        data.append(image)
        label = imagePath.split(os.path.sep)[-1]
        labels.append(label)
# scale the raw pixel intensities to the range [0, 1]
data = np.array(data, dtype="float") / 255.0
labels = np.array(labels)
# binarize the labels
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.2, random_state=42)
model = Sequential()
sample = IMAGE_DIMS[0]
frame = IMAGE_DIMS[1]
height = IMAGE_DIMS[2]
width = IMAGE_DIMS[3]
channels = IMAGE_DIMS[4]
classes = len(lb.classes_)
inputShape = (sample, frame, height, width, channels)
chanDim = -1
if K.image_data_format() == "channels_first":
    inputShape = (sample, frame, channels, height, width)
    chanDim = 1
model.add(Conv3D(32, (3, 3, 3), padding="same", batch_input_shape=inputShape))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling3D(pool_size=(2, 2, 2), padding="same", data_format="channels_last"))
model.add(Dropout(0.25))
model.add(Conv3D(64, (3, 3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling3D(pool_size=(2, 2, 2), padding="same", data_format="channels_last"))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# softmax classifier
model.add(Dense(classes))
model.add(Activation("softmax"))
model.summary()
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="categorical_crossentropy", optimizer= opt, metrics=["accuracy"])
H = model.fit(trainX, trainY, batch_size=BS, epochs=EPOCHS, verbose=1, validation_data=(testX, testY))
This is my model summary: [model summary output not shown]
But I get the following error:
ValueError: Error when checking input: expected conv3d_1_input to have 5 dimensions, but got array with shape (368, 60, 60, 3)
How can I fix the error? Any help would be appreciated. I know the problem is with the input shape, and the error points to the model.fit step. I think trainX, testX, trainY, testY must be 5-dimensional, but I am not able to do that.
If I understand correctly, you would like to fit your model with 8 images at a time, which is actually called a batch. So when you call model.fit(), set batch_size = 8.
Another point that, I think, confused you is the input shape. If you feed images to the network, the input shape is the height x width of the image plus the number of channels, which in your case is RGB; so set input_shape = (3, 60, 60). Be aware that the network structure does not include the total number of images: the network does not need to know the size of the training set. When you fit the training images, it just takes one batch at a time and trains on it.
Lastly, instead of a 3D convolution layer you need a 2D one. Think of it as a 2D frame that moves over the training image, repeating the movement for each channel. Therefore the frame needs a 2D shape, (x, x). This frame is called a kernel in the documentation.
The following code is just a sample and has not been tested. I hope it helps you understand the structure:
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(3, 60, 60)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(number_of_classes))
model.add(Activation('softmax'))
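Following that advice, the training call might then look like this (a hedged sketch; the optimizer, epoch count and variable names are assumptions, and trainX would need shape (n, 3, 60, 60) to match the channels-first input_shape above):
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(trainX, trainY, batch_size=8, epochs=10, validation_data=(testX, testY))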

Implementing a generator for keras results in worse results

I am doing multi-label image classification on a dataset of around 3000 images. Because this is the limit of my working memory and the dataset will grow, I tried to implement my own generator; I am also parsing the images from an online source. The network reached an accuracy of 25%, where the three labels with the highest scores gave a pretty good representation of the images.
A normal batch would be of shape (32, 64, 64, 3) with labels of shape (32, 57).
My model looks like:
def createModel(shape, classes):
    x = shape[0]
    y = shape[1]
    z = shape[2]
    model = Sequential()
    model.add(Conv2D(32, (2, 2), padding='same', input_shape=(x, y, z)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(32, (2, 1), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(32, (1, 2), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(Conv2D(48, (2, 2), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(Conv2D(80, (2, 2), padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(Flatten())
    model.add(Dense(classes, activation='sigmoid'))
    return model  # the return statement was missing from the posted snippet
where classes is 57 and x, y, z are 64, 64, 3.
My generator looks like:
def generator(data, urls, labels, batch_size):
    counter = 0
    X_train = []
    Y_train = []
    while 1:
        for i in range(len(data)):
            if counter == batch_size:
                yield (np.array(X_train), np.array(Y_train))
                X_train = []
                Y_train = []
                counter = 0
            try:
                ID = data[i][0]
                if random.uniform(0, 1) > 0.5:
                    X_train.append(getImage(64, urls[ID]))
                else:
                    X_train.append(np.flip(getImage(64, urls[ID]), 1))
                Y_train.append(labels[i])
                counter += 1
            except:
                continue
where data is a list of image IDs and labels, urls is a list mapping image IDs to the URL where each image can be found, labels is the labels converted by MultiLabelBinarizer() (.fit_transform), and batch_size is the batch size. The getImage() function returns a np.array(), where 64 gives the shape.
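For reference, the label conversion described here might look like this (a minimal sketch; raw_label_lists is a hypothetical placeholder for the per-image label lists):
from sklearn.preprocessing import MultiLabelBinarizer

mlb = MultiLabelBinarizer()
# e.g. [["red", "outdoor"], ["blue"]] -> binary matrix of shape (n_images, n_labels)
mlb_labels = mlb.fit_transform(raw_label_lists)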
The main calls:
epochs = 60
lr = 1e-6
mlc = model.createModel((64, 64, 3), 57)
opt = Adam(lr=lr, decay=lr / epochs)
trainGenerator = data.generator(structuredData, urls, mlb_labels, 32)
validationGenerator = data.generator(structuredData, urls, mlb_labels, 32)
mlc.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
mlc.fit_generator(trainGenerator, steps_per_epoch=10, epochs=epochs,
                  validation_steps=1, validation_data=validationGenerator)
mlc.save("datagenerator_test.h5")
Furthermore, the network already works and trains if I do not use the generator; with the generator it seems to get a random accuracy between 1 and 3%. I hope this provides enough information.
EDIT: It takes about 90 seconds to prepare one batch of 32 images. Does the training wait for a batch to be ready?
