Keras multi-class model with wrong dimensions - python

New to Keras, I am trying to reimplement the following binary image classification example from: https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html
It works for binary classification for me.
Rebuilding it for 3-class classification, I get the following dimension-mismatch error:
60 epochs=50,
61 validation_data=validation_generator,
---> 62 validation_steps=250 // batch_size)
ValueError: Error when checking target: expected activation_50 to have shape (None, 1) but got array with shape (16, 3)
This is my current implementation:
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
K.set_image_dim_ordering('th')
batch_size = 16
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1./255)
# this is a generator that will read pictures found in
# subfolders of 'data/train', and indefinitely generate
# batches of augmented image data
train_generator = train_datagen.flow_from_directory(
'F://train_data//', # this is the target directory
target_size=(150, 150), # all images will be resized to 150x150
batch_size=batch_size,
class_mode='categorical') # one-hot labels for the 3 classes
# this is a similar generator, for validation data
validation_generator = test_datagen.flow_from_directory(
'F://validation_data//',
target_size=(150, 150),
batch_size=batch_size,
class_mode='categorical')
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(3, 150, 150)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), data_format="channels_first"))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), data_format="channels_first"))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), data_format="channels_first"))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('softmax')) # instead of sigmoid
model.compile(loss='mean_squared_error',
optimizer='adam',
metrics=['accuracy'])
# another loss: sparse_categorical_crossentropy
model.fit_generator(
train_generator,
steps_per_epoch=1800 // batch_size,
epochs=50,
validation_data=validation_generator,
validation_steps=250 // batch_size)
So far I have changed the activation function of the output layer from sigmoid to softmax and changed class_mode from binary to categorical, but I can't seem to find the problem.
Also, I am aware of similar questions on StackOverflow:
Multi-Output Multi-Class Keras Model
Train multi-class image classifier in Keras
Multi-class classification using keras
But none of the solutions helped me.

You need to change the final Dense layer to model.add(Dense(3)). Softmax activation expects the units in the Dense layer to match the number of classes.
Also, if you are going to use loss='sparse_categorical_crossentropy', remember to change class_mode into 'sparse'. Your current setting, class_mode='categorical', should be used with loss='categorical_crossentropy'.
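Applied to the code above, a minimal sketch of the corrected head, keeping class_mode='categorical' so that categorical_crossentropy is the matching loss:
model.add(Dense(3))               # one unit per class
model.add(Activation('softmax'))  # class probabilities that sum to 1
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
With the sparse variant, the Dense(3) softmax head stays the same; you would pass class_mode='sparse' to the generators and compile with loss='sparse_categorical_crossentropy' instead.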

Related

Keras multiclass images classification and prediction

I am doing image classification with ImageDataGenerator. My data has this structure:
Train
    101
    102
    103
    104
Test
    101
    102
    103
    104
So, if I understood correctly, ImageDataGenerator automatically does what is needed for labeling.
I train the model, and I get some kind of accuracy. Now I want to do the prediction.
- model.predict
- model.predict_proba
- model.predict_classes
All of these give me the same value. Can you explain, or point me to a reference (I cannot find anything covering my problem), how I should proceed, or tell me whether I did something terrible in the code? The biggest problem is that I don't understand how the output can differ for 4 different classes. predict_classes gives me the output [[1]]; shouldn't it give me the predicted class?
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, MaxPool2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.regularizers import l1, l2, l1_l2
model = Sequential()
model.add(Conv2D(60, (3, 3), input_shape=(480, 640,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(60, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(100, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(100, activation='relu', activity_regularizer=l1(0.001)))
#model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('softmax'))
model.compile(loss='binary_crossentropy',
optimizer='Adam',
metrics=['accuracy'])
batch_size = 32
# augmentation configuration for train
train_datagen = ImageDataGenerator(
rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=False,
vertical_flip=True,
fill_mode = 'nearest')
# augmentation configuration for testing, only rescale
test_datagen = ImageDataGenerator(rescale=1./255)
# reading pictures and generating batches of augmented image data
train_generator = train_datagen.flow_from_directory(
'/media/data/working_dir/categories/readytotest/train',
target_size=(480, 640),
batch_size=batch_size,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'/media/data/working_dir/categories/readytotest/test',
target_size=(480, 640),
batch_size=batch_size,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=800 // batch_size,
epochs=15,
validation_data=validation_generator,
validation_steps=800 // batch_size)
Your model and generators are set up for binary classification, not multi-class. First, you need to fix the model's last layer so its output size matches the number of classes, with a matching loss such as categorical_crossentropy. Second, you need to fix the generators for multi-class use.
(...)
model.add(Dense(CLS_SZ))
model.add(Activation('softmax'))
(...)
# With one subfolder per class, class_mode='categorical' makes the
# generators yield one-hot labels (class_mode=None would yield no labels at all)
train_generator = train_datagen.flow_from_directory(
'/media/data/working_dir/categories/readytotest/train',
target_size=(480, 640),
batch_size=batch_size,
class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
'/media/data/working_dir/categories/readytotest/test',
target_size=(480, 640),
batch_size=batch_size,
class_mode='categorical')
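With class_mode='categorical' the generators yield one-hot labels, so the model should also be compiled with loss='categorical_crossentropy' rather than binary_crossentropy. Once the last layer is a CLS_SZ-way softmax, the predicted class is the argmax of the probability vector; a minimal sketch, where x_batch is a placeholder for a batch of preprocessed images:
import numpy as np

probs = model.predict(x_batch)           # shape (n_samples, CLS_SZ); each row sums to 1
pred_classes = np.argmax(probs, axis=1)  # integer class index per sample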

Keras.predict always gives "1." as the output

I trained a binary classifier to distinguish clear MNIST images from blurry ones. All images are 28*28*1 grayscale digits, and I have 40000 for training, 10000 for validating and 8000 for testing. My code looks like:
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
import cv2
import numpy as np
import glob
from PIL import Image
img_width, img_height = 28, 28  # all MNIST images are of size (28*28)
train_data_dir = '/Binary Classifier/data/train'  # train directory generated by train_cla
validation_data_dir = '/Binary Classifier/data/val'  # validation directory generated by val_cla
train_samples = 40000
validation_samples = 10000
epochs = 20
batch_size = 16
if K.image_data_format() == 'channels_first':
    input_shape = (1, img_width, img_height)
else:
    input_shape = (img_width, img_height, 1)
#build a sequential model to train data
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# train data generator
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# validation data generator
val_datagen = ImageDataGenerator(rescale=1. / 255)
# train generator
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary', color_mode='grayscale')
# validation generator
validation_generator = val_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary', color_mode='grayscale')
# fit the generator to train and validate the model
model.fit_generator(
train_generator,
steps_per_epoch=train_samples // batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=validation_samples // batch_size)
# model.save_weights('output.h5')  # save the output as an HDF5 file
filelist = glob.glob('/Binary Classifier/data/image_data/*.png')
x = np.array([np.array(Image.open(fname)) for fname in filelist])
x = np.expand_dims(x, axis=3)
ones = model.predict(x)
But the predictions in ones are all [1.], while the training accuracy is actually really high (almost perfect). Does anyone know why?
Edit: I think I may get more help if I can show my image data. Basically, each MNIST image in the directory is either clear or blurry. All are (28*28*1) grayscale images in .png format. There are 40000 digits in '/Binary Classifier/data/train' for training, 10000 digits in '/Binary Classifier/data/val' for validation and 58000 digits in '/Binary Classifier/data/image_data/' for testing.
Some suggestions:
Pull data directly from one of your generators and test on that (see the sketch after this list). Treat the generator like you would a list in a for loop to get image/label pairs out. This should sort out any differences in the way you are obtaining data and its formatting (e.g. channel order).
Check how many examples you have in each subdirectory of train/ and val/.
Change your metric to binary_accuracy since you are posing the problem as a binary classification problem (network only has one output).
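For the first suggestion, a minimal sketch, assuming the generators defined above: pulling one batch straight from the validation generator guarantees that rescaling, grayscale conversion and channel order match training exactly.
x_batch, y_batch = next(validation_generator)  # images already rescaled to [0, 1]
preds = model.predict(x_batch)                 # sigmoid outputs in (0, 1)
for p, y in zip(preds[:5], y_batch[:5]):
    print(p[0], y)                             # predicted probability vs. true label
If these predictions look sensible while predictions on your hand-loaded PNG array do not, the problem is in how the test images are loaded (note that the x built from glob/PIL above is never rescaled by 1/255).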

Keras training model with images

This is my first go at training a model on a dataset; here is the data from a pandas DataFrame:
In [16]: exists.head()
Out[16]:
id url \
1 0a58358a2afd3e4e http://lh6.ggpht.com/-igpT6wu0mIA/ROV8HnUuABI/...
2 6b2bb500b6a38aa0 http://lh6.ggpht.com/-vKr5G5MEusk/SR6r6SJi6mI/...
3 b399f09dee9c3c67 https://lh3.googleusercontent.com/-LOW2cjAqubA...
4 19ace29d77a5be66 https://lh5.googleusercontent.com/-tnmSXwQcWL8...
5 2c9c54b62f0a6a37 https://lh5.googleusercontent.com/-mEaSECO7D-4...
landmark_id exists filename
1 6651 True training_images/0a58358a2afd3e4e.jpg
2 11284 True training_images/6b2bb500b6a38aa0.jpg
3 8429 True training_images/b399f09dee9c3c67.jpg
4 6231 True training_images/19ace29d77a5be66.jpg
5 10400 True training_images/2c9c54b62f0a6a37.jpg
It shows the training image path in filename and the class label in landmark_id.
This is how I've written the model to train it:
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(3, 150, 150)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(5))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5, verbose=0, mode='auto')
checkpointer = ModelCheckpoint(filepath="best_weights.hdf5", verbose=0, save_best_only=True) # save best model
# callbacks, epochs and verbose are fit_generator arguments, not compile arguments
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
batch_size = 16
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1./255)
# this is a generator that will read pictures found in
# subfolders of 'data/train', and indefinitely generate
# batches of augmented image data
train_generator = train_datagen.flow_from_directory(
'training_images', # this is the target directory
target_size=(150, 150), # all images will be resized to 150x150
batch_size=batch_size,
class_mode='binary') # since we use binary_crossentropy loss, we need binary labels
# this is a similar generator, for validation data
validation_generator = test_datagen.flow_from_directory(
'test_images',
target_size=(150, 150),
batch_size=batch_size,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000 // batch_size,
epochs=50,
validation_data=validation_generator,
validation_steps=800 // batch_size,
callbacks=[monitor, checkpointer])
model.load_weights('best_weights.hdf5') # load weights from best model
model.save('last_model.h5')
I don't know how I'm supposed to attach the labels to the images while training, i.e. while it trains and loops through the images in the training_images folder.
Samuel,
Your fit_generator call is getting the training labels from the flow_from_directory method, which uses the folder structure to determine the training categories. Since your class_mode is binary and your loss is binary_crossentropy, I'm assuming that you are doing a Hot Dog / Not Hot Dog type of classification where you just want a single probability value.
Check your training data folder and look at how the data is organized: flow_from_directory expects one subfolder per class and derives the labels from the subfolder names.
You seem to be hinting that you actually want multiple classes (e.g., car, boat, train). If so, create one folder per class under the train and validation directories and put the images in the respective folders. You will also need to change several things about your model: the loss, the output layer size, and the output layer activation will change accordingly, as sketched below.
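For example, a sketch of what a three-class setup could look like (the class names here are hypothetical):
# Hypothetical layout: one subfolder per class.
#   training_images/car/...
#   training_images/boat/...
#   training_images/train/...
model.add(Dense(3))               # one unit per class, instead of Dense(1)
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
train_generator = train_datagen.flow_from_directory(
    'training_images',
    target_size=(150, 150),
    batch_size=batch_size,
    class_mode='categorical')     # one-hot labels derived from folder names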

How to check what features are extracted while training and testing a CNN model for image classification?

I'm using CNN for training and testing images of seeds. I want to know:
What features are getting extracted at every layer?
Is there any way to represent it in a graphical or image format?
How do I define my classifier to extract only specific features?
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
# dimensions of our images.
img_width, img_height = 150, 150
train_data_dir = 'Train_Walnut_Seed/train'
validation_data_dir = 'Train_Walnut_Seed/validation'
nb_train_samples = 70
nb_validation_samples = 9
epochs = 50
batch_size = 16
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size)
model.save('first_try_walnut.h5')
The above code trains the classifier using a CNN. How do I visually represent the output at each layer while training?
Also, how do I export my trained model to a protocol buffer (.pb) file so I can use it in my Android project?
I believe the best way, or at least the best way I know of, to extract useful features would be to use an autoencoder.
Check out this article from the Keras blog.
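A minimal convolutional autoencoder sketch in the spirit of that article (the layer sizes are placeholders, not tuned for seed images); after training, the encoder half yields compressed feature maps you can inspect:
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D

inp = Input(shape=(150, 150, 3))
x = Conv2D(16, (3, 3), activation='relu', padding='same')(inp)
x = MaxPooling2D((2, 2))(x)
encoded = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(encoded)
decoded = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(x)

autoencoder = Model(inp, decoded)
encoder = Model(inp, encoded)  # reuse this half to extract features
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
# autoencoder.fit(x_train, x_train, ...)  # the input is also the target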
Cheers,
Gabriel
I know this probably isn't an issue anymore, but I just thought I'd add this in case it's useful to someone else. As the features output by a CNN aren't really human-readable, they are difficult to inspect. One way is to use t-SNE, which gives a visual indication of which embedded representations of the images are close to each other. Another way is a 'heat map', which shows in more detail which parts of an image are activating parts of the CNN. This post has a nice explanation of some of these techniques: http://cs231n.github.io/understanding-cnn/
Getting a classifier to focus on certain features is difficult: either you need to change the network architecture, or use image pre-processing to accentuate the features you want the network to focus on. I'm afraid I can't really give more details on that.
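For the per-layer visualization part of the question, a minimal sketch of grabbing intermediate activations to plot, assuming the Sequential model defined above (channels_last) and a single preprocessed image img of shape (1, 150, 150, 3):
from keras.models import Model

conv_outputs = [layer.output for layer in model.layers if 'conv' in layer.name]
activation_model = Model(inputs=model.input, outputs=conv_outputs)
activations = activation_model.predict(img)  # one array per conv layer
for act in activations:
    print(act.shape)  # e.g. (1, 148, 148, 32): 32 feature maps you can plot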

Keras CNN dimension problems

I am trying to build a CNN using Keras for an image segmentation task, based on this article. Because my dataset is small, I wanted to use Keras ImageDataGenerator and feed it to fit_generator(). So, I followed the example on the Keras website. But, since zipping the image and mask generators didn't work, I followed this answer and created my own generator.
My input data is of size (701,256,1) and my problem is binary (foreground, background). For each image I have a label of the same shape.
Now, I am facing a dimensionality problem. This was also mentioned in the answer, but I am unsure of how to solve it.
The error:
ValueError: Error when checking target: expected dense_3 to have 2 dimensions, but got array with shape (2, 704, 256, 1)
I am pasting the entire code I have here:
import numpy
import pygpu
import theano
import keras
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, Reshape
from keras.layers import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras import backend as K
def superGenerator(image_gen, label_gen):
    while True:
        x = image_gen.next()
        y = label_gen.next()
        yield x[0], y[0]
img_height = 704
img_width = 256
train_data_dir = 'Dataset/Train/Images'
train_label_dir = 'Dataset/Train/Labels'
validation_data_dir = 'Dataset/Validation/Images'
validation_label_dir = 'Dataset/Validation/Labels'
n_train_samples = 1000
n_validation_samples = 500
epochs = 50
batch_size = 2
input_shape = (img_height, img_width,1)
target_shape = (img_height, img_width)
model = Sequential()
model.add(Conv2D(80,(28,28), input_shape=input_shape))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
model.add(Conv2D(96,(18,18)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
model.add(Conv2D(128,(13,13)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(160,(8,8)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(2, activation='softmax'))
model.summary()
model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy'])
data_gen_args = dict(
rescale=1./255,
horizontal_flip=True,
vertical_flip=True
)
train_datagen = ImageDataGenerator(**data_gen_args)
train_label_datagen = ImageDataGenerator(**data_gen_args)
test_datagen = ImageDataGenerator(**data_gen_args)
test_label_datagen = ImageDataGenerator(**data_gen_args)
seed = 1
train_image_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=target_shape,
color_mode='grayscale',
batch_size=batch_size,
class_mode = 'binary',
seed=seed)
train_label_generator = train_label_datagen.flow_from_directory(
train_label_dir,
target_size=target_shape,
color_mode='grayscale',
batch_size=batch_size,
class_mode = 'binary',
seed=seed)
validation_image_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=target_shape,
color_mode='grayscale',
batch_size=batch_size,
class_mode = 'binary',
seed=seed)
validation_label_generator = test_label_datagen.flow_from_directory(
validation_label_dir,
target_size=target_shape,
color_mode='grayscale',
batch_size=batch_size,
class_mode = 'binary',
seed=seed)
train_generator = superGenerator(train_image_generator, train_label_generator)
test_generator = superGenerator(validation_image_generator, validation_label_generator)
model.fit_generator(
train_generator,
steps_per_epoch= n_train_samples // batch_size,
epochs=50,
validation_data=test_generator,
validation_steps=n_validation_samples // batch_size)
model.save_weights('first_try.h5')
I am new to Keras (and CNNs), so any help would be very much appreciated.
OK, I did some rubber-duck debugging and read a few more articles. Of course the dimensionality was the problem. This simple answer did it for me.
My labels have the same shape as the input image, so the output of the model should have that shape as well. I used Conv2DTranspose to solve the issue.
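A rough sketch of the idea only (filter counts and kernel sizes are placeholders, not my exact network): strided convolutions downsample, and Conv2DTranspose layers upsample back, so the output matches the (704, 256, 1) label shape instead of ending in Flatten/Dense.
from keras.models import Sequential
from keras.layers import Conv2D, Conv2DTranspose

seg = Sequential()
seg.add(Conv2D(32, (3, 3), strides=(2, 2), padding='same',
               activation='relu', input_shape=(704, 256, 1)))
seg.add(Conv2D(64, (3, 3), strides=(2, 2), padding='same', activation='relu'))
seg.add(Conv2DTranspose(32, (3, 3), strides=(2, 2), padding='same',
                        activation='relu'))
seg.add(Conv2DTranspose(1, (3, 3), strides=(2, 2), padding='same',
                        activation='sigmoid'))  # per-pixel foreground probability
seg.compile(loss='binary_crossentropy', optimizer='nadam')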
