CNNLSTM2D Implementation - python

There are 2550 images in the train set and 1530 images in the test set. To classify these images into two classes, a hybrid deep learning model including ConvLSTM2D is used, but an error occurs while running the code, as shown below. I was wondering if someone could help me understand the reason for the ERROR. Thank you.
ERROR:
ValueError: Error when checking input: expected conv_lst_m2d_39_input to have 5 dimensions, but got array with shape (32, 64, 64, 3)
# importing libraries
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
import tensorflow as tf
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers.normalization import BatchNormalization
# Data preprocessing
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory(
    'D:\\thesis\\Paper 3\\Feature Extraction\\two_dimension_Feature_extraction\\stft_feature\\Training_set',
    target_size=(64, 64),
    batch_size=32,
    class_mode='binary')
test_set = test_datagen.flow_from_directory(
    'D:\\thesis\\Paper 3\\Feature Extraction\\two_dimension_Feature_extraction\\stft_feature\\Test_set',
    target_size=(64, 64),
    batch_size=32,
    class_mode='binary')
# Initializing the CNN
classifier = Sequential()
classifier.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                          input_shape=(None, 64, 64, 3), padding='same', return_sequences=True))
classifier.add(BatchNormalization())
classifier.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                          padding='same', return_sequences=True))
classifier.add(BatchNormalization())
classifier.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                          padding='same'))
# Full connection
classifier.add(Dense(output_dim=128, activation='relu'))
classifier.add(Dense(output_dim=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Fitting the CNN to the images
history = classifier.fit_generator(training_set,
                                   steps_per_epoch=2550,
                                   epochs=25,
                                   validation_data=test_set,
                                   validation_steps=510)
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'r', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
test_loss, test_acc = classifier.evaluate(test_set)
print('test_acc:', test_acc)
New ERROR (after revising the code as below):
ValueError: Input 0 is incompatible with layer conv_lst_m2d_23: expected ndim=5, found ndim=6
import numpy as np
import glob
from PIL import Image
import tensorflow as tf

images_png = glob.glob("*.png")
images = []
for pic in images_png:
    image = tf.read_file(pic)
    image = tf.image.decode_png(image, channels=3, dtype=tf.uint16)
    image = tf.cast(image, tf.float32)
    image = image / 256.0
    images.append(image)

img_seq_list = []
for i in range(0, len(images) - 5, 5):
    j = i + 5
    img_seq = np.stack(images[i:j], axis=0)
    img_seq_list.append(img_seq)

labels = np.zeros((2, len(img_seq_list) + 1), dtype=int)
labels = np.transpose(labels)
length = len(img_seq_list) / 2
length = int(length)
for i in range(0, 255):
    if i <= 127:
        labels[i][0] = 1
    elif i > 127:
        labels[i][1] = 1
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers.normalization import BatchNormalization

classifier = Sequential()
classifier.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                          input_shape=(5, 64, 64, 3), padding='same', return_sequences=True))
classifier.add(BatchNormalization())
classifier.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                          padding='same', return_sequences=True))
classifier.add(BatchNormalization())
classifier.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                          padding='same'))
# Full connection
classifier.add(Dense(output_dim=128, activation='relu'))
classifier.add(Dense(output_dim=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

train_data = tf.data.Dataset.from_tensor_slices((img_seq_list[0:127], labels[0:127]))
train_data = train_data.shuffle(100).batch(10)
test_data = tf.data.Dataset.from_tensor_slices((img_seq_list[128:254], labels[128:254]))
test_data = train_data.shuffle(100).batch(10)
history = classifier.fit(train_data, validation_data=test_data)
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'r', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
test_loss, test_acc = classifier.evaluate(test_data)
print('test_acc:', test_acc)

I believe the problem is that the convolutional LSTM layer expects a temporal sequence of images, so its input has to have 5 dimensions including the batch dimension: something of shape (B, T, H, W, C). You've defined the input shape (ignoring the batch dimension) to be (None, 64, 64, 3), so you'll need to feed a batch tensor of shape (batch, timesteps, 64, 64, 3).
Also, I believe fit_generator() is deprecated in favor of passing a generator to fit().
EDIT: If you have a sequence of frames from a video stream, you can stack them into an array with one more dimension. You'll have to get the images manually in the correct order from the directories, and then make a data iterator:
images = <ordered list of 3-D numpy arrays>
img_seq = np.stack(images, axis=0)
# Do the above for each sequence of images in the training data to get N sequences
sequences = <list of sequences of images of length N>
labels = <array of labels of length N>
train_data = tf.data.Dataset.from_tensor_slices((sequences, labels))
train_data = train_data.shuffle(1000).batch(batch_size)
# Do similar for the test data
You can then use the tf.data.Dataset in fit():
model.fit(train_data, validation_data=test_data)
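For concreteness, here is a minimal end-to-end sketch of this idea using dummy data (my own illustration, not the asker's code; the sample count, sequence length, and filter sizes are placeholder assumptions). Note that it pools the spatial dimensions before the final Dense layer:
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import ConvLSTM2D, BatchNormalization, GlobalAveragePooling2D, Dense

# Placeholder dimensions, assumed for illustration only
N, T, H, W, C = 16, 5, 64, 64, 3

# Dummy 5-D data: (samples, timesteps, height, width, channels)
sequences = np.random.rand(N, T, H, W, C).astype('float32')
labels = np.random.randint(0, 2, size=(N,)).astype('float32')

model = Sequential([
    ConvLSTM2D(40, (3, 3), input_shape=(T, H, W, C), padding='same', return_sequences=True),
    BatchNormalization(),
    ConvLSTM2D(40, (3, 3), padding='same'),  # last layer returns (H, W, 40) per sample
    GlobalAveragePooling2D(),                # collapse spatial dims before the classifier head
    Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

train_data = tf.data.Dataset.from_tensor_slices((sequences, labels)).shuffle(N).batch(4)
model.fit(train_data, epochs=1)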

Related

Why am I getting a Value error and negative dimension size on my python code (cnn_model)

Error:
File "C:\Users\Ayeh\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\utils\traceback_utils.py", line 70, in error_handler
    raise e.with_traceback(filtered_tb) from None
File "C:\Users\Ayeh\AppData\Local\Programs\Python\Python310\lib\site-packages\tensorflow\python\framework\ops.py", line 1969, in _create_c_op
    raise ValueError(e.message)
ValueError: Exception encountered when calling layer "conv2d_2" (type Conv2D).
Negative dimension size caused by subtracting 3 from 1 for '{{node conv2d_2/Conv2D}} = Conv2D[T=DT_FLOAT, data_format="NHWC", dilations=[1, 1, 1, 1], explicit_paddings=[], padding="VALID", strides=[1, 3, 3, 1], use_cudnn_on_gpu=true](Placeholder, conv2d_2/Conv2D/ReadVariableOp)' with input shapes: [?,1,1,32], [3,3,32,64].
Call arguments received by layer "conv2d_2" (type Conv2D):
    • inputs=tf.Tensor(shape=(None, 1, 1, 32), dtype=float32)
This is my cnn_model:
# Part 1 - Building the CNN
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense, Dropout
from keras import optimizers

# Initializing the CNN
classifier = Sequential()
# Step 1 - Convolution layer
classifier.add(Convolution2D(32, 3, 3, input_shape=(64, 64, 3), activation='relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Adding second convolution layer
classifier.add(Convolution2D(32, 3, 3, activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Adding 3rd convolution layer
classifier.add(Convolution2D(64, 3, 3, activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection
classifier.add(Dense(256, activation='relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(26, activation='softmax'))
# Compiling the CNN
classifier.compile(
    optimizer=optimizers.SGD(lr=0.01),
    loss='categorical_crossentropy',
    metrics=['accuracy'])
# Part 2 - Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory(
    'mydata/training_set',
    target_size=(64, 64),
    batch_size=32,
    class_mode='categorical')
test_set = test_datagen.flow_from_directory(
    'mydata/test_set',
    target_size=(64, 64),
    batch_size=32,
    class_mode='categorical')
model = classifier.fit_generator(
    training_set,
    steps_per_epoch=800,
    epochs=25,
    validation_data=test_set,
    validation_steps=6500)
'''#Saving the model
import h5py
classifier.save('Trained_model.h5')'''
print(model.history.keys())
import matplotlib.pyplot as plt
# summarize history for accuracy
plt.plot(model.history['acc'])
plt.plot(model.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(model.history['loss'])
plt.plot(model.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
If the padding argument is not specified, it defaults to 'valid', which means the spatial dimensions shrink at every convolution. If you work through this model's layers for a 64x64 input, that automatic shrinkage eventually produces negative dimensions. To get the same spatial dimensions after convolution as before, set padding='same' on the Conv2D and pooling layers. Kindly change the model as below:
classifier = Sequential()
# Step 1 - Convolution layer
classifier.add(Convolution2D(32, 3, 3, input_shape=(64, 64, 3), padding='same', activation='relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Adding second convolution layer
classifier.add(Convolution2D(32, 3, 3, activation='relu', padding='same'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Adding 3rd convolution layer
classifier.add(Convolution2D(64, 3, 3, activation='relu', padding='same'))
classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection
classifier.add(Dense(256, activation='relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(2, activation='softmax'))
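To see why this fails without padding='same', note that the error node shows strides=[1, 3, 3, 1]: Convolution2D(32, 3, 3) is parsed by Keras 2 as kernel_size=3 with strides=3. A small sketch (my own illustration, not part of the original answer) traces how 'valid' padding then shrinks the 64x64 input until a 3x3 kernel no longer fits:
def conv_out(n, k, s, padding):
    # Spatial output size of a conv/pool layer (TensorFlow formulas)
    if padding == 'valid':
        return (n - k) // s + 1
    return -(-n // s)  # 'same': ceil(n / s)

n = 64
for name, (k, s) in [('conv1', (3, 3)), ('pool1', (2, 2)), ('conv2', (3, 3)),
                     ('pool2', (2, 2)), ('conv3', (3, 3))]:
    n = conv_out(n, k, s, 'valid')
    print(name, '->', n)
# Prints 21, 10, 3, 1, then 0: by conv3 the input is 1x1,
# so subtracting the 3x3 kernel gives the negative-dimension error.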
I have tried to replicate the error using the COVID X-ray dataset; please refer to the gist here. Thank you!

How to Implement CNN2D+ LSTM Model For Image Classification in Keras?

There are 2550 images in the train set and 1530 images in the test set. To classify these images into two classes, a hybrid deep learning model including CNN2D+LSTM is used, but an error occurs while running the code, as shown below. I was wondering if someone could help me solve it. Thanks in advance.
ERROR:
RuntimeError: You must compile your model before using it
# importing libraries
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import TimeDistributed
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory(
    'D:\\thesis\\Paper 3\\Feature Extraction\\two_dimension_Feature_extraction\\stft_feature\\Training_set',
    target_size=(64, 64),
    batch_size=32,
    class_mode='binary')
test_set = test_datagen.flow_from_directory(
    'D:\\thesis\\Paper 3\\Feature Extraction\\two_dimension_Feature_extraction\\stft_feature\\Test_set',
    target_size=(64, 64),
    batch_size=32,
    class_mode='binary')
# Initializing the CNN
classifier = Sequential()
# Convolution2D: 32 feature detectors with 3*3 dimensions; 64*64 is the input format with 3 channels because the images are colored
classifier.add(TimeDistributed(Convolution2D(32, 3, 3, input_shape=(64, 64, 3), activation='relu')))
# Adding max pooling
classifier.add(TimeDistributed(MaxPooling2D(2, 2)))
# Flattening
classifier.add(TimeDistributed(Flatten()))
classifier.add(TimeDistributed(classifier))
classifier.add(LSTM(units=20, input_shape=(1, 5), return_sequences=True))
classifier.add(LSTM(units=20))
# Full connection
classifier.add(Dense(output_dim=128, activation='relu'))
classifier.add(Dense(output_dim=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Fitting the CNN to the images
history = classifier.fit_generator(training_set,
                                   steps_per_epoch=2550,
                                   epochs=25,
                                   validation_data=test_set,
                                   validation_steps=510)
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'r', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
test_loss, test_acc = classifier.evaluate(test_set)
print('test_acc:', test_acc)
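For reference, here is a minimal sketch of the intended TimeDistributed CNN + LSTM architecture (my own illustration, assuming frames grouped into sequences of a placeholder length T; the flow_from_directory generators above yield single images, so the data would also need regrouping, and the TimeDistributed(classifier) line, which wraps the model in itself, is omitted):
import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, LSTM, TimeDistributed

T = 5  # assumed frames per sequence (placeholder)

model = Sequential([
    # Apply the same CNN to every frame of a (T, 64, 64, 3) input sequence
    TimeDistributed(Conv2D(32, (3, 3), activation='relu'), input_shape=(T, 64, 64, 3)),
    TimeDistributed(MaxPooling2D((2, 2))),
    TimeDistributed(Flatten()),  # -> (T, features), the shape the LSTM expects
    LSTM(20, return_sequences=True),
    LSTM(20),
    Dense(128, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Smoke test on dummy data
x = np.random.rand(8, T, 64, 64, 3).astype('float32')
y = np.random.randint(0, 2, size=(8,)).astype('float32')
model.fit(x, y, epochs=1, batch_size=4)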

With ResNet50 the validation accuracy and loss is not changing

I am trying to do image recognition with ResNet50 in Python (Keras). I tried to do the same task with VGG16, and I got some results that seem okay to me: resultsVGG16. The training and validation accuracy/loss curves improve with each step, so the network must be learning.
However, with ResNet50 the training curves keep improving while the validation curves do not change: resultsResNet
I've used the same code and data both times; only the model is changed.
So what is the reason for ResNet50 learning only on the training data?
My ResNet model looks like this:
model = Sequential()
base_model = ResNet50(include_top=False, weights='imagenet',
                      input_shape=(image_size, image_size, 3))
for layer in base_model.layers[:-8]:
    layer.trainable = False
model.add(base_model)
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(NUM_CLASSES, activation='softmax'))
The VGG16 model is very similar:
model = Sequential()
base_model = VGG16(weights='imagenet', include_top=False,
                   input_shape=(image_size, image_size, 3))
for layer in base_model.layers[:-4]:
    layer.trainable = False
model.add(base_model)
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(NUM_CLASSES, activation='softmax'))
There is no mistake in your model, but this might be an issue with ResNet itself: many issues (1, 2, 3) have already been raised on GitHub and Stack Overflow regarding this pre-trained model.
That said, I found a workaround that worked for me, and hopefully it works for you as well.
The workaround was to replace the data augmentation step,
Train_Datagen = ImageDataGenerator(rescale=1./255, rotation_range=40, width_shift_range=0.2,
                                   height_shift_range=0.2, brightness_range=(0.2, 0.7), shear_range=45.0,
                                   zoom_range=60.0, horizontal_flip=True, vertical_flip=True)
Val_Datagen = ImageDataGenerator(rescale=1./255, rotation_range=40, width_shift_range=0.2,
                                 height_shift_range=0.2, brightness_range=(0.2, 0.7), shear_range=45.0,
                                 zoom_range=60.0, horizontal_flip=True, vertical_flip=True)
with tf.keras.applications.resnet.preprocess_input, as shown below:
Train_Datagen = ImageDataGenerator(dtype='float32', preprocessing_function=tf.keras.applications.resnet.preprocess_input)
Val_Datagen = ImageDataGenerator(dtype='float32', preprocessing_function=tf.keras.applications.resnet.preprocess_input)
By modifying the data augmentation as shown above, my validation accuracy, which had been stuck at 50%, increased gradually up to 97%. The reason might be that ResNet expects specific pre-processing operations (I am not quite sure).
Complete working code, which resulted in more than 95% train and validation accuracy on the Cats vs. Dogs dataset using ResNet50, is shown below:
import tensorflow as tf
from tensorflow.keras.applications import ResNet50
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Sequential

# The convolutional base of the pre-trained model will be added as a layer in this model
Conv_Base = ResNet50(include_top=False, weights='imagenet', input_shape=(150, 150, 3))
for layer in Conv_Base.layers[:-8]:
    layer.trainable = False

model = Sequential()
model.add(Conv_Base)
model.add(Flatten())
model.add(Dense(units=256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=1, activation='sigmoid'))
model.summary()

base_dir = 'Deep_Learning_With_Python_Book/Dogs_Vs_Cats_Small'
if os.path.exists(base_dir):
    train_dir = os.path.join(base_dir, 'train')
    validation_dir = os.path.join(base_dir, 'validation')
    test_dir = os.path.join(base_dir, 'test')
else:
    print("The Folder, {}, doesn't exist".format(base_dir))

batch_size = 20
Train_Datagen = ImageDataGenerator(dtype='float32', preprocessing_function=tf.keras.applications.resnet.preprocess_input)
Val_Datagen = ImageDataGenerator(dtype='float32', preprocessing_function=tf.keras.applications.resnet.preprocess_input)
train_gen = Train_Datagen.flow_from_directory(directory=train_dir, target_size=(150, 150),
                                              batch_size=batch_size, class_mode='binary')
val_gen = Val_Datagen.flow_from_directory(directory=validation_dir, target_size=(150, 150),
                                          batch_size=batch_size, class_mode='binary')

epochs = 15
Number_Of_Training_Images = train_gen.classes.shape[0]
steps_per_epoch = Number_Of_Training_Images / batch_size
model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(train_gen, epochs=epochs,
                    validation_data=val_gen, steps_per_epoch=steps_per_epoch)
import matplotlib.pyplot as plt

train_acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
train_loss = history.history['loss']
val_loss = history.history['val_loss']
No_Of_Epochs = range(epochs)

plt.plot(No_Of_Epochs, train_acc, marker='o', color='blue', markersize=12,
         linewidth=2, label='Training Accuracy')
plt.plot(No_Of_Epochs, val_acc, marker='.', color='red', markersize=12,
         linewidth=2, label='Validation Accuracy')
plt.title('Training Accuracy and Testing Accuracy w.r.t Number of Epochs')
plt.legend()
plt.figure()
plt.plot(No_Of_Epochs, train_loss, marker='o', color='blue', markersize=12,
         linewidth=2, label='Training Loss')
plt.plot(No_Of_Epochs, val_loss, marker='.', color='red', markersize=12,
         linewidth=2, label='Validation Loss')  # was plotting val_acc; use val_loss to match the label
plt.title('Training Loss and Testing Loss w.r.t Number of Epochs')
plt.legend()
plt.show()
Metrics are shown in the graph below.

increase validation accuracy in CNN

Below is my code to classify two classes. The accuracy increases gradually until it reaches about 87%. The problem is that the validation accuracy is stuck between 0.5 and 0.6. I know it is an overfitting problem. I tried to manipulate the number of parameters, but I still get the same problem. Any idea how the model can be improved? Thanks so much.
from keras.models import Sequential
from keras.layers import Conv2D, Dropout, Activation
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
classifier = Sequential()
classifier.add(Conv2D(16, (3, 3), input_shape = (110, 110, 3)))
classifier.add(Activation('relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Conv2D(32,(3,3)))
classifier.add(Activation('relu'))
classifier.add(MaxPooling2D(pool_size =(2,2)))
classifier.add(Conv2D(64,(3,3)))
classifier.add(Activation('relu'))
classifier.add(MaxPooling2D(pool_size =(2,2)))
classifier.add(Flatten())
classifier.add(Dense(64))
classifier.add(Activation('relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(1))
classifier.add(Activation('sigmoid'))
classifier.compile(optimizer = 'rmsprop', loss = 'binary_crossentropy', metrics = ['accuracy'])
train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory('/home/ccc/Downloads/Compressed/CNN/AD/train',
                                                 target_size=(110, 110),
                                                 batch_size=10,
                                                 class_mode='binary')
test_set = test_datagen.flow_from_directory('/home/ccc/Downloads/Compressed/CNN/AD/test',
                                            target_size=(110, 110),
                                            batch_size=6,
                                            class_mode='binary')
hist = classifier.fit_generator(training_set,
                                steps_per_epoch=1160,
                                epochs=50,
                                validation_data=test_set,
                                validation_steps=300)
plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.show()
Add a few more layers. Start with a high learning rate and slowly decrease it. Try different optimizers. I recommend using transfer learning for better validation accuracy.
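To make that suggestion concrete, here is a minimal transfer learning sketch with a decaying learning rate (my own example; the MobileNetV2 base, the 128x128 input size, and the head sizes are placeholder choices, and the generators above would need target_size=(128, 128) to match):
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Dropout
from tensorflow.keras.applications import MobileNetV2

# Frozen pre-trained base (assumed input size; images must be resized to match)
base = MobileNetV2(include_top=False, weights='imagenet', input_shape=(128, 128, 3))
base.trainable = False

model = Sequential([
    base,
    GlobalAveragePooling2D(),
    Dense(64, activation='relu'),
    Dropout(0.5),
    Dense(1, activation='sigmoid'),
])

# Start with a higher learning rate and decay it slowly over training
schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=1e-3, decay_steps=1000, decay_rate=0.9)
model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=schedule),
              loss='binary_crossentropy', metrics=['accuracy'])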

Mobilenet for keras

Following is the architecture for my model.
# %%
# Defining the model
input_shape = img_data[0].shape
model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same', input_shape=input_shape))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.75))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
# model.add(Convolution2D(64, 3, 3))
# model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.75))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.75))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
# sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=["accuracy"])
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=["accuracy"])
The accuracy is a bit low, so I want to transform the architecture to MobileNet. Is there any Keras-based implementation to classify images using MobileNet?
Maybe this code snippet will help you:
from keras.applications.mobilenet import MobileNet
from keras.applications.mobilenetv2 import MobileNetV2
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras import Sequential
from keras.layers import Dense
from keras.optimizers import Adam, RMSprop, SGD
import keras
from tensorflow import confusion_matrix
from matplotlib import pyplot as plt
import config
import numpy as np

train_path = 'data/train'
val_batch = 'data/val'
test_batch = 'data/test'
train_batches = ImageDataGenerator(preprocessing_function=keras.applications.mobilenet.preprocess_input).flow_from_directory(
    train_path, target_size=(config.IMAGE_SIZE, config.IMAGE_SIZE), class_mode='categorical', batch_size=20)
val_batches = ImageDataGenerator(preprocessing_function=keras.applications.mobilenet.preprocess_input).flow_from_directory(
    val_batch, target_size=(config.IMAGE_SIZE, config.IMAGE_SIZE), class_mode='categorical', batch_size=20)

def prepare_image(file):
    img = image.load_img(file, target_size=(config.IMAGE_SIZE, config.IMAGE_SIZE))
    img_array = image.img_to_array(img)
    img_expanded_dims = np.expand_dims(img_array, axis=0)
    return keras.applications.mobilenet.preprocess_input(img_expanded_dims)

mobilenet = MobileNetV2()
# x = mobilenet.layers[-6].output
x = mobilenet.layers[-2].output
predictions = Dense(8, activation='softmax')(x)
from keras import Model
model = Model(inputs=mobilenet.input, outputs=predictions)
print(model.summary())
# for layer in model.layers[:-5]:
#     layer.trainable = False
# for layer in model.layers[:-1]:
#     layer.trainable = False
print(model.summary())
# exit(0)
model.compile(SGD(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit_generator(train_batches, steps_per_epoch=10,
                              validation_data=val_batches, validation_steps=10, epochs=300, verbose=2)

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'b', label='Training acc')
plt.plot(epochs, val_acc, 'r', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()

# Get the ground truth from the generator
ground_truth = train_batches.classes
# Get the label-to-class mapping from the generator
label2index = train_batches.class_indices
# Get the mapping from class index to class label
idx2label = dict((v, k) for k, v in label2index.items())
print(idx2label)

# _, val_labels = next(val_batches)
# predictions = model.predict_generator(val_batches, steps=1, verbose=0)
# cm = confusion_matrix(val_batches, np.round(predictions[:, 0]))
# cm_plot_labels = []
# for k, v in label2index.items():
#     cm_plot_labels.append(v)
# print(cm)

# Serialize model to JSON
model_json = model.to_json()
with open("mobilenet.json", "w") as json_file:
    json_file.write(model_json)
from keras.models import save_model
save_model(model, 'mobilenet.h5')

import tensorflow as tf
converter = tf.lite.TocoConverter.from_keras_model_file("mobilenet.h5")
tflite_model = converter.convert()
open("model/mobilenet.tflite", "wb").write(tflite_model)
Keras has a set of pretrained models for image classification purposes.
You can check the list and the usage here.
You can also copy the implementation of the architecture from the GitHub repository; here is the link.
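For instance, classifying an image with one of those pretrained models takes only a few lines (a minimal sketch; the MobileNet choice and the image path are placeholders):
import numpy as np
from tensorflow.keras.applications import MobileNet
from tensorflow.keras.applications.mobilenet import preprocess_input, decode_predictions
from tensorflow.keras.preprocessing import image

# Pretrained ImageNet classifier, ready to use out of the box
model = MobileNet(weights='imagenet')

img = image.load_img('example.jpg', target_size=(224, 224))  # placeholder path
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])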
