There are 2550 images in the training set and 1530 in the test set. To classify these images into two classes, a hybrid deep learning model (CNN2D + LSTM) is used, but the error shown below occurs when running the code. I was wondering if someone could help me solve it. Thanks in advance.
ERROR:
RuntimeError: You must compile your model before using it
# importing libraries
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import TimeDistributed
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory(
'D:\\thesis\\Paper 3\\Feature Extraction\\two_dimension_Feature_extraction\\stft_feature\\Training_set',
target_size=(64, 64),
batch_size=32,
class_mode='binary')
test_set = test_datagen.flow_from_directory(
'D:\\thesis\\Paper 3\\Feature Extraction\\two_dimension_Feature_extraction\\stft_feature\\Test_set',
target_size=(64, 64),
batch_size=32,
class_mode='binary')
#initializing the CNN
classifier = Sequential()
#convolution2D
classifier.add(TimeDistributed(Convolution2D(32,3,3, input_shape = (64,64,3), activation = 'relu'))) # 32 feature detectors of size 3x3; input is 64x64 with 3 channels because the images are in color
#adding maxpooling
classifier.add(TimeDistributed(MaxPooling2D(2, 2)))
#Flattening
classifier.add(TimeDistributed(Flatten()))
classifier.add(TimeDistributed(classifier))
classifier.add(LSTM(units= 20, input_shape = (1,5), return_sequences = True ))
classifier.add(LSTM(units = 20))
#Full Connection
classifier.add(Dense(output_dim = 128, activation = 'relu'))
classifier.add(Dense(output_dim = 1, activation = 'sigmoid'))
#compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
#Fitting the CNN to the images
history = classifier.fit_generator(training_set,
steps_per_epoch=2550,
epochs=25,
validation_data= test_set,
validation_steps=510)
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'r', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.plot()
plt.legend()
plt.show()
test_loss, test_acc = classifier.evaluate(test_set)
print('test_acc:', test_acc)
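For reference, a minimal sketch of how a TimeDistributed Conv2D + LSTM stack of the kind described above is usually wired, assuming hypothetical 5-frame sequences of 64x64 RGB images (note that the flow_from_directory generators above yield single images, not sequences, so the data pipeline would also need to change):
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, LSTM, TimeDistributed

model = Sequential()
# input_shape covers the whole sequence: (timesteps, height, width, channels)
model.add(TimeDistributed(Conv2D(32, (3, 3), activation='relu'),
                          input_shape=(5, 64, 64, 3)))
model.add(TimeDistributed(MaxPooling2D((2, 2))))
model.add(TimeDistributed(Flatten()))
model.add(LSTM(20))  # consumes the per-frame feature vectors as a sequence
model.add(Dense(128, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])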
There are 2550 images in the training set and 1530 in the test set. To classify these images into two classes, a hybrid deep learning model (ConvLSTM2D) is used, but the error shown below occurs when running the code. I was wondering if someone could help me understand the reason for the ERROR. Thank you.
ERROR:
when checking input: expected conv_lst_m2d_39_input to have 5 dimensions, but got array with shape (32, 64, 64, 3)
# importing libraries
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
import tensorflow as tf
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers.normalization import BatchNormalization
# Data Preprocessing
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory(
'D:\\thesis\\Paper 3\\Feature Extraction\\two_dimension_Feature_extraction\\stft_feature\\Training_set',
target_size=(64, 64),
batch_size=32,
class_mode='binary')
test_set = test_datagen.flow_from_directory(
'D:\\thesis\\Paper 3\\Feature Extraction\\two_dimension_Feature_extraction\\stft_feature\\Test_set',
target_size=(64, 64),
batch_size=32,
class_mode='binary')
#initializing the CNN
classifier = Sequential()
classifier.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),input_shape=(None, 64, 64, 3), padding='same', return_sequences=True))
classifier.add(BatchNormalization())
classifier.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
padding='same', return_sequences=True))
classifier.add(BatchNormalization())
classifier.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
padding='same'))
#Full Connection
classifier.add(Dense(output_dim = 128, activation = 'relu'))
classifier.add(Dense(output_dim = 1, activation = 'sigmoid'))
#compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
#Fitting the CNN to the images
history = classifier.fit_generator(training_set,
steps_per_epoch=2550,
epochs=25,
validation_data= test_set,
validation_steps=510)
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'r', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.plot()
plt.legend()
plt.show()
test_loss, test_acc = classifier.evaluate(test_set)
print('test_acc:', test_acc)
ERROR:
ValueError: Input 0 is incompatible with layer conv_lst_m2d_23: expected ndim=5, found ndim=6
Updated code, which produces the new ERROR above:
import numpy as np
import glob
from PIL import Image
import tensorflow as tf
images_png = glob.glob("*.png")
images = []
for pic in images_png:
    image = tf.read_file(pic)
    image = tf.image.decode_png(image, channels=3, dtype=tf.uint16)
    image = tf.cast(image, tf.float32)
    image = image / 256.0
    images.append(image)
img_seq_list = []
for i in range(0, len(images) - 5, 5):
    j = i + 5
    img_seq = np.stack(images[i:j], axis=0)
    img_seq_list.append(img_seq)
labels = np.zeros((2,len(img_seq_list) + 1), dtype=int)
labels = np.transpose(labels)
length = len(img_seq_list)/2
length = int(length)
for i in range(0, 255):
    if i <= 127:
        labels[i][0] = 1
    elif i > 127:
        labels[i][1] = 1
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers.normalization import BatchNormalization
classifier = Sequential()
classifier.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),input_shape=(5, 64, 64, 3), padding='same', return_sequences=True))
classifier.add(BatchNormalization())
classifier.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
padding='same', return_sequences=True))
classifier.add(BatchNormalization())
classifier.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
padding='same'))
#Full Connection
classifier.add(Dense(output_dim = 128, activation = 'relu'))
classifier.add(Dense(output_dim = 1, activation = 'sigmoid'))
#compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
train_data = tf.data.Dataset.from_tensor_slices((img_seq_list[0:127], labels[0:127]))
train_data = train_data.shuffle(100).batch(10)
test_data = tf.data.Dataset.from_tensor_slices((img_seq_list[128:254], labels[128:254]))
test_data = test_data.shuffle(100).batch(10)
history = classifier.fit(train_data, validation_data=test_data)
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'r', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.plot()
plt.legend()
plt.show()
test_loss, test_acc = classifier.evaluate(test_data)
print('test_acc:', test_acc)
I believe the problem is that the convolutional LSTM layer expects a temporal sequence of images, so its input has to have 5 dimensions including the batch dimension: something of shape (B, T, H, W, C). You've defined the input shape (ignoring the batch dimension) as (None, 64, 64, 3), so you'll need to feed a batch tensor of shape (batch, timesteps, 64, 64, 3).
Also, I believe fit_generator() is deprecated in favor of passing a generator to fit().
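For example, a sketch of the equivalent fit() call, reusing the generators defined in the question (len() of a directory iterator gives the number of batches per epoch):
history = classifier.fit(training_set,
                         steps_per_epoch=len(training_set),
                         epochs=25,
                         validation_data=test_set,
                         validation_steps=len(test_set))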
EDIT: If you have a sequence of frames from a video stream, you can stack them into an array with one more dimension. You'll have to get the images manually in the correct order from the directories, and then make a data iterator:
images = <ordered list of 3-D numpy arrays>
img_seq = np.stack(images, axis=0)
# Do the above for each sequence of images in the training data to get N sequences
sequences = <list of sequences of images of length N>
labels = <array of labels of length N>
train_data = tf.data.Dataset.from_tensor_slices((sequences, labels))
train_data = train_data.shuffle(1000).batch(batch_size)
# Do similar for test data
You can then use the tf.data.Dataset in fit():
model.fit(train_data, validation_data=test_data)
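For concreteness, a minimal runnable sketch of the idea above, assuming TF 2.x eager execution and hypothetical random data standing in for 5-frame sequences of 64x64 RGB images:
import numpy as np
import tensorflow as tf

# Hypothetical placeholder data: 20 sequences of 5 frames each,
# with one binary label per sequence.
num_sequences, timesteps = 20, 5
sequences = np.random.rand(num_sequences, timesteps, 64, 64, 3).astype('float32')
labels = np.random.randint(0, 2, size=(num_sequences,))

# Each batch then has shape (B, T, H, W, C), which is what ConvLSTM2D expects.
train_data = tf.data.Dataset.from_tensor_slices((sequences, labels))
train_data = train_data.shuffle(100).batch(4)

for batch_x, batch_y in train_data.take(1):
    print(batch_x.shape)  # (4, 5, 64, 64, 3)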
Training and validation curves have spikes in loss and accuracy when training VGG16. I am using a transfer learning technique and have changed the classifier for a binary classification problem of classifying genders. Can someone suggest why I am getting such spikes and how I can reduce them?
The code is as follows:
from keras.layers import Input, Lambda, Dense, Flatten, Dropout
from keras.models import Model
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
# re-size all the images to this
IMAGE_SIZE = [224, 224]
train_path = 'E:/decompressed_images/data_set/train'
valid_path = 'E:/decompressed_images/data_set/validation'
# add preprocessing layer to the front of VGG
vgg = VGG16(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)
# don't train existing weights
for layer in vgg.layers:
    layer.trainable = False
# useful for getting number of classes
folders = glob('E:/decompressed_images/data_set/train*')
x = Flatten()(vgg.output)
# x = Dense(1000, activation='relu')(x)
prediction = Dense(len(folders), activation='sigmoid')(x)
# create a model object
model = Model(inputs=vgg.input, outputs=prediction)
# view the structure of the model
model.summary()
# tell the model what cost and optimization method to use
model.compile(
loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy']
)
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
horizontal_flip = True,
vertical_flip = True,
width_shift_range = 0.1,
height_shift_range = 0.1,
zoom_range = 0.1,
rotation_range = 10)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('E:/Ullu/new_trial__/balanced_dataset/train',
target_size = (224, 224),
batch_size = 64,
class_mode = 'binary')
test_set = test_datagen.flow_from_directory('E:/Ullu/new_trial__/balanced_dataset/test',
target_size = (224, 224),
batch_size = 64,
class_mode = 'binary')
r = model.fit_generator(
training_set,
validation_data=test_set,
epochs=100,
steps_per_epoch=len(training_set),
validation_steps=len(test_set)
)
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()
plt.savefig('E:/Model_128_30/LossVal_loss.png')  # save before show(), which clears the figure
plt.show()
# accuracies
plt.plot(r.history['accuracy'], label='train acc')
plt.plot(r.history['val_accuracy'], label='val acc')
plt.legend()
plt.savefig('E:/Model_128_30/AccVal_acc.png')
plt.show()
import tensorflow as tf
from keras.models import load_model
model.save('E:/Model_128_30/128_30_wt.h5')
(Image: high and fluctuating training and validation accuracy.)
(Image: high and fluctuating training and validation loss.)
I tried using a dropout layer (0.5) before the final layer, but my accuracy and loss for training and validation stayed the same. Could anyone please suggest where I am going wrong? Thanks.
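For reference, a sketch of the dropout placement described above, assuming the same VGG16 base (vgg) and classifier head from the question's code:
from keras.layers import Flatten, Dense, Dropout
from keras.models import Model

# Hypothetical variant of the head with Dropout before the final layer;
# vgg and folders come from the question's code above.
x = Flatten()(vgg.output)
x = Dropout(0.5)(x)
prediction = Dense(len(folders), activation='sigmoid')(x)
model = Model(inputs=vgg.input, outputs=prediction)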
I am trying to do image recognition with ResNet50 in Python (Keras). I tried the same task with VGG16 and got reasonable results (image: resultsVGG16): the training and validation accuracy/loss improve with each epoch, so the network must be learning.
However, with ResNet50 the training metrics keep improving while the validation metrics do not change (image: resultsResNet).
I used the same code and data both times; only the model changed. So why does ResNet50 learn only on the training data?
My ResNet model looks like this:
model = Sequential()
base_model = ResNet50(include_top=False, weights='imagenet', input_shape=(image_size, image_size, 3))
for layer in base_model.layers[:-8]:
    layer.trainable = False
model.add(base_model)
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(NUM_CLASSES, activation='softmax'))
The VGG is very similar:
model = Sequential()
base_model = VGG16(weights='imagenet', include_top=False, input_shape=(image_size, image_size, 3))
for layer in base_model.layers[:-4]:
    layer.trainable = False
model.add(base_model)
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(NUM_CLASSES, activation='softmax'))
There is no mistake in your model, but this might be an issue with ResNet itself: several issues (1, 2, 3) have already been raised on GitHub and Stack Overflow regarding this pretrained model.
Having said that, I found a workaround, which worked for me and hopefully works for you as well.
The workaround was to replace the data augmentation step,
Train_Datagen = ImageDataGenerator(rescale=1./255, rotation_range=40, width_shift_range=0.2,
height_shift_range=0.2, brightness_range=(0.2, 0.7), shear_range=45.0, zoom_range=60.0,
horizontal_flip=True, vertical_flip=True)
Val_Datagen = ImageDataGenerator(rescale=1./255, rotation_range=40, width_shift_range=0.2,
height_shift_range=0.2, brightness_range=(0.2, 0.7), shear_range=45.0, zoom_range=60.0,
horizontal_flip=True, vertical_flip=True)
with tf.keras.applications.resnet.preprocess_input, as shown below:
Train_Datagen = ImageDataGenerator(dtype = 'float32', preprocessing_function=tf.keras.applications.resnet.preprocess_input)
Val_Datagen = ImageDataGenerator(dtype = 'float32', preprocessing_function=tf.keras.applications.resnet.preprocess_input)
By modifying the data augmentation as shown above, my validation accuracy, which had been stuck at 50%, increased gradually up to 97%. The reason is probably that ResNet expects its own specific preprocessing: tf.keras.applications.resnet.preprocess_input applies the channel-mean subtraction (and RGB-to-BGR conversion) used when the network was trained on ImageNet, rather than a simple 1/255 rescale.
Complete working code that achieved more than 95% training and validation accuracy (on the Cats vs. Dogs dataset) using ResNet50 is shown below:
import tensorflow as tf
from tensorflow.keras.applications import ResNet50
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Sequential
# The Convolutional Base of the Pre-Trained Model will be added as a Layer in this Model
Conv_Base = ResNet50(include_top = False, weights = 'imagenet', input_shape = (150,150, 3))
for layer in Conv_Base.layers[:-8]:
    layer.trainable = False
model = Sequential()
model.add(Conv_Base)
model.add(Flatten())
model.add(Dense(units = 256, activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(units = 1, activation = 'sigmoid'))
model.summary()
base_dir = 'Deep_Learning_With_Python_Book/Dogs_Vs_Cats_Small'
if os.path.exists(base_dir):
    train_dir = os.path.join(base_dir, 'train')
    validation_dir = os.path.join(base_dir, 'validation')
    test_dir = os.path.join(base_dir, 'test')
else:
    print("The Folder, {}, doesn't exist".format(base_dir))
batch_size = 20
Train_Datagen = ImageDataGenerator(dtype = 'float32', preprocessing_function=tf.keras.applications.resnet.preprocess_input)
Val_Datagen = ImageDataGenerator(dtype = 'float32', preprocessing_function=tf.keras.applications.resnet.preprocess_input)
train_gen = Train_Datagen.flow_from_directory(directory = train_dir, target_size = (150,150),
batch_size = batch_size, class_mode = 'binary')
val_gen = Val_Datagen.flow_from_directory(directory = validation_dir, target_size = (150,150),
batch_size = batch_size, class_mode = 'binary')
epochs = 15
Number_Of_Training_Images = train_gen.classes.shape[0]
steps_per_epoch = Number_Of_Training_Images // batch_size  # integer number of batches per epoch
model.compile(optimizer = 'Adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
history = model.fit(train_gen, epochs = epochs,
#batch_size = batch_size,
validation_data = val_gen, steps_per_epoch = steps_per_epoch)
import matplotlib.pyplot as plt
train_acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
train_loss = history.history['loss']
val_loss = history.history['val_loss']
No_Of_Epochs = range(epochs)
plt.plot(No_Of_Epochs, train_acc, marker = 'o', color = 'blue', markersize = 12,
linewidth = 2, label = 'Training Accuracy')
plt.plot(No_Of_Epochs, val_acc, marker = '.', color = 'red', markersize = 12,
linewidth = 2, label = 'Validation Accuracy')
plt.title('Training Accuracy and Testing Accuracy w.r.t Number of Epochs')
plt.legend()
plt.figure()
plt.plot(No_Of_Epochs, train_loss, marker = 'o', color = 'blue', markersize = 12,
linewidth = 2, label = 'Training Loss')
plt.plot(No_Of_Epochs, val_loss, marker = '.', color = 'red', markersize = 12,
linewidth = 2, label = 'Validation Loss')
plt.title('Training Loss and Testing Loss w.r.t Number of Epochs')
plt.legend()
plt.show()
(Image: the resulting accuracy and loss graphs.)
Below is my code to classify two kinds of classes.
The accuracy increases gradually until it reaches about 87%.
The problem is that the validation accuracy is stuck between 0.5 and 0.6.
I know it is an overfitting problem.
I tried to manipulate the number of parameters, but I still get the same problem.
Any idea how the model can be improved?
Thanks so much.
from keras.models import Sequential
from keras.layers import Conv2D, Dropout, Activation
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
classifier = Sequential()
classifier.add(Conv2D(16, (3, 3), input_shape = (110, 110, 3)))
classifier.add(Activation('relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Conv2D(32,(3,3)))
classifier.add(Activation('relu'))
classifier.add(MaxPooling2D(pool_size =(2,2)))
classifier.add(Conv2D(64,(3,3)))
classifier.add(Activation('relu'))
classifier.add(MaxPooling2D(pool_size =(2,2)))
classifier.add(Flatten())
classifier.add(Dense(64))
classifier.add(Activation('relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(1))
classifier.add(Activation('sigmoid'))
classifier.compile(optimizer = 'rmsprop', loss = 'binary_crossentropy', metrics = ['accuracy'])
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('/home/ccc/Downloads/Compressed/CNN/AD/train',
                                                 target_size = (110, 110),
                                                 batch_size = 10,
                                                 class_mode = 'binary')
test_set = test_datagen.flow_from_directory('/home/ccc/Downloads/Compressed/CNN/AD/test',
                                            target_size = (110, 110),
                                            batch_size = 6,
                                            class_mode = 'binary')
hist = classifier.fit_generator(training_set,
steps_per_epoch = 1160,
epochs = 50,
validation_data = test_set,
validation_steps = 300)
plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.show()
Add a few more layers. Start with a high learning rate and slowly decrease it. Try different optimizers. I recommend using transfer learning for better validation accuracy.
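For instance, a minimal sketch of the decreasing-learning-rate idea, assuming the classifier and generators defined in the question (ReduceLROnPlateau is one common way to shrink the rate when validation loss stalls):
from keras.callbacks import ReduceLROnPlateau
from keras.optimizers import RMSprop

# Start relatively high, then halve the learning rate whenever
# validation loss stops improving for 3 epochs.
classifier.compile(optimizer=RMSprop(lr=1e-3),
                   loss='binary_crossentropy', metrics=['accuracy'])
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                              patience=3, min_lr=1e-6)
hist = classifier.fit_generator(training_set,
                                steps_per_epoch=1160,
                                epochs=50,
                                validation_data=test_set,
                                validation_steps=300,
                                callbacks=[reduce_lr])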
Following is the architecture for my model.
# %%
# Defining the model
input_shape = img_data[0].shape
model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same', input_shape=input_shape))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.75))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
# model.add(Convolution2D(64, 3, 3))
# model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.75))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.75))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
# sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=["accuracy"])
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=["accuracy"])
The accuracy is a bit low, so I want to transform the architecture to MobileNet. Is there any Keras-based implementation to classify images using MobileNet?
Maybe this code snippet will help you:
from keras.applications.mobilenet import MobileNet
from keras.applications.mobilenetv2 import MobileNetV2
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras import Sequential
from keras.layers import Dense
from keras.optimizers import Adam, RMSprop, SGD
import keras
from tensorflow import confusion_matrix
from matplotlib import pyplot as plt
import config  # local module that defines IMAGE_SIZE
import numpy as np
train_path = 'data/train'
val_batch = 'data/val'
test_batch = 'data/test'
train_batches = ImageDataGenerator(preprocessing_function=keras.applications.mobilenet.preprocess_input).flow_from_directory(train_path, target_size=(config.IMAGE_SIZE, config.IMAGE_SIZE),
class_mode='categorical', batch_size=20)
val_batches = ImageDataGenerator(preprocessing_function=keras.applications.mobilenet.preprocess_input).flow_from_directory(val_batch, target_size=(config.IMAGE_SIZE, config.IMAGE_SIZE),
class_mode='categorical', batch_size=20)
def prepare_image(file):
    img = image.load_img(file, target_size=(config.IMAGE_SIZE, config.IMAGE_SIZE))
    img_array = image.img_to_array(img)
    img_expanded_dims = np.expand_dims(img_array, axis=0)
    return keras.applications.mobilenet.preprocess_input(img_expanded_dims)
mobilenet = MobileNetV2()
# x = mobilenet.layers[-6].output
x = mobilenet.layers[-2].output
predictions = Dense(8, activation='softmax')(x)
from keras import Model
model = Model(inputs= mobilenet.input, outputs=predictions)
print(model.summary())
# for layer in model.layers[:-5]:
# layer.trainable = False
# for layer in model.layers[:-1]:
# layer.trainable = False
print(model.summary())
# exit(0)
model.compile(SGD(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit_generator(train_batches, steps_per_epoch=10,
validation_data=val_batches, validation_steps=10, epochs=300, verbose=2)
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'b', label='Training acc')
plt.plot(epochs, val_acc, 'r', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
# Get the ground truth from generator
ground_truth = train_batches.classes
# Get the label to class mapping from the generator
label2index = train_batches.class_indices
# Getting the mapping from class index to class label
idx2label = dict((v, k) for k, v in label2index.items())
print(idx2label)
# _, val_labels = next(val_batches)
#
# predictions = model.predict_generator(val_batches, steps=1, verbose=0)
#
# cm = confusion_matrix(val_batches, np.round(predictions[:,0]))
# cm_plot_labels = []
#
# for k, v in label2index.items():
# cm_plot_labels.append(v)
#
# print(cm)
# serialize model to JSON
model_json = model.to_json()
with open("mobilenet.json", "w") as json_file:
json_file.write(model_json)
from keras.models import save_model
save_model(model, 'mobilenet.h5')
import tensorflow as tf
# from tensorflow.contrib import lite
# tf.lite.TocoConverter
converter = tf.lite.TocoConverter.from_keras_model_file("mobilenet.h5")
tflite_model = converter.convert()
open("model/mobilenet.tflite", "wb").write(tflite_model)
Keras has a set of pretrained models for image classification purposes.
You can check the list and the usage here.
You can also copy the implementation of the architecture from the GitHub repository; here is the link.
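For example, a minimal sketch of loading one of those pretrained models (MobileNet with its ImageNet weights) and classifying a single image; 'example.jpg' is a hypothetical placeholder path:
import numpy as np
from keras.applications.mobilenet import MobileNet, preprocess_input, decode_predictions
from keras.preprocessing import image

# Load MobileNet with its ImageNet classification head.
model = MobileNet(weights='imagenet')

# MobileNet's default input size is 224x224.
img = image.load_img('example.jpg', target_size=(224, 224))
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))

preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])  # top-3 (class_id, name, score) tuples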