VGG16 Keras fine tuning: low accuracy - python

I've already asked a similar question here, but now I have a slightly different problem, so I'm asking a new question.
Instead of the approach proposed among the answers to the referenced question, I decided to use a different one to train and then fine-tune the model.
Update: I've replaced the old question provided here with a more suitable version.
Here is my sequence of actions:
Build VGG16 model and drop top layer (call it no-top model)
Generate bottleneck features using no-top model
Train a separate fully-connected model using bottleneck features
Build new VGG16 model, drop top layers, and attach pretrained top-model
Train concatenated model on dogs/cats data
And here is the code I use to implement the sequence of actions described above:
from __future__ import print_function  # must precede all other imports
import warnings
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', DeprecationWarning)
from itertools import izip_longest as zip_longest
from pprint import pformat as pf
from pprint import pprint as pp
import os
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.layers import Dropout, Flatten, Dense, InputLayer, Lambda
from keras.models import Sequential, Model, load_model
from keras.utils.data_utils import get_file
from keras.optimizers import SGD
import keras.backend as K
import numpy as np
RANDOM_STATE = 1
IMAGE_WIDTH = 224
IMAGE_HEIGHT = 224
BATCH_SIZE = 4
VGG_MEAN = np.array([123.68, 116.779, 103.939]).reshape((3, 1, 1))
VGG16_WEIGHTS_PATH = 'http://www.platform.ai/models/vgg16.h5'
DATA_ROOT = os.path.join(os.path.expanduser('~'), 'data', 'dogscats')
TRAIN_DIR = os.path.join(DATA_ROOT, 'train')
VALID_DIR = os.path.join(DATA_ROOT, 'valid')
SAMPLES_DIR = os.path.expanduser('~/dogscats_samples')
np.random.seed(RANDOM_STATE)
K.set_image_dim_ordering('th')
def get_batches(dirname, gen=ImageDataGenerator(), shuffle=True,
                batch_size=BATCH_SIZE, class_mode='categorical'):
    return gen.flow_from_directory(
        os.path.join(SAMPLES_DIR, dirname),
        target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
        class_mode=class_mode,
        shuffle=shuffle,
        batch_size=batch_size)

def vgg_preprocess(x):
    x = x - VGG_MEAN
    return x[:, ::-1]

def conv_block(model, n_layers, n_filters, name='block'):
    for i in range(n_layers):
        model.add(ZeroPadding2D((1, 1), name='%s_padding_%s' % (name, i)))
        model.add(Conv2D(n_filters, (3, 3), activation='relu', name='%s_conv2d_%s' % (name, i)))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='%s_maxpool' % name))

def fc_block(model, name='block'):
    model.add(Dense(4096, activation='relu', name=name + '_dense'))
    model.add(Dropout(0.5))

def build_vgg_16():
    model = Sequential()
    input_shape = (3, IMAGE_WIDTH, IMAGE_HEIGHT)
    model.add(InputLayer(input_shape=input_shape))
    model.add(Lambda(vgg_preprocess))
    conv_block(model, n_layers=2, n_filters=64, name='block1')
    conv_block(model, n_layers=2, n_filters=128, name='block2')
    conv_block(model, n_layers=3, n_filters=256, name='block3')
    conv_block(model, n_layers=3, n_filters=512, name='block4')
    conv_block(model, n_layers=3, n_filters=512, name='block5')
    model.add(Flatten())
    fc_block(model)
    fc_block(model)
    model.add(Dense(1000, activation='softmax'))
    return model
def train_finetuned_model():
    file_path = get_file('vgg16.h5', VGG16_WEIGHTS_PATH, cache_subdir='models')

    print('Building VGG16 (no-top) model to generate bottleneck features')
    vgg16_notop = build_vgg_16()
    vgg16_notop.load_weights(file_path)
    for _ in range(6):  # drop Flatten, both FC blocks, and the softmax layer
        vgg16_notop.pop()
    vgg16_notop.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

    train_batches = get_batches('train', shuffle=False, class_mode=None)
    train_labels = np.array([0]*1000 + [1]*1000)
    bottleneck_train = vgg16_notop.predict_generator(train_batches, steps=2000 // BATCH_SIZE)
    valid_batches = get_batches('valid', shuffle=False, class_mode=None)
    valid_labels = np.array([0]*400 + [1]*400)
    bottleneck_valid = vgg16_notop.predict_generator(valid_batches, steps=800 // BATCH_SIZE)

    print('Training top model on bottleneck features')
    top_model = Sequential()
    top_model.add(Flatten(input_shape=bottleneck_train.shape[1:]))
    top_model.add(Dense(256, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(1, activation='sigmoid'))
    top_model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    top_model.fit(bottleneck_train, train_labels,
                  batch_size=32, epochs=50,
                  validation_data=(bottleneck_valid, valid_labels))

    print('Concatenate new VGG16 (without top layer) with pretrained top model')
    vgg16_fine = build_vgg_16()
    vgg16_fine.load_weights(file_path)
    for _ in range(6):
        vgg16_fine.pop()
    vgg16_fine.add(Flatten(name='top_flatten'))
    vgg16_fine.add(Dense(256, activation='relu', name='top_dense'))
    vgg16_fine.add(Dropout(0.5, name='top_dropout'))
    vgg16_fine.add(Dense(1, activation='sigmoid', name='top_sigmoid'))
    # copy the pretrained top-model weights into the new top layers
    for i, layer in enumerate(reversed(top_model.layers), 1):
        pretrained_weights = layer.get_weights()
        vgg16_fine.layers[-i].set_weights(pretrained_weights)
    for layer in vgg16_fine.layers[:26]:  # freeze everything up to (and including) block4
        layer.trainable = False
    vgg16_fine.compile(optimizer=SGD(lr=1e-4, momentum=0.9),
                       loss='binary_crossentropy',
                       metrics=['accuracy'])

    print('Train concatenated model on dogs/cats dataset sample')
    train_datagen = ImageDataGenerator(rescale=1./255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1./255)
    train_batches = get_batches('train', gen=train_datagen, class_mode='binary')
    valid_batches = get_batches('valid', gen=test_datagen, class_mode='binary')
    vgg16_fine.fit_generator(train_batches,
                             steps_per_epoch=2000 // BATCH_SIZE,
                             epochs=50,
                             validation_data=valid_batches,
                             validation_steps=800 // BATCH_SIZE)
    return vgg16_fine
final_model = train_finetuned_model()
But the problem is that the model's accuracy drastically dropped: after 50 epochs it is still around 50%, so I've probably done something wrong.
Maybe something is wrong with the parameters, i.e. learning rate, batch size, etc.?

Your fully connected layers look totally different from the original VGG architecture.
# yours
Flatten()
Dense(256, activation='relu')
Dense(1, activation='sigmoid')
# original
Flatten()
Dense(4096, activation='relu')
Dense(4096, activation='relu')
Dense(2, activation='softmax')
Two points:
The last layer should be a 2-class softmax instead of a sigmoid; with a sigmoid, I guess the accuracy is not computed the way you expect.
The complexity (number of neurons and layers) seems to be too low.
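For illustration, a minimal sketch of a top block with the original VGG proportions (it reuses bottleneck_train from the question and assumes the labels are one-hot encoded, so treat it as a suggestion rather than tested code):
from keras.models import Sequential
from keras.layers import Flatten, Dense, Dropout

top_model = Sequential()
top_model.add(Flatten(input_shape=bottleneck_train.shape[1:]))
top_model.add(Dense(4096, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(4096, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(2, activation='softmax'))  # 2-class softmax instead of sigmoid
top_model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
                  metrics=['accuracy'])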

Well, I'm not sure if it is the right solution, but I was able to increase accuracy to at least 70% with this code (probably the main reasons are the decreased learning rate and more epochs):
from keras.optimizers import RMSprop            # missing from the question's imports
from keras.utils.np_utils import to_categorical  # missing from the question's imports

def train_finetuned_model(lr=1e-5, verbose=True):
    file_path = get_file('vgg16.h5', VGG16_WEIGHTS_PATH, cache_subdir='models')
    if verbose:
        print('Building VGG16 (no-top) model to generate bottleneck features.')
    vgg16_notop = build_vgg_16()
    vgg16_notop.load_weights(file_path)
    for _ in range(6):
        vgg16_notop.pop()
    vgg16_notop.compile(optimizer=RMSprop(lr=lr), loss='categorical_crossentropy', metrics=['accuracy'])

    if verbose:
        print('Bottleneck features generation.')
    train_batches = get_batches('train', shuffle=False, class_mode=None, batch_size=BATCH_SIZE)
    train_labels = np.array([0]*1000 + [1]*1000)
    train_bottleneck = vgg16_notop.predict_generator(train_batches, steps=2000 // BATCH_SIZE)
    valid_batches = get_batches('valid', shuffle=False, class_mode=None, batch_size=BATCH_SIZE)
    valid_labels = np.array([0]*400 + [1]*400)
    valid_bottleneck = vgg16_notop.predict_generator(valid_batches, steps=800 // BATCH_SIZE)

    if verbose:
        print('Training top model on bottleneck features.')
    top_model = Sequential()
    top_model.add(Flatten(input_shape=train_bottleneck.shape[1:]))
    top_model.add(Dense(4096, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(4096, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(2, activation='softmax'))
    top_model.compile(optimizer=RMSprop(lr=lr), loss='categorical_crossentropy', metrics=['accuracy'])
    top_model.fit(train_bottleneck, to_categorical(train_labels),
                  batch_size=32, epochs=10,
                  validation_data=(valid_bottleneck, to_categorical(valid_labels)))

    if verbose:
        print('Concatenate new VGG16 (without top layer) with pretrained top model.')
    vgg16_fine = build_vgg_16()
    vgg16_fine.load_weights(file_path)
    for _ in range(6):
        vgg16_fine.pop()
    vgg16_fine.add(Flatten(name='top_flatten'))
    vgg16_fine.add(Dense(4096, activation='relu'))
    vgg16_fine.add(Dropout(0.5))
    vgg16_fine.add(Dense(4096, activation='relu'))
    vgg16_fine.add(Dropout(0.5))
    vgg16_fine.add(Dense(2, activation='softmax'))
    vgg16_fine.compile(optimizer=RMSprop(lr=lr), loss='categorical_crossentropy', metrics=['accuracy'])

    if verbose:
        print('Loading pre-trained weights into concatenated model')
    for i, layer in enumerate(reversed(top_model.layers), 1):
        pretrained_weights = layer.get_weights()
        vgg16_fine.layers[-i].set_weights(pretrained_weights)
    for layer in vgg16_fine.layers[:26]:
        layer.trainable = False
    if verbose:
        print('Layers training status:')
        for layer in vgg16_fine.layers:
            print('[%6s] %s' % ('' if layer.trainable else 'FROZEN', layer.name))

    # categorical_crossentropy to match the one-hot labels and the 2-way softmax output
    vgg16_fine.compile(optimizer=RMSprop(lr=1e-6), loss='categorical_crossentropy', metrics=['accuracy'])

    if verbose:
        print('Train concatenated model on dogs/cats dataset sample.')
    train_datagen = ImageDataGenerator(rescale=1./255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1./255)
    train_batches = get_batches('train', gen=train_datagen, class_mode='categorical', batch_size=BATCH_SIZE)
    valid_batches = get_batches('valid', gen=test_datagen, class_mode='categorical', batch_size=BATCH_SIZE)
    vgg16_fine.fit_generator(train_batches, epochs=100,
                             steps_per_epoch=2000 // BATCH_SIZE,
                             validation_data=valid_batches,
                             validation_steps=800 // BATCH_SIZE)
    return vgg16_fine
I guess there is a way to achieve much better results with fine-tuning (up to 98%), but I wasn't able to reach them with the provided code.
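For completeness, a small usage sketch of the returned model (a hypothetical evaluation step of my own, reusing the get_batches helper and BATCH_SIZE from the question):
final_model = train_finetuned_model()
eval_batches = get_batches('valid', gen=ImageDataGenerator(rescale=1./255),
                           shuffle=False, class_mode='categorical')
loss, acc = final_model.evaluate_generator(eval_batches, steps=800 // BATCH_SIZE)
print('validation accuracy: %.3f' % acc)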

Related

CNN accuracy: 0.0000e+00 for multi-classification on images

I have the following code that produces my horrible accuracy dilemma. Has anyone else encountered this issue for a multi-classification task (49 different image classes)?
I am running ResNet50 on top of my CNN model with softmax as the last activation function; my loss is categorical_crossentropy and my optimizer is Adam.
What might I be doing wrong?
## Build CNN architecture
model1 = Sequential()
model1.add(Conv2D(32, (3,3), strides=1, input_shape=(720, 720, 3)))
model1.add(Activation('relu'))
model1.add(Conv2D(32, (3,3), strides=1, padding="same"))
model1.add(Activation('relu'))
model1.add(MaxPooling2D(pool_size=(2,2)))
model1.add(Conv2D(64, (3,3), strides=1, padding="same"))
model1.add(Activation('relu'))
model1.add(Conv2D(64, (3,3), strides=1, padding="same"))
model1.add(Activation('relu'))
model1.add(MaxPooling2D(pool_size=(2,2)))
model1.add(Flatten())
model1.add(Dense(200))
model1.add(Activation('relu'))
model1.add(Dense(200))
model1.add(Dropout(0.24))
model1.add(Activation('relu'))
model1.add(Dense(49, activation='softmax'))
model1.summary()

# Image data generator for on-the-fly image augmentation
directory = '/home/carlini-TF2/data/train/'
batch_size = 64
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    rotation_range=90.,
    shear_range=0.2,
    zoom_range=[0.8,1.2],
    horizontal_flip=True,
    validation_split=0.2,
    preprocessing_function=tf.keras.applications.resnet50.preprocess_input)
train_generator = train_datagen.flow_from_directory(directory=directory,
                                                    subset='training',
                                                    target_size=(720, 720),
                                                    shuffle=True,
                                                    seed=42,
                                                    color_mode='rgb',
                                                    class_mode='categorical',
                                                    batch_size=batch_size)
valid_directory = '/home/carlini-TF2/data/test/'
valid_generator = train_datagen.flow_from_directory(directory=valid_directory,
                                                    target_size=(720, 720),
                                                    color_mode="rgb",
                                                    batch_size=batch_size,
                                                    class_mode="categorical",
                                                    subset='validation',
                                                    shuffle=True,
                                                    seed=42)

## Compile and train Neural Network
METRICS = [
    tf.keras.metrics.Accuracy(name='accuracy'),
    tf.keras.metrics.Precision(name='precision'),
    tf.keras.metrics.Recall(name='recall')]

# optimal optimizer FN | loss FN to work with accuracy metric
model1.compile(loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
               optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
               metrics=METRICS)
# stop training when loss gets worse after consecutive epochs
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
# fit model with augmented training set and validation set | shuffle batch
history = model1.fit(train_generator,
                     validation_data=valid_generator,
                     steps_per_epoch=train_generator.n//batch_size,
                     validation_steps=valid_generator.n//batch_size,
                     shuffle=True, callbacks=[callback],
                     epochs=50)
The issue is that ResNet50 was being used only for data augmentation (via the generator's preprocessing function) and not in the CNN architecture itself. In order to reach a somewhat robust model, the following code is needed.
We can throw out the previous architecture and use a very simple model with ResNet50 itself, since this gives conclusive results.
We must use the Functional API, since ResNet50 was built on it.
import numpy as np
import tensorflow as tf

data_bias = np.log(1802./4657)
initializer = tf.keras.initializers.Constant(data_bias)

resnet50_imagenet_model = tf.keras.applications.ResNet50(weights='imagenet',
                                                         include_top=False,
                                                         input_shape=(720,720,3))
resnet50_imagenet_model.trainable = False

# Flatten output layer of ResNet
flattened = tf.keras.layers.Flatten()(resnet50_imagenet_model.output)

# Fully connected output layer with 49 different labels
fc2 = tf.keras.layers.Dense(49, activation='softmax', bias_initializer=initializer,
                            name="AddedDense2")(flattened)

model1 = tf.keras.models.Model(inputs=resnet50_imagenet_model.input, outputs=fc2)
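A hedged compile-and-fit sketch for this model (reusing the generators from the question; the optimizer settings and the CategoricalAccuracy metric, which matches the one-hot labels produced by class_mode='categorical', are my assumptions):
model1.compile(loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
               optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
               metrics=[tf.keras.metrics.CategoricalAccuracy(name='accuracy')])
history = model1.fit(train_generator,
                     validation_data=valid_generator,
                     epochs=10)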

Keras multiclass images classification and prediction

I am doing image classification with ImageDataGenerator. My data has this structure:
Train
101
102
103
104
Test
101
102
103
104
So, if I understood correctly, the ImageDataGenerator automatically does what is needed with labeling.
I train the model, and I get some kind of accuracy. Now I want to do the prediction.
- model.predict
- model.predict_proba
- model.predict_classes
All of these give me the same value. Can you quickly explain, or refer me to something (I cannot find anything concerning my problem), how I should proceed? Or maybe I did something terrible in the code. The biggest problem: I don't understand how the output will differ for 4 different classes. As predict_classes gives me an output of [[1]], shouldn't it give me the predicted class?
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, MaxPool2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.regularizers import l1, l2, l1_l2
model = Sequential()
model.add(Conv2D(60, (3, 3), input_shape=(480, 640, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(60, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(100, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(100, activation='relu', activity_regularizer=l1(0.001)))
#model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('softmax'))

model.compile(loss='binary_crossentropy',
              optimizer='Adam',
              metrics=['accuracy'])

batch_size = 32

# augmentation configuration for train
train_datagen = ImageDataGenerator(
    rotation_range=30,
    width_shift_range=0.2,
    height_shift_range=0.2,
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=False,
    vertical_flip=True,
    fill_mode='nearest')

# augmentation configuration for testing, only rescale
test_datagen = ImageDataGenerator(rescale=1./255)

# reading pictures and generating batches of augmented image data
train_generator = train_datagen.flow_from_directory(
    '/media/data/working_dir/categories/readytotest/train',
    target_size=(480, 640),
    batch_size=batch_size,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    '/media/data/working_dir/categories/readytotest/test',
    target_size=(480, 640),
    batch_size=batch_size,
    class_mode='binary')

model.fit_generator(
    train_generator,
    steps_per_epoch=800 // batch_size,
    epochs=15,
    validation_data=validation_generator,
    validation_steps=800 // batch_size)
Your model and your generators are set up for binary, not multi-class, classification. First you need to fix your model's last layer to produce an output of the class size. Second you need to fix the generators for multi-class use.
(...)
model.add(Dense(CLS_SZ))
model.add(Activation('softmax'))
(...)
# Use class_mode='categorical' so the generator yields one-hot labels
# matching the softmax output.
train_generator = train_datagen.flow_from_directory(
    '/media/data/working_dir/categories/readytotest/train',
    target_size=(480, 640),
    batch_size=batch_size,
    class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
    '/media/data/working_dir/categories/readytotest/test',
    target_size=(480, 640),
    batch_size=batch_size,
    class_mode='categorical')
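To address the prediction part of the question, a hedged sketch (it assumes the model has been retrained with a 4-way softmax as above; img_batch is a hypothetical batch of preprocessed images): take the argmax over the softmax outputs and map it back through the generator's class_indices:
import numpy as np

# class_indices maps folder names ('101', '102', ...) to integer indices
idx_to_class = {v: k for k, v in train_generator.class_indices.items()}

probs = model.predict(img_batch)      # shape (n, 4): one probability per class
pred_idx = np.argmax(probs, axis=-1)  # predicted class index for each image
pred_labels = [idx_to_class[i] for i in pred_idx]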

TensorFlow - Stop training when losses reach a defined value

I used the first example here as an example network.
How do I stop the training when the loss reaches a fixed value?
So, for example, I would like to fix a maximum of 3000 epochs, and the training will stop when the loss goes under 0.2.
I read this topic, but it is not the solution I'm looking for.
I want to stop the training when the loss reaches a certain value, not when there is no improvement, as with the function proposed in the aforementioned topic.
Here is the code:
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
# Generate dummy data
import numpy as np
x_train = np.random.random((1000, 20))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)
x_test = np.random.random((100, 20))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
model = Sequential()
# Dense(64) is a fully-connected layer with 64 hidden units.
# in the first layer, you must specify the expected input data shape:
# here, 20-dimensional vectors.
model.add(Dense(64, activation='relu', input_dim=20))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
model.fit(x_train, y_train,
          epochs=3000,
          batch_size=128)
score = model.evaluate(x_test, y_test, batch_size=128)
You can use a method like this if you switch to TensorFlow 2.0:
class haltCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if logs.get('loss') <= 0.05:
            print("\n\n\nReached 0.05 loss value so cancelling training!\n\n\n")
            self.model.stop_training = True
You just need to create a callback like that, instantiate it (trainingStopCallback = haltCallback()), and then add the instance to your model.fit so it becomes something like this:
model.fit(x_train, y_train,
          epochs=3000,
          batch_size=128,
          callbacks=[trainingStopCallback])
This way, the fitting should stop whenever the loss reaches a value below 0.05 (or whatever value you put in while defining it).
Also, since it's been a long time since you asked this question and it still had no actual answer for TensorFlow 2.0, I updated your code snippet to TensorFlow 2.0 so everyone can now easily find and use it in their new projects.
import tensorflow as tf
# Generate dummy data
import numpy as np
x_train = np.random.random((1000, 20))
y_train = tf.keras.utils.to_categorical(
    np.random.randint(10, size=(1000, 1)), num_classes=10)
x_test = np.random.random((100, 20))
y_test = tf.keras.utils.to_categorical(
    np.random.randint(10, size=(100, 1)), num_classes=10)

model = tf.keras.models.Sequential()
# Dense(64) is a fully-connected layer with 64 hidden units.
# In the first layer, you must specify the expected input data shape:
# here, 20-dimensional vectors.
model.add(tf.keras.layers.Dense(64, activation='relu', input_dim=20))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(64, activation='relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(10, activation='softmax'))

class haltCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if logs.get('loss') <= 0.05:
            print("\n\n\nReached 0.05 loss value so cancelling training!\n\n\n")
            self.model.stop_training = True

trainingStopCallback = haltCallback()

sgd = tf.keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# the loss is reported by fit() automatically, so it is not listed as a metric
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
# pass the callback instance (not its name as a string) to fit()
model.fit(x_train, y_train,
          epochs=3000,
          batch_size=128,
          callbacks=[trainingStopCallback])
score = model.evaluate(x_test, y_test, batch_size=128)
Documentation here : EarlyStopping
keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=0, verbose=0, mode='auto', baseline=None)
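For reference, a typical usage sketch of this callback (note that EarlyStopping stops on lack of improvement, not on reaching a fixed threshold, which is why the custom callback above is needed for the original question; the patience value here is an arbitrary choice):
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                  min_delta=0,
                                                  patience=5,
                                                  verbose=1,
                                                  mode='auto')
model.fit(x_train, y_train,
          epochs=3000,
          batch_size=128,
          validation_split=0.2,
          callbacks=[early_stopping])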
I solved it by doing this:
history = model.fit(training1, training2, epochs=100, verbose=True)
loss = history.history["loss"]
# Note: this inspects the recorded losses after fit() has finished,
# rather than stopping training mid-run.
for x in loss:
    if x <= 1:
        print("Reached 1 loss value, cancelling training")
        model.stop_training = True
        break

Keras CNN dimension problems

I am trying to build a CNN using Keras for an image segmentation task, based on this article. Because my dataset is small, I wanted to use Keras ImageDataGenerator and feed it to fit_generator(). So, I followed the example on the Keras website. But, since zipping the image and mask generators didn't work, I followed this answer and created my own generator.
My input data is of size (701,256,1) and my problem is binary (foreground, background). For each image I have a label of the same shape.
Now, I am facing a dimensionality problem. This was also mentioned in the answer, but I am unsure of how to solve it.
The error:
ValueError: Error when checking target: expected dense_3 to have 2 dimensions, but got array with shape (2, 704, 256, 1)
I am pasting the entire code I have here:
import numpy
import pygpu
import theano
import keras
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, Reshape
from keras.layers import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras import backend as K
def superGenerator(image_gen, label_gen):
    while True:
        x = image_gen.next()
        y = label_gen.next()
        yield x[0], y[0]
img_height = 704
img_width = 256
train_data_dir = 'Dataset/Train/Images'
train_label_dir = 'Dataset/Train/Labels'
validation_data_dir = 'Dataset/Validation/Images'
validation_label_dir = 'Dataset/Validation/Labels'
n_train_samples = 1000
n_validation_samples = 500
epochs = 50
batch_size = 2
input_shape = (img_height, img_width,1)
target_shape = (img_height, img_width)
model = Sequential()
model.add(Conv2D(80,(28,28), input_shape=input_shape))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
model.add(Conv2D(96,(18,18)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
model.add(Conv2D(128,(13,13)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(160,(8,8)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(2, activation='softmax'))
model.summary()
model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy'])
data_gen_args = dict(
    rescale=1./255,
    horizontal_flip=True,
    vertical_flip=True
)
train_datagen = ImageDataGenerator(**data_gen_args)
train_label_datagen = ImageDataGenerator(**data_gen_args)
test_datagen = ImageDataGenerator(**data_gen_args)
test_label_datagen = ImageDataGenerator(**data_gen_args)
seed = 1
train_image_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=target_shape,
    color_mode='grayscale',
    batch_size=batch_size,
    class_mode='binary',
    seed=seed)
train_label_generator = train_label_datagen.flow_from_directory(
    train_label_dir,
    target_size=target_shape,
    color_mode='grayscale',
    batch_size=batch_size,
    class_mode='binary',
    seed=seed)
validation_image_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=target_shape,
    color_mode='grayscale',
    batch_size=batch_size,
    class_mode='binary',
    seed=seed)
validation_label_generator = test_label_datagen.flow_from_directory(
    validation_label_dir,
    target_size=target_shape,
    color_mode='grayscale',
    batch_size=batch_size,
    class_mode='binary',
    seed=seed)

# superGenerator takes only the two generators, so the extra batch_size argument is dropped
train_generator = superGenerator(train_image_generator, train_label_generator)
test_generator = superGenerator(validation_image_generator, validation_label_generator)

model.fit_generator(
    train_generator,
    steps_per_epoch=n_train_samples // batch_size,
    epochs=50,
    validation_data=test_generator,
    validation_steps=n_validation_samples // batch_size)

model.save_weights('first_try.h5')
I am new to Keras (and CNNs), so any help would be very much appreciated.
OK, I did some rubber-duck debugging and read a few more articles. Of course the dimensionality was a problem. This simple answer did it for me.
My labels have the same shape as the input image, so the output of the model should have that shape as well. I used Conv2DTranspose to solve this issue.
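A minimal sketch of the idea (my own illustration with made-up filter counts, not the exact model): drop the Flatten/Dense head and mirror each pooling step with a strided Conv2DTranspose so the output recovers the input's spatial shape:
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Conv2DTranspose

model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding='same',
                 input_shape=(704, 256, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))      # 704x256 -> 352x128
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))      # 352x128 -> 176x64
model.add(Conv2DTranspose(32, (3, 3), strides=(2, 2), padding='same',
                          activation='relu'))  # 176x64 -> 352x128
model.add(Conv2DTranspose(1, (3, 3), strides=(2, 2), padding='same',
                          activation='sigmoid'))  # 352x128 -> 704x256, one mask channel
model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy'])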

Extremely High Loss with Keras VGG16 transfer learning Image Classification

I'm retraining VGG16 and fine tuning the top 2 convolutional blocks for an image classification task. The retraining itself finished rather uneventfully with mediocre accuracy. The fine tuning program is currently running and it seems like it will take a couple of days at the least. However, the loss is ludicrously high at around 440 after 3 epochs, while accuracy hovers around 0.4. Please confirm whether there is any major flaw in the model so that I can kill the program. I have used some data augmentation and l1_l2 regularisation during the fine tuning stage. The data set is a small set of about 8000 images of tumors, divided into 8 classes. Thus, the data is both small and extremely unrelated to the imagenet images. This is my code for the transfer learning:
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
from keras import applications
from keras.utils.np_utils import to_categorical
import math
img_width, img_height= 224, 224
weight_path= 'bottleneck_fc_model.h5'
train_dir= 'Cancer_Data/Train'
validation_dir= 'Cancer_Data/Validate'
epochs= 150
batch_size= 128
def save_bottleneck_features():
    datagen = ImageDataGenerator(rescale=1./255)
    model = applications.VGG16(include_top=False, weights='imagenet')

    print 'Extracting Bottleneck Training Features'
    generator = datagen.flow_from_directory(
        train_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)
    nb_train_samples = len(generator.filenames)
    predict_size_train = int(math.ceil(nb_train_samples / batch_size))
    bottleneck_feature_train = model.predict_generator(generator, predict_size_train)
    np.save(open('bottleneck_feature_train.npy', 'w'), bottleneck_feature_train)
    print "Bottleneck Training Features Saved"

    print "Extracting Bottleneck Validation Features"
    generator2 = datagen.flow_from_directory(
        validation_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)
    nb_validation_samples = len(generator2.filenames)
    predict_size_validation = int(math.ceil(nb_validation_samples / batch_size))
    bottleneck_feature_validation = model.predict_generator(generator2, predict_size_validation)
    np.save(open('bottleneck_feature_validation.npy', 'w'), bottleneck_feature_validation)
    print "Bottleneck Validation Features Saved"

def train_top_model():
    datagen_top = ImageDataGenerator(rescale=1./255)
    generator_top = datagen_top.flow_from_directory(
        train_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode='categorical',
        shuffle=False)
    nb_classes = len(generator_top.class_indices)
    np.save('class_indices.npy', generator_top.class_indices)
    train_data = np.load('bottleneck_feature_train.npy')
    train_labels = to_categorical(generator_top.classes, num_classes=nb_classes)

    generator_top2 = datagen_top.flow_from_directory(
        validation_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)
    validation_data = np.load('bottleneck_feature_validation.npy')
    validation_labels = to_categorical(generator_top2.classes, num_classes=nb_classes)

    model = Sequential()
    model.add(Flatten(input_shape=train_data.shape[1:]))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, activation='softmax'))
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    model.save_weights(weight_path)
    model.fit(train_data, train_labels, epochs=epochs, batch_size=batch_size,
              validation_data=(validation_data, validation_labels))

    (eval_loss, eval_accuracy) = model.evaluate(
        validation_data, validation_labels, batch_size=batch_size, verbose=1)
    print("[INFO] accuracy: {:.2f}%".format(eval_accuracy * 100))
    print("[INFO] Loss: {}".format(eval_loss))

save_bottleneck_features()
train_top_model()
The code for fine tuning the model is:
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.regularizers import l1_l2
from keras.models import Sequential, Model
from keras.layers import Dropout, Flatten, Dense
import PIL
import math
weight_path = 'fine_tuned.h5'
top_model_weight_path = 'top_model.h5'
img_width, img_height = 224, 224
train_dir = 'Cancer_Data/Train'
validation_dir = 'Cancer_Data/Validate'
epochs = 200
batch_size = 128
nb_train_samples = 6454
nb_validation_samples = 1464
base_model =applications.VGG16(weights= 'imagenet', include_top= False, input_shape=(224,224,3))
print "Model Loaded."
top_model= Sequential()
top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
top_model.add(Dense(256, activation='relu', kernel_regularizer= l1_l2(l1=0.01, l2= 0.01)))
top_model.add(Dense(8, activation= 'softmax'))
top_model.load_weights(top_model_weight_path)
model= Model(inputs= base_model.input, outputs= top_model(base_model.output))
for layer in model.layers[:18]:
    layer.trainable = False

model.compile(optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              loss='categorical_crossentropy', metrics=['accuracy'])

train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.3,
    zoom_range=0.3,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical')

model.save(weight_path)
model.fit_generator(
    train_generator,
    steps_per_epoch=int(math.ceil(nb_train_samples / batch_size)),
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=int(math.ceil(nb_validation_samples / batch_size)))
I'm fairly new to python and Keras so I haven't been able to figure out whether I have done something wrong or whether there is some structural issue with the model itself. What changes can I make and what techniques can I use to reduce the loss if it is the latter?
VGG16 was not trained on data in the range [0, 1] but rather, approximately, in the range [-128, 128], obtained by subtracting the corresponding mean pixel value from each color channel. Also, VGG16 requires data in BGR format.
Use preprocessing as below (adapted from the Keras source code):
def preprocess(x, data_format='channels_last'):
    if data_format == 'channels_first':
        if x.ndim == 3:
            # 'RGB'->'BGR'
            x = x[::-1, ...]
            # Zero-center by mean pixel
            x[0, :, :] -= 103.939
            x[1, :, :] -= 116.779
            x[2, :, :] -= 123.68
        else:
            x = x[:, ::-1, ...]
            x[:, 0, :, :] -= 103.939
            x[:, 1, :, :] -= 116.779
            x[:, 2, :, :] -= 123.68
    else:
        # 'RGB'->'BGR'
        x = x[..., ::-1]
        # Zero-center by mean pixel
        x[..., 0] -= 103.939
        x[..., 1] -= 116.779
        x[..., 2] -= 123.68
    return x
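One way to apply it (my suggestion, not part of the original answer) is to replace the rescale=1./255 generators in the question with the preprocessing_function hook of ImageDataGenerator:
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(
    shear_range=0.3,
    zoom_range=0.3,
    horizontal_flip=True,
    preprocessing_function=preprocess)  # instead of rescale=1./255
test_datagen = ImageDataGenerator(preprocessing_function=preprocess)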
