How do I use k-fold with flow_from_directory in Python?

It's a school project.
I have split my dataset using ImageDataGenerator.
After compiling and fitting my model, I want to apply k-fold cross-validation, i.e. use k-fold together with flow_from_directory.
from tensorflow import keras
# Forming datasets
datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1/255, validation_split=0.3)
# Training and validation dataset
train1 = datagen.flow_from_directory('C:/Users/hamza/Desktop/kkk/TrainD', target_size=(224,224), subset='training')
val = datagen.flow_from_directory('C:/Users/hamza/Desktop/kkk/TrainD', target_size=(224,224), subset='validation')
# Test dataset for evaluation
datagen2 = keras.preprocessing.image.ImageDataGenerator(rescale=1/255)
test = datagen2.flow_from_directory('C:/Users/hamza/Desktop/kkk/TestD')
from keras.layers import Dense,GlobalMaxPool2D,Dropout
from keras.models import Model
input_shape = (224,224,3)
# Base model (MobileNetV2, used as a frozen feature extractor)
base_model = keras.applications.MobileNetV2(input_shape=input_shape,
                                            include_top=False)
base_model.trainable = False
x = base_model.output
x = GlobalMaxPool2D()(x)
x = Dense(1024, activation='relu')(x)
pred = Dense(3, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=pred)
model.summary()
from keras.optimizers import SGD
# Model compiling; `lr` is deprecated in favour of `learning_rate`, and metrics should be a list
model.compile(loss='categorical_crossentropy',
              optimizer=SGD(learning_rate=0.01, momentum=0.9),
              metrics=['accuracy'])
# Model fitting (the batch size is set by the generator, so it must not be passed here)
history = model.fit(train1, epochs=20, validation_data=val)
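A common way to combine k-fold cross-validation with directory-based image data is to enumerate the file paths and labels yourself, split them with sklearn, and build per-fold generators with flow_from_dataframe instead of flow_from_directory. The sketch below assumes the same TrainD layout (one subfolder per class) and wraps the model-building code above in a hypothetical build_model() helper; it is an illustration, not a definitive solution:
import os
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from tensorflow import keras
data_dir = 'C:/Users/hamza/Desktop/kkk/TrainD'
# Build a (filename, class) dataframe from the directory structure
samples = [(os.path.join(cls, fname), cls)
           for cls in os.listdir(data_dir)
           for fname in os.listdir(os.path.join(data_dir, cls))]
df = pd.DataFrame(samples, columns=['filename', 'class'])
datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1/255)
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
for fold, (train_idx, val_idx) in enumerate(kfold.split(df['filename'], df['class'])):
    print(f'Fold {fold + 1}')
    train_gen = datagen.flow_from_dataframe(df.iloc[train_idx], directory=data_dir,
                                            target_size=(224, 224))
    val_gen = datagen.flow_from_dataframe(df.iloc[val_idx], directory=data_dir,
                                          target_size=(224, 224))
    model = build_model()  # hypothetical helper: re-create a fresh model for every fold
    model.fit(train_gen, epochs=20, validation_data=val_gen)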

Related

How do I get the extracted feature vector from transfer learning models in Python?

I am trying to implement a classification model with ResNet50. I know that CNNs and transfer learning models extract the features themselves. How can I get the extracted feature vector from the image dataset in Python?
Code Snippet of model training:
from tensorflow.keras.applications import ResNet152V2
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
base_model = ResNet152V2(
    input_shape=(height, width, 3),
    include_top=False,
    weights='imagenet'
)
from tensorflow.keras.layers import MaxPool2D, BatchNormalization, GlobalAveragePooling2D
top_model = base_model.output
top_model = GlobalAveragePooling2D()(top_model)
top_model = Dense(1072, activation='relu')(top_model)
top_model = Dropout(0.6)(top_model)
top_model = Dense(256, activation='relu')(top_model)
top_model = Dropout(0.6)(top_model)
prediction = Dense(len(categories), activation='softmax')(top_model)
model = Model(inputs=base_model.input, outputs=prediction)
for layer in model.layers:
    layer.trainable = False  # freeze every layer
from tensorflow.keras.optimizers import Adam
model.compile(
    optimizer=Adam(learning_rate=0.00001),
    loss='categorical_crossentropy',
    metrics=['accuracy']
)
import keras
from keras.callbacks import EarlyStopping
es_callback = keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)
datagen = ImageDataGenerator(rotation_range=30,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             shear_range=0.2,
                             zoom_range=0.3,
                             horizontal_flip=True)
resnet152v2 = model.fit(datagen.flow(X_train, y_train),
                        validation_data=(X_val, y_val),
                        epochs=100,
                        callbacks=[es_callback])
I followed a solution from here.
Code snippet of extracting features from the trained model:
extract = Model(model.inputs, model.layers[-3].output)  # output of the Dense(256) layer, just before the final Dropout and classifier
features = extract.predict(X_test)
The shape of X_test is (120, 224, 224, 3)
Once you have trained your model, you can save it (or just directly use it in the same file) then cut off the top layer.
model = load_model("xxx.h5") #Or use model directly
#Use functional model
from tensorflow.keras.models import Model
featuresModel = Model(inputs=model.input, outputs=model.layers[-3].output)  # gets rid of the final dropout and classifier too
#Inference directly
featureVector = featuresModel.predict(yourBatchData, batch_size=yourBatchLength)
If you run inference in batches, you will need to split the output array to get the feature vector for each individual input in the batch.
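For example, a minimal sketch (assuming the featuresModel above and the X_test described in the question, where layer index -3 is the Dense(256) layer):
features = featuresModel.predict(X_test, batch_size=32)
print(features.shape)               # (120, 256): one 256-dimensional feature vector per input image
first_image_features = features[0]  # feature vector of the first image in the batch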

Our training/validation loss curves are great but the testing performance suffers

We are currently working on an image classification task for detecting tuberculosis from chest x-ray images. You can see our code below. We used 0.7 of the data for the train set, 0.2 for the validation set, and 0.1 for the test set. Our training and validation loss curves look fine [loss plot omitted], but when we try the model on our test dataset, the results are much worse [test results omitted].
Is there something wrong with our code? Thank you in advance.
from tensorflow import keras
from keras.applications.mobilenet_v2 import MobileNetV2
from keras.applications.mobilenet_v2 import preprocess_input
from keras.layers import Dense, Flatten
from keras.models import Sequential
from keras.losses import BinaryCrossentropy
from tensorflow.keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from datetime import datetime, date
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import confusion_matrix, classification_report
import numpy as np
#Loading a pre-trained model
image_size = 224
base_model = MobileNetV2(input_shape=(image_size,image_size,3), weights='imagenet', include_top=False)
for layer in base_model.layers:
    layer.trainable = False
model = Sequential()
model.add(base_model)
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dense(2, activation="sigmoid"))
loss_func = BinaryCrossentropy()
opt = Adam(learning_rate=0.001)
model.compile(loss=loss_func,
              optimizer=opt,
              metrics=['accuracy'])
#Training
test_path = '...'
val_path = '...'
datagen = ImageDataGenerator(rescale=1./255, horizontal_flip=True, shear_range=0.2, zoom_range=0.2)
batch_size=32
validation_size=8
train_set = datagen.flow_from_directory(test_path,
                                        target_size=(image_size, image_size),
                                        batch_size=batch_size,
                                        class_mode='categorical')
validation_set = datagen.flow_from_directory(val_path,
                                             target_size=(image_size, image_size),
                                             batch_size=validation_size,
                                             class_mode='categorical')
#Fitting the data to the model
model_name = 'MobileNetV2'
date_today= date.today().strftime('%m_%d_%Y')
checkpoint = ModelCheckpoint(filepath=f'Models/{model_name}_{date_today}.h5',
                             monitor='val_loss',
                             mode='min',
                             verbose=1,
                             save_best_only=True)
model_history = model.fit(train_set,
                          validation_data=validation_set,
                          epochs=100,
                          steps_per_epoch=len(train_set)//batch_size,
                          validation_steps=len(validation_set)//validation_size,
                          callbacks=[checkpoint],
                          verbose=1)
#Testing the model on the test set
test_path = '...'
test_datagen = ImageDataGenerator()
test_set = test_datagen.flow_from_directory(test_path,
                                            target_size=(image_size, image_size),
                                            class_mode='categorical')
predictions = model.predict(test_set, verbose=1)
y_pred = np.argmax(predictions, axis=1)
class_labels = list(test_set.class_indices.keys())
print('Classification Report')
clsf = classification_report(test_set.classes, y_pred, target_names=class_labels)
print(clsf)
print('\n')
print('Confusion Matrix')
cfm = confusion_matrix(test_set.classes, y_pred)
print(cfm)
The code is mostly correct, but there is one small mistake: you have assigned 2 units to the sigmoid output layer. That's not right; there should be 1 unit, because this is a binary classification problem. Like this:
model.add(Dense(1, activation="sigmoid"))
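Note that a single sigmoid unit trained with binary cross-entropy expects scalar 0/1 labels rather than one-hot pairs, so the generators would need class_mode='binary' to match. A minimal sketch of that change (directory path and other arguments as in the question):
train_set = datagen.flow_from_directory(test_path,
                                        target_size=(image_size, image_size),
                                        batch_size=batch_size,
                                        class_mode='binary')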
Tuberculosis is a complex object with sophisticated features. Therefore, the testing set may produce unexpected results. To circumvent this, you must modify your network and incorporate additional training images. You can experiment with transfer learning, but if the network from which you want to transfer the parameters was trained on objects entirely unrelated to tuberculosis, it might not be appropriate.

CIFAR-100 only has 16 training images and 16 training labels

I'm using TensorFlow with Python 3.7, and I am trying to make an image classifier with CIFAR-100. I want to stay away from Keras as much as possible, because it only has a limited number of datasets that I can use. This is my code:
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
import matplotlib.pyplot as plt
import PIL.Image as Image
from tensorflow import keras
tf.compat.v1.enable_eager_execution()
shape = (224, 224)
labels = '/home/pi/tf/cifar_labels.txt'
labels = np.array(open(labels).read().splitlines())
img = '/home/pi/tf/lobster.jpeg'
img = Image.open(img).resize(shape)
img = np.array(img)/255.0
img = np.reshape(img, (224, 224, 3))
train = tfds.load(name="cifar100", split="train")
test = tfds.load(name="cifar100", split="test")
train = train.shuffle(1024).batch(32).prefetch(tf.data.experimental.AUTOTUNE)
test = test.shuffle(1024).batch(32).prefetch(tf.data.experimental.AUTOTUNE)
for features in train:
    train_images, train_labels = features["image"], features["label"]
for features in test:
    test_images, test_labels = features["image"], features["label"]
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(32, 32, 3)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(100, activation='softmax')
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(train_images, train_labels, epochs=200, verbose=2)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
I'm guessing that there is something wrong with the for features in train loop. When I print the len of the training images/labels, I get 16. Because of this, my model gets a training accuracy of 0% and a loss of 16.1181. Can anybody help?
Your loop overwrites train_images and train_labels on every iteration, so after it finishes you are left with only the last batch; 50,000 training images split into batches of 32 leaves a final batch of 16, which is why len gives 16. To use CIFAR-100 in your Keras model directly, you should call the tfds.load function with the as_supervised=True parameter. It will then load the dataset with only 'image' and 'label' keys. You can see that the CIFAR-100 dataset contains three keys:
FeaturesDict({
    'coarse_label': ClassLabel(shape=(), dtype=tf.int64, num_classes=20),
    'image': Image(shape=(32, 32, 3), dtype=tf.uint8),
    'label': ClassLabel(shape=(), dtype=tf.int64, num_classes=100),
})
Therefore it cannot be fed into model.fit() directly. With as_supervised set to True, the returned dataset will instead yield (image, label) tuples, which model.fit() accepts.
To sum up,
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow import keras
tf.compat.v1.enable_eager_execution()
train = tfds.load(name="cifar100", split="train", as_supervised=True)
test = tfds.load(name="cifar100", split="test", as_supervised=True)
train = train.shuffle(1024).batch(32).prefetch(tf.data.experimental.AUTOTUNE)
test = test.shuffle(1024).batch(32).prefetch(tf.data.experimental.AUTOTUNE)
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(32, 32, 3)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(100, activation='softmax')
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(train, epochs=200, verbose=1)
test_loss, test_acc = model.evaluate(test, verbose=1)
print('\nTest accuracy:', test_acc)
Note:
To use the dataset without setting as_supervised to True, you can use the model.train_on_batch function, e.g.:
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow import keras
tf.compat.v1.enable_eager_execution()
train = tfds.load(name="cifar100", split="train")
test = tfds.load(name="cifar100", split="test")
# The explicit epoch loop below provides the 200 passes, so no .repeat() is needed here
train = train.shuffle(1024).batch(32).prefetch(tf.data.experimental.AUTOTUNE)
test = test.shuffle(1024).batch(32).prefetch(tf.data.experimental.AUTOTUNE)
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(32, 32, 3)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(100, activation='softmax')
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
for epoch in range(200):
    for features in train:
        image_batch, label_batch = features["image"], features["label"]
        loss, acc = model.train_on_batch(image_batch, label_batch)
for features in test:
    image_batch, label_batch = features["image"], features["label"]
    loss, acc = model.test_on_batch(image_batch, label_batch)
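One detail that usually helps training (my addition, not part of the original answer): tfds yields uint8 images, so it is worth scaling them to [0, 1] before feeding the network. A minimal sketch for the as_supervised pipeline above, applied before .shuffle(...).batch(...):
def normalize(image, label):
    # Cast uint8 pixels to float32 and scale to [0, 1]
    return tf.cast(image, tf.float32) / 255.0, label
train = train.map(normalize)
test = test.map(normalize)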

Working with multiple input (image,text) data in ResNet50 or any Deep Learning Models

This is an abstract idea and I don't know the correct pipeline for implementing it. I have used a ResNet50 architecture to train a model that classifies images into 3 categories; one of the avenues I was thinking of exploring was using the textual data attached to each image.
train_gen = image.ImageDataGenerator().flow_from_directory(dataset_path_train, target_size=input_shape[:2], batch_size=batch_size, class_mode='categorical', shuffle=True, seed=seed)
test_gen = image.ImageDataGenerator().flow_from_directory(dataset_path_valid, target_size=input_shape[:2], batch_size=batch_size, class_mode='categorical', shuffle=True, seed=seed)
Data prep for the model:
For each image I also have a {text}, {label} key-value pair.
To encode the text, I could pass it through either
1. Word2Vec
2. TF-IDF
I have read about the Embedding layer in Keras, but I am not sure how to feed the text data along with test_gen and train_gen into the model (in an intermediate layer, or after Flatten()).
base_model = ResNet50(weights='imagenet', include_top=False,
                      input_shape=input_shape)
from keras.models import Model, load_model
x = base_model.output
x = Flatten(name='flatten')(x)
predictions = Dense(3, activation='softmax', name='predictions')(x)
model = Model(inputs=base_model.input, outputs=predictions)
for layer in model.layers[0:141]:
    layer.trainable = True
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(train_gen,steps_per_epoch=1000 , epochs=2,validation_steps=100, validation_data=test_gen,verbose=1)
This was solved after reading through the Keras guide on multi-input and multi-output models.
The text input was transformed with sklearn's TF-IDF vectorizer, which resulted in 26,078 features per sample:
x = base_model.output
x = Flatten(name='flatten')(x)
x2 = Input(shape=(26078,), dtype='float32', name='tfidf_input')
combinedInput = concatenate([x2, x])
x = Dense(1024, activation="relu")(combinedInput)
# A regularizer could be added here, e.g. x = Dense(1024, activation="relu", activity_regularizer=regularizers.l1(0.01))
# x1 = Dense(512, activation='softmax', name='predictions')(x)
predictions = Dense(3, activation='softmax', name='predictions')(x)
model = Model(inputs=[base_model.input, x2], outputs=predictions)
for layer in model.layers[0:141]:
    layer.trainable = True
I added an Input layer of dimension (26078,), which receives the text after it has been transformed with TF-IDF (this adds the text features to the model).
When fitting the data on the model:
model.fit([image_data, text_feature], label, epochs=50, validation_split=0.10, steps_per_epoch=100, validation_steps=8, shuffle=True)
It takes two inputs: the ResNet image input and an array of shape [datasize, 26078].
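For completeness, a minimal sketch of how the text side might be prepared (my assumption of the missing step; texts is a hypothetical list with one string per image, in the same order as image_data):
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
vectorizer = TfidfVectorizer()
# One row of TF-IDF features per image, as a dense float32 array for Keras
text_feature = vectorizer.fit_transform(texts).toarray().astype(np.float32)
print(text_feature.shape)  # (datasize, vocabulary_size), e.g. (N, 26078)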

VGG16 Keras fine tuning: low accuracy

I've already asked a similar question here, but now I have a slightly different problem, so I am asking a new question.
I decided to use a slightly different approach, instead of the one proposed among the answers to the referenced question, to train and then fine-tune a model.
Update: I've replaced the old question provided here with a more suitable version.
Here is my sequence of actions:
1. Build VGG16 model and drop top layer (call it no-top model)
2. Generate bottleneck features using no-top model
3. Train a separate fully-connected model using bottleneck features
4. Build new VGG16 model, drop top layers, and attach pretrained top-model
5. Train concatenated model on dogs/cats data
And here is the code I use to implement the aforementioned sequence of actions:
from __future__ import print_function  # __future__ imports must come before any other statement
import warnings
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', DeprecationWarning)
from itertools import izip_longest as zip_longest
from pprint import pformat as pf
from pprint import pprint as pp
import os
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.layers import Dropout, Flatten, Dense, InputLayer, Lambda
from keras.models import Sequential, Model, load_model
from keras.utils.data_utils import get_file
from keras.optimizers import SGD
import keras.backend as K
import numpy as np
RANDOM_STATE = 1
IMAGE_WIDTH = 224
IMAGE_HEIGHT = 224
BATCH_SIZE = 4
VGG_MEAN = np.array([123.68, 116.779, 103.939]).reshape((3, 1, 1))
VGG16_WEIGHTS_PATH = 'http://www.platform.ai/models/vgg16.h5'
DATA_ROOT = os.path.join(os.path.expanduser('~'), 'data', 'dogscats')
TRAIN_DIR = os.path.join(DATA_ROOT, 'train')
VALID_DIR = os.path.join(DATA_ROOT, 'valid')
SAMPLES_DIR = os.path.expanduser('~/dogscats_samples')
np.random.seed(RANDOM_STATE)
K.set_image_dim_ordering('th')
def get_batches(dirname, gen=ImageDataGenerator(), shuffle=True,
                batch_size=BATCH_SIZE, class_mode='categorical'):
    return gen.flow_from_directory(
        os.path.join(SAMPLES_DIR, dirname),
        target_size=(IMAGE_WIDTH, IMAGE_HEIGHT),
        class_mode=class_mode,
        shuffle=shuffle,
        batch_size=batch_size)
def vgg_preprocess(x):
    # Subtract the ImageNet channel means and reverse RGB to BGR (channels-first layout)
    x = x - VGG_MEAN
    return x[:, ::-1]
def conv_block(model, n_layers, n_filters, name='block'):
    for i in range(n_layers):
        model.add(ZeroPadding2D((1, 1), name='%s_padding_%s' % (name, i)))
        model.add(Conv2D(n_filters, (3, 3), activation='relu', name='%s_conv2d_%s' % (name, i)))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='%s_maxpool' % name))
def fc_block(model, name='block'):
    model.add(Dense(4096, activation='relu', name=name + '_dense'))
    model.add(Dropout(0.5))
def build_vgg_16():
    model = Sequential()
    input_shape = (3, IMAGE_WIDTH, IMAGE_HEIGHT)
    model.add(InputLayer(input_shape=input_shape))
    model.add(Lambda(vgg_preprocess))
    conv_block(model, n_layers=2, n_filters=64, name='block1')
    conv_block(model, n_layers=2, n_filters=128, name='block2')
    conv_block(model, n_layers=3, n_filters=256, name='block3')
    conv_block(model, n_layers=3, n_filters=512, name='block4')
    conv_block(model, n_layers=3, n_filters=512, name='block5')
    model.add(Flatten())
    fc_block(model)
    fc_block(model)
    model.add(Dense(1000, activation='softmax'))
    return model
def train_finetuned_model():
    file_path = get_file('vgg16.h5', VGG16_WEIGHTS_PATH, cache_subdir='models')
    print('Building VGG16 (no-top) model to generate bottleneck features')
    vgg16_notop = build_vgg_16()
    vgg16_notop.load_weights(file_path)
    for _ in range(6):
        vgg16_notop.pop()
    vgg16_notop.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
    train_batches = get_batches('train', shuffle=False, class_mode=None)
    train_labels = np.array([0]*1000 + [1]*1000)
    bottleneck_train = vgg16_notop.predict_generator(train_batches, steps=2000 // BATCH_SIZE)
    valid_batches = get_batches('valid', shuffle=False, class_mode=None)
    valid_labels = np.array([0]*400 + [1]*400)
    bottleneck_valid = vgg16_notop.predict_generator(valid_batches, steps=800 // BATCH_SIZE)
    print('Training top model on bottleneck features')
    top_model = Sequential()
    top_model.add(Flatten(input_shape=bottleneck_train.shape[1:]))
    top_model.add(Dense(256, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(1, activation='sigmoid'))
    top_model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    top_model.fit(bottleneck_train, train_labels,
                  batch_size=32, epochs=50,
                  validation_data=(bottleneck_valid, valid_labels))
    print('Concatenate new VGG16 (without top layer) with pretrained top model')
    vgg16_fine = build_vgg_16()
    vgg16_fine.load_weights(file_path)
    for _ in range(6):
        vgg16_fine.pop()
    vgg16_fine.add(Flatten(name='top_flatten'))
    vgg16_fine.add(Dense(256, activation='relu', name='top_dense'))
    vgg16_fine.add(Dropout(0.5, name='top_dropout'))
    vgg16_fine.add(Dense(1, activation='sigmoid', name='top_sigmoid'))
    for i, layer in enumerate(reversed(top_model.layers), 1):
        pretrained_weights = layer.get_weights()
        vgg16_fine.layers[-i].set_weights(pretrained_weights)
    for layer in vgg16_fine.layers[:26]:
        layer.trainable = False
    vgg16_fine.compile(optimizer=SGD(lr=1e-4, momentum=0.9),
                       loss='binary_crossentropy',
                       metrics=['accuracy'])
    print('Train concatenated model on dogs/cats dataset sample')
    train_datagen = ImageDataGenerator(rescale=1./255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1./255)
    train_batches = get_batches('train', gen=train_datagen, class_mode='binary')
    valid_batches = get_batches('valid', gen=test_datagen, class_mode='binary')
    vgg16_fine.fit_generator(train_batches,
                             steps_per_epoch=2000 // BATCH_SIZE,
                             epochs=50,
                             validation_data=valid_batches,
                             validation_steps=800 // BATCH_SIZE)
    return vgg16_fine
final_model = train_finetuned_model()
But the problem is that the model's accuracy drops drastically: after 50 epochs it is around 50%. So I've probably done something wrong.
Maybe something is wrong with the parameters, i.e. the learning rate, batch size, etc.?
Your fully connected layers look totally different from the original VGG architecture.
# yours
Flatten()
Dense(256, activation='relu')
Dense(1, activation='sigmoid')
# original
Flatten()
Dense(4096, activation='relu')
Dense(4096, activation='relu')
Dense(2, activation='softmax')
Two points:
1. The last layer should be a 2-class softmax instead of a sigmoid. With a sigmoid, the accuracy is probably not computed the way you expect.
2. The complexity (number of neurons and layers) seems to be too low.
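In code, the minimal change to the top model would look like this (a sketch based on the question's top_model; to_categorical turns the 0/1 labels into one-hot vectors):
from keras.utils.np_utils import to_categorical
top_model.add(Dense(2, activation='softmax'))  # instead of Dense(1, activation='sigmoid')
top_model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
top_model.fit(bottleneck_train, to_categorical(train_labels),
              batch_size=32, epochs=50,
              validation_data=(bottleneck_valid, to_categorical(valid_labels)))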
Well, not sure if it is the right solution, but I was able to increase accuracy at least up to 70% with this code (probably the main reasons are the decreased learning rate and more epochs):
from keras.optimizers import RMSprop
from keras.utils.np_utils import to_categorical
def train_finetuned_model(lr=1e-5, verbose=True):
    file_path = get_file('vgg16.h5', VGG16_WEIGHTS_PATH, cache_subdir='models')
    if verbose:
        print('Building VGG16 (no-top) model to generate bottleneck features.')
    vgg16_notop = build_vgg_16()
    vgg16_notop.load_weights(file_path)
    for _ in range(6):
        vgg16_notop.pop()
    vgg16_notop.compile(optimizer=RMSprop(lr=lr), loss='categorical_crossentropy', metrics=['accuracy'])
    if verbose:
        print('Bottleneck features generation.')
    train_batches = get_batches('train', shuffle=False, class_mode=None, batch_size=BATCH_SIZE)
    train_labels = np.array([0]*1000 + [1]*1000)
    train_bottleneck = vgg16_notop.predict_generator(train_batches, steps=2000 // BATCH_SIZE)
    valid_batches = get_batches('valid', shuffle=False, class_mode=None, batch_size=BATCH_SIZE)
    valid_labels = np.array([0]*400 + [1]*400)
    valid_bottleneck = vgg16_notop.predict_generator(valid_batches, steps=800 // BATCH_SIZE)
    if verbose:
        print('Training top model on bottleneck features.')
    top_model = Sequential()
    top_model.add(Flatten(input_shape=train_bottleneck.shape[1:]))
    top_model.add(Dense(4096, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(4096, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(2, activation='softmax'))
    top_model.compile(optimizer=RMSprop(lr=lr), loss='categorical_crossentropy', metrics=['accuracy'])
    top_model.fit(train_bottleneck, to_categorical(train_labels),
                  batch_size=32, epochs=10,
                  validation_data=(valid_bottleneck, to_categorical(valid_labels)))
    if verbose:
        print('Concatenate new VGG16 (without top layer) with pretrained top model.')
    vgg16_fine = build_vgg_16()
    vgg16_fine.load_weights(file_path)
    for _ in range(6):
        vgg16_fine.pop()
    vgg16_fine.add(Flatten(name='top_flatten'))
    vgg16_fine.add(Dense(4096, activation='relu'))
    vgg16_fine.add(Dropout(0.5))
    vgg16_fine.add(Dense(4096, activation='relu'))
    vgg16_fine.add(Dropout(0.5))
    vgg16_fine.add(Dense(2, activation='softmax'))
    vgg16_fine.compile(optimizer=RMSprop(lr=lr), loss='categorical_crossentropy', metrics=['accuracy'])
    if verbose:
        print('Loading pre-trained weights into concatenated model')
    for i, layer in enumerate(reversed(top_model.layers), 1):
        pretrained_weights = layer.get_weights()
        vgg16_fine.layers[-i].set_weights(pretrained_weights)
    for layer in vgg16_fine.layers[:26]:
        layer.trainable = False
    if verbose:
        print('Layers training status:')
        for layer in vgg16_fine.layers:
            print('[%6s] %s' % ('' if layer.trainable else 'FROZEN', layer.name))
    vgg16_fine.compile(optimizer=RMSprop(lr=1e-6), loss='binary_crossentropy', metrics=['accuracy'])
    if verbose:
        print('Train concatenated model on dogs/cats dataset sample.')
    train_datagen = ImageDataGenerator(rescale=1./255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1./255)
    train_batches = get_batches('train', gen=train_datagen, class_mode='categorical', batch_size=BATCH_SIZE)
    valid_batches = get_batches('valid', gen=test_datagen, class_mode='categorical', batch_size=BATCH_SIZE)
    vgg16_fine.fit_generator(train_batches, epochs=100,
                             steps_per_epoch=2000 // BATCH_SIZE,
                             validation_data=valid_batches,
                             validation_steps=800 // BATCH_SIZE)
    return vgg16_fine
I guess there is a way to achieve much better results with fine-tuning (up to 98%), but I wasn't able to achieve them with the provided code.
