I am trying to build a deep learning model with Keras, using the VGG16 architecture on DICOM images.
I used the following data generator to process my images:
```
# tested on tf 2.1
import numpy as np
import tensorflow as tf
import tensorflow_io as tfio
from keras_preprocessing.image.dataframe_iterator import DataFrameIterator

class DCMDataFrameIterator(DataFrameIterator):
    def __init__(self, *args, **kwargs):
        self.white_list_formats = ('dcm',)  # a tuple; ('dcm') would be a plain string
        super(DCMDataFrameIterator, self).__init__(*args, **kwargs)
        self.dataframe = kwargs['dataframe']
        self.x = self.dataframe[kwargs['x_col']]
        self.y = self.dataframe[kwargs['y_col']]
        self.color_mode = kwargs['color_mode']
        self.target_size = kwargs['target_size']

    def _get_batches_of_transformed_samples(self, indices_array):
        # get batch of images
        batch_x = np.array([self.read_dcm_as_array(dcm_path, self.target_size, color_mode=self.color_mode)
                            for dcm_path in self.x.iloc[indices_array]])
        batch_y = np.array(self.y.iloc[indices_array].astype(np.uint8))  # astype because y was passed as str

        # transform images
        if self.image_data_generator is not None:
            for i, (x, y) in enumerate(zip(batch_x, batch_y)):
                transform_params = self.image_data_generator.get_random_transform(x.shape)
                batch_x[i] = self.image_data_generator.apply_transform(x, transform_params)
                # you can change y here as well, e.g. in semantic segmentation you want to
                # transform the masks as well, using the same image_data_generator transformations
        return batch_x, batch_y

    @staticmethod
    def read_dcm_as_array(dcm_path, target_size=(256, 256), color_mode='rgb'):
        img = tf.io.read_file(dcm_path)
        img = tfio.image.decode_dicom_image(img, dtype=tf.uint16)
        img = tf.image.resize(img, target_size)
        img = tf.image.grayscale_to_rgb(img, name=None)  # convert grayscale image to RGB for VGG16
        # img = np.expand_dims(img, -1)
        return img
```
and:
```
# you can use preprocessing_function instead of rescale in all generators
# if you are using a pretrained network
train_augmentation_parameters = dict(
    rescale=1.0/255.0,
    rotation_range=10,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest',
    brightness_range=[0.8, 1.2],
    validation_split=0.2
)

valid_augmentation_parameters = dict(
    rescale=1.0/255.0,
    validation_split=0.2
)

test_augmentation_parameters = dict(
    rescale=1.0/255.0
)

# training parameters
BATCH_SIZE = 32
CLASS_MODE = 'sparse'
COLOR_MODE = 'grayscale'
TARGET_SIZE = (300, 300)
EPOCHS = 10
SEED = 1337

train_consts = {
    'seed': SEED,
    'batch_size': BATCH_SIZE,
    'class_mode': CLASS_MODE,
    'color_mode': COLOR_MODE,
    'target_size': TARGET_SIZE,
    'subset': 'training'
}

valid_consts = {
    'seed': SEED,
    'batch_size': BATCH_SIZE,
    'class_mode': CLASS_MODE,
    'color_mode': COLOR_MODE,
    'target_size': TARGET_SIZE,
    'subset': 'validation'
}

test_consts = {
    'batch_size': 1,  # should be 1 in testing
    'class_mode': CLASS_MODE,
    'color_mode': COLOR_MODE,
    'target_size': TARGET_SIZE,  # resize input images
    'shuffle': False
}
```
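As the first comment in the block above notes, with a pretrained network you could use preprocessing_function instead of rescale. A minimal sketch of that swap, assuming the tf.keras VGG16 preprocessing (everything else unchanged):

```
from tensorflow.keras.applications.vgg16 import preprocess_input

train_augmentation_parameters = dict(
    preprocessing_function=preprocess_input,  # instead of rescale=1.0/255.0
    rotation_range=10,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest',
    brightness_range=[0.8, 1.2],
    validation_split=0.2
)
```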
and:
```
# Using the training phase generators
train_augmenter = ImageDataGenerator(**train_augmentation_parameters)
valid_augmenter = ImageDataGenerator(**valid_augmentation_parameters)

train_generator = DCMDataFrameIterator(dataframe=df_merged,
                                       x_col='files',
                                       y_col='class',
                                       image_data_generator=train_augmenter,
                                       **train_consts)

valid_generator = DCMDataFrameIterator(dataframe=df_merged,
                                       x_col='files',
                                       y_col='class',
                                       image_data_generator=valid_augmenter,
                                       **valid_consts)
```
Output of the last commands:
```
Found 7828 validated image filenames belonging to 4 classes.
Found 1956 validated image filenames belonging to 4 classes.
```
Then:
```
base_model = VGG16(weights='imagenet', include_top=False)
n_class = 4

# Freeze the VGG16 layers
for layer in base_model.layers:
    layer.trainable = False

model = Sequential()
model.add(base_model)  # add the VGG16 base model
model.add(GlobalAveragePooling2D())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(rate=0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(rate=0.2))
model.add(Dense(n_class, activation='softmax'))
```
I compile, but when I call .fit I get the following message:
`ValueError: Input arrays must be multi-channel 2D images.`
Could you help me?
Thank you in advance,
Thibaut
I had a look on Stack Overflow to try to find a solution!
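For context on where this message originates: Keras' affine-transform utility raises this exact ValueError when the array it receives is not 3-D (height, width, channels). Since `tfio.image.decode_dicom_image` keeps a leading frame axis, the arrays reaching `apply_transform` may be 4-D. A minimal diagnostic sketch, assuming the reader shown above; the squeeze is an assumption about the data (single-frame DICOMs):

```
# inspect one decoded image before batching
sample = DCMDataFrameIterator.read_dcm_as_array(df_merged['files'].iloc[0],
                                                target_size=TARGET_SIZE)
print(sample.shape)  # a 4-D result such as (1, 300, 300, 3) would explain the error,
                     # since apply_transform requires 3-D (height, width, channels)

# possible fix inside read_dcm_as_array, assuming single-frame DICOMs:
# img = tf.squeeze(img, axis=0)
```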
I am trying to predict a sequence of images. Before, I was using only a CNN that took these concatenated images as input, but it didn't give me very good results with some databases.
I am using two types of databases: one takes a single image and classifies it; the other takes a sequence of images and classifies it. So I use total_x_test_indexes=tf.expand_dims(total_x_test_indexes, axis=1) to generalize the model to the case where it classifies only one image.
As I saw, I could do better with a CNN followed by an LSTM, and I saw how to do it here.
But I'm only getting confusion matrices like this, classifying almost everything into one class.
My code is this:
```
inp = Input((None, size_image, size_image, 1), ragged=True)
x = TimeDistributed(cnn)(inp)
x = LSTM(25)(x)

size_predictions = len(dicTiposNumbers)
print("size", size_predictions)
out = Dense(size_predictions)(x)
model = Model(inp, out)
print(model.summary())

opt = keras.optimizers.Adam(learning_rate=0.05)
opt = keras.optimizers.SGD(learning_rate=0.15)  # note: this overrides the Adam optimizer above

# Compile the model
model.compile(optimizer=opt,
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])

print('------------------------------------------------------------------------')
print(f'Training for fold {fold_no} ...')
total_x_train_indexes = tf.gather(total_x, indices=train)
total_y_train_indexes = tf.gather(total_y, indices=train)
total_x_train_indexes = tf.expand_dims(total_x_train_indexes, axis=1)
print("shape after gather", np.shape(total_x_train_indexes[0]))

history = model.fit(total_x_train_indexes, total_y_train_indexes,
                    batch_size=512,
                    epochs=5)
```
But I'm getting this, and similar results with other databases with more classes.
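One detail worth noting in the snippet above: the final Dense layer has no activation, so it emits logits, while the loss is built with from_logits=False. A sketch of the two consistent pairings, using the snippet's own names:

```
# option 1: probabilities out of the model, loss expects probabilities
out = Dense(size_predictions, activation='softmax')(x)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)

# option 2: raw logits out of the model, loss told to expect logits
out = Dense(size_predictions)(x)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
```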
From your question, start by determining the network's purpose and making the input data responsive to it. I created a simple custom layer to show that a processing layer is nothing more than a simple calculation: at each layer, the processed output flows from the convolution layers into the dense layers.
Sample: the improving method is done by comparing input against output and trying to expand the effects.
As for the confusion matrix, it tries to show the effects of the overall model training when predicting with the input data.
```
import os
from os.path import exists

import tensorflow as tf
import matplotlib.pyplot as plt

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
BATCH_SIZE = 1
IMAGE_SIZE = ( 21, 16 )
objects_classes = [ 'plane', 'helicopter', 'truck' ]

checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
if not exists(checkpoint_dir):
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Class / Definition
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class MyLSTMLayer( tf.keras.layers.LSTM ):
    def __init__(self, units, return_sequences, return_state):
        super(MyLSTMLayer, self).__init__( units, return_sequences=True, return_state=False )
        self.num_units = units

    def build(self, input_shape):
        self.kernel = self.add_weight("kernel",
                                      shape=[int(input_shape[-1]), self.num_units])

    def call(self, inputs):
        return tf.matmul(inputs, self.kernel)

def gen():
    train_generator = tf.keras.preprocessing.image.ImageDataGenerator(
        # rescale=1./255,
        # shear_range=0.2,
        # zoom_range=0.2,
        # horizontal_flip=True
    )
    train_generator = train_generator.flow_from_directory(
        'F:\\temp\\image_catagorize',
        classes=[ 'plane', 'helicopter', 'truck' ],
        target_size=IMAGE_SIZE,
        batch_size=BATCH_SIZE,
        color_mode='grayscale',
        class_mode='sparse',  # None # categorical # binary # sparse
        subset='training')
    return train_generator

train_generator = gen()
val_generator = train_generator

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class custom_callback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if( logs['accuracy'] >= 0.97 ):
            self.model.stop_training = True

custom_callback = custom_callback()

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
mycustomlayer = MyLSTMLayer( 64, True, False )

model = tf.keras.models.Sequential([
    tf.keras.layers.InputLayer(input_shape=( IMAGE_SIZE[0], IMAGE_SIZE[1], 1 ), name="Input_01Layer"),
    tf.keras.layers.UpSampling2D(size=(4, 4), name="UpSampling2DLayer_01"),
    tf.keras.layers.Normalization(mean=3., variance=2., name="NormalizationLayer_01"),
    tf.keras.layers.Normalization(mean=4., variance=6., name="NormalizationLayer_02"),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', name="Conv2DLayer_01"),
    tf.keras.layers.MaxPooling2D((3, 3), name="MaxPooling2DLayer_01"),
    tf.keras.layers.Conv2D(32, (2, 2), activation='relu', name="Conv2DLayer_02"),
    tf.keras.layers.MaxPooling2D((2, 2), name="MaxPooling2DLayer_02"),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu', name="Conv2DLayer_03"),
    tf.keras.layers.Reshape(( 7 * 11, 64 )),
    ###
    mycustomlayer,
    ###
    tf.keras.layers.Dense(512, activation='relu', name="DenseLayer_01"),
    tf.keras.layers.Flatten(name="FlattenLayer_01"),
    tf.keras.layers.Dense(192, activation='relu', name="DenseLayer_02"),
    tf.keras.layers.Dense(3, name="DenseLayer_03"),
], name="MyModelClassification")

model.summary()

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.SGD(
    learning_rate=0.000001,
    momentum=0.5,
    nesterov=True,
    name='SGD',
)

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True,
    reduction=tf.keras.losses.Reduction.AUTO,
    name='sparse_categorical_crossentropy'
)

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path):
    model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")

"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit(train_generator, validation_data=val_generator, batch_size=100, epochs=3, callbacks=[custom_callback])
model.save_weights(checkpoint_path)

PATH = os.path.join('F:\\temp\\image_catagorize\\helicopter', '*.png')
files = tf.data.Dataset.list_files(PATH)
list_file = []
for file in files.take(20):
    image = tf.io.read_file( file )
    image = tf.io.decode_png( image, channels=1, dtype=tf.dtypes.uint8, name='decode_png' )
    image = tf.keras.preprocessing.image.img_to_array(image)
    image = tf.image.resize(image, IMAGE_SIZE, method='nearest')
    list_file.append(image)

PATH = os.path.join('F:\\temp\\image_catagorize\\plane', '*.png')
files = tf.data.Dataset.list_files(PATH)
for file in files.take(8):
    image = tf.io.read_file( file )
    image = tf.io.decode_png( image, channels=1, dtype=tf.dtypes.uint8, name='decode_png' )
    image = tf.keras.preprocessing.image.img_to_array(image)
    image = tf.image.resize(image, IMAGE_SIZE, method='nearest')
    list_file.append(image)

PATH = os.path.join('F:\\temp\\image_catagorize\\Truck', '*.png')
files = tf.data.Dataset.list_files(PATH)
for file in files.take(8):
    image = tf.io.read_file( file )
    image = tf.io.decode_png( image, channels=1, dtype=tf.dtypes.uint8, name='decode_png' )
    image = tf.keras.preprocessing.image.img_to_array(image)
    image = tf.image.resize(image, IMAGE_SIZE, method='nearest')
    list_file.append(image)

plt.figure(figsize=(6, 6))
plt.title("Actors recognitions")
for i in range(len(list_file)):
    img = tf.keras.preprocessing.image.array_to_img(
        list_file[i],
        data_format=None,
        scale=True
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    score = tf.nn.softmax(predictions[0])

    plt.subplot(6, 6, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(list_file[i])
    plt.xlabel(str(round(score[tf.math.argmax(score).numpy()].numpy(), 2)) + ":" + str(objects_classes[tf.math.argmax(score)]))
plt.show()
```
I have some code for a mixed model: one branch trains on an EfficientNet and the rest on some external data that I have combined. The following is an example of the model:
```
def create_model():
    # Define parameters
    inputShape = (256, 256, 3)
    inputDim = 8

    # define MLP network
    model = Sequential()
    model.add(Dense(8, input_dim=inputDim, activation="relu"))
    model.add(Dense(4, activation="relu"))

    cnnModel = Sequential()
    cnnModel.add(EfficientNetB5(include_top=False, input_shape=inputShape))
    cnnModel.add(Flatten())
    cnnModel.add(Dense(units=16, activation='relu'))
    cnnModel.add(Dense(units=4, activation='relu'))

    # Concatenate them
    fullModel = concatenate([cnnModel.output, model.output])
    fullModel = Dense(4, activation="relu")(fullModel)
    fullModel = Dense(1, activation="sigmoid")(fullModel)

    model = Model(inputs=[cnnModel.input, model.input], outputs=fullModel)
    return model
```
However, when I run this through the fit_generator function I receive the following error:
```
batch_size = 16
train_steps = TrainData.shape[0]//batch_size
valid_steps = TrainData.shape[0]//batch_size

model = create_model()
opt = Adam(lr=1e-3, decay=1e-3 / 200)
model.compile(loss="binary_crossentropy", optimizer=opt)

print("[INFO] training model...")
model.fit_generator(
    train_dl,
    epochs=3,
    steps_per_epoch=train_steps
)
model.save("models/final_model")
```

```
InvalidArgumentError: Incompatible shapes: [16,3,256,256] vs. [1,1,1,3]
     [[node model_47/efficientnetb5/normalization_52/sub (defined at <ipython-input-262-76be6a4af4a4>:11) ]] [Op:__inference_train_function_1072272]
```
I'm unsure where this error is coming from, whether it's in the data loader or in the EfficientNet. Any ideas?
Edit to include data loader:
```
def data_generator(image_dir, dataframe, min_max, binary, category, transforms=None, batch_size=16):
    i = 0
    samples_per_epoch = dataframe.shape[0]
    number_of_batches = samples_per_epoch / batch_size
    while True:
        batch = {'images': [], 'data': [], 'labels': []}  # use a dict for multiple inputs
        # Randomly sample images in dataframe
        idx = i
        img_path = f"{image_dir}/{dataframe.iloc[idx]['image_name']}.jpg"
        img = Image.open(img_path)
        if transforms:
            img = transforms(**{"image": np.array(img)})["image"]
        img = np.asarray(img, dtype="int32")

        # make data into tensors
        dataframe2 = dataframe.iloc[idx]
        data_cont = min_max.transform(np.array(dataframe2['age_approx']).reshape(1, -1))
        data_bina = binary.transform(dataframe2['sex'])
        data_cate = category.transform(dataframe2['anatom_site_general_challenge'])
        data_total = np.concatenate((data_cont, data_bina, data_cate), axis=1)
        label = dataframe2['target']

        batch['images'].append(img)
        batch['data'].append(data_total)
        batch['labels'].append(label)

        batch['images'] = np.array(batch['images'])  # convert each list to array
        batch['data'] = np.array(batch['data'])
        batch['labels'] = np.array(batch['labels'])

        i += 1
        if i >= number_of_batches:
            i = 0
        yield [batch['images'], batch['data']], batch['labels']

def get_data(train_df, valid_df, train_tfms, test_tfms, batch_size, min_max, binary, category):
    train_dl = data_generator(image_dir='train/', dataframe=train_df, batch_size=batch_size, min_max=min_max, binary=binary, category=category, transforms=train_tfms)
    valid_dl = data_generator(image_dir='train/', dataframe=valid_df, batch_size=batch_size*2, min_max=min_max, binary=binary, category=category, transforms=test_tfms)
    return train_dl, valid_dl
```
I seem to have the same issue when I just use the images and the EfficientNet. It seems like using the Keras inbuilt image data loader functions is the only way I can get it to work (with just images).
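For reference, a hedged reading of the shapes in the error: [1,1,1,3] is the broadcast shape of the Normalization constants inside EfficientNet (one value per RGB channel, on the last axis), while the batch arrives as [16,3,256,256], i.e. channels-first. If the transforms happen to emit channel-first arrays, a sketch of a check to add inside the generator (the heuristic is an assumption):

```
import numpy as np

def ensure_channels_last(img):
    # EfficientNet expects (H, W, 3); transpose if the array looks
    # channel-first, e.g. (3, 256, 256).
    # Heuristic assumption: real images are larger than 3 pixels per side.
    if img.ndim == 3 and img.shape[0] == 3 and img.shape[-1] != 3:
        img = np.transpose(img, (1, 2, 0))
    return img
```

After this, batch['images'] should have shape (batch, 256, 256, 3), matching inputShape in create_model().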
I trained a ResNetV2 model for face recognition and got a validation accuracy of about 90%. I am using 320 grayscale images from 40 classes for training. I am unable to preprocess an image so that I can use the model.predict() function. I did not use the preprocess_input function because I was getting low accuracy when using it to train the model. Please help.
```
base_model = ResNet50V2(weights='imagenet', include_top=False, input_shape=(224, 224, 3))

# don't train existing weights
for layer in base_model.layers:
    layer.trainable = False

x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)  # dense layer 1
x = Dropout(0.2)(x)
preds = Dense(40, activation='softmax')(x)  # final layer with softmax activation
model = Model(inputs=base_model.input, outputs=preds)
model.summary()

from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)

training_set = train_datagen.flow_from_directory(train_path,
                                                 target_size=(224, 224),
                                                 batch_size=8,
                                                 class_mode='categorical')
test_set = test_datagen.flow_from_directory(valid_path,
                                            target_size=(224, 224),
                                            batch_size=8,
                                            class_mode='categorical')

model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
training = model.fit(
    training_set,
    validation_data=test_set,
    epochs=40,
    steps_per_epoch=8
)

img = image.load_img('/content/9.pgm.jpg.jpg', target_size=(224, 224, 3))
pixels = image.img_to_array(img)
pixels = pixels.astype('float32')
pixels /= 255.0
model.predict(pixels)
```
Edit: the code used for preprocessing, which I eventually discarded, was:
```
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.resnet50 import preprocess_input

datagen = ImageDataGenerator(preprocessing_function=preprocess_input)

training_set = datagen.flow_from_directory(train_path,
                                           target_size=(224, 224),
                                           batch_size=8,
                                           class_mode='categorical')
test_set = datagen.flow_from_directory(valid_path,
                                       target_size=(224, 224),
                                       batch_size=8,
                                       class_mode='categorical')
```
The correct code to be used for prediction is as below:
```
from PIL import Image
from tensorflow.keras.utils import load_img

class_names = ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']

img = tf.keras.utils.load_img('./dataset/rose_test.jpeg', target_size=(224, 224))
pixels = tf.keras.utils.img_to_array(img)
pixels = tf.expand_dims(pixels, 0)
#pixels = pixels.astype('float32')
pixels /= 255.0
pred = model.predict(pixels)  # as I have used a 5-class dataset
pred
```
Output:
```
array([[1.0110709e-07, 6.0101044e-08, 9.4008398e-01, 4.7367696e-08,
        5.9915919e-02]], dtype=float32)
```
To get the class name of the loaded image:
```
print(class_names[np.argmax(pred)])
```
Output:
```
roses
```
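Applied back to the question's own snippet, the essential missing step was the batch axis. A sketch using the question's own path and the 40-class model (path kept as in the question):

```
img = tf.keras.utils.load_img('/content/9.pgm.jpg.jpg', target_size=(224, 224))
pixels = tf.keras.utils.img_to_array(img)
pixels = tf.expand_dims(pixels, 0)  # shape (1, 224, 224, 3): the batch axis predict() needs
pixels /= 255.0                     # same scaling as the training generators
pred = model.predict(pixels)
```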
I've been trying to train a CNN using Keras, with data augmentation applied to a series of images and their segmentation masks. The online example says that in order to do this I should create two separate generators using flow_from_directory() and then zip them.
But can I instead just have two numpy arrays for the images and masks, use the flow() function, and do this:
```
# Create image generator
data_gen_args = dict(rotation_range=5,
                     width_shift_range=0.1,
                     height_shift_range=0.1,
                     validation_split=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
seed = 1

# Create training and validation generators including masks
train_generator = image_datagen.flow(images, masks, seed=seed, subset='training')
val_train_generator = image_datagen.flow(images, masks, seed=seed, subset='validation')

# Train model
model.fit_generator(train_generator, steps_per_epoch=50,
                    validation_data=val_train_generator,
                    validation_steps=10, shuffle=True, epochs=20)
```
And if not, why not? It seems that if I run through the generator, I can only output the images and not the masks as well, so I'm concerned it's not doing what I'd like it to.
You need a custom generator that applies the same augmentation to both image and mask.
Keras' ImageDataGenerator takes two arguments (image, and label or mask) and applies transformations only to the first (the image). You can use my generator below:
```
# Create image generator
data_gen_args = dict(rotation_range=5,
                     width_shift_range=0.1,
                     height_shift_range=0.1,
                     validation_split=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
seed = 1

def XYaugmentGenerator(X1, y, seed, batch_size):
    # two flows over the same arrays with the same seed, so images and masks
    # receive identical random transformations
    genX1 = image_datagen.flow(X1, y, batch_size=batch_size, seed=seed)
    genX2 = image_datagen.flow(y, X1, batch_size=batch_size, seed=seed)
    while True:
        X1i = genX1.next()
        X2i = genX2.next()
        yield X1i[0], X2i[0]  # augmented image batch, augmented mask batch

# Train model
model.fit_generator(XYaugmentGenerator(images, masks, seed, batch_size),
                    steps_per_epoch=np.ceil(float(len(images)) / float(batch_size)),
                    validation_data=XYaugmentGenerator(images_valid, masks_valid, seed, batch_size),
                    validation_steps=np.ceil(float(len(images_valid)) / float(batch_size)),
                    shuffle=True, epochs=20)
```
In deep learning, for a segmentation problem, one can use a custom 'DataGenerator' function instead of Keras' ImageDataGenerator.
How to write a custom DataGenerator?
Writing a custom DataGenerator helps when dealing with an image segmentation problem.
Solution:
If your training and test images are in a folder and the masks and labels are in a csv, use the function below, a custom DataGenerator:
```
import numpy as np
import cv2
import keras
import albumentations as albu
# build_masks and np_resize are helper functions assumed to be defined elsewhere
# (RLE-decoding the masks and resizing numpy images, respectively)

class DataGenerator(keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, df, target_df=None, mode='fit',
                 base_path='../train_images',
                 batch_size=16, dim=(1400, 2100), n_channels=3, reshape=None,
                 augment=False, n_classes=2, random_state=42, shuffle=True):
        self.dim = dim
        self.batch_size = batch_size
        self.df = df
        self.mode = mode
        self.base_path = base_path
        self.target_df = target_df
        self.list_IDs = list_IDs
        self.reshape = reshape
        self.n_channels = n_channels
        self.augment = augment
        self.n_classes = n_classes
        self.shuffle = shuffle
        self.random_state = random_state

        self.on_epoch_end()
        np.random.seed(self.random_state)

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        list_IDs_batch = [self.list_IDs[k] for k in indexes]

        X = self.__generate_X(list_IDs_batch)

        if self.mode == 'fit':
            y = self.__generate_y(list_IDs_batch)
            if self.augment:
                X, y = self.__augment_batch(X, y)
            return X, y
        elif self.mode == 'predict':
            return X
        else:
            raise AttributeError('The mode parameter should be set to "fit" or "predict".')

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.seed(self.random_state)
            np.random.shuffle(self.indexes)

    def __generate_X(self, list_IDs_batch):
        'Generates data containing batch_size samples'
        # Initialization
        if self.reshape is None:
            X = np.empty((self.batch_size, *self.dim, self.n_channels))
        else:
            X = np.empty((self.batch_size, *self.reshape, self.n_channels))

        # Generate data
        for i, ID in enumerate(list_IDs_batch):
            im_name = self.df['ImageId'].iloc[ID]
            img_path = f"{self.base_path}/{im_name}"
            img = self.__load_rgb(img_path)

            if self.reshape is not None:
                img = np_resize(img, self.reshape)

            # Store samples
            X[i,] = img

        return X

    def __generate_y(self, list_IDs_batch):
        if self.reshape is None:
            y = np.empty((self.batch_size, *self.dim, self.n_classes), dtype=int)
        else:
            y = np.empty((self.batch_size, *self.reshape, self.n_classes), dtype=int)

        for i, ID in enumerate(list_IDs_batch):
            im_name = self.df['ImageId'].iloc[ID]
            image_df = self.target_df[self.target_df['ImageId'] == im_name]

            rles = image_df['EncodedPixels'].values

            if self.reshape is not None:
                masks = build_masks(rles, input_shape=self.dim, reshape=self.reshape)
            else:
                masks = build_masks(rles, input_shape=self.dim)

            y[i, ] = masks

        return y

    def __load_grayscale(self, img_path):
        img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        img = img.astype(np.float32) / 255.
        img = np.expand_dims(img, axis=-1)
        return img

    def __load_rgb(self, img_path):
        img = cv2.imread(img_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img.astype(np.float32) / 255.
        return img

    def __random_transform(self, img, masks):
        composition = albu.Compose([
            albu.HorizontalFlip(),
            albu.VerticalFlip(),
            albu.ShiftScaleRotate(rotate_limit=30, shift_limit=0.1)
            #albu.ShiftScaleRotate(rotate_limit=90, shift_limit=0.2)
        ])

        composed = composition(image=img, mask=masks)
        aug_img = composed['image']
        aug_masks = composed['mask']

        return aug_img, aug_masks

    def __augment_batch(self, img_batch, masks_batch):
        for i in range(img_batch.shape[0]):
            img_batch[i, ], masks_batch[i, ] = self.__random_transform(
                img_batch[i, ], masks_batch[i, ])

        return img_batch, masks_batch
```
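A hedged usage sketch for the class above; the names train_df, mask_df, and model are assumptions, and the arguments follow the constructor defaults:

```
# hypothetical wiring: train_df holds an 'ImageId' column, mask_df holds
# 'ImageId' and 'EncodedPixels'
train_idx = list(range(len(train_df)))
train_gen = DataGenerator(train_idx,
                          df=train_df,
                          target_df=mask_df,
                          mode='fit',
                          base_path='../train_images',
                          batch_size=16,
                          reshape=(350, 525),  # optional downscale
                          augment=True,
                          n_classes=2)

model.fit(train_gen, epochs=10)  # a keras.utils.Sequence works directly with fit
```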
Reference:
http://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly
According to my experiment, you can't just use zip(img_generator, mask_generator). Although no error occurs, it will run forever; it seems to return an infinite generator. To solve this problem, you may use `while True: yield(img_generator.next(), mask_generator.next())`.
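A minimal sketch of that workaround, assuming two ImageDataGenerator flows created with the same seed:

```
def combine_generators(img_generator, mask_generator):
    # pull one batch from each flow per step instead of zipping
    # two infinite generators
    while True:
        yield img_generator.next(), mask_generator.next()

# usage sketch:
# model.fit_generator(combine_generators(img_generator, mask_generator),
#                     steps_per_epoch=50, epochs=20)
```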
I implemented a multiclass classifier with Keras.
My problem now is making predictions, because I get an error. I believe it is related to the prediction part of the code.
The code is the following:
```
import numpy as np
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
from keras import applications
from keras.utils.np_utils import to_categorical
from PIL import Image
import matplotlib.pyplot as plt
import math
%matplotlib inline

# dimensions of our images.
img_width, img_height = 150, 150

top_model_weights_path = 'bottleneck_fc_model.h5'
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
epochs = 30
batch_size = 16

def save_bottleneck_features():
    model = applications.VGG16(include_top=False, weights='imagenet')

    datagen = ImageDataGenerator(rescale=1. / 255)
    generator = datagen.flow_from_directory(train_data_dir, target_size=(img_width, img_height),
                                            batch_size=batch_size, class_mode=None, shuffle=False)

    n_train_samples = len(generator.filenames)
    n_classes = len(generator.class_indices)
    print("Number of train files = {}".format(n_train_samples))
    print("Number of classes = {}".format(n_classes))

    predict_size_train = int(math.ceil(n_train_samples / batch_size))
    bottleneck_features_train = model.predict_generator(generator, predict_size_train)
    np.save('bottleneck_features_train.npy', bottleneck_features_train)

    generator = datagen.flow_from_directory(validation_data_dir, target_size=(img_width, img_height),
                                            batch_size=batch_size, class_mode=None, shuffle=False)

    n_validation_samples = len(generator.filenames)
    predict_size_validation = int(math.ceil(n_validation_samples / batch_size))
    bottleneck_features_validation = model.predict_generator(generator, predict_size_validation)
    np.save('bottleneck_features_validation.npy', bottleneck_features_validation)

def train_top_model():
    datagen_top = ImageDataGenerator(rescale=1./255)
    generator_top = datagen_top.flow_from_directory(train_data_dir, target_size=(img_width, img_height),
                                                    batch_size=batch_size, class_mode='categorical',
                                                    shuffle=False)

    n_train_samples = len(generator_top.filenames)
    n_classes = len(generator_top.class_indices)

    # load the bottleneck features saved earlier
    train_data = np.load('bottleneck_features_train.npy')

    # get the class labels for the training data, in the original order
    train_labels = generator_top.classes

    # convert the training labels to categorical vectors
    train_labels = to_categorical(train_labels, num_classes=n_classes)

    generator_top = datagen_top.flow_from_directory(validation_data_dir, target_size=(img_width, img_height),
                                                    batch_size=batch_size, class_mode=None, shuffle=False)

    n_validation_samples = len(generator_top.filenames)
    validation_data = np.load('bottleneck_features_validation.npy')

    validation_labels = generator_top.classes
    validation_labels = to_categorical(validation_labels, num_classes=n_classes)

    model = Sequential()
    model.add(Flatten(input_shape=train_data.shape[1:]))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(n_classes, activation='sigmoid'))

    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy', metrics=['accuracy'])

    history = model.fit(train_data, train_labels, epochs=epochs, batch_size=batch_size,
                        validation_data=(validation_data, validation_labels))

    model.save_weights(top_model_weights_path)

    (eval_loss, eval_accuracy) = model.evaluate(validation_data, validation_labels,
                                                batch_size=batch_size, verbose=1)

    print("[INFO] accuracy: {:.2f}%".format(eval_accuracy * 100))
    print("[INFO] Loss: {}".format(eval_loss))

    return model
```
To execute the program we do:
```
save_bottleneck_features()
model = train_top_model()
```
When I try to make a prediction using the following code:
```
img_path = 'image_test/bird.jpg'

# predicting images
img = load_img(img_path, target_size=(img_width, img_height))
x = img_to_array(img)
x = np.expand_dims(x, axis=0)

images = np.vstack([x])
classes = model.predict_classes(images, batch_size=10)
print(classes)
```
it gives me the following error:
```
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-44-c3652addeabc> in <module>()
      8
      9 images = np.vstack([x])
---> 10 classes = model.predict_classes(images, batch_size=10)
     11 print (classes)

~/anaconda/lib/python3.6/site-packages/keras/models.py in predict_classes(self, x, batch_size, verbose)
   1016             A numpy array of class predictions.
   1017         """
-> 1018         proba = self.predict(x, batch_size=batch_size, verbose=verbose)
   1019         if proba.shape[-1] > 1:
   1020             return proba.argmax(axis=-1)

~/anaconda/lib/python3.6/site-packages/keras/models.py in predict(self, x, batch_size, verbose)
    911         if not self.built:
    912             self.build()
--> 913         return self.model.predict(x, batch_size=batch_size, verbose=verbose)
    914
    915     def predict_on_batch(self, x):

~/anaconda/lib/python3.6/site-packages/keras/engine/training.py in predict(self, x, batch_size, verbose, steps)
   1693         x = _standardize_input_data(x, self._feed_input_names,
   1694                                     self._feed_input_shapes,
-> 1695                                     check_batch_axis=False)
   1696         if self.stateful:
   1697             if x[0].shape[0] > batch_size and x[0].shape[0] % batch_size != 0:

~/anaconda/lib/python3.6/site-packages/keras/engine/training.py in _standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix)
    142                              ' to have shape ' + str(shapes[i]) +
    143                              ' but got array with shape ' +
--> 144                              str(array.shape))
    145     return arrays
    146

ValueError: Error when checking : expected flatten_8_input to have shape (None, 7, 7, 512) but got array with shape (1, 150, 150, 3)
```
I finally found the answer.
In order to predict the class of an image, we need to run it through the same pipeline as before: the top model's Flatten layer was built on VGG16 bottleneck features of shape (7, 7, 512), so a raw (150, 150, 3) image must first be passed through the VGG16 base, exactly as the error message indicates.
The prediction function must be:
```
image_path = 'image_test/bird.jpg'

orig = cv2.imread(image_path)

print("[INFO] loading and preprocessing image...")
image = load_img(image_path, target_size=(img_width, img_height))
image = img_to_array(image)

# important! otherwise the predictions will be '0'
image = image / 255
image = np.expand_dims(image, axis=0)

# build the VGG16 network
model = applications.VGG16(include_top=False, weights='imagenet')

# get the bottleneck prediction from the pre-trained VGG16 model
bottleneck_prediction = model.predict(image)

# build top model
model = Sequential()
model.add(Flatten(input_shape=bottleneck_prediction.shape[1:]))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))

model.load_weights(top_model_weights_path)

# use the bottleneck prediction on the top model to get the final classification
class_predicted = model.predict_classes(bottleneck_prediction)

inID = class_predicted[0]

class_dictionary = generator_top.class_indices
inv_map = {v: k for k, v in class_dictionary.items()}
label = inv_map[inID]

# get the prediction label
print("Image ID: {}, Label: {}".format(inID, label))
```