Multiple ImageDataGenerator - python

I'm trying to feed two inputs built from ImageDataGenerator into my model.fit_generator(), but it doesn't work, and I don't know whether this is the best way to do it.
My structure is:
[screenshot: two-input model architecture]
input_imgen1 = ImageDataGenerator(rescale=1./255,
                                  vertical_flip=True,
                                  validation_split=0.2,
                                  horizontal_flip=True)

input_imgen2 = ImageDataGenerator(rescale=1./255,
                                  shear_range=0.2,
                                  zoom_range=0.2,
                                  rotation_range=5.)

testgenerator = ImageDataGenerator(rescale=1./255)
def generate_generator_multiple(generator1, generator2, train_data_dir,
                                batch_size, img_height, img_width):
    genX1 = generator1.flow_from_directory(train_data_dir,
                                           target_size=(img_height, img_width),
                                           class_mode='categorical',
                                           batch_size=batch_size,
                                           shuffle=False,
                                           seed=7)
    genX2 = generator2.flow_from_directory(train_data_dir,
                                           target_size=(img_height, img_width),
                                           class_mode='categorical',
                                           batch_size=batch_size,
                                           shuffle=False,
                                           seed=7)
    while True:
        X1i = genX1.next()
        X2i = genX2.next()
        yield [X1i[0], X2i[0]], X2i[1]  # yield both images and their shared label
data_gen_train = generate_generator_multiple(generator1=input_imgen1,
                                             generator2=input_imgen2,
                                             train_data_dir=train_dir,
                                             batch_size=batch_size,
                                             img_height=IMG_HEIGHT,
                                             img_width=IMG_WIDTH)

history = model.fit_generator(
    data_gen_train,
    epochs=epochs,
    steps_per_epoch=25,
    verbose=1,
    validation_data=testgenerator,
    validation_steps=25,
    callbacks=[checkpoint, early_stop, tensor_board]
)
Error when I fit:
[screenshot: traceback raised during validation]

As is evident from the logs, your error occurs during validation: data_gen_valid should be constructed the same way as data_gen_train.
So if your training data is the combination of two generators, your validation data must be as well.
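A minimal sketch of that fix, assuming a validation directory named valid_dir (not shown in the question) and reusing the helper above; the validation generators only rescale, so the validation images stay deterministic:

val_imgen = ImageDataGenerator(rescale=1./255)

# Build the validation input exactly like the training input.
data_gen_valid = generate_generator_multiple(generator1=val_imgen,
                                             generator2=val_imgen,
                                             train_data_dir=valid_dir,
                                             batch_size=batch_size,
                                             img_height=IMG_HEIGHT,
                                             img_width=IMG_WIDTH)

history = model.fit_generator(
    data_gen_train,
    epochs=epochs,
    steps_per_epoch=25,
    verbose=1,
    validation_data=data_gen_valid,
    validation_steps=25,
    callbacks=[checkpoint, early_stop, tensor_board]
)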

Related

How to extract features from images using VGG16 and predict them with an SVM

I want to extract features using VGG16 in order to train classical ML models (SVM, RF, ...) so that I can classify images and visualise them.
I have already created a model with VGG16, but I am having difficulty visualising the test images and predicting them with ML. Any help please.
VGG16 = VGG16(input_shape=IMAGE_SIZE + [3], weights='imagenet',
              include_top=False)

for layer in VGG16.layers:
    layer.trainable = False

folders = glob('')

x = Flatten()(VGG16.output)
x = Dense(4096, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(4096, activation='relu')(x)
x = Dropout(0.3)(x)
prediction = Dense(len(folders), activation='sigmoid')(x)

model = Model(inputs=VGG16.input, outputs=prediction)
model.summary()

model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy'])
train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1./255)

training_set = train_datagen.flow_from_directory('apple_disease_classification/Train',
                                                 target_size=(224, 224),
                                                 batch_size=32,
                                                 class_mode='categorical')

test_set = test_datagen.flow_from_directory('apple_disease_classification/Test',
                                            target_size=(224, 224),
                                            batch_size=32,
                                            class_mode='categorical')
r = model.fit_generator(
    training_set,
    validation_data=test_set,
    epochs=50,
    steps_per_epoch=len(training_set),
    validation_steps=len(test_set))
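A minimal sketch of the feature-extraction step the question describes: run a frozen VGG16 base once over a non-shuffled directory iterator to turn each image into a fixed-length vector, then fit an sklearn classifier on those vectors. This reuses test_datagen from above; the extractor and iterator names are illustrative.

from sklearn.svm import SVC
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.layers import Flatten
from tensorflow.keras.models import Model

# Frozen convolutional base used purely as a feature extractor.
base = VGG16(input_shape=(224, 224, 3), weights='imagenet', include_top=False)
extractor = Model(inputs=base.input, outputs=Flatten()(base.output))

# shuffle=False so iterator.classes lines up with the prediction order.
train_feat_set = test_datagen.flow_from_directory('apple_disease_classification/Train',
                                                  target_size=(224, 224),
                                                  batch_size=32,
                                                  class_mode='categorical',
                                                  shuffle=False)
X_train = extractor.predict(train_feat_set, steps=len(train_feat_set))
y_train = train_feat_set.classes

svm = SVC(kernel='rbf')
svm.fit(X_train, y_train)

The same extractor.predict() call on a non-shuffled iterator over the Test directory gives X_test for svm.predict(), and comparing those predictions against the true classes is a straightforward way to pick out and visualise misclassified images.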

Is there a way to resolve the decode error in Keras/TensorFlow when loading a model?

I am trying to load multiple models in a for loop on the Azure ML notebooks cluster. However, I am receiving a decode error when loading them.
Can you kindly let me know if there is a solution to this?
The code below is used for training and saving the models:
def train_model_naive_split():
    all_history = {}
    inp_train_gen = ImageDataGenerator(rescale=1./255,
                                       rotation_range=260,
                                       width_shift_range=0.4,
                                       height_shift_range=0.4,
                                       shear_range=0.2,
                                       zoom_range=0.4,
                                       horizontal_flip=True,
                                       vertical_flip=True,
                                       fill_mode="nearest")

    train_data = pd.read_csv("glaucoma.csv")
    train_data['Glaucoma'] = train_data['Glaucoma'].astype(str)
    Y = train_data[['Glaucoma']]

    skf = StratifiedKFold(n_splits=5, random_state=7, shuffle=True)
    fold = 1
    for train_index, val_index in skf.split(np.zeros(len(train_data)), Y):
        training_data = train_data.iloc[train_index]
        validation_data = train_data.iloc[val_index]

        train_iterator = inp_train_gen.flow_from_dataframe(training_data,
                                                           x_col='Filename',
                                                           y_col='Glaucoma',
                                                           directory='ORIGA/ORIGA/Images',
                                                           target_size=(256, 256),
                                                           batch_size=20,
                                                           class_mode='binary',
                                                           shuffle=True)
        validation_iterator = inp_train_gen.flow_from_dataframe(validation_data,
                                                                x_col='Filename',
                                                                y_col='Glaucoma',
                                                                directory='ORIGA/ORIGA/Images',
                                                                target_size=(256, 256),
                                                                batch_size=20,
                                                                class_mode='binary',
                                                                shuffle=True)

        model = create_cnn_model()
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

        model_name = f'outputs/best-model-kfold-{fold}.h5'
        history = model.fit(train_iterator,
                            validation_data=validation_iterator,
                            epochs=5)
        model.save(model_name)
        all_history[f'history-fold-{fold}'] = history
        fold += 1
    return all_history
The code below is used for loading the models from the saved folder:
model_files = os.listdir('outputs/')
models = []
for model_file in model_files[2:]:
    print('loading model /outputs/' + model_file)
    models.append(keras.models.load_model('outputs/' + model_file))
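The traceback isn't included in the post, so this is only an assumption: decode errors while reading .h5 files are often the h5py 3.x issue ("'str' object has no attribute 'decode'") with models saved under older Keras versions. Two common workarounds are pinning h5py below 3.0, or loading with compile=False and recompiling manually, as in this sketch:

import os
from tensorflow import keras

models = []
for model_file in sorted(os.listdir('outputs/')):
    if not model_file.endswith('.h5'):
        continue  # skip non-model files instead of slicing the listing
    # compile=False skips deserialising the saved training config, which is
    # where the decode step often fails; recompile with the known settings.
    model = keras.models.load_model(os.path.join('outputs', model_file),
                                    compile=False)
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    models.append(model)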

How to plot my confusion matrix after fitting my model?

This is how my dataset is organised:
train_path = '/content/drive/MyDrive/TCC_FILES/New_Dataset/train'
test_path = '/content/drive/MyDrive/TCC_FILES/New_Dataset/test'
valid_path = '/content/drive/MyDrive/TCC_FILES/New_Dataset/valid'

# New code
train_batches = ImageDataGenerator(preprocessing_function=tf.keras.applications.resnet50.preprocess_input,
                                   horizontal_flip=True,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2) \
    .flow_from_directory(directory=train_path, target_size=(IMG_WIDTH, IMG_HEIGHT),
                         classes=['0_NoCancer', '1_Cancer'], batch_size=BATCH_SIZE)
valid_batches = ImageDataGenerator(preprocessing_function=tf.keras.applications.resnet50.preprocess_input) \
    .flow_from_directory(directory=valid_path, target_size=(IMG_WIDTH, IMG_HEIGHT),
                         classes=['0_NoCancer', '1_Cancer'], batch_size=BATCH_SIZE)
test_batches = ImageDataGenerator(preprocessing_function=tf.keras.applications.resnet50.preprocess_input) \
    .flow_from_directory(directory=test_path, target_size=(IMG_WIDTH, IMG_HEIGHT),
                         classes=['0_NoCancer', '1_Cancer'], batch_size=BATCH_SIZE, shuffle=False)

model.fit(train_batches,
          steps_per_epoch=np.ceil(float(18522) / float(BATCH_SIZE)),
          epochs=20, callbacks=[early_stop, reduce_lr, checkpoint_callback],
          validation_steps=np.ceil(float(2058) / float(BATCH_SIZE)),
          validation_data=valid_batches)
Hey my friend, if you want to evaluate your model you should train it first, and afterwards use this function from sklearn:
confusion_matrix(predictions.argmax(axis=1), y_test.argmax(axis=1))
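A minimal sketch of that idea against the iterators above, assuming the fitted model; test_batches was created with shuffle=False, so test_batches.classes lines up with the prediction order:

import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay

# Predict over the whole (non-shuffled) test set.
predictions = model.predict(test_batches, steps=len(test_batches))
y_pred = predictions.argmax(axis=1)
y_true = test_batches.classes

cm = confusion_matrix(y_true, y_pred)
ConfusionMatrixDisplay(cm, display_labels=['0_NoCancer', '1_Cancer']).plot()
plt.show()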

Wrapping image_data_generator.flow_from_dataframe in a tf.data pipeline: what steps should I take?

train_generator = datagen.flow_from_dataframe(dataframe=train_df,  # directory=data_path,
                                              x_col="Path", y_col="feature_string",
                                              seed=42, classes=chexpert_targets,
                                              class_mode="categorical",
                                              target_size=(image_size, image_size),
                                              batch_size=32, subset="training")
validation_generator = datagen.flow_from_dataframe(dataframe=train_df,  # directory=data_path,
                                                   x_col="Path", y_col="feature_string",
                                                   seed=42, classes=chexpert_targets,
                                                   class_mode="categorical",
                                                   target_size=(image_size, image_size),
                                                   batch_size=16, subset="validation")
test_generator = test_datagen.flow_from_dataframe(dataframe=valid_only_df,  # directory=data_path,
                                                  target_size=(image_size, image_size),
                                                  class_mode='categorical',
                                                  batch_size=1, shuffle=False,
                                                  classes=chexpert_targets,
                                                  x_col="Path", y_col="feature_string")

x_col, y_col = next(train_generator)

ds = tf.data.Dataset.from_generator(
    lambda: train_generator,
    output_types=(tf.float32, tf.float32),
    output_shapes=([32, 320, 320, 3], [32, 14])
)
I am trying to wrap image_data_generator.flow_from_dataframe using tf.data, but I am running into difficulties. I would really appreciate some help.
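A sketch of one way forward, keeping the shapes from the snippet above (320x320 images, 14 classes) and assuming a compiled model. output_signature replaces the older output_types/output_shapes pair in TF 2.4+, and a None batch dimension avoids shape errors on the final, smaller batch; since Keras iterators loop forever, steps_per_epoch is needed when fitting:

import tensorflow as tf

train_ds = tf.data.Dataset.from_generator(
    lambda: train_generator,
    output_signature=(
        tf.TensorSpec(shape=(None, 320, 320, 3), dtype=tf.float32),
        tf.TensorSpec(shape=(None, 14), dtype=tf.float32),
    ),
).prefetch(tf.data.AUTOTUNE)  # overlap preprocessing with training

model.fit(train_ds, steps_per_epoch=len(train_generator), epochs=10)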

Multi-input model with flow_from_directory

I am trying to learn how to use multi-input models with flow_from_directory, but there is something I can't figure out. Thanks for your help.
The way I understand it, we supply fit_generator with the two_image_generator and the fit method will infer the labels... what am I missing?
def two_image_generator(generator,
                        directory,
                        batch_size,
                        shuffle=False,
                        img_size1=(224, 224),
                        img_size2=(299, 299)):
    gen1 = generator.flow_from_directory(
        # This is the target directory
        directory,
        # All images will be resized to target height and width.
        target_size=img_size1,
        batch_size=batch_size,
        # Since we use categorical_crossentropy loss, we need categorical labels
        class_mode='categorical',
        shuffle=shuffle,
        seed=1)
    gen2 = generator.flow_from_directory(
        # This is the target directory
        directory,
        # All images will be resized to target height and width.
        target_size=img_size2,
        batch_size=batch_size,
        # Since we use categorical_crossentropy loss, we need categorical labels
        class_mode='categorical',
        shuffle=shuffle,
        seed=1)
    while True:
        X1i = gen1.next()
        X2i = gen2.next()
        if y_col:
            yield [X1i[0], X2i[0]], X1i[1]  # X1i[1] is the label
        else:
            yield [X1i, X2i]
# add data_augmentation
train_aug_datagen = ImageDataGenerator(
    rotation_range=20,
    shear_range=0.1,
    zoom_range=0.2,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True
)
train_generator = two_image_generator(train_aug_datagen,
                                      train_dir,
                                      batch_size=batch_size,
                                      shuffle=True)

validation_datagen = ImageDataGenerator()
validation_generator = two_image_generator(validation_datagen,
                                           validation_dir,
                                           batch_size=batch_size,
                                           shuffle=True)
def create_base_model(MODEL, img_size, lambda_fun=None):
    inp = Input(shape=(img_size[0], img_size[1], 3))
    x = inp
    if lambda_fun:
        x = Lambda(lambda_fun)(x)
    base_model = MODEL(input_tensor=x, weights='imagenet',
                       include_top=False, pooling='avg')
    model = Model(inp, base_model.output)
    return model

# define vgg16 + resnet50 + inception_v3
model1 = create_base_model(vgg16.VGG16, (224, 224), vgg16.preprocess_input)
model2 = create_base_model(resnet50.ResNet50, (224, 224), resnet50.preprocess_input)
model3 = create_base_model(inception_v3.InceptionV3, (299, 299), inception_v3.preprocess_input)

model1.trainable = False
model2.trainable = False
model3.trainable = False

inpA = Input(shape=(224, 224, 3))
inpB = Input(shape=(299, 299, 3))

out1 = model1(inpA)
out2 = model2(inpA)
out3 = model3(inpB)

x = Concatenate()([out1, out2, out3])
x = Dropout(0.2)(x)
x = Dense(2, activation='softmax')(x)
model = Model([inpA, inpB], x)
############################################################################
trained_models_path = './models/VggFace_best_model'
model_names = trained_models_path + '_epoch_{epoch:02d}_val_acc_{val_accuracy:.4f}.hdf5'
checkpoint = ModelCheckpoint(model_names, 'val_accuracy', verbose=1, save_best_only=True)
############################################################################
early = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=1, mode='auto')
callbacks = [checkpoint, early]

history = model.fit_generator(train_generator,
                              steps_per_epoch=NUM_TRAIN // batch_size,
                              epochs=100,
                              validation_data=validation_generator,
                              validation_steps=NUM_TEST // batch_size,
                              verbose=1,
                              use_multiprocessing=True,
                              workers=14,
                              callbacks=callbacks)
NameError: name 'y_col' is not defined

The generator references y_col, which never exists inside two_image_generator; since flow_from_directory already yields labels, the fix is to drop that branch and always yield both images with their shared label, as in the corrected version below:
# add data_augmentation
train_aug_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=20,
    shear_range=0.1,
    zoom_range=0.2,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True
)
validation_datagen = ImageDataGenerator(rescale=1./255)
def two_image_generator(generator,
                        directory,
                        batch_size,
                        shuffle=False,
                        img_size1=(224, 224),
                        img_size2=(299, 299)):
    gen1 = generator.flow_from_directory(
        # This is the target directory
        directory,
        # All images will be resized to target height and width.
        target_size=img_size1,
        batch_size=batch_size,
        # Since we use categorical_crossentropy loss, we need categorical labels
        class_mode='categorical',
        shuffle=shuffle,
        seed=7)
    gen2 = generator.flow_from_directory(
        # This is the target directory
        directory,
        # All images will be resized to target height and width.
        target_size=img_size2,
        batch_size=batch_size,
        # Since we use categorical_crossentropy loss, we need categorical labels
        class_mode='categorical',
        shuffle=shuffle,
        seed=7)
    while True:
        X1i = gen1.next()
        X2i = gen2.next()
        yield [X1i[0], X2i[0]], X2i[1]  # yield both images and their shared label
train_generator = two_image_generator(train_aug_datagen,
                                      train_dir,
                                      batch_size=batch_size,
                                      shuffle=True)
validation_generator = two_image_generator(validation_datagen,
                                           validation_dir,
                                           batch_size=batch_size,
                                           shuffle=True)
############################################################################
trained_models_path = './models/VggFace_best_model'
model_names = trained_models_path + '_epoch_{epoch:02d}_val_acc_{val_accuracy:.4f}.hdf5'
checkpoint = ModelCheckpoint(model_names, 'val_accuracy', verbose=1, save_best_only=True)
############################################################################
early = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=1, mode='auto')
callbacks = [checkpoint, early]

history = model.fit_generator(train_generator,
                              steps_per_epoch=NUM_TRAIN // batch_size,
                              epochs=100,
                              validation_data=validation_generator,
                              validation_steps=NUM_TEST // batch_size,
                              verbose=1,
                              use_multiprocessing=True,
                              # workers=14,
                              callbacks=callbacks)
Epoch 1/100
Found 5000 images belonging to 2 classes.
Found 5000 images belonging to 2 classes.
Found 52700 images belonging to 2 classes.
Found 52700 images belonging to 2 classes.
340/625 [===============>..............] - ETA: 4:37 - loss: 7.7634 - acc: 0.4926