How to compute Receiver Operating Characteristic (ROC) and AUC using Keras Tuner? - python

I want to add additional ROC and AUC custom metrics to my Keras model. I tried to adapt the solution provided here, but the code gave me an error.
The PerformanceVisualizationCallback plots the ROC-AUC curve.
from sklearn.metrics import roc_auc_score
from keras.callbacks import Callback

The callback:

class PerformanceVisualizationCallback(Callback):
    def __init__(self, model, validation_data, dat_dir):
        super().__init__()
        self.model = model
        self.X = X_train
        self.y = y_train
        self.validation_data = validation_data
        os.makedirs(dat_dir, exist_ok=True)
        self.dat_dir = dat_dir

    def on_epoch_end(self, epoch, logs={}):
        y_pred_train = self.model.predict_proba(self.X)
        roc_train = roc_auc_score(self.y, y_pred_train)
        y_pred_val = self.model.predict_proba(self.validation_data[0])
        roc_val = roc_auc_score(self.y_val, y_pred_val)

        #y_pred = tf.constant(self.model.predict(self.validation_data[0])).numpy()
        #y_true = self.validation_data[1]
        #y_pred_class = tf.math.argmax(y_pred, axis=1).numpy()

        clf = SVC(random_state=0)
        clf.fit(tf.constant(self.validation_data).numpy(),
                tf.cast(tf.linspace(0, 19, 20, name='linspace', axis=0), dtype=tf.int64).numpy())
        predictions = clf.predict(tf.constant(self.validation_data).numpy())
        cm = sklearn.metrics.confusion_matrix(
            [tf.math.argmax(self.validation_data[1], axis=1).numpy()[0],
             tf.math.argmax(self.validation_data[2], axis=1).numpy()[0],
             tf.math.argmax(self.validation_data[3], axis=1).numpy()[0],
             tf.math.argmax(self.validation_data[4], axis=1).numpy()[0],
             tf.math.argmax(self.validation_data[5], axis=1).numpy()[0]],
            [1, 2, 3, 4, 5], labels=clf.classes_)
        disp = sklearn.metrics.ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=clf.classes_)
        disp.plot()
        plt.show()
        fig.savefig(os.path.join(self.dat_dir, f'confusion_matrix_epoch_{epoch}'))

        clf = sklearn.svm.SVC(random_state=0)
        clf.fit(tf.constant(self.validation_data).numpy(),
                tf.linspace(0, 19, 20, name='linspace', axis=0).numpy())
        fpr, tpr, thresholds = sklearn.metrics.roc_curve([0, 0, 1, 1], [0, list_auc_roc[0], list_auc_roc[1], list_auc_roc[2]])
        auc_roc = sklearn.metrics.auc(fpr, tpr)
        display = sklearn.metrics.RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=auc_roc, estimator_name='example estimator')
        display.plot()
        plt.show()
        fig.savefig(os.path.join(self.dat_dir, f'roc_curve_epoch_{epoch}'))

        fig, ax = plt.subplots(figsize=(8, 4))
        plt.scatter(y_val, y_pred_val, alpha=0.6, color='#FF0000', lw=1, ec='black')
        plt.show()
        fig.savefig(os.path.join(self.dat_dir, f'dl_scatterplot'))
Build the model:
def model_builder(hp):
    model = Sequential()
    for i in range(hp.Int("num_layers", 1, 50)):
        model.add(
            layers.Dense(
                # Tune number of units separately.
                units=hp.Int(f"units_{i}", min_value=1, max_value=200, step=5),
                #activation=hp.Choice("activation", ["relu", "tanh"])
                activation=hp.Choice("activation", ["relu", "tanh", "sigmoid", "softmax", "softplus", "softsign", "selu", "elu", "exponential"])
            ))
    model.add(Dense(4, kernel_initializer='normal', activation='linear'))  # output layer
    if hp.Boolean("dropout"):
        model.add(layers.Dropout(rate=0.1))
    model.add(layers.Dense(10, activation="softmax"))

    # Tune the learning rate for the optimizer:
    # choose an optimal value from 0.01, 0.001, or 0.0001.
    hp_learning_rate = hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])

    # Performance visualization callback
    performance_viz_cbk = PerformanceVisualizationCallback(
        model=model,
        validation_data=X_val,
        dat_dir='c:\performance_charts')

    model.compile(optimizer=Adam(learning_rate=hp_learning_rate),
                  loss=SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['auc_roc'])
    return model
Search for optimal hyperparameters:
tuner = kt.Hyperband(model_builder,
                     objective=kt.Objective('val_auc', direction='max'),
                     max_epochs=200,
                     factor=3,
                     directory='my_dir',
                     overwrite=True,
                     project_name='intro_to_kt')
stop_early = EarlyStopping(monitor='auc_roc', patience=5)
tuner.search(X_train, y_train, epochs=10, validation_split=0.2, callbacks=[stop_early])
Traceback:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/tmp/ipykernel_11015/3831715835.py in <module>
----> 1 tuner.search(X_train, y_train, epochs=10, validation_split=0.2, callbacks=[stop_early])
/opt/conda/lib/python3.7/site-packages/keras_tuner/engine/base_tuner.py in search(self, *fit_args, **fit_kwargs)
181
182 self.on_trial_begin(trial)
--> 183 results = self.run_trial(trial, *fit_args, **fit_kwargs)
184 # `results` is None indicates user updated oracle in `run_trial()`.
185 if results is None:
/opt/conda/lib/python3.7/site-packages/keras_tuner/tuners/hyperband.py in run_trial(self, trial, *fit_args, **fit_kwargs)
382 fit_kwargs["epochs"] = hp.values["tuner/epochs"]
383 fit_kwargs["initial_epoch"] = hp.values["tuner/initial_epoch"]
--> 384 return super(Hyperband, self).run_trial(trial, *fit_args, **fit_kwargs)
385
386 def _build_model(self, hp):
/opt/conda/lib/python3.7/site-packages/keras_tuner/engine/tuner.py in run_trial(self, trial, *args, **kwargs)
293 callbacks.append(model_checkpoint)
294 copied_kwargs["callbacks"] = callbacks
--> 295 obj_value = self._build_and_fit_model(trial, *args, **copied_kwargs)
296
297 histories.append(obj_value)
/opt/conda/lib/python3.7/site-packages/keras_tuner/engine/tuner.py in _build_and_fit_model(self, trial, *args, **kwargs)
220 hp = trial.hyperparameters
221 model = self._try_build(hp)
--> 222 results = self.hypermodel.fit(hp, model, *args, **kwargs)
223 tuner_utils.validate_trial_results(
224 results, self.oracle.objective, "HyperModel.fit()"
/opt/conda/lib/python3.7/site-packages/keras_tuner/engine/hypermodel.py in fit(self, hp, model, *args, **kwargs)
138 If return a float, it should be the `objective` value.
139 """
--> 140 return model.fit(*args, **kwargs)
141
142
/opt/conda/lib/python3.7/site-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
/opt/conda/lib/python3.7/site-packages/keras/engine/training.py in tf__train_function(iterator)
13 try:
14 do_return = True
---> 15 retval_ = ag__.converted_call(ag__.ld(step_function), (ag__.ld(self), ag__.ld(iterator)), None, fscope)
16 except:
17 do_return = False
ValueError: in user code:
File "/opt/conda/lib/python3.7/site-packages/keras/engine/training.py", line 1160, in train_function *
return step_function(self, iterator)
File "/opt/conda/lib/python3.7/site-packages/keras/engine/training.py", line 1146, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/opt/conda/lib/python3.7/site-packages/keras/engine/training.py", line 1135, in run_step **
outputs = model.train_step(data)
File "/opt/conda/lib/python3.7/site-packages/keras/engine/training.py", line 998, in train_step
return self.compute_metrics(x, y, y_pred, sample_weight)
File "/opt/conda/lib/python3.7/site-packages/keras/engine/training.py", line 1092, in compute_metrics
self.compiled_metrics.update_state(y, y_pred, sample_weight)
File "/opt/conda/lib/python3.7/site-packages/keras/engine/compile_utils.py", line 577, in update_state
self.build(y_pred, y_true)
File "/opt/conda/lib/python3.7/site-packages/keras/engine/compile_utils.py", line 484, in build
y_pred, self._get_metric_objects, self._metrics, y_true, y_pred
File "/opt/conda/lib/python3.7/site-packages/keras/engine/compile_utils.py", line 631, in _get_metric_objects
return [self._get_metric_object(m, y_t, y_p) for m in metrics]
File "/opt/conda/lib/python3.7/site-packages/keras/engine/compile_utils.py", line 631, in <listcomp>
return [self._get_metric_object(m, y_t, y_p) for m in metrics]
File "/opt/conda/lib/python3.7/site-packages/keras/engine/compile_utils.py", line 650, in _get_metric_object
metric_obj = metrics_mod.get(metric)
File "/opt/conda/lib/python3.7/site-packages/keras/metrics/__init__.py", line 181, in get
return deserialize(str(identifier))
File "/opt/conda/lib/python3.7/site-packages/keras/metrics/__init__.py", line 140, in deserialize
printable_module_name="metric function",
File "/opt/conda/lib/python3.7/site-packages/keras/utils/generic_utils.py", line 770, in deserialize_keras_object
f"Unknown {printable_module_name}: {object_name}. Please "
ValueError: Unknown metric function: auc_roc. Please ensure this object is passed to the `custom_objects` argument. See https://www.tensorflow.org/guide/keras/save_and_serialize#registering_the_custom_object for details.
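The traceback shows why: 'auc_roc' is not a string Keras can resolve to a built-in metric. A minimal sketch of one way around it, assuming TensorFlow 2.x (the metric name 'auc_roc' below is an illustrative choice, and note that tf.keras.metrics.AUC assumes binary or one-hot targets, so a multi-class softmax head needs matching labels):

import tensorflow as tf
import keras_tuner as kt
from tensorflow.keras.callbacks import EarlyStopping

# Built-in AUC metric configured for the ROC curve; its name determines
# the keys Keras logs ('auc_roc' and 'val_auc_roc').
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=hp_learning_rate),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=[tf.keras.metrics.AUC(curve='ROC', name='auc_roc')])

# The tuner objective and the EarlyStopping monitor then have to reference
# the same logged key, otherwise the search cannot find the metric.
objective = kt.Objective('val_auc_roc', direction='max')
stop_early = EarlyStopping(monitor='val_auc_roc', patience=5)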

Related

Error generated during hyperparameter tuning

My trainX is (3350, 1, 8) and my trainY is (3350, 2). I am getting this error when tuning the hyperparameters of a model with an LSTM layer, and I don't understand what the problem is.
---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
/tmp/ipykernel_4525/3330873115.py in <module>
----> 1 bayesian_opt_tuner.search(trainX, trainY,epochs=100,
2 #validation_data=(X_test, y_test)
3 validation_split=0.2,verbose=1)
~/anaconda3/lib/python3.9/site-packages/keras_tuner/engine/base_tuner.py in search(self, *fit_args, **fit_kwargs)
181
182 self.on_trial_begin(trial)
--> 183 results = self.run_trial(trial, *fit_args, **fit_kwargs)
184 # `results` is None indicates user updated oracle in `run_trial()`.
185 if result is None:
~/anaconda3/lib/python3.9/site-packages/keras_tuner/engine/tuner.py in run_trial(self, trial, *args, **kwargs)
293 callbacks.append(model_checkpoint)
294 copied_kwargs["callbacks"] = callbacks
--> 295 obj_value = self._build_and_fit_model(trial, *args, **copied_kwargs)
296
297 histories.append(obj_value)
~/anaconda3/lib/python3.9/site-packages/keras_tuner/engine/tuner.py in
_build_and_fit_model(self, trial, *args, **kwargs)
220 hp = trial.hyperparameters
221 model = self._try_build(hp)
--> 222 results = self.hypermodel.fit(hp, model, *args, **kwargs) ...
File "/home/vareeshadki/anaconda3/lib/python3.9/site-packages/keras/backend.py", line 5238, in sparse_categorical_crossentropy
res = cf.nn.sparse_softmax_cross_entropy_with_logits( Node: 'sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits' logits and labels must have the same first dimension, got logits shape [32,2] and labels shape [64] [[{{node sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits}}]] [Op:__inference_train_function_1763748]
My code:
import os
from kerastuner.tuners import BayesianOptimization

def build_model(hp):
    model = tf.keras.Sequential()
    hp_units = hp.Int('units', min_value=32, max_value=512, step=10)
    model.add(LSTM(hp_units, activation='relu'))
    model.add(Dense(2))
    hp_learning_rate = hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=hp_learning_rate),
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                  metrics=['accuracy'])
    return model

bayesian_opt_tuner = BayesianOptimization(
    build_model,
    objective='mse',
    max_trials=3,
    executions_per_trial=2,
    directory=os.path.normpath('C:/keras_tuning'),
    project_name='kerastuner_bayesian_poc',
    overwrite=True)

bayesian_opt_tuner.search(trainX, trainY, epochs=100,
                          # validation_data=(X_test, y_test)
                          validation_split=0.2, verbose=1)
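For what it's worth, the shapes in the error (logits [32, 2] vs. labels [64]) line up with trainY being one-hot with shape (3350, 2): SparseCategoricalCrossentropy expects integer class indices, so each one-hot row gets flattened into two labels per sample. A minimal sketch of the two usual fixes, assuming two classes:

import numpy as np
import tensorflow as tf

# Option 1: keep the one-hot labels of shape (n, 2) and switch the loss.
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=hp_learning_rate),
              loss=tf.keras.losses.CategoricalCrossentropy(),
              metrics=['accuracy'])

# Option 2: keep SparseCategoricalCrossentropy and convert the one-hot
# labels to integer class indices of shape (n,).
trainY_int = np.argmax(trainY, axis=1)
# bayesian_opt_tuner.search(trainX, trainY_int, epochs=100, validation_split=0.2, verbose=1)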

ValueError in model.fit keras and user code

I'm learning deep learning in Python using Keras and TensorFlow. I am using EfficientNetB0 with ImageNet weights. I divided the data into training and testing sets and performed one-hot encoding. I have 17 folders, i.e. 17 image classes.
effnet = EfficientNetB0(weights='imagenet', include_top=False, input_shape=(image_size, image_size, 3))
model = effnet.output
model = tf.keras.layers.GlobalAveragePooling2D()(model)
model = tf.keras.layers.Dropout(rate=0.5)(model)
model = tf.keras.layers.Dense(17, activation='softmax')(model)
model = tf.keras.models.Model(inputs=effnet.input, outputs=model)
model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
Everything ran smoothly until training the model:
history = model.fit(X_train,y_train,validation_split=0.1, epochs =10, verbose=1,batch_size=32, callbacks=[tensorboard,checkpoint,reduce_lr])
ValueError Traceback (most recent call last)
Input In [22], in <cell line: 1>()
----> 1 history = model.fit(X_train,y_train,validation_split=0.1, epochs =20, verbose=1,batch_size=32, callbacks=[tensorboard,checkpoint,reduce_lr])
File C:\Ken\Conda\lib\site-packages\keras\utils\traceback_utils.py:67, in filter_traceback.<locals>.error_handler(*args, **kwargs)
65 except Exception as e: # pylint: disable=broad-except
66 filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67 raise e.with_traceback(filtered_tb) from None
68 finally:
69 del filtered_tb
File ~\AppData\Local\Temp\__autograph_generated_filexkmxbaog.py:15, in outer_factory.<locals>.inner_factory.<locals>.tf__train_function(iterator)
13 try:
14 do_return = True
---> 15 retval_ = ag__.converted_call(ag__.ld(step_function), (ag__.ld(self), ag__.ld(iterator)), None, fscope)
16 except:
17 do_return = False
ValueError: in user code:
File "C:\Ken\Conda\lib\site-packages\keras\engine\training.py", line 1051, in train_function *
return step_function(self, iterator)
File "C:\Ken\Conda\lib\site-packages\keras\engine\training.py", line 1040, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\Ken\Conda\lib\site-packages\keras\engine\training.py", line 1030, in run_step **
outputs = model.train_step(data)
File "C:\Ken\Conda\lib\site-packages\keras\engine\training.py", line 890, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File "C:\Ken\Conda\lib\site-packages\keras\engine\training.py", line 948, in compute_loss
return self.compiled_loss(
File "C:\Ken\Conda\lib\site-packages\keras\engine\compile_utils.py", line 201, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "C:\Ken\Conda\lib\site-packages\keras\losses.py", line 139, in __call__
losses = call_fn(y_true, y_pred)
File "C:\Ken\Conda\lib\site-packages\keras\losses.py", line 243, in call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "C:\Ken\Conda\lib\site-packages\keras\losses.py", line 1787, in categorical_crossentropy
return backend.categorical_crossentropy(
File "C:\Ken\Conda\lib\site-packages\keras\backend.py", line 5119, in categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
ValueError: Shapes (None, 18) and (None, 17) are incompatible
I fixed the error!
I just changed the Dense layer to 18 units and deleted the dropout layer. I don't know exactly why this works yet; I will check whether the fix came from the Dense layer or the dropout layer. Maybe dropout just helps the model not to overfit, and the Dense layer was the real source of the error.
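For reference, "Shapes (None, 18) and (None, 17) are incompatible" indicates the one-hot labels have 18 columns, so the Dense layer (not the dropout) was almost certainly the culprit; dropout never changes the output shape. A small sketch of deriving the head width from the labels, assuming y_train holds the one-hot targets:

# Size the classification head from the one-hot label width so the loss
# never sees mismatched shapes (18 columns here, despite 17 folders).
num_classes = y_train.shape[1]
model = tf.keras.layers.Dense(num_classes, activation='softmax')(model)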

Convolution neural network for image segmentation with tensorflow

I am trying to make my first neural network using TensorFlow. I have some medical images and my goal is to segment them. I can't find what I am doing wrong. Here is the error:
2021-05-08 14:33:15.249134: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:116] None of the MLIR optimization passes are enabled (registered 2)
Epoch 1/50
Traceback (most recent call last):
File "C:/Users/tompi/PycharmProjects/ProjetDeepLearning/test.py", line 185, in <module>
history = model.fit(X_train, Y_train, epochs=epochs,
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1100, in fit
tmp_logs = self.train_function(iterator)
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\eager\def_function.py", line 828, in __call__
result = self._call(*args, **kwds)
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\eager\def_function.py", line 871, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\eager\def_function.py", line 725, in _initialize
self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\eager\function.py", line 2969, in _get_concrete_function_internal_garbage_collected
graph_function, _ = self._maybe_define_function(args, kwargs)
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\eager\function.py", line 3361, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\eager\function.py", line 3196, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\framework\func_graph.py", line 990, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\eager\def_function.py", line 634, in wrapped_fn
out = weak_wrapped_fn().__wrapped__(*args, **kwds)
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\framework\func_graph.py", line 977, in wrapper
raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:
C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\keras\engine\training.py:805 train_function *
return step_function(self, iterator)
C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\keras\engine\training.py:795 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1259 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2730 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:3417 _call_for_each_replica
return fn(*args, **kwargs)
C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\keras\engine\training.py:788 run_step **
outputs = model.train_step(data)
C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\keras\engine\training.py:754 train_step
y_pred = self(x, training=True)
C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:998 __call__
input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\keras\engine\input_spec.py:204 assert_input_compatibility
raise ValueError('Layer ' + layer_name + ' expects ' +
ValueError: Layer sequential expects 1 input(s), but it received 44 input tensors. Inputs received: ...
Below is my code:

import tensorflow as tf
import pandas as pd
import numpy as np
import tensorflow.keras
import segmentation_models as sm
import os
import cv2
import Metrics as metrics  # a python file
import matplotlib.pyplot as plt
from tensorflow.keras import datasets, layers, models
from sklearn.model_selection import train_test_split

width = 672
height = 448
dataframe = []

def normalize(path):
    image = cv2.imread(path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (width, height))
    # newSize = np.zeros((height, width, 3))
    # newSize[:, :, 0] = image[:, :]
    # newSize[:, :, 1] = image[:, :]
    # newSize[:, :, 2] = image[:, :]
    return image

def createDataset():
    for folder in os.listdir(imagesPath):
        for imageName in os.listdir(imagesPath + folder):
            image = normalize(imagesPath + folder + "/" + imageName)
            dataframe.append([folder, imageName, image])

createDataset()
df = pd.DataFrame(dataframe, columns=['Folder', 'Name', 'Image'])
def getImagesFromFolder(folder):
    L = []
    n, p = np.shape(df)
    for i in range(n):
        if df['Folder'][i] == folder:
            L.append(df.iloc[i][2])
    return L

originalImages = getImagesFromFolder('Original')
maskImages = getImagesFromFolder('Mask')
X_train, X_test, Y_train, Y_test = train_test_split(originalImages, maskImages, train_size=0.8, random_state=42)

classes = 3
activation = "softmax"
lr = 0.0001
loss = sm.losses.jaccard_loss
metrics = training_metrics = [
    sm.metrics.IOUScore(threshold=0.5),
    sm.metrics.FScore(threshold=0.5),
    sm.metrics.Precision(),
    sm.metrics.Recall(),
    metrics.dice_coef
]
batch_size = 3
epochs = 50
callbacks = [tensorflow.keras.callbacks.ReduceLROnPlateau()]
I am using a very simple U-Net:
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(height, width, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.summary()
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))
model.summary()
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
history = model.fit(X_train, Y_train, epochs=epochs,
                    validation_data=(X_test, Y_test))
The error says that the input receives 44 tensors, which is the number of images in X_train and Y_train (44, 448, 672, 3), but I don't know what I am doing wrong. I saw several posts with the same shape where it worked. Can anyone help me? It would be greatly appreciated.
Thanks.
I found out what the error was: the type of my variables X_train, Y_train, X_test, Y_test was list, not numpy.ndarray, because of my function getImagesFromFolder. I had to return np.array(L) to make it run.
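A minimal sketch of that fix applied to the function above:

import numpy as np

def getImagesFromFolder(folder):
    L = []
    n, p = np.shape(df)
    for i in range(n):
        if df['Folder'][i] == folder:
            L.append(df.iloc[i][2])
    # Returning an ndarray of shape (n_images, height, width, 3) lets Keras
    # treat the data as a single batch tensor instead of a list of 44 inputs.
    return np.array(L)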

InvalidArgumentError: Can not squeeze dim[2], expected a dimension of 1, got 10

I am doing a Covid-19 face mask detection project, and when I train on my image dataset I get an error which I can't understand. Please help me solve this problem; the error is given below.
Epoch 1/20
Traceback (most recent call last):
File "Mask_detection.py", line 108, in <module>
epochs=Epoch)
File "C:\Users\ABDEALIVORA\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\keras\engine\training.py", line 108, in _method_wrapper
return method(self, *args, **kwargs)
File "C:\Users\ABDEALIVORA\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1098, in fit
tmp_logs = train_function(iterator)
File "C:\Users\ABDEALIVORA\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\def_function.py", line 780, in __call__
result = self._call(*args, **kwds)
File "C:\Users\ABDEALIVORA\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\def_function.py", line 840, in _call
return self._stateless_fn(*args, **kwds)
File "C:\Users\ABDEALIVORA\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\function.py", line 2829, in __call__
return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
File "C:\Users\ABDEALIVORA\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\function.py", line 1848, in _filtered_call
cancellation_manager=cancellation_manager)
File "C:\Users\ABDEALIVORA\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\function.py", line 1924, in _call_flat
ctx, args, cancellation_manager=cancellation_manager))
File "C:\Users\ABDEALIVORA\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\function.py", line 550, in call
ctx=ctx)
File "C:\Users\ABDEALIVORA\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\execute.py", line 60, in quick_execute
inputs, attrs, num_outputs)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Can not squeeze dim[2], expected a dimension of 1, got 10
[[node categorical_crossentropy/remove_squeezable_dimensions/Squeeze (defined at Mask_detection.py:108) ]] [Op:__inference_train_function_889]
Function call stack:
train_function
2020-09-28 12:37:31.761507: W tensorflow/core/kernels/data/generator_dataset_op.cc:103] Error occurred when finalizing GeneratorDataset iterator: Failed precondition: Python interpreter state is not initialized. The process may be terminated.
[[{{node PyFunc}}]]
I provide my Python code below to help you understand the problem. My system does not have a GPU, so this error may be related to that.
DIRECTORY = 'images'
Categories = ["With_mask", "Without_mask"]
batch_size = 10
num_class = 10
Epoch = 20
data = []
label = []

for category in Categories:
    path = os.path.join(DIRECTORY, category)
    for img in os.listdir(path):
        img_path = os.path.join(path, img)
        image = load_img(img_path, target_size=(64, 64))
        image = img_to_array(image)
        image = preprocess_input(image)
        data.append(image)
        label.append(category)

lb = LabelBinarizer()
label = lb.fit_transform(label)
label = to_categorical(label)
data = numpy.asarray(data, dtype='float32')
label = numpy.array(label)
print("////")

x_train, x_test, y_train, y_test = train_test_split(data, label, stratify=label, test_size=0.2, random_state=3)
y_train = utils.to_categorical(y_train, num_class)
y_test = utils.to_categorical(y_test, num_class)

mask_model = Sequential()
mask_model.add(Conv2D(32, kernel_size=(3, 3), activation='linear', padding="same", input_shape=(64, 64, 3)))
mask_model.add(LeakyReLU(alpha=0.3))
mask_model.add(Conv2D(32, kernel_size=(3, 3), activation='linear', padding="same", input_shape=(64, 64, 3)))
mask_model.add(LeakyReLU(alpha=0.3))
mask_model.add(MaxPooling2D(pool_size=(2, 2)))
mask_model.add(Conv2D(32, kernel_size=(3, 3), activation='linear', padding="same", input_shape=(64, 64, 3)))
mask_model.add(LeakyReLU(alpha=0.3))
mask_model.add(MaxPooling2D(pool_size=(2, 2)))
mask_model.add(Flatten())
mask_model.add(Dense(128, activation="linear"))
mask_model.add(LeakyReLU(alpha=0.3))
mask_model.add(Dense(10, activation="softmax"))
mask_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
mask_model.summary()

datagen = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True)
datagen.fit(x_train)

mask_model.fit(datagen.flow(x_train, y_train, batch_size=10),
               steps_per_epoch=len(x_train),
               validation_data=(x_test, y_test),
               validation_steps=len(x_test) // batch_size,
               workers=0,
               epochs=Epoch)
print("//")

for e in range(Epoch):
    print('Epoch', e)
    batches = 0
    for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
        mask_model.fit(x_batch, y_batch)
        batches += 1
        if batches >= len(x_train) / 32:
            break

mask_model.save("Mask_model/mask_model.h5")
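One possible cause (not confirmed in the original post) is that the labels are one-hot encoded twice: once via LabelBinarizer plus to_categorical, then again via utils.to_categorical with num_class = 10, even though there are only two categories. A minimal sketch of encoding the two classes once:

from sklearn.preprocessing import LabelBinarizer
from tensorflow.keras.utils import to_categorical

# For two categories, LabelBinarizer yields one 0/1 column; a single call
# to to_categorical turns it into shape (n_samples, 2).
lb = LabelBinarizer()
label = to_categorical(lb.fit_transform(label))

# Then skip the later utils.to_categorical(y_train, num_class) calls and
# use a matching output layer: Dense(2, activation='softmax').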

Issues when applying model.predict() inside data generator with fit_generator()

Basically I am implementing a model that employs perceptual loss to perform single-image super-resolution. I constructed my full model such that the input first passes through the main model, then feeds into a pretrained VGG16, and the output from layer[5] of VGG16 is the final output of the full model.
I tried to pass a pre-trained VGG16 model to my data generator in order to prepare my ground-truth images for the computation of perceptual loss on the fly. However, I encountered value errors during training with fit_generator.
I tried writing my own loop to generate data for each batch and using the train_on_batch function instead, and that works fine. However, I do want the benefit of use_multiprocessing with fit_generator.
Here is the generator I have written. I pass lossModel to the generator and use it to generate the targets for training with perceptual loss.
class DataGenerator(keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, x_train, y_train, lossModel, batch_size=4, shuffle=True):
        'Initialization'
        self.x_train = x_train
        self.y_train = y_train
        self.lossModel = lossModel
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.x_train) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        idx = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        x = self.x_train[idx,]
        y = self.lossModel.predict_on_batch(self.y_train[idx,])
        return x, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.x_train))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)
And here I construct the model.
### Create Image Transformation Model ###
mainModel = ResnetBuilder.build((3,72,72), 5, basic_block, [1, 1, 1, 1, 1])

### Create Loss Model (VGG16) ###
lossModel = VGG16(include_top=False, weights='imagenet', input_tensor=None, input_shape=(288,288,3))
lossModel.trainable = False
for layer in lossModel.layers:
    layer.trainable = False

### Create New Loss Model (use relu2-2 layer output for perceptual loss) ###
lossModel = Model(lossModel.inputs, lossModel.layers[5].output)
lossModelOutputs = lossModel(mainModel.output)

### Create Full Model ###
fullModel = Model(mainModel.input, lossModelOutputs)

### Compile Full Model ###
fullModel.compile(loss='mse', optimizer='adam', metrics=['mse'])
trained_epochs = 0
The error occurs during fit_generator(). Note that the dimension of my input is (72,72,3), the outputs from VGG.layer[5] are (144,144,128), and my y_train is the ground-truth image in (288,288,3).
# Generators
training_generator = DataGenerator(x_train, y_train, lossModel, batch_size=4, shuffle=True)
# Train model on dataset
fullModel.fit_generator(generator=training_generator, use_multiprocessing=True, workers=6)
Epoch 1/1
---------------------------------------------------------------------------
RemoteTraceback Traceback (most recent call last)
RemoteTraceback:
"""
Traceback (most recent call last):
File "/home/lucien/anaconda3/envs/fyp/lib/python3.6/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/home/lucien/anaconda3/envs/fyp/lib/python3.6/site-packages/keras/utils/data_utils.py", line 401, in get_index
return _SHARED_SEQUENCES[uid][i]
File "/home/lucien/sr-perceptual/my_classes.py", line 26, in __getitem__
y = self.lossModel.predict_on_batch(self.y_train[idx,])
File "/home/lucien/anaconda3/envs/fyp/lib/python3.6/site-packages/keras/engine/training.py", line 1273, in predict_on_batch
self._make_predict_function()
File "/home/lucien/anaconda3/envs/fyp/lib/python3.6/site-packages/keras/engine/training.py", line 554, in _make_predict_function
**kwargs)
File "/home/lucien/anaconda3/envs/fyp/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2744, in function
return Function(inputs, outputs, updates=updates, **kwargs)
File "/home/lucien/anaconda3/envs/fyp/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2546, in __init__
with tf.control_dependencies(self.outputs):
File "/home/lucien/anaconda3/envs/fyp/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 5004, in control_dependencies
return get_default_graph().control_dependencies(control_inputs)
File "/home/lucien/anaconda3/envs/fyp/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 4543, in control_dependencies
c = self.as_graph_element(c)
File "/home/lucien/anaconda3/envs/fyp/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 3490, in as_graph_element
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
File "/home/lucien/anaconda3/envs/fyp/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 3569, in _as_graph_element_locked
raise ValueError("Tensor %s is not an element of this graph." % obj)
ValueError: Tensor Tensor("block2_conv2/Relu:0", shape=(?, 144, 144, 128), dtype=float32) is not an element of this graph.
"""
The above exception was the direct cause of the following exception:
ValueError Traceback (most recent call last)
<ipython-input-10-4a040e0935cf> in <module>
1 # Train model on dataset
----> 2 fullModel.fit_generator(generator=training_generator, use_multiprocessing=True, workers=6)
~/anaconda3/envs/fyp/lib/python3.6/site-packages/keras/legacy/interfaces.py in wrapper(*args, **kwargs)
89 warnings.warn('Update your `' + object_name + '` call to the ' +
90 'Keras 2 API: ' + signature, stacklevel=2)
---> 91 return func(*args, **kwargs)
92 wrapper._original_function = func
93 return wrapper
~/anaconda3/envs/fyp/lib/python3.6/site-packages/keras/engine/training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
1416 use_multiprocessing=use_multiprocessing,
1417 shuffle=shuffle,
-> 1418 initial_epoch=initial_epoch)
1419
1420 #interfaces.legacy_generator_methods_support
~/anaconda3/envs/fyp/lib/python3.6/site-packages/keras/engine/training_generator.py in fit_generator(model, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
179 batch_index = 0
180 while steps_done < steps_per_epoch:
--> 181 generator_output = next(output_generator)
182
183 if not hasattr(generator_output, '__len__'):
~/anaconda3/envs/fyp/lib/python3.6/site-packages/keras/utils/data_utils.py in get(self)
599 except Exception as e:
600 self.stop()
--> 601 six.reraise(*sys.exc_info())
602
603
~/anaconda3/envs/fyp/lib/python3.6/site-packages/six.py in reraise(tp, value, tb)
691 if value.__traceback__ is not tb:
692 raise value.with_traceback(tb)
--> 693 raise value
694 finally:
695 value = None
~/anaconda3/envs/fyp/lib/python3.6/site-packages/keras/utils/data_utils.py in get(self)
593 try:
594 while self.is_running():
--> 595 inputs = self.queue.get(block=True).get()
596 self.queue.task_done()
597 if inputs is not None:
~/anaconda3/envs/fyp/lib/python3.6/multiprocessing/pool.py in get(self, timeout)
642 return self._value
643 else:
--> 644 raise self._value
645
646 def _set(self, i, obj):
ValueError: Tensor Tensor("block2_conv2/Relu:0", shape=(?, 144, 144, 128), dtype=float32) is not an element of this graph.
The problem here is multi-threading. When you call the 6 workers, block2_conv2/Relu:0 is created after the graph has been finalized.
The problem is with _make_predict_function(). You can check this file on your PC for the reasons (I got this from your error text): File "/home/lucien/anaconda3/envs/fyp/lib/python3.6/site-packages/keras/engine/training.py", line 1273, in predict_on_batch self._make_predict_function().
Some ways in which you can remove the error:
Use the Theano backend.
Call model._make_predict_function() right after loading the trained model.
Use a global model:
Functions:

def load_model():
    global model
    model = yourmodel(weights=xx111122)
    # this is key: save the graph right after loading the model
    global graph
    graph = tf.get_default_graph()

While predicting:

with graph.as_default():
    preds = model.predict(image)
    # ... etc
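Applied to the generator above, the same pattern might look like this sketch, assuming the TF1-style Keras backend from the traceback (capturing the graph this way helps with threads; with use_multiprocessing=True the model must additionally be reachable in each worker process):

import tensorflow as tf

# In the main process, right after building lossModel:
lossModel._make_predict_function()   # build the predict function up front
graph = tf.get_default_graph()       # capture the graph the model lives in

# Then inside DataGenerator.__getitem__, run the prediction in that graph:
#     with graph.as_default():
#         y = self.lossModel.predict_on_batch(self.y_train[idx,])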
