I've downloaded a dataset for malaria detection from this website. Afterwards, I uploaded the images to my Google Drive and tried to train a neural net with the built-in fit() function as follows:
train_gen = train_aug.flow_from_directory(
    training_data_dir,
    class_mode="categorical",
    target_size=(64, 64),
    color_mode="rgb",
    shuffle=True,
    batch_size=BATCH_SIZE)

val_gen = val_aug.flow_from_directory(
    validation_data_dir,
    class_mode="categorical",
    target_size=(64, 64),
    color_mode="rgb",
    shuffle=False,
    batch_size=BATCH_SIZE)
history = model.fit(x=train_gen, steps_per_epoch=steps_per_epoch, epochs=EPOCH_NUM,
validation_data=val_gen, validation_steps=val_steps, callbacks=CALLBACKS)
In the middle of training I get the following error message:
Epoch 1/100
302/603 [==============>...............] - ETA: 44:54 - loss: 8.3442 - binary_accuracy: 0.4935
---------------------------------------------------------------------------
UnknownError Traceback (most recent call last)
<ipython-input-45-2fe1e94cba86> in <module>()
1 history = model.fit(x=train_gen, steps_per_epoch=steps_per_epoch, epochs=EPOCH_NUM,
----> 2 validation_data=val_gen, validation_steps=val_steps, callbacks=CALLBACKS)
8 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
58 ctx.ensure_initialized()
59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:
62 if name is not None:
UnknownError: UnidentifiedImageError: cannot identify image file <_io.BytesIO object at 0x7f42ff5c2518>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/script_ops.py", line 243, in __call__
ret = func(*args)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/autograph/impl/api.py", line 309, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/data/ops/dataset_ops.py", line 785, in generator_py_func
values = next(generator_state.get_iterator(iterator_id))
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/data_adapter.py", line 801, in wrapped_generator
for data in generator_fn():
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/data_adapter.py", line 932, in generator_fn
yield x[i]
File "/usr/local/lib/python3.6/dist-packages/keras_preprocessing/image/iterator.py", line 65, in __getitem__
return self._get_batches_of_transformed_samples(index_array)
File "/usr/local/lib/python3.6/dist-packages/keras_preprocessing/image/iterator.py", line 230, in _get_batches_of_transformed_samples
interpolation=self.interpolation)
File "/usr/local/lib/python3.6/dist-packages/keras_preprocessing/image/utils.py", line 114, in load_img
img = pil_image.open(io.BytesIO(f.read()))
File "/usr/local/lib/python3.6/dist-packages/PIL/Image.py", line 2862, in open
"cannot identify image file %r" % (filename if filename else fp)
PIL.UnidentifiedImageError: cannot identify image file <_io.BytesIO object at 0x7f42ff5c2518>
[[{{node PyFunc}}]]
[[IteratorGetNext]] [Op:__inference_train_function_35711]
Function call stack:
train_function
What is this error exactly, and how can I handle it properly? Do I need to write a custom training loop with a GradientTape and try/except blocks, or is there another way?
What confuses me is that it looks like some image cannot be decoded. But how come ImageDataGenerator did not report any error before training started?
Deleting all images and re-uploading them did the trick for me. Closing this question.
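If you'd rather locate the broken files than re-upload everything, a pre-training integrity scan can find them. A minimal sketch, assuming the same training_data_dir as in the question (Pillow >= 7 for UnidentifiedImageError):

import os
from PIL import Image, UnidentifiedImageError

# Walk the dataset and try to open every file the way Keras will;
# any file that fails is corrupt or not an image at all.
for root, _, files in os.walk(training_data_dir):
    for name in files:
        path = os.path.join(root, name)
        try:
            with Image.open(path) as img:
                img.verify()  # cheap integrity check, does not fully decode
        except (UnidentifiedImageError, OSError):
            print("Bad file:", path)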
Reducing the number of workers in model.fit helped me solve this issue.
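For reference, that looks something like this; a sketch assuming the generators from the question:

history = model.fit(x=train_gen, steps_per_epoch=steps_per_epoch, epochs=EPOCH_NUM,
                    validation_data=val_gen, validation_steps=val_steps,
                    callbacks=CALLBACKS,
                    workers=1,                  # fewer loader threads
                    use_multiprocessing=False)  # keep loading in the main process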
I am using this:
model.fit(X_train, y_train, batch_size=128, epochs=30)
and I got this error:
Epoch 1/30
UnimplementedError Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_1768\4221927022.py in
----> 1 model.fit(X_train, y_train, batch_size=128, epochs=30)
c:\Users\decil\anaconda3\lib\site-packages\keras\utils\traceback_utils.py in error_handler(*args, **kwargs)
68 # To get the full stack trace, call:
69 # tf.debugging.disable_traceback_filtering()
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
c:\Users\decil\anaconda3\lib\site-packages\tensorflow\python\eager\execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
50 try:
51 ctx.ensure_initialized()
---> 52 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
53 inputs, attrs, num_outputs)
54 except core._NotOkStatusException as e:
UnimplementedError: Graph execution error:
Detected at node 'sequential/Cast' defined at (most recent call last):
File "c:\Users\decil\anaconda3\lib\runpy.py", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
File "c:\Users\decil\anaconda3\lib\runpy.py", line 87, in _run_code
...
File "c:\Users\decil\anaconda3\lib\site-packages\keras\engine\functional.py", line 762, in _conform_to_reference_input
tensor = tf.cast(tensor, dtype=ref_input.dtype)
Node: 'sequential/Cast'
Cast string to float is not supported
[[{{node sequential/Cast}}]] [Op:__inference_train_function_529]
Please help me with this issue.
I see the problem is here: Cast string to float is not supported. Basically, you're trying to pass a string (maybe the labels?) where the model expects a number (float). I don't have enough info to help you any further than that.
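If the labels (or features) really are strings, converting them to numbers before calling fit should clear the Cast error. A minimal sketch, assuming y_train holds string class names:

import numpy as np
from sklearn.preprocessing import LabelEncoder

# Map string class names to integer ids, e.g. ["cat", "dog"] -> [0, 1]
y_train_num = LabelEncoder().fit_transform(y_train)

# Features must be numeric as well; this fails loudly if they are not
X_train_num = np.asarray(X_train, dtype="float32")

# Use a sparse loss with integer labels
# (or one-hot encode them for categorical_crossentropy)
model.fit(X_train_num, y_train_num, batch_size=128, epochs=30)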
I am working on a project to recognise facial expressions and am training a facial expression recognition model using a convolutional neural network (CNN). I am using TensorFlow 2.4 and Python 3.8.8.
The output:
Found 18282 images belonging to 5 classes.
Found 7178 images belonging to 7 classes.
Below is the error that I got:
2023-01-11 00:09:29.625187: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX AVX2
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
c:/Users/Documents/Bachelor of Computer Science/FYP/Code/Program/Backup Test/TrainEmotionDetector.py:53: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
emotion_model_info = emotion_model.fit_generator(
Epoch 1/20
2023-01-11 00:09:31.756943: W tensorflow/tsl/framework/cpu_allocator_impl.cc:82] Allocation of 31719424 exceeds 10% of free system memory.
Traceback (most recent call last):
File "c:/Users/Documents/Bachelor of Computer Science/FYP/Code/Program/Backup Test/TrainEmotionDetector.py", line 53, in <module>
emotion_model_info = emotion_model.fit_generator(
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 2604, in fit_generator
return self.fit(
File "C:\Users\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 70, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\anaconda3\lib\site-packages\tensorflow\python\eager\execute.py", line 52, in quick_execute
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Graph execution error:
Detected at node 'categorical_crossentropy/softmax_cross_entropy_with_logits' defined at (most recent call last):
File "c:/Users/Documents/Code/Program/Backup Test/TrainEmotionDetector.py", line 53, in <module>
emotion_model_info = emotion_model.fit_generator(
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 2604, in fit_generator
return self.fit(
File "C:\Users\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 1650, in fit
tmp_logs = self.train_function(iterator)
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 1249, in train_function
return step_function(self, iterator)
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 1233, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 1222, in run_step
outputs = model.train_step(data)
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 1024, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 1082, in compute_loss
return self.compiled_loss(
File "C:\Users\anaconda3\lib\site-packages\keras\engine\compile_utils.py", line 265, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "C:\Users\anaconda3\lib\site-packages\keras\losses.py", line 152, in __call__
losses = call_fn(y_true, y_pred)
File "C:\Users\anaconda3\lib\site-packages\keras\losses.py", line 284, in call
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "C:\Users\anaconda3\lib\site-packages\keras\losses.py", line 2004, in categorical_crossentropy
return backend.categorical_crossentropy(
File "C:\Users\anaconda3\lib\site-packages\keras\backend.py", line 5538, in categorical_crossentropy
return tf.nn.softmax_cross_entropy_with_logits(
Node: 'categorical_crossentropy/softmax_cross_entropy_with_logits'
logits and labels must be broadcastable: logits_size=[64,7] labels_size=[64,5]
[[{{node categorical_crossentropy/softmax_cross_entropy_with_logits}}]] [Op:__inference_train_function_1181]
2023-01-11 00:09:32.976764: W tensorflow/core/kernels/data/generator_dataset_op.cc:108] Error occurred when finalizing GeneratorDataset iterator: FAILED_PRECONDITION: Python interpreter state is not initialized. The process may be terminated.
[[{{node PyFunc}}]]
Below is the full code:
# import required packages
import cv2
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Flatten
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
# Initialize image data generator with rescaling
train_data_gen = ImageDataGenerator(rescale=1./255)
validation_data_gen = ImageDataGenerator(rescale=1./255)
# Preprocess all train images
train_generator = train_data_gen.flow_from_directory(
    'data/train',
    target_size=(48, 48),
    batch_size=64,
    color_mode="grayscale",
    class_mode='categorical')

# Preprocess all test images
validation_generator = validation_data_gen.flow_from_directory(
    'data/test',
    target_size=(48, 48),
    batch_size=64,
    color_mode="grayscale",
    class_mode='categorical')
# create model structure
emotion_model = Sequential()
emotion_model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48, 48, 1)))
emotion_model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Dropout(0.25))
emotion_model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Dropout(0.25))
emotion_model.add(Flatten())
emotion_model.add(Dense(1024, activation='relu'))
emotion_model.add(Dropout(0.5))
emotion_model.add(Dense(7, activation='softmax'))
cv2.ocl.setUseOpenCL(False)
emotion_model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.0001, decay=1e-6), metrics=['accuracy'])
# Train the neural network/model
emotion_model_info = emotion_model.fit_generator(
    train_generator,
    steps_per_epoch=28709 // 64,
    epochs=20,
    validation_data=validation_generator,
    validation_steps=7178 // 64)
# save model structure in JSON file
model_json = emotion_model.to_json()
with open("model/emotion_model.json", "w") as json_file:
    json_file.write(model_json)
# save trained model weight in .h5 file
emotion_model.save_weights('model/emotion_model.h5')
I have upgraded TensorFlow to the latest version using pip install --upgrade tensorflow, but nothing works. The script is expected to write the saved model into emotion_model.json and emotion_model.h5. Please help me solve this problem.
logits and labels must be broadcastable: logits_size=[64,7] labels_size=[64,5]

Well, you kind of gave it away when you mentioned that the first set of images had just 5 distinct labels while the next set had 7. Prune two of those label classes and you'll be back in business.
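A quick way to catch this before training is to compare the class maps the two generators discovered; a sketch using the generators from the question:

# Both generators should find the same class subdirectories; here the
# train dir has 5 and the test dir has 7, hence the shape mismatch.
print(train_generator.class_indices)
print(validation_generator.class_indices)
assert train_generator.class_indices == validation_generator.class_indices, \
    "data/train and data/test must contain the same class subfolders"

The final Dense layer should then match as well, e.g. Dense(len(train_generator.class_indices)) instead of a hard-coded 7.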
I am new to computer vision, and I want to fine-tune VGG16 on the imagenet64 dataset. But after downloading the imagenet64 dataset with the code below and then fine-tuning, I got an error. Please help me solve it. Thank you very much.
import os
import shutil
from keras.preprocessing.image import ImageDataGenerator
from keras.applications import VGG16
from keras import models, layers, optimizers
import matplotlib.image as mpimg
import math
checkpoints = '/content/drive/MyDrive/colab_files/imagenet64/'
if not os.path.exists('imagenet64'):
    if not os.path.exists(checkpoints + 'imagenet64.tar'):
        print("Downloading archive...")
        os.chdir(checkpoints)
        !wget https://pjreddie.com/media/files/imagenet64.tar
        os.chdir('/content/')
    print("Copying to local runtime...")
    shutil.copy(checkpoints + 'imagenet64.tar', './imagenet64.tar')
    print("Uncompressing...")
    !tar -xf imagenet64.tar
print("Data ready!")
train_dir='/content/imagenet64/train'
val_dir='/content/imagenet64/val'
batch_size=40
epochs=30
train_datagen= ImageDataGenerator()
val_datagen= ImageDataGenerator()
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=batch_size,
    class_mode='categorical',
    subset='training'
)

validation_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(224, 224),
    batch_size=batch_size,
    class_mode='categorical',
    subset='training'
)
model_fine=model_n
model_fine.trainable = True
set_trainable = False
for layer in model_fine.layers:
    if layer.name == 'block5_conv1':
        set_trainable = True
    if set_trainable:
        layer.trainable = True
    else:
        layer.trainable = False
model_fine.compile(loss='categorical_crossentropy', optimizer=optimizers.Nadam(lr=1e-5), metrics=['acc'])
h = model_fine.fit(
    train_generator,
    steps_per_epoch=train_generator.samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples // batch_size,
)
I tried to fine-tune VGG16 on the imagenet64 dataset and expected the model to train. But I got this error:
Found 1281166 images belonging to 1000 classes.
Found 50000 images belonging to 1000 classes.
/usr/local/lib/python3.8/dist-packages/keras/optimizers/optimizer_v2/nadam.py:78: UserWarning: The `lr` argument is deprecated, use `learning_rate` instead.
  super(Nadam, self).__init__(name, **kwargs)
Epoch 1/30
StagingError Traceback (most recent call last)
in
34
35 model_fine.compile(loss='categorical_crossentropy', optimizer=optimizers.Nadam(lr=1e-5), metrics=['acc'])
---> 36 h = model_fine.fit(
37 train_generator,
38 steps_per_epoch = train_generator.samples // batch_size,
1 frames
/usr/local/lib/python3.8/dist-packages/tensorflow/python/framework/func_graph.py in autograph_handler(*args, **kwargs)
1125 except Exception as e: # pylint:disable=broad-except
1126 if hasattr(e, "ag_error_metadata"):
-> 1127 raise e.ag_error_metadata.to_exception(e)
1128 else:
1129 raise
StagingError: in user code:
File "/usr/local/lib/python3.8/dist-packages/keras/engine/training.py", line 1051, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.8/dist-packages/keras/engine/training.py", line 1040, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.8/dist-packages/keras/engine/training.py", line 1030, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.8/dist-packages/keras/engine/training.py", line 893, in train_step
self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
File "/usr/local/lib/python3.8/dist-packages/keras/optimizers/optimizer_v2/optimizer_v2.py", line 539, in minimize
return self.apply_gradients(grads_and_vars, name=name)
File "/usr/local/lib/python3.8/dist-packages/keras/optimizers/optimizer_v2/optimizer_v2.py", line 646, in apply_gradients
self._create_all_weights(var_list)
File "/usr/local/lib/python3.8/dist-packages/keras/optimizers/optimizer_v2/optimizer_v2.py", line 860, in _create_all_weights
self._create_slots(var_list)
File "/usr/local/lib/python3.8/dist-packages/keras/optimizers/optimizer_v2/nadam.py", line 87, in _create_slots
var_dtype = var_list[0].dtype.base_dtype
IndexError: list index out of range
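A note for anyone hitting the same IndexError: the traceback shows Nadam's _create_slots failing on var_list[0], which means the optimizer was handed an empty list of trainable variables. A quick sanity check before compiling, as a sketch (model_n is not shown in the question; presumably it is the pre-built VGG16):

# If this prints 0, the freeze loop above never marked a layer trainable,
# and the optimizer will fail with "list index out of range".
print(len(model_fine.trainable_weights))

# Inspect the layer names the loop actually sees; if VGG16 is nested inside
# another model, 'block5_conv1' may not appear at this level at all.
for layer in model_fine.layers:
    print(layer.name, layer.trainable)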
def tf_data(path, batch_size=32):
    paths = tf.data.Dataset.list_files(path)
    paths = paths.batch(64)
    dataset = paths.map(prepare_data, tf.data.experimental.AUTOTUNE)
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    dataset = dataset.unbatch()
    dataset = dataset.batch(batch_size)
    dataset = dataset.repeat()
    return dataset
data_train = tf_data('C:/Users/krajat/Desktop/New folder/FYP/New folder/output/train/*/*.jpg', batch_size)
data_test = tf_data('C:/Users/krajat/Desktop/New folder/FYP/New folder/output/test/*/*.jpg', batch_size)
data_train is a RepeatDataset.
history = model.fit(data_train,
                    epochs=5,
                    steps_per_epoch=p[0] // batch_size,
                    validation_data=data_test,
                    validation_steps=p[2] // batch_size,
                    callbacks=[cp, csv_logger, reduce_lr])
After running model.fit(), it throws an error :
Epoch 1/5
---------------------------------------------------------------------------
UnknownError                              Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_20152/1127474368.py in <module>
----> 1 history = model.fit(data_train,
      2                     epochs=5,
      3                     steps_per_epoch = p[0]//batch_size,
      4                     validation_data = data_test,
      5                     validation_steps = p[2]//batch_size,
~\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py in error_handler(*args, **kwargs)
~\Anaconda3\lib\site-packages\tensorflow\python\eager\execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
     52   try:
     53     ctx.ensure_initialized()
---> 54     tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
     55                                         inputs, attrs, num_outputs)
     56   except core._NotOkStatusException as e:
UnknownError: Graph execution error:
AttributeError: 'int' object has no attribute 'ndim'
Traceback (most recent call last):
  File "C:\Users\krajat\Anaconda3\lib\site-packages\tensorflow\python\ops\script_ops.py", line 271, in __call__
    ret = func(*args)
  File "C:\Users\krajat\Anaconda3\lib\site-packages\tensorflow\python\autograph\impl\api.py", line 642, in wrapper
    return func(*args, **kwargs)
  File "C:\Users\krajat\Anaconda3\lib\site-packages\numpy\lib\function_base.py", line 2113, in __call__
    return self._vectorize_call(func=func, args=vargs)
  File "C:\Users\krajat\Anaconda3\lib\site-packages\numpy\lib\function_base.py", line 2187, in _vectorize_call
    res = self._vectorize_call_with_signature(func, args)
  File "C:\Users\krajat\Anaconda3\lib\site-packages\numpy\lib\function_base.py", line 2242, in _vectorize_call_with_signature
    _update_dim_sizes(dim_sizes, result, core_dims)
  File "C:\Users\krajat\Anaconda3\lib\site-packages\numpy\lib\function_base.py", line 1841, in _update_dim_sizes
    if arg.ndim < num_core_dims:
AttributeError: 'int' object has no attribute 'ndim'
  [[{{node PyFunc}}]] [[IteratorGetNext]] [Op:__inference_train_function_400484]
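Since prepare_data isn't shown, one way to narrow this down is to pull a single batch outside of fit and see what the pipeline yields; the inner frames (numpy's _vectorize_call_with_signature) suggest a mapped function returned a plain Python int where an array was expected. A sketch, assuming data_train as built above:

# Iterate one batch eagerly; if the error reproduces here, the problem is in
# prepare_data rather than in model.fit. Otherwise, inspect the element types:
for images, labels in data_train.take(1):
    print(type(images), images.shape, images.dtype)
    print(type(labels), getattr(labels, "shape", None))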
I am doing a Covid-19 face-mask detection project, and when I train on my image dataset I get an error that I can't understand. Please help me solve this problem. The error is given below.
Epoch 1/20
Traceback (most recent call last):
File "Mask_detection.py", line 108, in <module>
epochs=Epoch)
File "C:\Users\ABDEALIVORA\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\keras\engine\training.py", line 108, in _method_wrapper
return method(self, *args, **kwargs)
File "C:\Users\ABDEALIVORA\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1098, in fit
tmp_logs = train_function(iterator)
File "C:\Users\ABDEALIVORA\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\def_function.py", line 780, in __call__
result = self._call(*args, **kwds)
File "C:\Users\ABDEALIVORA\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\def_function.py", line 840, in _call
return self._stateless_fn(*args, **kwds)
File "C:\Users\ABDEALIVORA\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\function.py", line 2829, in __call__
return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
File "C:\Users\ABDEALIVORA\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\function.py", line 1848, in _filtered_call
cancellation_manager=cancellation_manager)
File "C:\Users\ABDEALIVORA\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\function.py", line 1924, in _call_flat
ctx, args, cancellation_manager=cancellation_manager))
File "C:\Users\ABDEALIVORA\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\function.py", line 550, in call
ctx=ctx)
File "C:\Users\ABDEALIVORA\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\execute.py", line 60, in quick_execute
inputs, attrs, num_outputs)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Can not squeeze dim[2], expected a dimension of 1, got 10
[[node categorical_crossentropy/remove_squeezable_dimensions/Squeeze (defined at Mask_detection.py:108) ]] [Op:__inference_train_function_889]
Function call stack:
train_function
2020-09-28 12:37:31.761507: W tensorflow/core/kernels/data/generator_dataset_op.cc:103] Error occurred when finalizing GeneratorDataset iterator: Failed precondition: Python interpreter state is not initialized. The process may be terminated.
[[{{node PyFunc}}]]
I provide my Python code below to help you understand the problem. My system has no GPU, so maybe this error is related to that.
DIRECTORY = 'images'
Categories = ["With_mask","Without_mask"]
batch_size= 10
num_class = 10
Epoch= 20
data = []
label = []

for category in Categories:
    path = os.path.join(DIRECTORY, category)
    for img in os.listdir(path):
        img_path = os.path.join(path, img)
        image = load_img(img_path, target_size=(64, 64))
        image = img_to_array(image)
        image = preprocess_input(image)
        data.append(image)
        label.append(category)
lb = LabelBinarizer()
label = lb.fit_transform(label)
label = to_categorical(label)
data = numpy.asarray(data,dtype = 'float32')
label = numpy.array(label)
print("////")
x_train,x_test,y_train,y_test = train_test_split(data,label,stratify=label,test_size=0.2,random_state=3)
y_train = utils.to_categorical(y_train, num_class)
y_test = utils.to_categorical(y_test, num_class)
mask_model = Sequential()
mask_model.add(Conv2D(32,kernel_size=(3,3),activation= 'linear',padding ="same",input_shape=(64,64,3)))
mask_model.add(LeakyReLU(alpha = 0.3))
mask_model.add(Conv2D(32,kernel_size=(3,3),activation= 'linear',padding ="same",input_shape=(64,64,3)))
mask_model.add(LeakyReLU(alpha=0.3))
mask_model.add(MaxPooling2D(pool_size =(2,2)))
mask_model.add(Conv2D(32,kernel_size=(3,3),activation= 'linear',padding ="same",input_shape=(64,64,3)))
mask_model.add(LeakyReLU(alpha=0.3))
mask_model.add(MaxPooling2D(pool_size =(2,2)))
mask_model.add(Flatten())
mask_model.add(Dense(128,activation = "linear"))
mask_model.add(LeakyReLU(alpha=0.3))
mask_model.add(Dense(10,activation= "softmax"))
mask_model.compile(optimizer ='adam',loss = 'categorical_crossentropy',metrics =['accuracy'] )
mask_model.summary()
datagen = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True)
datagen.fit(x_train)

mask_model.fit(datagen.flow(x_train, y_train, batch_size=10),
               steps_per_epoch=len(x_train),
               validation_data=(x_test, y_test),
               validation_steps=len(x_test) // batch_size,
               workers=0,
               epochs=Epoch)
print("//")
for e in range(Epoch):
    print('Epoch', e)
    batches = 0
    for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
        mask_model.fit(x_batch, y_batch)
        batches += 1
        if batches >= len(x_train) / 32:
            break
mask_model.save("Mask_model/mask_model.h5")
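One thing stands out in this code: the labels are one-hot encoded twice (to_categorical(label) after the LabelBinarizer, then utils.to_categorical(y_train, num_class) again after the split), which yields 3-D label arrays and would explain "Can not squeeze dim[2], expected a dimension of 1, got 10". A sketch of the check and the single-pass encoding, assuming the variables above:

# If y_train is (n, 2, 10) instead of (n, num_classes), it was encoded twice.
print(y_train.shape)

# Encode once: with two folder names, LabelBinarizer gives shape (n, 1)
# and a single to_categorical gives (n, 2) -- stop there.
lb = LabelBinarizer()
label = lb.fit_transform(label)
label = to_categorical(label)

# Then drop the later utils.to_categorical(...) calls after train_test_split,
# and make the final layer Dense(2) to match the two mask classes.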