I want to load the trained model and resume training from the last checkpoint; can anyone help me with that?
I am using TensorFlow 2.0. I have a low-spec PC, so I can't train my model in one go.
import tensorflow as tf
from tensorflow.keras import models, layers
import matplotlib.pyplot as plt
import datetime
from tensorflow.keras.callbacks import TensorBoard
IMAGE_SIZE = 224
CHANNELS = 3
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=10,
    horizontal_flip=True
)
train_generator = train_datagen.flow_from_directory(
    'data/train/',
    color_mode="rgb",
    target_size=(IMAGE_SIZE, IMAGE_SIZE),
    batch_size=32,
    class_mode="sparse",
)
print(train_generator.class_indices)
class_names = list(train_generator.class_indices.keys())
print(class_names)
validation_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=10,
    horizontal_flip=True)
validation_generator = validation_datagen.flow_from_directory(
    'data/validation/',
    target_size=(IMAGE_SIZE, IMAGE_SIZE),
    batch_size=32,
    class_mode="sparse"
)
test_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=10,
    horizontal_flip=True)
test_generator = test_datagen.flow_from_directory(
    'data/test/',
    target_size=(IMAGE_SIZE, IMAGE_SIZE),
    batch_size=32,
    class_mode="sparse"
)
input_shape = (IMAGE_SIZE, IMAGE_SIZE, CHANNELS)
n_classes = 2
model = models.Sequential([
    layers.InputLayer(input_shape=input_shape),
    layers.Conv2D(32, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(n_classes, activation='softmax'),
])
model.summary()
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics=['accuracy']
)
import os
checkpoint_path = "teta/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(
    checkpoint_path, save_weights_only=True, verbose=1)
history = model.fit(
    train_generator,
    steps_per_epoch=30,
    batch_size=32,
    validation_data=validation_generator,
    validation_steps=22,
    verbose=1,
    callbacks=[cp_callback],
    epochs=2,
)
I would recommend saving the whole model with model.save(...) and then loading it again with tf.keras.models.load_model(...); see the TensorFlow guide on saving and loading models for more information. In your case, since the checkpoint callback saves weights only, you can just run:
model.load_weights('teta/your_checkpoint')
before calling model.fit(...) again.
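A minimal sketch of the whole-model route (the file name my_model.h5 is just a placeholder; pick any path). Unlike load_weights, load_model also restores the optimizer state, so training resumes exactly where it stopped:
import tensorflow as tf

# After each training session, save architecture + weights + optimizer state.
model.save('my_model.h5')

# Later, in a fresh session, restore everything and keep training.
model = tf.keras.models.load_model('my_model.h5')
model.fit(
    train_generator,
    validation_data=validation_generator,
    epochs=2,  # train in small chunks your machine can handle
)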
Related
I want to predict a time series using a CNN-LSTM model. This is my model:
def generate_model():
    model = keras.models.Sequential([
        Conv1D(64, 3, padding='causal', activation='relu', input_shape=(24, 20)),
        BatchNormalization(),
        Conv1D(64, 3, padding='causal', activation='relu'),
        BatchNormalization(),
        Conv1D(32, 3, padding='causal', activation='relu'),
        MaxPool1D(3),
        LSTM(100, dropout=0.2, return_sequences=True),
        LSTM(50, dropout=0.3),
        Dense(1, activation='relu')
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(),
                  loss='mean_squared_error',
                  metrics=[tf.keras.metrics.MeanAbsoluteError(), tf.keras.metrics.RootMeanSquaredError(), RSquare()])
    return model
Then I use this line of code to train my model:
history1 = model1.fit(X1_train, y1_train, epochs=200, batch_size=32, validation_data=(X1_test, y1_test), verbose=2, callbacks=callbacks)
But the values of the loss and metrics stay the same and do not change from epoch to epoch.
These are my callbacks, just in case:
from keras.callbacks import LearningRateScheduler
def decay_schedule(epoch, lr):
    lr = lr - 0.0001
    return lr
lr_scheduler = LearningRateScheduler(decay_schedule)
callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='max', min_delta=1e-3, patience=50)
callbacks=[lr_scheduler, callback]
Thank you in advance.
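Two observations on the callbacks above (hedged notes on the posted code, not a verified fix): decay_schedule subtracts a constant 0.0001 every epoch, so with Adam's default learning rate of 0.001 the rate hits zero around epoch 10 and then goes negative, which stalls or destabilizes training and matches the symptom of frozen loss and metrics. Also, EarlyStopping is monitoring val_loss with mode='max', which treats a rising loss as an improvement. A clamped scheduler and a corrected monitor mode would look like:
from tensorflow.keras.callbacks import LearningRateScheduler, EarlyStopping

def decay_schedule(epoch, lr):
    # Linear decay, but never below a small positive floor.
    return max(lr - 0.0001, 1e-5)

lr_scheduler = LearningRateScheduler(decay_schedule)
# Use mode='min' (or the default 'auto') when monitoring a loss.
callback = EarlyStopping(monitor='val_loss', mode='min', min_delta=1e-3, patience=50)
callbacks = [lr_scheduler, callback]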
The code:
from google.colab import drive
import tensorflow as tf
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Conv2D, MaxPool2D, Flatten
from tensorflow.python.keras.optimizer_v1 import Adam
import numpy as np
import cv2
import matplotlib.pyplot as plt
from tensorflow.python.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
device_list = tf.test.gpu_device_name()
if device_list != '/device:GPU:0':
    raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_list))
datagen_train = tf.keras.preprocessing.image.ImageDataGenerator()
datagen_val = tf.keras.preprocessing.image.ImageDataGenerator()
datagen_test = tf.keras.preprocessing.image.ImageDataGenerator()
size = 128
batch_size=20
tf.compat.v1.disable_eager_execution()
train_set = datagen_train.flow_from_directory(
    "drive/MyDrive/train",
    target_size=(size, size),
    color_mode="grayscale",
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True)
val_set = datagen_val.flow_from_directory(
    "drive/MyDrive/valid",
    target_size=(size, size),
    color_mode="grayscale",
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True)
test_set = datagen_test.flow_from_directory(  # was datagen_train; datagen_test is the generator created for the test set
    "drive/MyDrive/test",
    target_size=(size, size),
    color_mode="grayscale",
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True)
imgs,labels = next(test_set)
model = Sequential([
    Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu', input_shape=(128, 128, 1)),
    MaxPool2D(pool_size=(2, 2), strides=2),
    Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu'),
    MaxPool2D(pool_size=(2, 2), strides=2),
    Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu'),
    MaxPool2D(pool_size=(2, 2), strides=2),
    Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'),
    MaxPool2D(pool_size=(2, 2), strides=2),
    Flatten(),
    Dense(units=256, activation='relu'),
    Dense(units=512, activation='relu'),
    Dense(units=2, activation='softmax')
])
checkpoint = ModelCheckpoint("./model.h5", monitor = 'val_acc', verbose=1, save_best_only = True, mode='max')
earlystopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=1, restore_best_weights=True)  # was 'vall_loss', a typo Keras would silently warn about
reducelearningrate = ReduceLROnPlateau(monitor='val_loss', factor=0.2,patience=3,verbose=1,min_delta=0.0001)
callbacks_list = [earlystopping,checkpoint,reducelearningrate]
ep = 30
opt = Adam(lr=0.0001)
model.summary()
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x=train_set, epochs=ep,steps_per_epoch=601, validation_data = val_set, validation_steps = 209, verbose=1)
model.save('Drowsines_Detector2.h5')
model.evaluate(x = imgs, y = labels, verbose = 2)
On the first run in Google Colab, the program takes an hour and 30 minutes for the first epoch, then gets stuck on step 601/601 of the first epoch. After cancelling and rerunning it, it completes the first epoch very fast, in 15 or 16 seconds, but then it sometimes gets stuck on step 600/601 and sometimes on step 601/601. It never continues to the second epoch. How can I fix this?
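A hedged observation rather than a guaranteed fix: the progress bar sits on the last step (601/601) while the validation pass runs, so a long validation phase can look like a hang. It may also help to derive the step counts from the generators instead of hard-coding 601 and 209, and to pass the callbacks_list that the code defines but never hands to fit(); finally, tf.compat.v1.disable_eager_execution() combined with the private tensorflow.python.keras imports is a known source of odd fit() behaviour, and the public tensorflow.keras namespace is usually safer. A sketch against the code above:
steps_per_epoch = len(train_set)   # ceil(num_train_images / batch_size)
validation_steps = len(val_set)    # ceil(num_val_images / batch_size)
model.fit(x=train_set,
          epochs=ep,
          steps_per_epoch=steps_per_epoch,
          validation_data=val_set,
          validation_steps=validation_steps,
          callbacks=callbacks_list,  # the original call never passed these
          verbose=1)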
I am trying to take my CNN model and add an LSTM layer. It would be beneficial to do so given that my images are ordered as a time series. I've loaded each of my images using ImageDataGenerator and flow_from_directory. I am unable to add a TimeDistributed layer to make my model work. Any help would be greatly appreciated!
model = Sequential()
model.add(TimeDistributed(Conv2D(16, (3, 3), padding='same', strides=(2, 2),
                                 activation='relu', input_shape=(224, 224, 3))))
model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
model.add(Dropout(0.5))
model.add(TimeDistributed(Conv2D(32, (3, 3), padding='same', strides=(2, 2),
                                 activation='relu')))
model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
model.add(Dropout(0.5))
model.add(TimeDistributed(Conv2D(64, (3, 3), padding='same', strides=(2, 2),
                                 activation='relu')))
model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
model.add(Dropout(0.5))
model.add(TimeDistributed(Flatten()))
model.add(LSTM(units=128, return_sequences=False))
model.add(LSTM(units=64, return_sequences=False))
model.add(Dense(32))
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
filenames = os.listdir("train/")
categories = []
for f_name in filenames:
    decision = f_name.split('.')[0]
    if decision == 'cat':
        categories.append(0)
    if decision == 'dog':
        categories.append(1)
dataset = pd.DataFrame({
    'filename': filenames,
    'category': categories
})
dataset["category"] = dataset["category"].replace({0: 'cat', 1: 'dog'})
train_df,validate_df = train_test_split(dataset, test_size=0.20,random_state=42)
train_df = train_df.reset_index(drop=True)
validate_df = validate_df.reset_index(drop=True)
total_train = train_df.shape[0]
total_validate = validate_df.shape[0]
train_datagen = ImageDataGenerator(
    rescale=1./255,
    horizontal_flip=False)
train_generator = train_datagen.flow_from_directory(train_df,
    "train/",
    x_col='filename',
    y_col='category',
    target_size=img_size,
    color_mode='rgb',
    class_mode='categorical',
    shuffle=True,
    batch_size=batch_size)
validation_datagen = ImageDataGenerator(
    rescale=1./255,
    horizontal_flip=False)
validation_generator = validation_datagen.flow_from_directory(
    validate_df,
    "train/",
    x_col='filename',
    y_col='category',
    target_size=img_size,
    color_mode='rgb',
    class_mode='categorical',
    shuffle=False,
    batch_size=batch_size
)
model.fit_generator(
    train_generator,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=total_validate//batch_size,
    steps_per_epoch=total_train//batch_size,
    callbacks=callbacks,
    class_weight=class_weight
)
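Two Keras API details in the snippet above are worth flagging (notes on the posted code, not a drop-in solution): inside a Sequential model the input_shape belongs on the TimeDistributed wrapper, not on the wrapped Conv2D, and it must include the time axis, i.e. (timesteps, height, width, channels); and stacking two LSTMs requires return_sequences=True on the first so the second receives a sequence. Note also that flow_from_directory yields 4D batches of single frames (and the x_col/y_col arguments used here belong to flow_from_dataframe), so a custom generator that groups frames into sequences is needed to feed a TimeDistributed model. A sketch of the layer changes, with SEQ_LEN as a hypothetical frames-per-sequence value:
SEQ_LEN = 8  # hypothetical number of frames per sequence

model = Sequential()
# input_shape goes on the TimeDistributed wrapper and includes the time axis
model.add(TimeDistributed(
    Conv2D(16, (3, 3), padding='same', strides=(2, 2), activation='relu'),
    input_shape=(SEQ_LEN, 224, 224, 3)))
model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
model.add(TimeDistributed(Flatten()))
model.add(LSTM(units=128, return_sequences=True))  # True: the next layer is an LSTM
model.add(LSTM(units=64, return_sequences=False))
model.add(Dense(2, activation='softmax'))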
I'm trying to use ImageDataGenerator() for my image datasets.
Here is my image augmentation code:
batch_size = 16
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
# Use flow from dataframe
train_generator = train_datagen.flow_from_dataframe(
    dataframe=train,
    directory="data/train",
    x_col="id",
    y_col=["not_ready", "ready"],
    target_size=(300, 300),
    batch_size=batch_size,
    class_mode="raw",
    validate_filenames=False)
validation_generator = test_datagen.flow_from_dataframe(
    dataframe=validation,
    directory="data/validation",
    x_col="id",
    y_col=["not_ready", "ready"],
    target_size=(300, 300),
    batch_size=batch_size,
    class_mode="raw",
    validate_filenames=False)
Then I plug that into my model:
model = Sequential([
    layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu', input_shape=(300, 300, 1)),
    layers.MaxPooling2D(pool_size=(2, 2)),
    layers.Dropout(0.5),
    layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D(pool_size=(2, 2)),
    layers.Dropout(0.5),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(32, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(2, activation='sigmoid')
])
Use EarlyStopping:
early_stopping = EarlyStopping(monitor='val_loss',mode='min',verbose=1,patience=10, restore_best_weights=True)
Compile and Fit the model:
model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(
    train_generator,
    steps_per_epoch=train_generator.n // batch_size,
    epochs=100,
    validation_data=validation_generator,
    validation_steps=validation_generator.n // batch_size,
    callbacks=[early_stopping])
That is when the code crashes and gives this error message:
/AppleInternal/Library/BuildRoots/8d3bda53-8d9c-11ec-abd7-fa6a1964e34e/Library/Caches/com.apple.xbs/Sources/MetalPerformanceShaders/MPSNDArray/Kernels/MPSNDArrayConvolution.mm:2317: failed assertion `output channels should be divisible by group'
I tried changing the number of output neurons, but that doesn't work. I don't know what to do anymore. Please help me. Thank you so much.
Got it. It is because I use grayscale images, so I have to add the color_mode keyword argument to both flow_from_dataframe() calls and set it to "grayscale":
train_generator = train_datagen.flow_from_dataframe(
    dataframe=train,
    directory="data/train",
    x_col="id",
    y_col=["not_ready", "ready"],
    target_size=(300, 300),
    batch_size=batch_size,
    class_mode="raw",
    color_mode="grayscale")
validation_generator = test_datagen.flow_from_dataframe(
    dataframe=validation,
    directory="data/validation",
    x_col="id",
    y_col=["not_ready", "ready"],
    target_size=(300, 300),
    batch_size=batch_size,
    class_mode="raw",
    color_mode="grayscale")
I want to work on this project from GitHub: https://github.com/kesaroid/Glaucoma-Detection
And here is the CNN.py file that creates the Keras h5 file:
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from os import path, environ
from imgaug import augmenters as iaa
from keras import backend as K
from keras import optimizers
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras.layers import BatchNormalization, Activation
from keras.layers import Input, Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
img_width, img_height = 256, 256
channels = 3
input_shape = channels, img_width, img_height if K.image_data_format() == 'channels_first' \
else img_width, img_height, channels
train_data_dir = path.join('data', 'train')
validation_data_dir = path.join('data', 'validation')
nb_train_samples = int(environ.get('TRAINING_SAMPLES', 20))
nb_validation_samples = int(environ.get('VALIDATION_SAMPLES', 20))
batch_size = 16
epochs = 100
input_tensor = Input(shape=input_shape)
block1 = BatchNormalization(name='norm_0')(input_tensor)
# Block 1
block1 = Conv2D(8, (3, 3), name='conv_11', activation='relu')(block1)
block1 = Conv2D(16, (3, 3), name='conv_12', activation='relu')(block1)
block1 = Conv2D(32, (3, 3), name='conv_13', activation='relu')(block1)
block1 = Conv2D(64, (3, 3), name='conv_14', activation='relu')(block1)
block1 = MaxPooling2D(pool_size=(2, 2))(block1)
block1 = BatchNormalization(name='norm_1')(block1)
block1 = Conv2D(16, 1)(block1)
# Block 2
block2 = Conv2D(32, (3, 3), name='conv_21', activation='relu')(block1)
block2 = Conv2D(64, (3, 3), name='conv_22', activation='relu')(block2)
block2 = Conv2D(64, (3, 3), name='conv_23', activation='relu')(block2)
block2 = Conv2D(128, (3, 3), name='conv_24', activation='relu')(block2)
block2 = MaxPooling2D(pool_size=(2, 2))(block2)
block2 = BatchNormalization(name='norm_2')(block2)
block2 = Conv2D(64, 1)(block2)
# Block 3
block3 = Conv2D(64, (3, 3), name='conv_31', activation='relu')(block2)
block3 = Conv2D(128, (3, 3), name='conv_32', activation='relu')(block3)
block3 = Conv2D(128, (3, 3), name='conv_33', activation='relu')(block3)
block3 = Conv2D(64, (3, 3), name='conv_34', activation='relu')(block3)
block3 = MaxPooling2D(pool_size=(2, 2))(block3)
block3 = BatchNormalization(name='norm_3')(block3)
# Block 4
block4 = Conv2D(64, (3, 3), name='conv_41', activation='relu')(block3)
block4 = Conv2D(32, (3, 3), name='conv_42', activation='relu')(block4)
block4 = Conv2D(16, (3, 3), name='conv_43', activation='relu')(block4)
block4 = Conv2D(8, (2, 2), name='conv_44', activation='relu')(block4)
block4 = MaxPooling2D(pool_size=(2, 2))(block4)
block4 = BatchNormalization(name='norm_4')(block4)
block4 = Conv2D(2, 1)(block4)
block5 = GlobalAveragePooling2D()(block4)
output = Activation('softmax')(block5)
model = Model(inputs=[input_tensor], outputs=[output])
model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False),
              metrics=['accuracy'])
# Initiate the train and test generators with data Augmentation
sometimes = lambda aug: iaa.Sometimes(0.6, aug)
seq = iaa.Sequential([
    iaa.GaussianBlur(sigma=(0, 1.0)),
    iaa.Sharpen(alpha=1, lightness=0),
    iaa.CoarseDropout(p=0.1, size_percent=0.15),
    sometimes(iaa.Affine(
        scale={'x': (0.8, 1.2), 'y': (0.8, 1.2)},
        translate_percent={'x': (-0.2, 0.2), 'y': (-0.2, 0.2)},
        rotate=(-30, 30),
        shear=(-16, 16)))
])
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    preprocessing_function=seq.augment_image,
    horizontal_flip=True,
    vertical_flip=True)
test_datagen = ImageDataGenerator(
    rescale=1. / 255,
    horizontal_flip=True,
    vertical_flip=True)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_height, img_width),
    class_mode='categorical')
checkpoint = ModelCheckpoint('f1.h5', monitor='acc', verbose=1, save_best_only=True, save_weights_only=False,
                             mode='auto', period=1)
reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.1, patience=2, verbose=0, mode='auto', cooldown=0, min_lr=0)
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size,
    callbacks=[checkpoint, reduce_lr]
)
But I get this error message:
C:\anaconda3\envs\tf2.7\python.exe C:/Users/yigit/Documents/GitHub/Glaucoma-Detection/CNN.py
Traceback (most recent call last):
File "C:\Users\yigit\Documents\GitHub\Glaucoma-Detection\CNN.py", line 38, in
block1 = MaxPooling2D(pool_size=(2, 2))(block1)
File "C:\anaconda3\envs\tf2.7\lib\site-packages\keras\utils\traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\anaconda3\envs\tf2.7\lib\site-packages\keras\engine\input_spec.py", line 213, in assert_input_compatibility
raise ValueError(f'Input {input_index} of layer "{layer_name}" '
ValueError: Input 0 of layer "max_pooling2d" is incompatible with the layer: expected ndim=4, found ndim=6. Full shape received: (None, 3, 256, 248, 248, 64)
Process finished with exit code 1
What should I do, guys?
tf.keras.layers.MaxPool2D expects a 4D input tensor with shape (batch_size, rows, cols, channels). In this case you are adding extra dimensions. Make sure your input is 4D.
Working sample code
import tensorflow as tf
input_shape = (4, 28, 28, 3)
x = tf.random.normal(input_shape)
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.InputLayer(input_shape=input_shape[1:]))
model.add(tf.keras.layers.Conv2D(filters=32,
                                 kernel_size=2,
                                 activation='relu'))
model.add(tf.keras.layers.MaxPool2D(pool_size=2))
output = model(x)
Output
TensorShape([4, 13, 13, 32])
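For the script in the question specifically, the extra dimensions most likely come from the input_shape line near the top: a conditional expression binds tighter than the surrounding commas, so that statement builds the 5-tuple (3, 256, 256, 256, 3) instead of choosing between two 3-tuples. Parenthesizing both branches restores the intended shapes:
# Before (builds a 5-tuple, which leads to the 6D tensor in the error):
# input_shape = channels, img_width, img_height if K.image_data_format() == 'channels_first' \
#     else img_width, img_height, channels

# After (selects one 3-tuple or the other):
input_shape = (channels, img_width, img_height) \
    if K.image_data_format() == 'channels_first' \
    else (img_width, img_height, channels)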