Keras neural net assertion error - python

I am having trouble with a Keras AssertionError and would like to ask if anyone can please help.
I have run Keras neural nets before with 2D convolution and never seen this error.
#-----------------BEGIN FUNCTION 1-----------------
def create_model(input_size1, num_labels, conv1_num_filters, conv1_filter_size1,
                 conv2_num_filters, conv2_filter_size1, pool1_1, dropout1,
                 pool2_1, dropout2, neurons1, reg_l2, neurons2, reg_l2_2):
    model = Sequential()
    # First 1D convolution block
    model.add(Convolution1D(conv1_num_filters, conv1_filter_size1, init='glorot_uniform',
                            border_mode='same', input_shape=(1, input_size1),
                            activation='relu'))
    model.add(MaxPooling1D(pool_length=pool1_1, border_mode='same'))
    model.add(BatchNormalization(epsilon=0.001, mode=0, axis=1, momentum=0.99,
                                 weights=None, beta_init='zero', gamma_init='one',
                                 gamma_regularizer=None, beta_regularizer=None))
    # Second 1D convolution block
    model.add(Convolution1D(conv2_num_filters, conv2_filter_size1, init='glorot_uniform',
                            activation='relu', border_mode='same'))
    model.add(MaxPooling1D(pool_length=pool1_1, border_mode='same'))
    model.add(Dropout(dropout1))
    model.add(Flatten())
    model.add(BatchNormalization(epsilon=0.001, mode=0, axis=1, momentum=0.99,
                                 weights=None, beta_init='zero', gamma_init='one',
                                 gamma_regularizer=None, beta_regularizer=None))
    # Fully connected head with L2 regularization
    model.add(Dense(neurons1, W_regularizer=l2(reg_l2), init='glorot_uniform', activation='relu'))
    model.add(Dropout(dropout2))
    model.add(BatchNormalization(epsilon=0.001, mode=0, axis=1, momentum=0.99,
                                 weights=None, beta_init='zero', gamma_init='one',
                                 gamma_regularizer=None, beta_regularizer=None))
    model.add(Dense(neurons2, W_regularizer=l2(reg_l2_2), init='glorot_uniform', activation='relu'))
    model.add(Dense(num_labels, init='glorot_uniform', activation='tanh'))
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)  # 0.01
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    print(model.summary())
    #exit()
    return model
#-----------------END FUNCTION 1-----------------

model2 = create_model(input_size1, num_labels, conv1_num_filters,
                      conv1_filter_size1, conv2_num_filters,
                      conv2_filter_size1, pool1_1, dropout1, pool2_1,
                      dropout2, neurons1, reg_l2, neurons2, reg_l2_2)

# Add a singleton axis so the data matches input_shape=(1, input_size1)
x_train_ex = np.expand_dims(x_train, 1)
x_test_ex = np.expand_dims(x_test, 1)

# One-hot encode the integer labels
from keras.utils.np_utils import to_categorical
y_train_ex = to_categorical(y_train, len(np.unique(y_train)))
y_test_ex = to_categorical(y_test, len(np.unique(y_train)))

model2.fit(x_train_ex, y_train_ex, batch_size=batch_size, nb_epoch=nb_epoch,
           verbose=1, validation_data=(x_test_ex, y_test_ex))
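Before fitting, it can also help to print the array shapes and confirm they line up with what the model expects (a quick check using the variables above):

print(x_train_ex.shape)  # should be (num_samples, 1, input_size1)
print(y_train_ex.shape)  # should be (num_samples, num_labels)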
I get an error saying:
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-41-c4780c441db5> in <module>()
26
27 model2.fit(x_train_ex, y_train_ex, batch_size=batch_size, nb_epoch=nb_epoch,
---> 28 verbose=1, validation_data=(x_test_ex, y_test_ex))
29 #print(model2.score(x_train_ex, y_train))
30 #print(model2.score(x_test_ex, y_test))
.........(Lots more error messages)
AssertionError:
Thank you very much!

The problem seems to have gone away when I upgraded from Keras 1.1.1 to 1.2.0, so it may have been a version bug.
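If you hit the same error, it is worth confirming which Keras version is installed before digging into the model itself (a quick check; 1.2.0 is simply the version that worked here):

import keras
print(keras.__version__)  # the AssertionError went away after upgrading to 1.2.0

and then, from the shell, something like pip install --upgrade keras==1.2.0.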

Related

Loss does not change during training of my model

I want to predict a time series using a CNN-LSTM model. This is my model:
def generate_model():
    model = keras.models.Sequential([
        Conv1D(64, 3, padding='causal', activation='relu', input_shape=(24, 20)),
        BatchNormalization(),
        Conv1D(64, 3, padding='causal', activation='relu'),
        BatchNormalization(),
        Conv1D(32, 3, padding='causal', activation='relu'),
        MaxPool1D(3),
        LSTM(100, dropout=0.2, return_sequences=True),
        LSTM(50, dropout=0.3),
        Dense(1, activation='relu')
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(),
                  loss='mean_squared_error',
                  metrics=[tf.keras.metrics.MeanAbsoluteError(),
                           tf.keras.metrics.RootMeanSquaredError(),
                           RSquare()])  # RSquare presumably from tensorflow_addons
    return model
Then I use this line of code to train my model:
history1 = model1.fit(X1_train, y1_train, epochs=200, batch_size=32, validation_data=(X1_test, y1_test), verbose=2, callbacks=callbacks)
But the values of the loss and the metrics stay the same and do not change. This is how they look.
These are my callbacks, just in case:
from keras.callbacks import LearningRateScheduler

def decay_schedule(epoch, lr):
    # linear decay: subtract 1e-4 from the learning rate every epoch
    lr = lr - 0.0001
    return lr

lr_scheduler = LearningRateScheduler(decay_schedule)
callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='max',
                                            min_delta=1e-3, patience=50)
callbacks = [lr_scheduler, callback]
Thank you in advance.
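Two things stand out in the snippet above and may be worth ruling out. First, decay_schedule subtracts a fixed 0.0001 every epoch regardless of the current value, so the learning rate eventually reaches zero and then goes negative, at which point the weights stop improving. Second, EarlyStopping is monitoring val_loss with mode='max', which treats a rising loss as an improvement; mode='min' (or the default 'auto') is the usual choice. A minimal sketch of a clamped scheduler that also logs the rate each epoch:

def decay_schedule(epoch, lr):
    new_lr = max(lr - 0.0001, 1e-6)  # clamp so the rate never hits zero or goes negative
    print('epoch', epoch, 'lr', new_lr)
    return new_lr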

Keras, Google Colab freezes on the last step of the first epoch

The code:
from google.colab import drive
import tensorflow as tf
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Conv2D, MaxPool2D, Flatten
from tensorflow.python.keras.optimizer_v1 import Adam
import numpy as np
import cv2
import matplotlib.pyplot as plt
from tensorflow.python.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau

device_list = tf.test.gpu_device_name()
if device_list != '/device:GPU:0':
    raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_list))

datagen_train = tf.keras.preprocessing.image.ImageDataGenerator()
datagen_val = tf.keras.preprocessing.image.ImageDataGenerator()
datagen_test = tf.keras.preprocessing.image.ImageDataGenerator()

size = 128
batch_size = 20
tf.compat.v1.disable_eager_execution()

train_set = datagen_train.flow_from_directory("drive/MyDrive/train",
                                              target_size=(size, size),
                                              color_mode="grayscale",
                                              batch_size=batch_size,
                                              class_mode='categorical',
                                              shuffle=True)
val_set = datagen_val.flow_from_directory("drive/MyDrive/valid",
                                          target_size=(size, size),
                                          color_mode="grayscale",
                                          batch_size=batch_size,
                                          class_mode='categorical',
                                          shuffle=True)
test_set = datagen_train.flow_from_directory("drive/MyDrive/test",
                                             target_size=(size, size),
                                             color_mode="grayscale",
                                             batch_size=batch_size,
                                             class_mode='categorical',
                                             shuffle=True)
imgs, labels = next(test_set)

model = Sequential([
    Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu', input_shape=(128, 128, 1)),
    MaxPool2D(pool_size=(2, 2), strides=2),
    Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu'),
    MaxPool2D(pool_size=(2, 2), strides=2),
    Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu'),
    MaxPool2D(pool_size=(2, 2), strides=2),
    Conv2D(filters=512, kernel_size=(3, 3), padding='same', activation='relu'),
    MaxPool2D(pool_size=(2, 2), strides=2),
    Flatten(),
    Dense(units=256, activation='relu'),
    Dense(units=512, activation='relu'),
    Dense(units=2, activation='softmax')
])

checkpoint = ModelCheckpoint("./model.h5", monitor='val_acc', verbose=1, save_best_only=True, mode='max')
earlystopping = EarlyStopping(monitor='vall_loss', min_delta=0, patience=3, verbose=1, restore_best_weights=True)
reducelearningrate = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, verbose=1, min_delta=0.0001)
callbacks_list = [earlystopping, checkpoint, reducelearningrate]

ep = 30
opt = Adam(lr=0.0001)
model.summary()
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x=train_set, epochs=ep, steps_per_epoch=601, validation_data=val_set, validation_steps=209, verbose=1)
model.save('Drowsines_Detector2.h5')
model.evaluate(x=imgs, y=labels, verbose=2)
On its first run in Google Colab, the program takes 1 hour and 30 minutes for the first epoch and then gets stuck at step 601/601. After cancelling and rerunning it, it completes the first epoch very fast, in 15 or 16 seconds, but then sometimes gets stuck at step 600/601 and sometimes at step 601/601. Either way it never continues to the second epoch. How can I fix this?
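One thing worth ruling out (a sketch of a diagnostic, not a confirmed fix): if the hard-coded steps_per_epoch=601 or validation_steps=209 do not match the number of batches the iterators actually yield, fit can appear to hang at the epoch boundary. The counts can be derived from the iterators instead, since Keras directory iterators implement len() as the number of batches per pass:

steps = len(train_set)      # ceil(num_train_samples / batch_size)
val_steps = len(val_set)    # ceil(num_val_samples / batch_size)
model.fit(x=train_set, epochs=ep, steps_per_epoch=steps,
          validation_data=val_set, validation_steps=val_steps, verbose=1)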

Keep getting NaN values for scoring when tuning a KerasRegressor

I am trying to tune hyperparameters on a KerasRegressor.
However, I only get NaN results, as shown below. May I know what causes the issue?
Everything works fine when I compile my model, but the scoring for the best parameters always shows NaNs. The metric I used is RMSE.
Code snippet below:
def create_model(optimizer, activation, lstm_unit_1, lstm_unit_2, lstm_unit_3, init='glorot_uniform'):
    model = Sequential()
    model.add(Conv1D(lstm_unit_1, kernel_size=1, activation=activation,
                     input_shape=(trainX.shape[1], trainX.shape[2])))
    model.add(GRU(lstm_unit_2, activation=activation, return_sequences=True,
                  input_shape=(trainX.shape[1], trainX.shape[2])))
    model.add(GRU(lstm_unit_3, activation=activation, return_sequences=True,
                  input_shape=(trainX.shape[1], trainX.shape[2])))
    model.add(Dense(units=1))
    model.add(Flatten())
    model.compile(optimizer=optimizer, loss='mse', metrics=['mean_squared_error'])
    return model

model = tf.keras.wrappers.scikit_learn.KerasRegressor(build_fn=create_model,
                                                      epochs=150,
                                                      verbose=False)

batch_size = [16, 32, 64, 128]
lstm_unit_1 = [128, 256, 512]
lstm_unit_2 = lstm_unit_1.copy()
lstm_unit_3 = lstm_unit_1.copy()
optimizer = ['SGD', 'Adam', 'Adamax', 'RMSprop']
activation = ['relu', 'linear', 'sigmoid']
param_grid = dict(lstm_unit_1=lstm_unit_1,
                  lstm_unit_2=lstm_unit_2,
                  lstm_unit_3=lstm_unit_3,
                  optimizer=optimizer,
                  activation=activation,
                  batch_size=batch_size)

warnings.filterwarnings("ignore")
random = RandomizedSearchCV(estimator=model, param_distributions=param_grid,
                            n_jobs=-1, scoring='neg_mean_squared_error')
random_result = random.fit(trainX, trainY)
print(random_result.best_score_)
print(random_result.best_params_)
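A debugging step that often explains NaN scores here (a suggestion, not a confirmed diagnosis): by default scikit-learn catches exceptions raised inside fit for a parameter candidate and records that candidate's score as NaN. Re-running the search with error_score='raise' (and n_jobs=1, so the traceback stays readable) surfaces the real error:

random = RandomizedSearchCV(estimator=model, param_distributions=param_grid,
                            n_jobs=1, scoring='neg_mean_squared_error',
                            error_score='raise')
random_result = random.fit(trainX, trainY)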

Keras Model asks for compiling even after compile call

I have a simple Keras model:
model_2 = Sequential()
model_2.add(Dense(32, input_shape=(500,)))
model_2.add(Dense(4))
#answer = concatenate([response, question_encoded])

model_1 = Sequential()
model_1.add(LSTM(32, dropout_U=0.2, dropout_W=0.2, return_sequences=True, input_shape=(None, 2048)))
model_1.add(LSTM(16, dropout_U=0.2, dropout_W=0.2, return_sequences=False))
#model.add(LSTM(16, return_sequences=False))

merged = Merge([model_1, model_2])
model = Sequential()
model.add(merged)
model.add(Dense(8, activation='softmax'))
#model.build()
#print(model.summary(90))
print("Compiled")
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
The code fails with this error when fit() is called:
raise RuntimeError('You must compile your model before using it.')
RuntimeError: You must compile your model before using it.
Clearly, I have called compile. How could I resolve the error?
It looks like the problem is that you are creating three instances of the Sequential model but only compiling the third one (the merged model).
It might be easier to use a different structure for a multi-modal network:
# Imports assumed for the functional API below (Keras 2 naming)
from keras.layers import Input, Dense, LSTM, concatenate
from keras.models import Model

input_2 = Input(shape=(500,))
model_2 = Dense(32)(input_2)
model_2 = Dense(4)(model_2)

input_1 = Input(shape=(None, 2048))
model_1 = LSTM(32, dropout_U=0.2, dropout_W=0.2, return_sequences=True)(input_1)
model_1 = LSTM(16, dropout_U=0.2, dropout_W=0.2, return_sequences=False)(model_1)

merged = concatenate([model_2, model_1])
merged = Dense(8, activation='softmax')(merged)
model = Model(inputs=[input_2, input_1], outputs=merged)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
Hope this helps!
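As a usage note for the two-input model above: fit() then takes a list of arrays, one per Input and in the same order as inputs= (the array names here are hypothetical):

model.fit([x_dense, x_seq], y, epochs=10, batch_size=32)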

Error when checking target: expected dense_2 to have shape (13,) but got array with shape (40,)

Hello, I am writing a neural network for determining figure counts from gesture images.
def get_image_size():
    img = cv2.imread('gestures/0/100.jpg', 0)
    return img.shape  # 50x50

def get_num_of_classes():
    return len(os.listdir('gestures/'))  # 13 classes
image_x, image_y = get_image_size()
The CNN Model
def cnn_model():
    num_of_classes = get_num_of_classes()
    model = Sequential()
    model.add(Conv2D(32, (5, 5), input_shape=(image_x, image_y, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
    model.add(Conv2D(64, (5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size=(5, 5), strides=(5, 5), padding='same'))
    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.4))
    model.add(Dense(num_of_classes, activation='softmax'))
    sgd = optimizers.SGD(lr=1e-4)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    filepath = "cnn_model_keras2.h5"
    checkpoint1 = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
    #checkpoint2 = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    callbacks_list = [checkpoint1]
    return model, callbacks_list
Training
def train():
    with open("train_images", "rb") as f:
        train_images = np.array(pickle.load(f))
    with open("train_labels", "rb") as f:
        train_labels = np.array(pickle.load(f), dtype=np.int32)
    with open("test_images", "rb") as f:
        test_images = np.array(pickle.load(f))
    with open("test_labels", "rb") as f:
        test_labels = np.array(pickle.load(f), dtype=np.int32)
    train_images = np.reshape(train_images, (train_images.shape[0], image_x, image_y, 1))
    test_images = np.reshape(test_images, (test_images.shape[0], image_x, image_y, 1))
    train_labels = np_utils.to_categorical(train_labels)
    test_labels = np_utils.to_categorical(test_labels)
    model, callbacks_list = cnn_model()
    model.fit(train_images, train_labels, validation_data=(test_images, test_labels),
              epochs=50, batch_size=100, callbacks=callbacks_list)
    scores = model.evaluate(test_images, test_labels, verbose=0)
    print("CNN Error: %.2f%%" % (100 - scores[1] * 100))
But I'm getting this error: ValueError: Error when checking target: expected dense_1 to have shape (13,) but got array with shape (40,). I searched for some solutions but nothing worked. If anyone has an idea how to solve it, please help.
The error says that the number of labels you have for training is not the same as the number the model predicts. Looking at the code, it seems that num_of_classes != train_labels.shape[1]. If you just have a vector of class labels such as [3, 7, 1], you can use loss='sparse_categorical_crossentropy', which will encode the targets for you during training.
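A short sketch of that suggestion, applied to the training code above (same variable names assumed). First check which raw label values are present, since a one-hot width of 40 suggests the labels are not the contiguous range 0..12; then keep the integer labels (skip to_categorical) and switch to the sparse loss:

print(np.unique(train_labels))  # inspect the raw integer labels before any to_categorical call

model.compile(loss='sparse_categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.fit(train_images, train_labels, validation_data=(test_images, test_labels),
          epochs=50, batch_size=100, callbacks=callbacks_list)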
