Errors occurred when training the facial expression recognition model - python

I am working on a project to recognise facial expressions and am training the facial expression recognition model using a convolutional neural network (CNN). In this project, I am using TensorFlow 2.4 and Python 3.8.8.
The output:
Found 18282 images belonging to 5 classes.
Found 7178 images belonging to 7 classes.
Below is the error that I got:
2023-01-11 00:09:29.625187: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX AVX2
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
c:/Users/Documents/Bachelor of Computer Science/FYP/Code/Program/Backup Test/TrainEmotionDetector.py:53: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
emotion_model_info = emotion_model.fit_generator(
Epoch 1/20
2023-01-11 00:09:31.756943: W tensorflow/tsl/framework/cpu_allocator_impl.cc:82] Allocation of 31719424 exceeds 10% of free system memory.
Traceback (most recent call last):
File "c:/Users/Documents/Bachelor of Computer Science/FYP/Code/Program/Backup Test/TrainEmotionDetector.py", line 53, in <module>
emotion_model_info = emotion_model.fit_generator(
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 2604, in fit_generator
return self.fit(
File "C:\Users\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 70, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\anaconda3\lib\site-packages\tensorflow\python\eager\execute.py", line 52, in quick_execute
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Graph execution error:
Detected at node 'categorical_crossentropy/softmax_cross_entropy_with_logits' defined at (most recent call last):
File "c:/Users/Documents/Code/Program/Backup Test/TrainEmotionDetector.py", line 53, in <module>
emotion_model_info = emotion_model.fit_generator(
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 2604, in fit_generator
return self.fit(
File "C:\Users\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 1650, in fit
tmp_logs = self.train_function(iterator)
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 1249, in train_function
return step_function(self, iterator)
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 1233, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 1222, in run_step
outputs = model.train_step(data)
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 1024, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File "C:\Users\anaconda3\lib\site-packages\keras\engine\training.py", line 1082, in compute_loss
return self.compiled_loss(
File "C:\Users\anaconda3\lib\site-packages\keras\engine\compile_utils.py", line 265, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "C:\Users\anaconda3\lib\site-packages\keras\losses.py", line 152, in __call__
losses = call_fn(y_true, y_pred)
File "C:\Users\anaconda3\lib\site-packages\keras\losses.py", line 284, in call
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "C:\Users\anaconda3\lib\site-packages\keras\losses.py", line 2004, in categorical_crossentropy
return backend.categorical_crossentropy(
File "C:\Users\anaconda3\lib\site-packages\keras\backend.py", line 5538, in categorical_crossentropy
return tf.nn.softmax_cross_entropy_with_logits(
Node: 'categorical_crossentropy/softmax_cross_entropy_with_logits'
logits and labels must be broadcastable: logits_size=[64,7] labels_size=[64,5]
[[{{node categorical_crossentropy/softmax_cross_entropy_with_logits}}]] [Op:__inference_train_function_1181]
2023-01-11 00:09:32.976764: W tensorflow/core/kernels/data/generator_dataset_op.cc:108] Error occurred when finalizing GeneratorDataset iterator: FAILED_PRECONDITION: Python interpreter state is not initialized. The process may be terminated.
[[{{node PyFunc}}]]
Below is the full code:
# import required packages
import cv2
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Flatten
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator

# Initialize image data generators with rescaling
train_data_gen = ImageDataGenerator(rescale=1./255)
validation_data_gen = ImageDataGenerator(rescale=1./255)

# Preprocess all train images
train_generator = train_data_gen.flow_from_directory(
        'data/train',
        target_size=(48, 48),
        batch_size=64,
        color_mode="grayscale",
        class_mode='categorical')

# Preprocess all test images
validation_generator = validation_data_gen.flow_from_directory(
        'data/test',
        target_size=(48, 48),
        batch_size=64,
        color_mode="grayscale",
        class_mode='categorical')

# create model structure
emotion_model = Sequential()
emotion_model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48, 48, 1)))
emotion_model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Dropout(0.25))
emotion_model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Dropout(0.25))
emotion_model.add(Flatten())
emotion_model.add(Dense(1024, activation='relu'))
emotion_model.add(Dropout(0.5))
emotion_model.add(Dense(7, activation='softmax'))

cv2.ocl.setUseOpenCL(False)

emotion_model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.0001, decay=1e-6), metrics=['accuracy'])

# Train the neural network/model
emotion_model_info = emotion_model.fit_generator(
        train_generator,
        steps_per_epoch=28709 // 64,
        epochs=20,
        validation_data=validation_generator,
        validation_steps=7178 // 64)

# save model structure in json file
model_json = emotion_model.to_json()
with open("model/emotion_model.json", "w") as json_file:
    json_file.write(model_json)

# save trained model weights in .h5 file
emotion_model.save_weights('model/emotion_model.h5')
I have upgraded TensorFlow to the latest version using pip install --upgrade tensorflow, but nothing works. The script is expected to write the saved model structure to emotion_model.json and the weights to emotion_model.h5. Please help me solve this problem.

logits and labels must be broadcastable:
logits_size=[64,7] labels_size=[64,5]
Well, you kind of gave it away when you mentioned that the first set of images had just 5 distinct labels while the next set had 7. Your data/train directory contains only 5 class folders, but the model ends in Dense(7, activation='softmax') (and data/test has 7 classes), so the 7 logits per sample can never line up with the 5-element one-hot labels coming out of the training generator. Make the class folders consistent across data/train and data/test, and size the output layer to match: prune two of those label classes (or add the two missing ones to the training set) and you'll be back in business.
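A minimal sketch of a guard against this (assuming nothing beyond the code you posted): derive the class count from the training generator instead of hard-coding 7, since flow_from_directory exposes it as num_classes.

# size the output layer from the classes the generator actually found,
# so the logits always agree with the one-hot labels
num_classes = train_generator.num_classes  # currently 5 for your data/train
emotion_model.add(Dense(num_classes, activation='softmax'))

The validation generator still has to see the same class folders as the training one; otherwise its 7-class labels would fail the same way at validation time.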

Related

Bad zip file error while loading the trained deep learning model

I trained my model in Colab and saved it with torch.save(model5, 'model.pth'), and then when I wanted to load it in PyCharm I got this error:
File "C:\Users\Amin\AppData\Local\Programs\Python\Python310\lib\zipfile.py", line 1334, in _RealGetContents
raise BadZipFile("File is not a zip file")
zipfile.BadZipFile: File is not a zip file
Can anyone help me fix this error, please? I could not find any solution for it on the internet.
I used TensorFlow to train my model and used these imports:
from tensorflow.keras.preprocessing.text import text_to_word_sequence
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import Conv1D, GlobalMaxPooling1D
My program can load the tokenizer that I have built, but it won't load the model.
This is my model:
max_features = 1000
maxlen = 650
embedding_dims = 50
filters = 250
kernel_size = 3
hidden_dims = 250
model5 = Sequential()
model5.add(Embedding(max_features, embedding_dims ))
model5.add(Dropout(0.2))
model5.add(Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1))
model5.add(GlobalMaxPooling1D())
model5.add(Dense(hidden_dims))
model5.add(Dropout(0.2))
model5.add(Activation('relu'))
model5.add(Dense(5))
model5.add(Activation('softmax'))
model5.compile(loss='categorical_crossentropy',
               optimizer='rmsprop',
               metrics=['accuracy'])
model5.fit(X_train, y_train,
           batch_size=32,
           epochs=14,
           validation_data=(X_test, y_test))
torch.save(model5, 'model.pth')
I loaded my model in Colab and it was fine, but it didn't work in PyCharm:
relative_model_path = "model.pth"
full_model_path = os.path.join(absolute_path, relative_model_path)
model = torch.load(full_model_path)
Traceback (most recent call last):
File "C:\Users\Amin\PycharmProjects\src\model\categorizer.py", line 25, in <module>
model = torch.load(full_model_path)
File "C:\Users\Amin\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\serialization.py", line 789, in load
return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args)
File "C:\Users\Amin\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\serialization.py", line 1131, in _load
result = unpickler.load()
File "C:\Users\Amin\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\saving\pickle_utils.py", line 48, in deserialize_model_from_bytecode
raise e
File "C:\Users\Amin\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\saving\pickle_utils.py", line 46, in deserialize_model_from_bytecode
model = saving_lib.load_model(filepath)
File "C:\Users\Amin\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\saving\experimental\saving_lib.py", line 196, in load_model
raise e
File "C:\Users\Amin\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\saving\experimental\saving_lib.py", line 173, in load_model
with zipfile.ZipFile(filepath, "r") as zipfile_to_load:
File "C:\Users\Amin\AppData\Local\Programs\Python\Python310\lib\zipfile.py", line 1267, in __init__
self._RealGetContents()
File "C:\Users\Amin\AppData\Local\Programs\Python\Python310\lib\zipfile.py", line 1334, in _RealGetContents
raise BadZipFile("File is not a zip file")
zipfile.BadZipFile: File is not a zip file
I just needed to save the model with Keras's own saving API, model5.save('model'), not torch.save, because the model was built in TensorFlow Keras.
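A minimal sketch of that round trip (assuming the SavedModel directory name 'model'):

from tensorflow.keras.models import load_model

# save with Keras's native serialization instead of torch.save
model5.save('model')  # writes a SavedModel directory

# ...later, in PyCharm...
model = load_model('model')  # restores architecture and weights together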

ValueError of Input and Output values during LSTM training

I was trying to implement a basic LSTM network using some random data, and I got the following error during execution of the code:
'''
Traceback (most recent call last):
File "C:/Users/dell/Desktop/test run for LSTM thingy.py", line 39, in <module>
history = model.fit(x_train, y_train, epochs=1, batch_size=16, verbose=1)
File "C:\Users\dell\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\utils\traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\dell\AppData\Local\Temp\__autograph_generated_fileu1zdna1b.py", line 15, in tf__train_function
retval_ = ag__.converted_call(ag__.ld(step_function), (ag__.ld(self), ag__.ld(iterator)), None, fscope)
ValueError: in user code:
File "C:\Users\dell\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\engine\training.py", line 1051, in train_function *
return step_function(self, iterator)
File "C:\Users\dell\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\engine\training.py", line 1040, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\Users\dell\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\engine\training.py", line 1030, in run_step **
outputs = model.train_step(data)
File "C:\Users\dell\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\engine\training.py", line 890, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File "C:\Users\dell\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\engine\training.py", line 948, in compute_loss
return self.compiled_loss(
File "C:\Users\dell\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\engine\compile_utils.py", line 201, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "C:\Users\dell\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\losses.py", line 139, in __call__
losses = call_fn(y_true, y_pred)
File "C:\Users\dell\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\losses.py", line 243, in call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "C:\Users\dell\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\losses.py", line 1787, in categorical_crossentropy
return backend.categorical_crossentropy(
File "C:\Users\dell\AppData\Local\Programs\Python\Python310\lib\site-packages\keras\backend.py", line 5119, in categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
ValueError: Shapes (None, 133, 1320) and (None, 133, 5) are incompatible
'''
This is what my code looks like at the moment:
import tensorflow as tf
x_train = tf.random.normal((28, 133, 1320))
y_train = tf.random.normal((28, 133, 1320))
model = tf.keras.Sequential()
model.add(tf.keras.layers.LSTM(5,activation='tanh',recurrent_activation='sigmoid', input_shape=(x_train.shape[1],x_train.shape[2]),return_sequences=True))
model.add(tf.keras.layers.Dense(5, activation= "softmax"))
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
history = model.fit(x_train, y_train, epochs=1, batch_size=16, verbose=1)
Could anyone help me debug this code? I need to use something similar in another side project which involves X and Y input data of similar shapes, and I was not able to find a solution to the problem. I know it has something to do with the loss function, but that's all.
Shape of Y - (28, 133, 1320)
Shape of X - (28, 133, 1320)
Categories needed - 5
You are currently trying to do categorical classification with 5 classes, but y has the shape (28, 133, 1320). It does not work like that. Also, when you use categorical_crossentropy, you need one-hot-encoded labels whose last dimension equals the number of classes. Here is a working example for orientation:
import tensorflow as tf
x_train = tf.random.normal((28, 133, 1320))
# one-hot encoded labels
y_train = tf.keras.utils.to_categorical(tf.random.uniform((28,), maxval=5, dtype=tf.int32))
model = tf.keras.Sequential()
model.add(tf.keras.layers.LSTM(5,activation='tanh',recurrent_activation='sigmoid', input_shape=(x_train.shape[1],x_train.shape[2]), return_sequences=False))
model.add(tf.keras.layers.Dense(5, activation= "softmax"))
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
history = model.fit(x_train, y_train, epochs=1, batch_size=16, verbose=1)
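If you actually need one label per timestep rather than one per sequence (an assumption; the question doesn't say which you want), a variant of the same sketch keeps return_sequences=True and one-hot encodes a label for every timestep, so y has shape (28, 133, 5) and matches the (None, 133, 5) output:

import tensorflow as tf
x_train = tf.random.normal((28, 133, 1320))
# one-hot label per timestep: shape (28, 133, 5)
y_train = tf.keras.utils.to_categorical(tf.random.uniform((28, 133), maxval=5, dtype=tf.int32))
model = tf.keras.Sequential()
model.add(tf.keras.layers.LSTM(5, activation='tanh', recurrent_activation='sigmoid', input_shape=(x_train.shape[1], x_train.shape[2]), return_sequences=True))
model.add(tf.keras.layers.Dense(5, activation="softmax"))  # applied to every timestep
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=1, batch_size=16, verbose=1)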

Using Keras Without GPU

I want to train a neural network using Keras, but when I build the model I get the following error:
2022-03-14 09:38:10.526372: E tensorflow/stream_executor/cuda/cuda_driver.cc:271] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected
2022-03-14 09:38:10.526465: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (HSKP02): /proc/driver/nvidia/version does not exist
2022-03-14 09:38:10.527391: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
I tried to solve this error by writing
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = "-1"
before importing Keras, but I still get this error. After this, my code fits the data with the validation set using model.fit(), but I get another error:
Traceback (most recent call last):
File "shallownet_ex.py", line 44, in <module>
H = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=32, epochs=100, verbose=1)
File ".../venv/lib/python3.8/site-packages/keras/utils/traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File ".../venv/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py", line 1147, in autograph_handler
raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:
File ".../venv/lib/python3.8/site-packages/keras/engine/training.py", line 1021, in train_function *
return step_function(self, iterator)
File ".../venv/lib/python3.8/site-packages/keras/engine/training.py", line 1010, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File ".../venv/lib/python3.8/site-packages/keras/engine/training.py", line 1000, in run_step **
outputs = model.train_step(data)
File ".../venv/lib/python3.8/site-packages/keras/engine/training.py", line 860, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File ".../venv/lib/python3.8/site-packages/keras/engine/training.py", line 918, in compute_loss
return self.compiled_loss(
File ".../venv/lib/python3.8/site-packages/keras/engine/compile_utils.py", line 201, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File ".../venv/lib/python3.8/site-packages/keras/losses.py", line 141, in __call__
losses = call_fn(y_true, y_pred)
File ".../venv/lib/python3.8/site-packages/keras/losses.py", line 245, in call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File ".../venv/lib/python3.8/site-packages/keras/losses.py", line 1789, in categorical_crossentropy
return backend.categorical_crossentropy(
File ".../venv/lib/python3.8/site-packages/keras/backend.py", line 5083, in categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
ValueError: Shapes (None, 4) and (None, 3) are incompatible
The code I'm using looks like this
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = "-1"
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from preprocesing import ImageToArrayPreprocessor, SimplePreprocesssor
from datasets import SimpleDatasetLoader
from neuralnetworks.conv import ShallowNet
from keras.optimizers import gradient_descent_v2
from imutils import paths
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="path to input dataset")
args = vars(ap.parse_args())
imagePaths = list(paths.list_images(args["dataset"]))
sp = SimplePreprocesssor(32, 32)
iap = ImageToArrayPreprocessor()
sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.25, random_state=42)
trainY = LabelBinarizer().fit_transform(trainY)
testY = LabelBinarizer().fit_transform(testY)
opt = gradient_descent_v2.SGD(learning_rate=0.005)
model = ShallowNet.build(width=32, height=32, depth=3, classes=3)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=['acc'])
H = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=32, epochs=100, verbose=1)
The SimpleDatasetLoader just loads the images and the SimplePreprocesssor just resizes them. I think the error is inside shallownet.py, which looks like this:
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from keras.models import Sequential
from keras.layers.convolutional import Conv2D
from keras.layers.core import Activation, Flatten, Dense
from keras import backend as K
class ShallowNet():
    @staticmethod
    def build(width, height, depth, classes):
        model = Sequential()
        inputShape = (height, width, depth)
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)
        model.add(Conv2D(32, (3, 3), padding="same", input_shape=inputShape))
        model.add(Activation("relu"))
        model.add(Flatten())
        model.add(Dense(classes))
        model.add(Activation("softmax"))
        return model
What I deduce is that since my computer doesn't have a GPU, I can't perform the training and therefore can't fit the model. Is there a way to perform this training?
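For what it's worth, a sketch of a different reading of the traceback (an assumption, since the dataset isn't shown): the CUDA lines are only informational and TensorFlow falls back to the CPU, while the actual failure is the shape mismatch (None, 4) vs (None, 3), i.e. LabelBinarizer produced 4 label columns while ShallowNet was built with classes=3. Deriving the class count from the binarized labels, and reusing one binarizer for both splits, avoids the hard-coded mismatch:

# fit one binarizer on the training labels and reuse it for the test labels
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

# build the network with as many outputs as there are label columns
model = ShallowNet.build(width=32, height=32, depth=3, classes=trainY.shape[1])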

Tensorflow TypeError: Can not convert a NoneType into a Tensor or Operation

I am learning TensorFlow and was going through this step-by-step guide. The code below is exactly the same as on the website. However, when running it, I get an error when trying to fit the model. The full traceback I get is as follows:
Traceback (most recent call last):
File "C:\users\name\desktop\python ml tutorial\embedding.py", line 49, in <module>
model.fit(x=padded_docs, y=labels, epochs=50, verbose=0)
File "C:\Users\name\AppData\Local\Programs\Python\Python37\lib\site-packages\keras\engine\training.py", line 1213, in fit
self._make_train_function()
File "C:\Users\name\AppData\Local\Programs\Python\Python37\lib\site-packages\keras\engine\training.py", line 316, in _make_train_function
loss=self.total_loss)
File "C:\Users\name\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\optimizer_v2\optimizer_v2.py", line 506, in get_updates
return [self.apply_gradients(grads_and_vars)]
File "C:\Users\name\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\optimizer_v2\optimizer_v2.py", line 441, in apply_gradients
kwargs={"name": name})
File "C:\Users\name\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\distribute\distribute_lib.py", line 1917, in merge_call
return self._merge_call(merge_fn, args, kwargs)
File "C:\Users\name\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\distribute\distribute_lib.py", line 1924, in _merge_call
return merge_fn(self._strategy, *args, **kwargs)
File "C:\Users\name\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\optimizer_v2\optimizer_v2.py", line 494, in _distributed_apply
with ops.control_dependencies(update_ops):
File "C:\Users\name\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\framework\ops.py", line 5257, in control_dependencies
return get_default_graph().control_dependencies(control_inputs)
File "C:\Users\name\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\framework\func_graph.py", line 356, in control_dependencies
return super(FuncGraph, self).control_dependencies(filtered_control_inputs)
File "C:\Users\name\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\framework\ops.py", line 4691, in control_dependencies
c = self.as_graph_element(c)
File "C:\Users\name\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\framework\ops.py", line 3610, in as_graph_element
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
File "C:\Users\name\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\framework\ops.py", line 3699, in _as_graph_element_locked
(type(obj).__name__, types_str))
TypeError: Can not convert a NoneType into a Tensor or Operation.
And the full code is below:
from numpy import array
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.embeddings import Embedding
# define document
docs = ['Well done!',
'Good work',
'Great effort',
'nice work',
'Excellent!',
'Weak',
'Poor effort!',
'not good',
'poor work',
'Could have done better.']
# define class labels
labels = array([1,1,1,1,1,0,0,0,0,0])
# integer-encode the documents
vocab_size = 50
encoded_docs = [one_hot(d, vocab_size) for d in docs]
print(encoded_docs)
# padding
max_length = 4
padded_docs = pad_sequences(encoded_docs, maxlen = max_length, padding = 'post')
print(padded_docs)
# define model
model = Sequential()
model.add(Embedding(vocab_size, 8, input_length=max_length))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
# compile the model
model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics=['accuracy'])
# summarize
print(model.summary())
# fit the model
model.fit(x=padded_docs, y=labels, epochs=50, verbose=0)
# evaluate the model
loss, accuracy = model.evaluate(x=padded_docs, y=labels, verbose=0)
print("Accuracy: {}".format(accuracy))
What's going on here? The article was originally written back in 2017 but its last revision and update was just a week ago. I imagine they are constantly tweaking TensorFlow since it is still state-of-the-art and needs a lot of improvement.
Any ideas on how to circumvent this?
Edit:
I began trying to figure out where the script could have gone wrong. I will list what I found here; hopefully it will help us spot something:
I found that in ops.py's control_dependencies() function, the control_inputs parameter has the following values: Tensor("Adam/gradients/gradients/loss/dense_1_loss/binary_crossentropy/logistic_loss_grad/Reshape_1:0", shape=(None, 1), dtype=float32), Tensor("Adam/gradients/gradients/loss/dense_1_loss/binary_crossentropy/logistic_loss/Log1p_grad/mul:0", shape=(None, 1), dtype=float32), and None. When it becomes None, the program crashes.
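One thing that stands out in that traceback (an observation, not a confirmed diagnosis): the frames alternate between the standalone keras package and tensorflow_core\python\keras, so two different Keras implementations are being mixed. A sketch that keeps every import inside tf.keras, which often makes this NoneType error go away:

from numpy import array
from tensorflow.keras.preprocessing.text import one_hot
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Embedding

The rest of the script can stay as it is; only the import lines change.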

Trying to fix the construction of a neural network (error message: negative dimension?)

This is a model based on the description on page 12, section B.3 of the paper https://arxiv.org/pdf/1609.04836.pdf
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization

img_size = (3, 32, 32)
...
def shallownet(nb_classes):
    global img_size
    model = Sequential()
    model.add(Conv2D(64, (5, 5), input_shape=img_size, data_format='channels_first'))
    model.add(BatchNormalization(axis=1))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same'))
    model.add(Conv2D(64, (5, 5), padding='same', data_format='channels_first'))
    model.add(BatchNormalization(axis=1))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same'))
    model.add(Flatten())
    model.add(Dense(384))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(192))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes, activation='softmax'))
    return model
This is my attempt to convert it for Keras 2.0. Here is the original code: https://github.com/keskarnitish/large-batch-training/blob/master/network_zoo.py
I try to run the following code:
parser = argparse.ArgumentParser(description=
    '''This code first trains the user-specific network (C[1-4])
    using small-batch ADAM and large-batch ADAM, and then plots
    the parametric plot connecting the two minimizers
    illustrating the sharpness difference.''')
parser.add_argument('-n', '--network', help='''Selects which network
    to plot the parametric plots for.
    Choices are C1, C2, C3 and C4.''', required=True)
network_choice = vars(parser.parse_args())['network']
nb_epoch = 20
# the data, shuffled and split between train and test sets
if network_choice in ['C1', 'C2']:
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    nb_classes = 10
elif network_choice in ['C3', 'C4']:
    (X_train, y_train), (X_test, y_test) = cifar100.load_data()
    nb_classes = 100
else:
    raise ValueError('''Invalid choice of network.
    Please choose one of C1, C2, C3 or C4.
    Refer to the paper for details regarding these networks''')
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
# build the network
if network_choice in ['C1', 'C3']:
    model = network_zoo.shallownet(nb_classes)
elif network_choice in ['C2', 'C4']:
    model = network_zoo.deepnet(nb_classes)
# let's train the model using Adam
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.save_weights('x0.h5')
# let's first find the small-batch solution
model.fit(X_train, Y_train,
          batch_size=256,
          nb_epoch=nb_epoch,
          validation_data=(X_test, Y_test),
          shuffle=True)
And receive the following error message:
Traceback (most recent call last):
File "plot_parametric_plot.py", line 64, in <module>
model = network_zoo.shallownet(nb_classes)
File "/mnt_home/klee/LBSBGenGapSharpnessResearch/network_zoo.py", line 36, in shallownet
model.add(Conv2D(64, (5, 5), input_shape=img_size))
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/keras/engine/sequential.py", line 166, in add
layer(x)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py", line 75, in symbolic_fn_wrapper
return func(*args, **kwargs)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/keras/engine/base_layer.py", line 489, in __call__
output = self.call(inputs, **kwargs)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/keras/layers/convolutional.py", line 171, in call
dilation_rate=self.dilation_rate)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py", line 3717, in conv2d
**kwargs)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/tensorflow_core/python/ops/nn_ops.py", line 918, in convolution_v2
name=name)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/tensorflow_core/python/ops/nn_ops.py", line 1010, in convolution_internal
name=name)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/tensorflow_core/python/ops/gen_nn_ops.py", line 969, in conv2d
data_format=data_format, dilations=dilations, name=name)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/tensorflow_core/python/framework/op_def_library.py", line 742, in _apply_op_helper
attrs=attr_protos, op_def=op_def)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/tensorflow_core/python/framework/func_graph.py", line 595, in _create_op_internal
compute_device)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py", line 3322, in _create_op_internal
op_def=op_def)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py", line 1786, in __init__
control_input_ops)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py", line 1622, in _create_c_op
raise ValueError(str(e))
ValueError: Negative dimension size caused by subtracting 5 from 3 for 'conv2d_1/convolution' (op: 'Conv2D') with input shapes: [?,3,32,32], [5,5,32,64].
Though I'm not sure what this means...
UPDATE: after applying the suggestion from Marco Celani's comment, here's the new error message I receive when running the main code:
Traceback (most recent call last):
File "plot_parametric_plot.py", line 64, in <module>
model = network_zoo.shallownet(nb_classes)
File "/mnt_home/klee/LBSBGenGapSharpnessResearch/network_zoo.py", line 36, in shallownet
model.add(Conv2D(64, (5, 5), input_shape=img_size, data_format='channels_first'))
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/keras/engine/sequential.py", line 166, in add
layer(x)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py", line 75, in symbolic_fn_wrapper
return func(*args, **kwargs)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/keras/engine/base_layer.py", line 489, in __call__
output = self.call(inputs, **kwargs)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/keras/layers/convolutional.py", line 171, in call
dilation_rate=self.dilation_rate)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py", line 3701, in conv2d
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py", line 3574, in _preprocess_conv2d_input
if not _has_nchw_support() or force_transpose:
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py", line 522, in _has_nchw_support
gpus_available = len(_get_available_gpus()) > 0
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py", line 506, in _get_available_gpus
_LOCAL_DEVICES = tf.config.experimental_list_devices()
AttributeError: module 'tensorflow_core._api.v2.config' has no attribute 'experimental_list_devices'
This means that you can't apply the operation because you have reduced the dimensions inside your network too much (below 0). Looking at your data, it seems your images are (3, 32, 32), so the channels are in the first dimension, while Keras by default applies convolutions with channels in the last dimension. To get past the error, try defining data_format='channels_first' in the convolutional and pooling layers.
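If channels_first then runs into backend trouble like the experimental_list_devices error in the UPDATE (an old-Keras/new-TensorFlow incompatibility hit while Keras probes for devices), an alternative sketch, assuming the arrays really are (N, 3, 32, 32) as img_size suggests, is to move the channel axis last and let Keras use its default data format:

# move channels from axis 1 to the last axis: (N, 3, 32, 32) -> (N, 32, 32, 3)
X_train = X_train.transpose(0, 2, 3, 1)
X_test = X_test.transpose(0, 2, 3, 1)
img_size = (32, 32, 3)  # channels_last input shape for shallownet

With channels_last, the data_format arguments (and BatchNormalization(axis=1)) can be dropped.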
