I keep geting error form model.fil() keras tensorflow - python

I was trying to train a model for an AI chatbot !
but i keep geting this error below
in the line code " model.fit(X, Y, epochs=200, batch_size=5, verbose=1) "
import numpy as np
import random
import json
import pickle
import nltk
from nltk.stem import WordNetLemmatizer
from keras import Sequential
from keras.layers import advanced_activations, Dense, Dropout
from tensorflow.keras.optimizers import SGD
# from keras.utils.np_utils import to_categorical
# import os
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
lemmatizer = WordNetLemmatizer()
intents = json.loads(open("intents.json").read())
words = []
classes = []
documments = []
ignore_letters = ['?', '!', '.', '-', '_', ',']
for intent in intents['intents']:
for pattern in intent['patterns']:
word_list = nltk.word_tokenize(pattern)
words.extend(word_list)
documments.append((word_list, intent['tag']))
if intent['tag'] not in classes:
classes.append(intent['tag'])
words = [lemmatizer.lemmatize(word) for word in words if word not in ignore_letters]
words = sorted(set(words))
classes = sorted(set(classes))
pickle.dump(words, open('words.pkl', 'wb'))
pickle.dump(words, open('classes.pkl', 'wb'))
training = []
output_empty = [0] * len(classes)
for documment in documments:
bag = []
word_patterns = documment[0]
word_patterns = [lemmatizer.lemmatize(word.lower()) for word in word_patterns]
for word in words:
bag.append(1) if word in word_patterns else bag.append(0)
output_row = list(output_empty)
output_row[classes.index(documment[1])] = 1
training.append([bag, output_row])
random.shuffle(training)
training = np.array(training, dtype=object)
train_x = list(training[:, 0])
train_y = list(training[:, 1])
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(128, activation='softmax'))
sgd = SGD(learning_rate=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
X = np.array(train_x)
Y = np.array(train_y)
model.fit(X, Y, epochs=200, batch_size=5, verbose=1)
model.save('Chatbot_depression_model.model')
print("Done")
for the output
2022-05-23 21:48:47.830167: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2022-05-23 21:48:47.841231: I tensorflow/core/common_runtime/process_util.cc:146] Creating new thread pool with default inter op setting: 2. Tune using inter_op_parallelism_threads for best performance.
2022-05-23 21:48:48.367785: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:185] None of the MLIR Optimization Passes are enabled (registered 2)
Traceback (most recent call last):
File "C:\Users\Abdel\Desktop\Mading\train.py", line 69, in
model.fit(X,Y, epochs=200, batch_size=5, verbose=1)
File "C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\keras\engine\training.py", line 1184, in fit
tmp_logs = self.train_function(iterator)
File "C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\tensorflow\python\eager\def_function.py", line 885, in call
result = self._call(*args, **kwds)
File "C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\tensorflow\python\eager\def_function.py", line 933, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File "C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\tensorflow\python\eager\def_function.py", line 759, in _initialize
self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
File "C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\tensorflow\python\eager\function.py", line 3066, in _get_concrete_function_internal_garbage_collected
graph_function, _ = self._maybe_define_function(args, kwargs)
File "C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\tensorflow\python\eager\function.py", line 3463, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\tensorflow\python\eager\function.py", line 3298, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File "C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\tensorflow\python\framework\func_graph.py", line 1007, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\tensorflow\python\eager\def_function.py", line 668, in wrapped_fn
out = weak_wrapped_fn().wrapped(*args, **kwds)
File "C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\tensorflow\python\framework\func_graph.py", line 994, in wrapper
raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:
C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\keras\engine\training.py:853 train_function *
return step_function(self, iterator)
C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\keras\engine\training.py:842 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1286 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2849 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:3632 _call_for_each_replica
return fn(*args, **kwargs)
C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\keras\engine\training.py:835 run_step **
outputs = model.train_step(data)
C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\keras\engine\training.py:788 train_step
loss = self.compiled_loss(
C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\keras\engine\compile_utils.py:201 __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\keras\losses.py:141 __call__
losses = call_fn(y_true, y_pred)
C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\keras\losses.py:245 call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\tensorflow\python\util\dispatch.py:206 wrapper
return target(*args, **kwargs)
C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\keras\losses.py:1665 categorical_crossentropy
return backend.categorical_crossentropy(
C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\tensorflow\python\util\dispatch.py:206 wrapper
return target(*args, **kwargs)
C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\keras\backend.py:4839 categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
C:\Users\Abdel\anaconda3\envs\Mading\lib\site-packages\tensorflow\python\framework\tensor_shape.py:1161 assert_is_compatible_with
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
ValueError: Shapes (None, 11) and (None, 128) are incompatible
Process finished with exit code 1

Related

Convolution neural network for image segmentation with tensorflow

I am trying to make my first neural network using Tensorflow. I have some medical images and my goal is to segment them. I can't find what I am doing wrong. Here is the error :
2021-05-08 14:33:15.249134: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:116] None of the MLIR optimization passes are enabled (registered 2)
Epoch 1/50
Traceback (most recent call last):
File "C:/Users/tompi/PycharmProjects/ProjetDeepLearning/test.py", line 185, in <module>
history = model.fit(X_train, Y_train, epochs=epochs,
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1100, in fit
tmp_logs = self.train_function(iterator)
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\eager\def_function.py", line 828, in __call__
result = self._call(*args, **kwds)
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\eager\def_function.py", line 871, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\eager\def_function.py", line 725, in _initialize
self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\eager\function.py", line 2969, in _get_concrete_function_internal_garbage_collected
graph_function, _ = self._maybe_define_function(args, kwargs)
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\eager\function.py", line 3361, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\eager\function.py", line 3196, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\framework\func_graph.py", line 990, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\eager\def_function.py", line 634, in wrapped_fn
out = weak_wrapped_fn().__wrapped__(*args, **kwds)
File "C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\framework\func_graph.py", line 977, in wrapper
raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:
C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\keras\engine\training.py:805 train_function *
return step_function(self, iterator)
C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\keras\engine\training.py:795 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1259 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2730 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:3417 _call_for_each_replica
return fn(*args, **kwargs)
C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\keras\engine\training.py:788 run_step **
outputs = model.train_step(data)
C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\keras\engine\training.py:754 train_step
y_pred = self(x, training=True)
C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:998 __call__
input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
C:\Users\tompi\anaconda3\envs\tf2.4\lib\site-packages\tensorflow\python\keras\engine\input_spec.py:204 assert_input_compatibility
raise ValueError('Layer ' + layer_name + ' expects ' +
ValueError: Layer sequential expects 1 input(s), but it received 44 input tensors. Inputs received: ...
Below my code :
import tensorflow as tf
import pandas as pd
import numpy as np
import tensorflow.keras
import segmentation_models as sm
import os
import cv2
import Metrics as metrics # a python file
import matplotlib.pyplot as plt
from tensorflow.keras import datasets, layers, models
from sklearn.model_selection import train_test_split
width = 672
height = 448
dataframe = []
def normalize(path):
image = cv2.imread(path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (width, height))
# newSize = np.zeros((height, width, 3))
# newSize[:, :, 0] = image[:, :]
# newSize[:, :, 1] = image[:, :]
# newSize[:, :, 2] = image[:, :]
return image
def createDataset():
for folder in os.listdir(imagesPath):
for imageName in os.listdir(imagesPath + folder):
image = normalize(imagesPath + folder + "/" + imageName)
dataframe.append([folder, imageName, image])
createDataset()
df = pd.DataFrame(dataframe, columns=['Folder', 'Name', 'Image'])
def getImagesFromFolder(folder):
L = []
n, p = np.shape(df)
for i in range(n):
if df['Folder'][i] == folder:
L.append(df.iloc[i][2])
return L
originalImages = getImagesFromFolder('Original')
maskImages = getImagesFromFolder('Mask')
X_train, X_test, Y_train, Y_test = train_test_split(originalImages, maskImages, train_size=0.8, random_state=42)
classes = 3
activation = "softmax"
lr = 0.0001
loss = sm.losses.jaccard_loss
metrics = training_metrics = [
sm.metrics.IOUScore(threshold=0.5),
sm.metrics.FScore(threshold=0.5),
sm.metrics.Precision(),
sm.metrics.Recall(),
metrics.dice_coef
]
batch_size = 3
epochs = 50
callbacks = [tensorflow.keras.callbacks.ReduceLROnPlateau()]
I am using a very simple Unet :
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(height, width, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.summary()
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))
model.summary()
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
history = model.fit(X_train, Y_train, epochs=epochs,
validation_data=(X_test, Y_test))
The error says that the input receives 44 tensors which is the number of images in X_train and Y_train (44, 448, 672, 3) but I don't know what I am doing wrong, I saw several posts having the same shape and it worked. Can anyone help we. It would be greatly appreciated.
Thanks.
I found out what was the error. The type of my variables X_train, Y_train, X_test, Y_test was list and not numpy.ndarray because of my function getImagesFromFolder. I had to return np.array(L) to make it run.

Input 0 of layer dense is incompatible with the layer: expected axis -1 of input shape to have value 8192 but received input with shape (None, 61608)

I am trying to create an image processing CNN. I am using VGG16 to speed up some of the learning process. The creation of my CNN below works to the point of training and saving the model & weights. The issue occurs when I try to run a predict function after loading in the model.
image_gen = ImageDataGenerator()
train = image_gen.flow_from_directory('./data/train', class_mode='categorical', shuffle=False, batch_size=10, target_size=(151, 136))
val = image_gen.flow_from_directory('./data/validate', class_mode='categorical', shuffle=False, batch_size=10, target_size=(151, 136))
pretrained_model = VGG16(include_top=False, input_shape=(151, 136, 3), weights='imagenet')
pretrained_model.summary()
vgg_features_train = pretrained_model.predict(train)
vgg_features_val = pretrained_model.predict(val)
train_target = to_categorical(train.labels)
val_target = to_categorical(val.labels)
model = Sequential()
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(2, activation='softmax'))
model.compile(optimizer='rmsprop', metrics=['accuracy'], loss='categorical_crossentropy')
target_dir = './models/weights-improvement'
if not os.path.exists(target_dir):
os.mkdir(target_dir)
checkpoint = ModelCheckpoint(filepath=target_dir + 'weights-improvement-{epoch:02d}-{val_accuracy:.2f}.hdf5', monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
model.fit(vgg_features_train, train_target, epochs=100, batch_size=8, validation_data=(vgg_features_val, val_target), callbacks=callbacks_list)
model.save('./models/model')
model.save_weights('./models/weights')
I have this predict function, that I would like to load in an image, and then return the categorisation of this image that the model gives.
from keras.preprocessing.image import load_img, img_to_array
def predict(file):
x = load_img(file, target_size=(151,136,3))
x = img_to_array(x)
print(x.shape)
print(x.shape)
x = np.expand_dims(x, axis=0)
array = model.predict(x)
result = array[0]
if result[0] > result[1]:
if result[0] > 0.9:
print("Predicted answer: Buy")
answer = 'buy'
print(result)
print(array)
else:
print("Predicted answer: Not confident")
answer = 'n/a'
print(result)
else:
if result[1] > 0.9:
print("Predicted answer: Sell")
answer = 'sell'
print(result)
else:
print("Predicted answer: Not confident")
answer = 'n/a'
print(result)
return answer
The issue I am experiencing is when I run this predict function, I get the following error.
File "predict-binary.py", line 24, in predict
array = model.predict(x)
File ".venv\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1629, in predict
tmp_batch_outputs = self.predict_function(iterator)
File ".venv\lib\site-packages\tensorflow\python\eager\def_function.py", line 828, in __call__
result = self._call(*args, **kwds)
File ".venv\lib\site-packages\tensorflow\python\eager\def_function.py", line 871, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File ".venv\lib\site-packages\tensorflow\python\eager\def_function.py", line 725, in _initialize
self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
File ".venv\lib\site-packages\tensorflow\python\eager\function.py", line 2969, in _get_concrete_function_internal_garbage_collected
graph_function, _ = self._maybe_define_function(args, kwargs)
File ".venv\lib\site-packages\tensorflow\python\eager\function.py", line 3361, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File ".venv\lib\site-packages\tensorflow\python\eager\function.py", line 3196, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File ".venv\lib\site-packages\tensorflow\python\framework\func_graph.py", line 990, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File ".venv\lib\site-packages\tensorflow\python\eager\def_function.py", line 634, in wrapped_fn
out = weak_wrapped_fn().__wrapped__(*args, **kwds)
File ".venv\lib\site-packages\tensorflow\python\framework\func_graph.py", line 977, in wrapper
raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:
.venv\lib\site-packages\tensorflow\python\keras\engine\training.py:1478 predict_function *
return step_function(self, iterator)
.venv\lib\site-packages\tensorflow\python\keras\engine\training.py:1468 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
.venv\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1259 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
.venv\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2730 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
.venv\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:3417 _call_for_each_replica
return fn(*args, **kwargs)
.venv\lib\site-packages\tensorflow\python\keras\engine\training.py:1461 run_step **
outputs = model.predict_step(data)
.venv\lib\site-packages\tensorflow\python\keras\engine\training.py:1434 predict_step
return self(x, training=False)
.venv\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:1012 __call__
outputs = call_fn(inputs, *args, **kwargs)
.venv\lib\site-packages\tensorflow\python\keras\engine\sequential.py:375 call
return super(Sequential, self).call(inputs, training=training, mask=mask)
.venv\lib\site-packages\tensorflow\python\keras\engine\functional.py:424 call
return self._run_internal_graph(
.venv\lib\site-packages\tensorflow\python\keras\engine\functional.py:560 _run_internal_graph
outputs = node.layer(*args, **kwargs)
.venv\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:998 __call__
input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
.venv\lib\site-packages\tensorflow\python\keras\engine\input_spec.py:255 assert_input_compatibility
raise ValueError(
ValueError: Input 0 of layer dense is incompatible with the layer: expected axis -1 of input shape to have value 8192 but received input with shape (None, 61608)
I'm assuming I need to change something between the Flatten() and Dense() layers of my model, but I'm not sure what. I attempted to add model.add(Dense(61608, activation='relu)) between these two as that seemed to be what was suggested in another post I saw (cannot find link now), but it lead to the same error. (I tried it with 8192 instead of 61608 as well). Any help is appreciated, thanks.
EDIT #1:
Changing the model creation/training code as I think it was suggested by Gerry P to this
img_shape = (151,136,3)
base_model=VGG19( include_top=False, input_shape=img_shape, pooling='max', weights='imagenet' )
x=base_model.output
x=Dense(100, activation='relu')(x)
x=Dropout(0.5)(x)
x=BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
output=Dense(2, activation='softmax')(x)
model=Model(inputs=base_model.input, outputs=output)
image_gen = ImageDataGenerator()
train = image_gen.flow_from_directory('./data/train', class_mode='categorical', shuffle=False, batch_size=10, target_size=(151, 136))
val = image_gen.flow_from_directory('./data/validate', class_mode='categorical', shuffle=False, batch_size=10, target_size=(151, 136))
vgg_features_train = base_model.predict(train)
vgg_features_val = base_model.predict(val)
train_target = to_categorical(train.labels)
val_target = to_categorical(val.labels)
model.compile(optimizer='rmsprop', metrics=['accuracy'], loss='categorical_crossentropy')
model.fit(vgg_features_train, train_target, epochs=100, batch_size=8, validation_data=(vgg_features_val, val_target), callbacks=callbacks_list)
This resulted in a different input shape error of File "train-binary.py", line 37, in <module> model.fit(vgg_features_train, train_target, epochs=100, batch_size=8, validation_data=(vgg_features_val, val_target), callbacks=callbacks_list) ValueError: Input 0 is incompatible with layer model: expected shape=(None, 151, 136, 3), found shape=(None, 512)
your model is expecting to see an input for model.predict that has the same dimensions as it was trained on. In this case it is the dimensions of vgg_features_train.The input to model.predict that you are generating is for the input to the VGG model. You are essentially trying to do transfer learning so I suggest you proceed as below
base_model=tf.keras.applications.VGG19( include_top=False, input_shape=img_shape, pooling='max', weights='imagenet' )
x=base_model.output
x=Dense(100, activation='relu'))(x)
x=Dropout(0.5)(x)
x=BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
output=Dense(2, activation='softmax')(x)
model=Model(inputs=base_model.input, outputs=output)
model.fit( train, epochs=100, batch_size=8, validation_data=val, callbacks=callbacks_list)
now for prediction you can use the same dimensions as you used to train the model.

TypeError: '<' not supported between instances of 'function' and 'str'

I have built a sequential model with a customized f1 score metric. I pass this during the compilation of my model and then save it in *.hdf5 format. Whenever I load the model for testing purposes using the custom_objects attribute
model = load_model('app/model/test_model.hdf5', custom_objects={'f1':f1})
Keras throws the following error
TypeError: '<' not supported between instances of 'function' and 'str'
Note: No errors are shown if I don't include the f1 metric during compilation, and the testing process works well.
Train method
from metrics import f1
...
# GRU with glove embeddings and two dense layers
model = Sequential()
model.add(Embedding(len(word_index) + 1,
100,
weights=[embedding_matrix],
input_length=max_len,
trainable=False))
model.add(SpatialDropout1D(0.3))
model.add(GRU(100, dropout=0.3, recurrent_dropout=0.3, return_sequences=True))
model.add(GRU(100, dropout=0.3, recurrent_dropout=0.3))
model.add(Dense(1024, activation='relu'))
#model.add(Dropout(0.8))
model.add(Dense(1024, activation='relu'))
#model.add(Dropout(0.8))
model.add(Dense(2))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc', f1])
# Fit the model with early stopping callback
earlystop = EarlyStopping(monitor='val_loss', min_delta=0, patience=3, verbose=0, mode='auto')
model.fit(xtrain_pad, y=ytrain_enc, batch_size=512, epochs=100,
verbose=1, validation_data=(xvalid_pad, yvalid_enc), callbacks=[earlystop])
model.save('app/model/test_model.hdf5')
Test method
from metrics import f1
...
model = load_model('app/model/test_model.hdf5', custom_objects={'f1':f1})
print(model.summary())
model.evaluate(xtest_pad, ytest_enc) # <-- error happens
Custom f1 metric
from keras import backend as K
def f1(y_true, y_pred):
def recall(y_true, y_pred):
"""Recall metric.
Only computes a batch-wise average of recall.
Computes the recall, a metric for multi-label classification of
how many relevant items are selected.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
precision = precision(y_true, y_pred)
recall = recall(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
EDIT
test
The preprocessed data used for evaluating the model
normalized_dataset = pd.read_pickle(DATA['preprocessed_test_path'])
lbl_enc = preprocessing.LabelEncoder()
y = lbl_enc.fit_transform(normalized_dataset.label.values)
xtest = normalized_dataset.preprocessed_tweets.values
ytest_enc = np_utils.to_categorical(y)
token = text.Tokenizer(num_words=None)
max_len = 70
token.fit_on_texts(list(xtest))
xtest_seq = token.texts_to_sequences(xtest)
xtest_pad = sequence.pad_sequences(xtest_seq, maxlen=max_len)
EDIT2
This is my full traceback that triggers the stated error
Traceback (most recent call last):
File "app/main.py", line 67, in <module>
main()
File "app/main.py", line 64, in main
test(embedding_dict)
File "/Users/justauser/Desktop/sentiment-analysis/app/test.py", line 50, in test
model.evaluate(xtest_pad, ytest_enc)
File "/Users/justauser/Desktop/sentiment-analysis/env/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py", line 1389, in evaluate
tmp_logs = self.test_function(iterator)
File "/Users/justauser/Desktop/sentiment-analysis/env/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py", line 828, in __call__
result = self._call(*args, **kwds)
File "/Users/justauser/Desktop/sentiment-analysis/env/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py", line 871, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File "/Users/justauser/Desktop/sentiment-analysis/env/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py", line 725, in _initialize
self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
File "/Users/justauser/Desktop/sentiment-analysis/env/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 2969, in _get_concrete_function_internal_garbage_collected
graph_function, _ = self._maybe_define_function(args, kwargs)
File "/Users/justauser/Desktop/sentiment-analysis/env/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 3361, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "/Users/justauser/Desktop/sentiment-analysis/env/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 3196, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File "/Users/justauser/Desktop/sentiment-analysis/env/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py", line 990, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/Users/justauser/Desktop/sentiment-analysis/env/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py", line 634, in wrapped_fn
out = weak_wrapped_fn().__wrapped__(*args, **kwds)
File "/Users/justauser/Desktop/sentiment-analysis/env/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py", line 977, in wrapper
raise e.ag_error_metadata.to_exception(e)
TypeError: in user code:
/Users/justauser/Desktop/sentiment-analysis/env/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:1233 test_function *
return step_function(self, iterator)
/Users/justauser/Desktop/sentiment-analysis/env/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:1224 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
/Users/justauser/Desktop/sentiment-analysis/env/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:1259 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/Users/justauser/Desktop/sentiment-analysis/env/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2730 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/Users/justauser/Desktop/sentiment-analysis/env/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:3417 _call_for_each_replica
return fn(*args, **kwargs)
/Users/justauser/Desktop/sentiment-analysis/env/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:1219 run_step **
with ops.control_dependencies(_minimum_control_deps(outputs)):
/Users/justauser/Desktop/sentiment-analysis/env/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:2793 _minimum_control_deps
outputs = nest.flatten(outputs, expand_composites=True)
/Users/justauser/Desktop/sentiment-analysis/env/lib/python3.8/site-packages/tensorflow/python/util/nest.py:341 flatten
return _pywrap_utils.Flatten(structure, expand_composites)
TypeError: '<' not supported between instances of 'function' and 'str'
After model.load() if you compile your model again with the custom metric then it should work.
Therefore, after loading your model from disk using
model = load_model('app/model/test_model.hdf5', custom_objects={'f1':f1})
Make sure to compile it with the metrics of interest
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc', f1])
As #Zaccharie Ramzi pointed out, the accepted answer is not suitable if you want to load model to resume training, as the compiling step will reset the optimizer state (this is anyway fine if you want only to evaluate or test the model). If you want to load the model to resume training you can workaround this issue recompiling the loaded model using the output of the load:
model = load_model('app/model/test_model.hdf5', custom_objects={'f1':f1})
model.compile(loss=model.loss, optimizer=model.optimizer, metrics=['acc', f1])
As a reference see the github issue where this solution was posted first.
This solved my problem!
The score of the in-memory model and the score on load_model is the same.
scores = model.evaluate(X, y, verbose=2, batch_size = batch_size)

ValueError: Can not squeeze dim[1], expected a dimension of 1

EDIT: I got past that error message by reshaping my data as follows:
train_x = np.array(train_x)
train_y = np.array(train_y)
x_size = train_x.shape[0] * train_x.shape[1]
train_x = train_x.reshape(x_size, train_x.shape[2])
train_x = np.expand_dims(train_x, 1)
train_x = train_x.transpose(0,2,1)
train_y = train_y.flatten()
shape = train_x.shape # 3D: number of texts * number of padded paragraphs, number of features, 1
time_steps = shape[0] # number of padded pars * number of texts
features = shape[1] # number of features
model = Sequential()
model.add(layers.Masking(mask_value=0, input_shape=(time_steps, features)))
model.add(layers.LSTM(128, return_sequences=True, return_state=False, input_shape=(time_steps, features))) # 128 internal units
model.add(layers.TimeDistributed(layers.Dense(1, activation='sigmoid')))
#model.add(layers.Dense(len(train_y))) # Dense layer
model.compile(loss='binary_crossentropy', optimizer='adam')
model.fit(train_x, train_y, batch_size=train_y.shape[0])
predictions = model.predict(test_x)
I get a new error message:
ValueError: Input 0 is incompatible with layer lstm: expected shape=(None, None, 3), found shape=[288, 3, 1]
I'll keep updating this question in case someone runs into a similiar problem.
Still happy about any input.
Original question:
I want to buil a sequential LSTM model that predicts binary classification at every time step. More exactly, I want to predict an output for every paragraph in my texts (48 is the number of paragraphs). This is my code:
shape = np.shape(train_x) # 3D: number of texts, number of padded paragraphs, number of features
n = shape[0] # number of texts
time_steps = shape[1] # number of padded pars
features = shape[2] # number of features
model = Sequential()
model.add(layers.Masking(mask_value=0.0, input_shape=(time_steps, features)))
model.add(layers.LSTM(128, return_sequences=True, return_state=False))
model.add(layers.TimeDistributed(layers.Dense(1)))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.summary()
#train_x = np.array(train_x).reshape(2, input_shape, 3)
train_x = tf.convert_to_tensor(train_x) # data needs to be tensor object
train_y = tf.convert_to_tensor(train_y)
model.fit(train_x, train_y, batch_size=2)
predictions = model.predict(test_x)
This is the error message I get:
ValueError: Can not squeeze dim[1], expected a dimension of 1,
got 48 for '{{node categorical_crossentropy/weighted_loss/Squeeze}} = Squeeze[T=DT_FLOAT,
squeeze_dims=[-1]](Cast)' with input shapes: [2,48].
I don't really know what to do with this, do I need to reshape my data? How? Or do I need to change something in the model?
Thanks!
(changing the loss function to 'binary_crossentropy' raises the same error)
This is the entire traceback:
Traceback (most recent call last):
File "program.py", line 247, in <module>
eval_scores = train_classifier(x_train, y_train_sc, x_test, y_test_sc)
File "program.py", line 201, in train_classifier
model.fit(train_x, train_y, batch_size=2)
File "C:\Python38\lib\site-packages\tensorflow\python\keras\engine\training.py", line 108, in _method_wrapper
return method(self, *args, **kwargs)
File "C:\Python38\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1098, in fit
tmp_logs = train_function(iterator)
File "C:\Python38\lib\site-packages\tensorflow\python\eager\def_function.py", line 780, in __call__
result = self._call(*args, **kwds)
File "C:\Python38\lib\site-packages\tensorflow\python\eager\def_function.py", line 823, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File "C:\Python38\lib\site-packages\tensorflow\python\eager\def_function.py", line 696, in _initialize
self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
File "C:\Python38\lib\site-packages\tensorflow\python\eager\function.py", line 2855, in _get_concrete_function_internal_garbage_collected
graph_function, _, _ = self._maybe_define_function(args, kwargs)
File "C:\Python38\lib\site-packages\tensorflow\python\eager\function.py", line 3213, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "C:\Python38\lib\site-packages\tensorflow\python\eager\function.py", line 3065, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File "C:\Python38\lib\site-packages\tensorflow\python\framework\func_graph.py", line 986, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "C:\Python38\lib\site-packages\tensorflow\python\eager\def_function.py", line 600, in wrapped_fn
return weak_wrapped_fn().__wrapped__(*args, **kwds)
File "C:\Python38\lib\site-packages\tensorflow\python\framework\func_graph.py", line 973, in wrapper
raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:
C:\Python38\lib\site-packages\tensorflow\python\keras\engine\training.py:806 train_function *
return step_function(self, iterator)
C:\Python38\lib\site-packages\tensorflow\python\keras\engine\training.py:796 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
C:\Python38\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1211 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
C:\Python38\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2585 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
C:\Python38\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2945 _call_for_each_replica
return fn(*args, **kwargs)
C:\Python38\lib\site-packages\tensorflow\python\keras\engine\training.py:789 run_step **
outputs = model.train_step(data)
C:\Python38\lib\site-packages\tensorflow\python\keras\engine\training.py:748 train_step
loss = self.compiled_loss(
C:\Python38\lib\site-packages\tensorflow\python\keras\engine\compile_utils.py:204 __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
C:\Python38\lib\site-packages\tensorflow\python\keras\losses.py:150 __call__
return losses_utils.compute_weighted_loss(
C:\Python38\lib\site-packages\tensorflow\python\keras\utils\losses_utils.py:111 compute_weighted_loss
weighted_losses = tf_losses_utils.scale_losses_by_sample_weight(
C:\Python38\lib\site-packages\tensorflow\python\ops\losses\util.py:142 scale_losses_by_sample_weight
losses, _, sample_weight = squeeze_or_expand_dimensions(
C:\Python38\lib\site-packages\tensorflow\python\ops\losses\util.py:95 squeeze_or_expand_dimensions
sample_weight = array_ops.squeeze(sample_weight, [-1])
C:\Python38\lib\site-packages\tensorflow\python\util\dispatch.py:201 wrapper
return target(*args, **kwargs)
C:\Python38\lib\site-packages\tensorflow\python\util\deprecation.py:507 new_func
return func(*args, **kwargs)
C:\Python38\lib\site-packages\tensorflow\python\ops\array_ops.py:4259 squeeze
return gen_array_ops.squeeze(input, axis, name)
C:\Python38\lib\site-packages\tensorflow\python\ops\gen_array_ops.py:10043 squeeze
_, _, _op, _outputs = _op_def_library._apply_op_helper(
C:\Python38\lib\site-packages\tensorflow\python\framework\op_def_library.py:742 _apply_op_helper
op = g._create_op_internal(op_type_name, inputs, dtypes=None,
C:\Python38\lib\site-packages\tensorflow\python\framework\func_graph.py:591 _create_op_internal
return super(FuncGraph, self)._create_op_internal( # pylint: disable=protected-access
C:\Python38\lib\site-packages\tensorflow\python\framework\ops.py:3477 _create_op_internal
ret = Operation(
C:\Python38\lib\site-packages\tensorflow\python\framework\ops.py:1974 __init__
self._c_op = _create_c_op(self._graph, node_def, inputs,
C:\Python38\lib\site-packages\tensorflow\python\framework\ops.py:1815 _create_c_op
raise ValueError(str(e))
ValueError: Can not squeeze dim[1], expected a dimension of 1, got 48 for '{{node categorical_crossentropy/weighted_loss/Squeeze}} = Squeeze[T=DT_FLOAT, squeeze_dims=[-1]](Cast)' with input shapes: [2,48].

For the value of custom regularization parameters (tensorflow2.3.x keras)

I'm doing simple machine learning in tensorflow 2.3.x.
I would like to create and implement a custom regularization here.
I would like to create a loss by computing the weight values in 1D and a matrix that I created myself.
However, even if I create a matrix with the weights made 1D using the parameter x, it does not seem to contain any values. Naturally this results in a value error.
What if I want to calculate with the values of the weights in a custom regularization?
Here is the code that causes the error.
The return statement will be rewritten later.
#import datasets
from tensorflow.keras.datasets import cifar10
(X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
from tensorflow.keras.utils import to_categorical
X_train = X_train/255.
X_test = X_test/255.
Y_train = to_categorical(Y_train, 10)
Y_test = to_categorical(Y_test, 10)
import tensorflow.keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
import numpy as np
import tensorflow as tf
import random
import os
from tensorflow.keras import regularizers
def set_seed(seed=200):
tf.random.set_seed(seed)
# optional
# for numpy.random
np.random.seed(seed)
# for built-in random
random.seed(seed)
# for hash seed
os.environ["PYTHONHASHSEED"] = str(seed)
set_seed(0)
from tensorflow.python.ops import math_ops
from tensorflow.python.keras import backend
import math
class Costom(regularizers.Regularizer):
def __init__(self, costom):
self.costom = costom
def __call__(self, x):
w = tf.reduce_mean(tf.reduce_mean(tf.reduce_mean(x,0),0),0)
print(x.shape[3])
SK = [[0] * 256 for i in range(x.shape[3])]
i = 0
while i < x.shape[3]:
SK[i][i] = 1
i += 1
tf.constant(SK)
#tf.matmul(w ,SK)
return self.costom * tf.reduce_sum(x)
def get_config(self):
return {'costom': float(self.costom)}
model = Sequential([
Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=X_train.shape[1:],kernel_regularizer=Costom(0.01)),
Conv2D(32, (3, 3), padding='same', activation='relu'),
MaxPooling2D(2, 2),
Dropout(0.25),
Conv2D(64, (3, 3), padding='same', activation='relu'),
Conv2D(64, (3, 3), activation='relu'),
MaxPooling2D(2, 2),
Dropout(0.25),
Flatten(),
Dense(512, activation='relu'),
Dropout(0.5),
Dense(10, activation='softmax'),
])
model.compile(loss='categorical_crossentropy',optimizer='SGD',metrics=['accuracy'])
history = model.fit(X_train, Y_train, epochs=1)
model.save('./CIFAR-10_reg.h5')
print(model.evaluate(X_test, Y_test))
The following is the error message output.
Traceback (most recent call last):
File "train.py", line 112, in <module>
history = model.fit(X_train, Y_train, epochs=1)
File "C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\keras\engine\training.py", line 108, in _method_wrapper
return method(self, *args, **kwargs)
File "C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1098, in fit
tmp_logs = train_function(iterator)
File "C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\eager\def_function.py", line 780, in __call__
result = self._call(*args, **kwds)
File "C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\eager\def_function.py", line 823, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File "C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\eager\def_function.py", line 696, in _initialize
self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
File "C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\eager\function.py", line 2855, in _get_concrete_function_internal_garbage_collected
graph_function, _, _ = self._maybe_define_function(args, kwargs)
File "C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\eager\function.py", line 3213, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\eager\function.py", line 3065, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File "C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\framework\func_graph.py", line 986, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\eager\def_function.py", line 600, in wrapped_fn
return weak_wrapped_fn().__wrapped__(*args, **kwds)
File "C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\framework\func_graph.py", line 973, in wrapper
raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:
C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\keras\engine\training.py:806 train_function *
return step_function(self, iterator)
C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\keras\engine\training.py:796 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1211 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2585 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2945 _call_for_each_replica
return fn(*args, **kwargs)
C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\keras\engine\training.py:789 run_step **
outputs = model.train_step(data)
C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\keras\engine\training.py:749 train_step
y, y_pred, sample_weight, regularization_losses=self.losses)
C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:1433 losses
loss_tensor = regularizer()
C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:1509 _tag_callable
loss = loss()
C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:2433 _loss_for_variable
regularization = regularizer(v)
train.py:61 __call__
tf.matmul(w ,SK)
C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\util\dispatch.py:201 wrapper
return target(*args, **kwargs)
C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\ops\math_ops.py:3253 matmul
return gen_math_ops.mat_mul(
C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\ops\gen_math_ops.py:5640 mat_mul
_, _, _op, _outputs = _op_def_library._apply_op_helper(
C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\framework\op_def_library.py:742 _apply_op_helper
op = g._create_op_internal(op_type_name, inputs, dtypes=None,
C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\framework\func_graph.py:591 _create_op_internal
return super(FuncGraph, self)._create_op_internal( # pylint: disable=protected-access
C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\framework\ops.py:3477 _create_op_internal
ret = Operation(
C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\framework\ops.py:1974 __init__
self._c_op = _create_c_op(self._graph, node_def, inputs,
C:\Users\81805\anaconda3\envs\tf23\lib\site-packages\tensorflow\python\framework\ops.py:1815 _create_c_op
raise ValueError(str(e))
ValueError: Shape must be rank 2 but is rank 1 for '{{node conv2d/kernel/Regularizer/MatMul}} = MatMul[T=DT_FLOAT, transpose_a=false, transpose_b=false](conv2d/kernel/Regularizer/Mean_2, conv2d/kernel/Regularizer/MatMul/b)' with input shapes: [32], [32,256].

Categories

Resources