Multiple input neural network training crashes - python

I'm currently working on a neural network which is supposed to adjust a camera pose based on a given pose and an image.
I can't get the network to train, and my question is: what am I doing wrong?
Data gets loaded by a custom dataloader function. The network and training setup script:
import dataloader
from tensorflow.keras import optimizers, Model, Input
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, concatenate
tensorboard_callback = TensorBoard(
log_dir="./tensorboard_logs",
histogram_freq=1,
write_graph=True,
write_images=False,
update_freq="epoch",
)
cp_callback = ModelCheckpoint(filepath="./model",
monitor="loss",
save_best_only=True,
save_weights_only=False,
verbose=1)
first_input = Input(shape=(25088,))
first_dense = Dense(6000, activation="relu")
x = first_dense(first_input)
x = Dense(1500, activation="relu")(x)
x = Dense(375, activation="relu")(x)
second_input = Input(shape=(6,))
merged = concatenate([x, second_input])
output = Dense(6, activation="sigmoid")(merged)
model = Model(inputs=[first_input, second_input], outputs=output, name="network_name")
# define optimizer
opt = optimizers.Adam(
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-07,
amsgrad=False,
name="Adam"
)
# Compile the model with optimizers and loss functions
model.compile(optimizer=opt, loss="MeanSquaredError", metrics=["MeanSquaredError"])
model.summary()
train_gen = dataloader.Dataloader().dataloader(batch_size=5, training=True)
val_gen = dataloader.Dataloader().dataloader(batch_size=5, training=True)
# Train model
model.fit(
x=train_gen,
validation_data=next(val_gen),
# validation_data=tuple(train_gen),
validation_steps=1,
steps_per_epoch=7,
initial_epoch=1,
epochs=1000,
shuffle=False,
callbacks=[tensorboard_callback, cp_callback]
)
model.save("./model/new_model.h5")
This results in following architecture:
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 25088)] 0
__________________________________________________________________________________________________
dense (Dense) (None, 6000) 150534000 input_1[0][0]
__________________________________________________________________________________________________
dense_1 (Dense) (None, 1500) 9001500 dense[0][0]
__________________________________________________________________________________________________
dense_2 (Dense) (None, 375) 562875 dense_1[0][0]
__________________________________________________________________________________________________
input_2 (InputLayer) [(None, 6)] 0
__________________________________________________________________________________________________
concatenate (Concatenate) (None, 381) 0 dense_2[0][0]
input_2[0][0]
__________________________________________________________________________________________________
dense_3 (Dense) (None, 6) 2292 concatenate[0][0]
==================================================================================================
Total params: 160,100,667
Trainable params: 160,100,667
Non-trainable params: 0
__________________________________________________________________________________________________
The first input is the mentioned image. The second input is the pose to be adjusted.
My issue is that there seems to be an error in how I hand over the data from my dataloader to the fit() function. I receive following error:
2022-02-03 13:17:42.111511: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:185] None of the MLIR Optimization Passes are enabled (registered 2)
Epoch 2/1000
Traceback (most recent call last):
File "C:\path\network.py", line 73, in <module>
model.fit(
File "C:\Python39\lib\site-packages\keras\engine\training.py", line 1184, in fit
tmp_logs = self.train_function(iterator)
File "C:\Python39\lib\site-packages\tensorflow\python\eager\def_function.py", line 885, in __call__
result = self._call(*args, **kwds)
File "C:\Python39\lib\site-packages\tensorflow\python\eager\def_function.py", line 933, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File "C:\Python39\lib\site-packages\tensorflow\python\eager\def_function.py", line 759, in _initialize
self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
File "C:\Python39\lib\site-packages\tensorflow\python\eager\function.py", line 3066, in _get_concrete_function_internal_garbage_collected
graph_function, _ = self._maybe_define_function(args, kwargs)
File "C:\Python39\lib\site-packages\tensorflow\python\eager\function.py", line 3463, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "C:\Python39\lib\site-packages\tensorflow\python\eager\function.py", line 3298, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File "C:\Python39\lib\site-packages\tensorflow\python\framework\func_graph.py", line 1007, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "C:\Python39\lib\site-packages\tensorflow\python\eager\def_function.py", line 668, in wrapped_fn
out = weak_wrapped_fn().__wrapped__(*args, **kwds)
File "C:\Python39\lib\site-packages\tensorflow\python\framework\func_graph.py", line 994, in wrapper
raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:
C:\Python39\lib\site-packages\keras\engine\training.py:853 train_function *
return step_function(self, iterator)
C:\Python39\lib\site-packages\keras\engine\training.py:842 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
C:\Python39\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1286 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
C:\Python39\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2849 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
C:\Python39\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:3632 _call_for_each_replica
return fn(*args, **kwargs)
C:\Python39\lib\site-packages\keras\engine\training.py:835 run_step **
outputs = model.train_step(data)
C:\Python39\lib\site-packages\keras\engine\training.py:787 train_step
y_pred = self(x, training=True)
C:\Python39\lib\site-packages\keras\engine\base_layer.py:1020 __call__
input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
C:\Python39\lib\site-packages\keras\engine\input_spec.py:199 assert_input_compatibility
raise ValueError('Layer ' + layer_name + ' expects ' +
ValueError: Layer network_name expects 2 input(s), but it received 10 input tensors. Inputs received: [<tf.Tensor 'ExpandDims:0' shape=(None,
1) dtype=float32>, <tf.Tensor 'ExpandDims_1:0' shape=(None, 1) dtype=float32>, <tf.Tensor 'ExpandDims_2:0' shape=(None, 1) dtype=float32>, <tf.Tensor 'ExpandDims_3:0' shape=(None, 1) dtype=float32>, <tf.Tensor 'ExpandDims_4:0' shape=(None, 1) dtype=float32>, <tf.Tensor 'ExpandDims_5:0' shape=(None, 1) dtype=float32>, <tf.Tensor 'ExpandDims_6:0' shape=(None, 1) dtype=float32>, <tf.Tensor 'ExpandDims_7:0' shape=(None, 1) dtype=float32>, <tf.Tensor 'ExpandDims_8:0' shape=(None, 1) dtype=float32>, <tf.Tensor 'ExpandDims_9:0' shape=(None, 1) dtype=float32>]
I found an article (sadly can't find it anymore and didn't save the link) which said that the problem could be solved by applying the tuple() function to val_generator in the fit() function, but that resulted in my script just doing nothing after compiling the network
Please also let me know if there are general problems with the network architecture or how to improve. Since this is my first real neural network project I'm not really certain about anything I'm doing to be honest.
If there is any additional info one might need, please let me know.
Thanks in advance

Related

Running a model.fit for cnn : valueError

Once I want to train my model, I get the following error in the model.fit() line:
Code:
DO = Denoiser()
visible = Input(shape=(500, batch_size))
my_denoiser = DO.rkhs(visible, kern, I_mat)
conv1 = Conv1D(6, kernel_size=4, activation='relu')(visible)
pool1 = MaxPooling1D(pool_size=5)(conv1)
conv2 = Conv1D(12, kernel_size=4, activation='relu')(pool1)
pool2 = MaxPooling1D(pool_size=5)(conv2)
flat = Flatten()(pool2)
hidden1 = Dense(10, activation='relu')(flat)
output = Dense(3, activation='softmax')(hidden1)
model = Model(inputs=visible, outputs=output)
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(new_signal, y, epochs=2, batch_size=200)
output :
Model: "model_3"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_6 (InputLayer) [(None, 500, 128)] 0
conv1d_7 (Conv1D) (None, 497, 6) 3078
max_pooling1d_7 (MaxPooling (None, 99, 6) 0
1D)
conv1d_8 (Conv1D) (None, 96, 12) 300
max_pooling1d_8 (MaxPooling (None, 19, 12) 0
1D)
flatten_3 (Flatten) (None, 228) 0
dense_6 (Dense) (None, 10) 2290
dense_7 (Dense) (None, 3) 33
=================================================================
Total params: 5,701
Trainable params: 5,701
Non-trainable params: 0
_________________________________________________________________
Error:
ValueError: in user code:
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1021, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1010, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1000, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 859, in train_step
y_pred = self(x, training=True)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "/usr/local/lib/python3.7/dist-packages/keras/engine/input_spec.py", line 264, in assert_input_compatibility
raise ValueError(f'Input {input_index} of layer "{layer_name}" is '
ValueError: Input 0 of layer "model_3" is incompatible with the layer: expected shape=(None, 500, 128), found shape=(None, 10)

I am getting an error like "Input 1 of layer "model_5" is incompatible with the layer: expected shape=(None, 224, 224, 3), found shape=(None, 5)"

I am trying to fuse features of two image inputs of shape (299, 299, 3), (224, 224, 3) and am getting shape errors.
Here is my code
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.vgg16 import VGG16
import tensorflow as tf
from tensorflow.keras import layers, Input
inp_pre_trained_model = InceptionV3( include_top=False)
inp_pre_trained_model.trainable=False
inp_input=tf.keras.Input(shape=(299,299,3),name="input_layer_inception_V3")
inp_x=inp_pre_trained_model (inp_input)
inp_x=layers.GlobalAveragePooling2D(name="global_average_pooling_layer_inception_v3")(inp_x)
vgg_pre_trained_model = VGG16( include_top=False)
vgg_pre_trained_model.trainable=False
vgg_input=tf.keras.Input(shape=(224,224,3),name="input_layer_VGG_16")
vgg_x=vgg_pre_trained_model(vgg_input)
vgg_x=layers.GlobalAveragePooling2D(name="global_average_pooling_layer_vgg_16")(vgg_x)
x=tf.keras.layers.concatenate([inp_x,vgg_x],axis=-1)
x = tf.keras.layers.Flatten()(x)
outputs=tf.keras.layers.Dense(5,activation="softmax", name= "output_layer") (x)
model=tf.keras.Model(inputs=[inp_input,vgg_input],outputs=outputs)
model.summary()
My model summary
Model: "model_9"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_layer_inception_V3 (Inpu [(None, 224, 224, 3 0 []
tLayer) )]
input_layer_VGG_16 (InputLayer [(None, 299, 299, 3 0 []
) )]
inception_v3 (Functional) (None, None, None, 21802784 ['input_layer_inception_V3[0][0]'
2048) ]
vgg16 (Functional) (None, None, None, 14714688 ['input_layer_VGG_16[0][0]']
512)
global_average_pooling_incepti (None, 2048) 0 ['inception_v3[0][0]']
on (GlobalAveragePooling2D)
global_average_pooling_vgg (Gl (None, 512) 0 ['vgg16[0][0]']
obalAveragePooling2D)
concatenate_71 (Concatenate) (None, 2560) 0 ['global_average_pooling_inceptio
n[0][0]',
'global_average_pooling_vgg[0][0
]']
output_layer (Dense) (None, 5) 12805 ['concatenate_71[0][0]']
==================================================================================================
Total params: 36,530,277
Trainable params: 12,805
Non-trainable params: 36,517,472
compiler
model.compile(loss="sparse_categorical_crossentropy",optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),metrics=["accuracy"])
train = tf.data.Dataset.zip((cache_train_data, ceced_train_data))
test = tf.data.Dataset.zip((cache_test_data, ceced_test_data))
train_dataset = train.prefetch(tf.data.AUTOTUNE)
test_dataset = test.prefetch(tf.data.AUTOTUNE)
train_dataset, test_dataset
--->(<PrefetchDataset element_spec=((TensorSpec(shape=(None, 224, 224, 3), dtype=tf.float32, name=None), TensorSpec(shape=(None, 5), dtype=tf.float32, name=None)), (TensorSpec(shape=(None, 224, 224, 3), dtype=tf.float32, name=None), TensorSpec(shape=(None, 5), dtype=tf.float32, name=None)))>,
<PrefetchDataset element_spec=((TensorSpec(shape=(None, 224, 224, 3), dtype=tf.float32, name=None), TensorSpec(shape=(None, 5), dtype=tf.float32, name=None)), (TensorSpec(shape=(None, 224, 224, 3), dtype=tf.float32, name=None), TensorSpec(shape=(None, 5), dtype=tf.float32, name=None)))>)
fit the model
model_history = model.fit(train_dataset,
steps_per_epoch=len(train_dataset),
epochs=3,
validation_data=test_dataset,
validation_steps=len(test_dataset))
error
ValueError: in user code:
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1051, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1040, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1030, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 889, in train_step
y_pred = self(x, training=True)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "/usr/local/lib/python3.7/dist-packages/keras/engine/input_spec.py", line 264, in assert_input_compatibility
raise ValueError(f'Input {input_index} of layer "{layer_name}" is '
ValueError: Input 1 of layer "model_9" is incompatible with the layer: expected shape=(None, 299, 299, 3), found shape=(None, 5)
You have a problem in your fit function, related to the train data. See from the Keras documentation, fit can take the following arguments:
Model.fit(
x=None,
y=None,
batch_size=None,
epochs=1,
verbose="auto",
callbacks=None,
validation_split=0.0,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_batch_size=None,
validation_freq=1,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
)
But you are passing a train_dataset instead, which I think is a tf.data.Dataset that holds x_train and y_train together.
In order to fix the error you should separate x from y and pass those as arguments instead.
I think that something like this should do:
for images, labels in train_dataset.take(-1):
X_train = images.numpy()
y_train = labels.numpy()
# doing the same for validation
for images, labels in test_dataset.take(-1):
X_test = images.numpy()
y_test = labels.numpy()
You want to have something like this:
model_history = model.fit(x=X_train, y=y_train, steps_per_epoch=len(train_dataset), epochs=3, validation_data=(X_test, y_test), validation_steps=len(test_dataset))

ValueError: Input 0 of layer "sequential_20" is incompatible with the layer: expected shape=(None, 304413), found shape=(None, 1, 13)

I am trying to create a LSTM model for time series prediction, at each time step input has 9 elements and the output has 4.
To create a dataset I write this code:
def create_dataset(dataset, look_back):
dataX, dataY = [], []
for i in range(len(dataset)-look_back):
dataX.append(dataset[i:(i+look_back)]) # all 22 columns for X
dataY.append(dataset[i + look_back, 9:14]) # first 8 columns for Y, just as an example
return np.array(dataX), np.array(dataY)
data = np.concatenate((input, output), axis=1)
X, Y = create_dataset(data, 1)
I used this model
model=Sequential()
model.add(Embedding(1, 13, input_length=304413))
model.add(LSTM(12, input_shape=(304413,1,13), kernel_initializer='normal',activation='relu',return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(12, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(4, activation='relu'))
model.compile(loss='mean_squared_error', optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),metrics=['accuracy','mse'])
The input shape is (304413,13) and the output shape is (304413, 4)
The output of summary is:
Model: "sequential_20"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding_15 (Embedding) (None, 304413, 13) 13
lstm_30 (LSTM) (None, 304413, 12) 1248
dropout_28 (Dropout) (None, 304413, 12) 0
lstm_31 (LSTM) (None, 12) 1200
dropout_29 (Dropout) (None, 12) 0
dense_14 (Dense) (None, 4) 52
=================================================================
Total params: 2,513
Trainable params: 2,513
Non-trainable params: 0
and here where I face the error:
pred=model.fit(x,Y, verbose=0,epochs=150, batch_size=70)
The error is
ValueError: in user code:
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1021, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1010, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1000, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 859, in train_step
y_pred = self(x, training=True)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "/usr/local/lib/python3.7/dist-packages/keras/engine/input_spec.py", line 264, in assert_input_compatibility
raise ValueError(f'Input {input_index} of layer "{layer_name}" is '
ValueError: Input 0 of layer "sequential_20" is incompatible with the layer: expected shape=(None, 304413), found shape=(None, 1, 13)
What is my mistake and how can I solve it?

Keras ValueError: Dimensions must be equal, but are 2 and 32 for '{{node Equal}} with input shapes: [?,2], [?,32,32]

I was trying to train a simple Keras network for classification when I faced the following error. I know there is something wrong with my inputs but I couldn't figure out how to fix it. Here is my code
my data set shape :
x_train : float32 0.0 1.0 (2444, 64, 64, 1)
y_train : float32 0.0 1.0 (2444, 2)
x_test : float32 0.0 1.0 (9123, 64, 64, 1)
y_test : float32 0.0 1.0 (9123, 2)
the model :
inputs = keras.Input(shape=(64,64,1), dtype='float32')
x = keras.layers.Conv2D(12,(9,9), padding="same",input_shape=(64,64,1), dtype='float32',activation='relu')(inputs)
x = keras.layers.Conv2D(18,(7,7), padding="same", activation='relu')(x)
x = keras.layers.MaxPool2D(pool_size=(2,2))(x)
x = keras.layers.Dropout(0.25)(x)
x = keras.layers.Dense(50, activation='relu')(x)
x = keras.layers.Dropout(0.4)(x)
outputs = keras.layers.Dense(2, activation='softmax')(x)
model = keras.Model(inputs, outputs)
model summary :
Model: "model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, 64, 64, 1)] 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 64, 64, 12) 984
_________________________________________________________________
conv2d_3 (Conv2D) (None, 64, 64, 18) 10602
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 32, 32, 18) 0
_________________________________________________________________
dropout_2 (Dropout) (None, 32, 32, 18) 0
_________________________________________________________________
dense_2 (Dense) (None, 32, 32, 50) 950
_________________________________________________________________
dropout_3 (Dropout) (None, 32, 32, 50) 0
_________________________________________________________________
dense_3 (Dense) (None, 32, 32, 2) 102
=================================================================
Total params: 12,638
Trainable params: 12,638
Non-trainable params: 0
________________________
Compiler and fit call — the error occurs when I want to fit the model:
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(),
optimizer=keras.optimizers.Adam(0.01),
metrics=["acc"],
)
model.fit(x_train, y_train, batch_size=32, epochs = 20, validation_split= 0.3,
callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)])
and finally the error:
ValueError Traceback (most recent call last)
<ipython-input-31-e4cade46a08c> in <module>()
1 model.fit(x_train, y_train, batch_size=32, epochs = 20, validation_split= 0.3,
----> 2 callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)])
9 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
992 except Exception as e: # pylint:disable=broad-except
993 if hasattr(e, "ag_error_metadata"):
--> 994 raise e.ag_error_metadata.to_exception(e)
995 else:
996 raise
ValueError: in user code:
/usr/local/lib/python3.7/dist-packages/keras/engine/training.py:853 train_function *
return step_function(self, iterator)
/usr/local/lib/python3.7/dist-packages/keras/engine/training.py:842 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py:1286 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py:2849 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py:3632 _call_for_each_replica
return fn(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/keras/engine/training.py:835 run_step **
outputs = model.train_step(data)
/usr/local/lib/python3.7/dist-packages/keras/engine/training.py:792 train_step
self.compiled_metrics.update_state(y, y_pred, sample_weight)
/usr/local/lib/python3.7/dist-packages/keras/engine/compile_utils.py:457 update_state
metric_obj.update_state(y_t, y_p, sample_weight=mask)
/usr/local/lib/python3.7/dist-packages/keras/utils/metrics_utils.py:73 decorated
update_op = update_state_fn(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/keras/metrics.py:177 update_state_fn
return ag_update_state(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/keras/metrics.py:681 update_state **
matches = ag_fn(y_true, y_pred, **self._fn_kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/dispatch.py:206 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/keras/metrics.py:3537 sparse_categorical_accuracy
return tf.cast(tf.equal(y_true, y_pred), backend.floatx())
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/dispatch.py:206 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/math_ops.py:1864 equal
return gen_math_ops.equal(x, y, name=name)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/gen_math_ops.py:3219 equal
name=name)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/op_def_library.py:750 _apply_op_helper
attrs=attr_protos, op_def=op_def)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py:601 _create_op_internal
compute_device)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py:3569 _create_op_internal
op_def=op_def)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py:2042 __init__
control_input_ops, op_def)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py:1883 _create_c_op
raise ValueError(str(e))
ValueError: Dimensions must be equal, but are 2 and 32 for '{{node Equal}} = Equal[T=DT_FLOAT, incompatible_shape_error=true](IteratorGetNext:1, Cast_1)' with input shapes: [?,2], [?,32,32].
As you can see in the model summary, the output shape of the model is (None, 32, 32, 2), while based on the target values it should be (None, 2). Try adding a Flatten layer before the Dense layers:
x = keras.layers.Dropout(0.25)(x)
x = keras.layers.Flatten()(x) # Add this
x = keras.layers.Dense(50, activation='relu')(x)

ValueError: Shapes (None, 9) and (None, 10) are incompatible

I have a dataset with 565 features and 10 different columns on the prediction site for predicting labels in the training model.Here is the model summary dimensions :
_________________________________________________________________
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv1d (Conv1D) (None, 564, 64) 256
_________________________________________________________________
flatten (Flatten) (None, 36096) 0
_________________________________________________________________
dense (Dense) (None, 50) 1804850
_________________________________________________________________
dense_1 (Dense) (None, 50) 2550
_________________________________________________________________
dense_2 (Dense) (None, 50) 2550
_________________________________________________________________
dense_3 (Dense) (None, 50) 2550
_________________________________________________________________
dense_4 (Dense) (None, 10) 510
=================================================================
Total params: 1,813,266
Trainable params: 1,813,266
Non-trainable params: 0
_________________________________________________________________
Here is the code I have used :
import pandas as pd
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv1D, Flatten
from tensorflow.keras import optimizers
from sklearn.metrics import confusion_matrix
import tensorflow as tf
import tensorflow.keras.metrics
data = pd.read_csv('Step1_reducedfile.csv',skiprows = 1,header = None)
data = data.sample(frac=1).reset_index(drop=True)
train_X = data[0:data.shape[0],0:566]
train_y = data[0:data.shape[0],566:data.shape[1]]
train_X = train_X.reshape((train_X.shape[0], train_X.shape[1], 1))
import random
neurons = 50
strategy = tensorflow.distribute.MirroredStrategy()
with strategy.scope():
model = tf.keras.Sequential([
tf.keras.layers.Conv1D(64,kernel_size = 3,activation='relu',input_shape=train_X.shape[1:]),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(neurons,activation='relu'),
tf.keras.layers.Dense(neurons,activation='relu'),
tf.keras.layers.Dense(neurons,activation='relu'),
tf.keras.layers.Dense(neurons,activation='relu'),
tf.keras.layers.Dense(10, activation='softmax'),])
model.summary()
sgd = optimizers.SGD(lr=0.05, decay=1e-6, momentum=0.24, nesterov=True)
model.compile(loss='categorical_crossentropy',optimizer=sgd,metrics=['accuracy',tensorflow.keras.metrics.Precision()])
model.summary()
results = model.fit(train_X,train_y,validation_split = 0.2,epochs=10,batch_size = 100)
print(results)
I am getting the following error :
ValueError: in user code:
/usr/local/lib64/python3.6/site-packages/tensorflow/python/keras/engine/training.py:806 train_function *
return step_function(self, iterator)
/usr/local/lib64/python3.6/site-packages/tensorflow/python/keras/engine/training.py:796 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
/usr/local/lib64/python3.6/site-packages/tensorflow/python/distribute/distribute_lib.py:1211 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/usr/local/lib64/python3.6/site-packages/tensorflow/python/distribute/distribute_lib.py:2585 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/usr/local/lib64/python3.6/site-packages/tensorflow/python/distribute/mirrored_strategy.py:585 _call_for_each_replica
self._container_strategy(), fn, args, kwargs)
/usr/local/lib64/python3.6/site-packages/tensorflow/python/distribute/mirrored_run.py:96 call_for_each_replica
return _call_for_each_replica(strategy, fn, args, kwargs)
/usr/local/lib64/python3.6/site-packages/tensorflow/python/distribute/mirrored_run.py:237 _call_for_each_replica
coord.join(threads)
/usr/local/lib64/python3.6/site-packages/tensorflow/python/training/coordinator.py:389 join
six.reraise(*self._exc_info_to_raise)
/usr/local/lib/python3.6/site-packages/six.py:703 reraise
raise value
/usr/local/lib64/python3.6/site-packages/tensorflow/python/training/coordinator.py:297 stop_on_exception
yield
/usr/local/lib64/python3.6/site-packages/tensorflow/python/distribute/mirrored_run.py:323 run
self.main_result = self.main_fn(*self.main_args, **self.main_kwargs)
/usr/local/lib64/python3.6/site-packages/tensorflow/python/keras/engine/training.py:789 run_step **
outputs = model.train_step(data)
/usr/local/lib64/python3.6/site-packages/tensorflow/python/keras/engine/training.py:749 train_step
y, y_pred, sample_weight, regularization_losses=self.losses)
/usr/local/lib64/python3.6/site-packages/tensorflow/python/keras/engine/compile_utils.py:204 __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
/usr/local/lib64/python3.6/site-packages/tensorflow/python/keras/losses.py:149 __call__
losses = ag_call(y_true, y_pred)
/usr/local/lib64/python3.6/site-packages/tensorflow/python/keras/losses.py:253 call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
/usr/local/lib64/python3.6/site-packages/tensorflow/python/util/dispatch.py:201 wrapper
return target(*args, **kwargs)
/usr/local/lib64/python3.6/site-packages/tensorflow/python/keras/losses.py:1535 categorical_crossentropy
return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)
/usr/local/lib64/python3.6/site-packages/tensorflow/python/util/dispatch.py:201 wrapper
return target(*args, **kwargs)
/usr/local/lib64/python3.6/site-packages/tensorflow/python/keras/backend.py:4687 categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
/usr/local/lib64/python3.6/site-packages/tensorflow/python/framework/tensor_shape.py:1134 assert_is_compatible_with
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
ValueError: Shapes (None, 9) and (None, 10) are incompatible
That error shows that you are giving a wrong shape of label array to your model. It is expecting an array of shape (None, 9), while you are giving an array of shape (None, 10). This may be because your dataset has 9 classes, as rightly mentioned by Dr. Snoopy.
For the benefit of the community, here I am providing complete working code.
import pandas as pd
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv1D, Flatten
from tensorflow.keras import optimizers
from sklearn.metrics import confusion_matrix
import tensorflow as tf
import tensorflow.keras.metrics
data = pd.read_csv('Step1_reducedfile.csv',skiprows = 1,header = None)
data = data.sample(frac=1).reset_index(drop=True)
train_X = data[0:data.shape[0],0:566]
train_y = data[0:data.shape[0],566:data.shape[1]]
train_X = train_X.reshape((train_X.shape[0], train_X.shape[1], 1))
import random
neurons = 50
strategy = tensorflow.distribute.MirroredStrategy()
with strategy.scope():
model = tf.keras.Sequential([
tf.keras.layers.Conv1D(64,kernel_size = 3,activation='relu',input_shape=train_X.shape[1:]),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(neurons,activation='relu'),
tf.keras.layers.Dense(neurons,activation='relu'),
tf.keras.layers.Dense(neurons,activation='relu'),
tf.keras.layers.Dense(neurons,activation='relu'),
tf.keras.layers.Dense(9, activation='softmax'),])
model.summary()
sgd = optimizers.SGD(lr=0.05, decay=1e-6, momentum=0.24, nesterov=True)
model.compile(loss='categorical_crossentropy',optimizer=sgd,metrics=['accuracy',tensorflow.keras.metrics.Precision()])
model.summary()
results = model.fit(train_X,train_y,validation_split = 0.2,epochs=10,batch_size = 100)
print(results)

Categories

Resources