tensorflow: error with parallel channel model - python

I'm debugging a model I created to accept a variable number of input channels (each channel is an RGB image). I suspect that not all the channels are properly connected.
Model code:
import tensorflow as tf

IMG_SHAPE = (160, 160, 3)

def get_ch_model_simple():
    i_input = tf.keras.Input(shape=IMG_SHAPE)
    # scale pixels to float
    x = tf.keras.layers.Rescaling(1.0 / 255)(i_input)
    x = tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu")(x)
    x = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(x)
    return tf.keras.Model(i_input, x)
def get_model(n_chan=2):
    inputs = tf.keras.Input(shape=(n_chan, 160, 160, 3))
    ch_features = []
    for ch in range(n_chan):
        ch_model = get_ch_model_simple()
        # select a specific channel
        ch_model_input = inputs[:, ch, :, :, :]
        i_ch_features = tf.keras.layers.Flatten()(ch_model(ch_model_input))
        i_ch_features = tf.keras.layers.Dropout(0.5)(i_ch_features)
        ch_features.append(i_ch_features)
    all_ch_features = tf.keras.layers.concatenate(ch_features)
    outputs = tf.keras.layers.Dense(2, activation="softmax")(all_ch_features)
    return tf.keras.Model(inputs, outputs)
Checking the model:
m = get_model(n_chan=2)
m.summary()
Summary output:
Model: "model_49"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_25 (InputLayer) [(None, 2, 160, 160 0 []
, 3)]
tf.__operators__.getitem_16 (S (None, 160, 160, 3) 0 ['input_25[0][0]']
licingOpLambda)
tf.__operators__.getitem_17 (S (None, 160, 160, 3) 0 ['input_25[0][0]']
licingOpLambda)
model_47 (Functional) (None, 79, 79, 32) 896 ['tf.__operators__.getitem_16[0][
0]']
model_48 (Functional) (None, 79, 79, 32) 896 ['tf.__operators__.getitem_17[0][
0]']
flatten_19 (Flatten) (None, 199712) 0 ['model_47[0][0]']
flatten_20 (Flatten) (None, 199712) 0 ['model_48[0][0]']
dropout_19 (Dropout) (None, 199712) 0 ['flatten_19[0][0]']
dropout_20 (Dropout) (None, 199712) 0 ['flatten_20[0][0]']
concatenate_8 (Concatenate) (None, 399424) 0 ['dropout_19[0][0]',
'dropout_20[0][0]']
dense_10 (Dense) (None, 2) 798850 ['concatenate_8[0][0]']
==================================================================================================
Total params: 800,642
Trainable params: 800,642
Non-trainable params: 0
__________________________________________________________________________________________________
I'm concerned that the two slicing operators are both connected to input_25[0][0]; it looks as if they are getting the same channel slice instead of each getting a different channel.
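(Note: the input_25[0][0] entry only names the tensor each slicing layer consumes — output 0 of node 0 of input_25. Both slices do consume the same input tensor; the differing channel index lives in each SlicingOpLambda's call arguments and is not shown by summary(), so identical "Connected to" entries are expected. A minimal sanity check, as a sketch assuming NumPy and the layer order shown in the summary above, is to feed a batch whose two channels differ:

import numpy as np

# Channel 0 all zeros, channel 1 all ones.
x = np.zeros((1, 2, 160, 160, 3), dtype="float32")
x[:, 1] = 1.0

# Probe each slicing layer's output (layers 1 and 2 in m.layers, per the summary).
s0 = tf.keras.Model(m.inputs, m.layers[1].output)(x)
s1 = tf.keras.Model(m.inputs, m.layers[2].output)(x)
print(float(tf.reduce_max(s0)), float(tf.reduce_max(s1)))  # expect 0.0 and 1.0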
In addition, when I try to create a submodel to inspect the ch_model input and output, I get an error:
r_m = tf.keras.Model(model.inputs, model.layers[3].input)    # works fine
However, r_m2 = tf.keras.Model(model.inputs, model.layers[3].output) errors out:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Input In [175], in <cell line: 1>()
----> 1 r_m2 = tf.keras.Model(model.inputs, model.layers[3].output)
File ~/PycharmProjects/venv39/lib/python3.9/site-packages/tensorflow/python/training/tracking/base.py:629, in no_automatic_dependency_tracking.<locals>._method_wrapper(self, *args, **kwargs)
627 self._self_setattr_tracking = False # pylint: disable=protected-access
628 try:
--> 629 result = method(self, *args, **kwargs)
630 finally:
631 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
File ~/PycharmProjects/venv39/lib/python3.9/site-packages/keras/engine/functional.py:146, in Functional.__init__(self, inputs, outputs, name, trainable, **kwargs)
143 if not all([functional_utils.is_input_keras_tensor(t)
144 for t in tf.nest.flatten(inputs)]):
145 inputs, outputs = functional_utils.clone_graph_nodes(inputs, outputs)
--> 146 self._init_graph_network(inputs, outputs)
File ~/PycharmProjects/venv39/lib/python3.9/site-packages/tensorflow/python/training/tracking/base.py:629, in no_automatic_dependency_tracking.<locals>._method_wrapper(self, *args, **kwargs)
627 self._self_setattr_tracking = False # pylint: disable=protected-access
628 try:
--> 629 result = method(self, *args, **kwargs)
630 finally:
631 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
File ~/PycharmProjects/venv39/lib/python3.9/site-packages/keras/engine/functional.py:229, in Functional._init_graph_network(self, inputs, outputs)
226 self._input_coordinates.append((layer, node_index, tensor_index))
228 # Keep track of the network's nodes and layers.
--> 229 nodes, nodes_by_depth, layers, _ = _map_graph_network(
230 self.inputs, self.outputs)
231 self._network_nodes = nodes
232 self._nodes_by_depth = nodes_by_depth
File ~/PycharmProjects/venv39/lib/python3.9/site-packages/keras/engine/functional.py:1036, in _map_graph_network(inputs, outputs)
1034 for x in tf.nest.flatten(node.keras_inputs):
1035 if id(x) not in computable_tensors:
-> 1036 raise ValueError(
1037 f'Graph disconnected: cannot obtain value for tensor {x} '
1038 f'at layer "{layer.name}". The following previous layers '
1039 f'were accessed without issue: {layers_with_complete_input}')
1040 for x in tf.nest.flatten(node.outputs):
1041 computable_tensors.add(id(x))
ValueError: Graph disconnected: cannot obtain value for tensor KerasTensor(type_spec=TensorSpec(shape=(None, 160, 160, 3), dtype=tf.float32, name='input_8'), name='input_8', description="created by layer 'input_8'") at layer "rescaling_4". The following previous layers were accessed without issue: []
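(The "Graph disconnected" error arises because model.layers[3] is the nested Functional sub-model: its default .output (node 0) is the tensor built on the sub-model's own Input, input_8, which belongs to a different graph than model.inputs. A sketch of one workaround, assuming the Keras 2.x node API and that node index 1 corresponds to the sub-model's call inside the outer model — the index may need adjusting:

inner = model.layers[3]               # nested Functional model (model_47)
outer_out = inner.get_output_at(1)    # node 0 = internal build, node 1 = call in the outer graph
r_m2 = tf.keras.Model(model.inputs, outer_out)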

Related

Invalid Argument Error: Graph Execution Error on Tensorflow NN

I'm trying to fit the following neural network:
def make_model():
    input = tf.keras.Input(shape=train_df.shape[1:])
    x = tf.keras.layers.Flatten()(input)
    x = tf.keras.layers.Dense(128, activation='relu')(x)
    x = tf.keras.layers.Dense(64, activation='relu')(x)
    x = tf.keras.layers.Dense(32, activation='relu')(x)
    output = tf.keras.layers.Dense(8, activation='softmax')(x)
    model = tf.keras.models.Model(input, output)
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
                  loss=tf.keras.losses.CategoricalCrossentropy(),
                  metrics=[tf.keras.metrics.CategoricalAccuracy(),
                           tf.keras.metrics.FalseNegatives(),
                           tf.keras.metrics.AUC(name='prc', curve='PR')])
    return model
model = make_model()
model.fit(x=train_features, y=train_labels, epochs=2)
where:
model.summary()
print(train_features.shape, train_labels.shape)
outputs the following:
Model: "model_8"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_9 (InputLayer) [(None, 17)] 0
flatten_8 (Flatten) (None, 17) 0
dense_32 (Dense) (None, 128) 2304
dense_33 (Dense) (None, 64) 8256
dense_34 (Dense) (None, 32) 2080
dense_35 (Dense) (None, 8) 264
=================================================================
Total params: 12,904
Trainable params: 12,904
Non-trainable params: 0
_________________________________________________________________
Train Features Shape: (64140, 17)
Train Labels Shape: (64140, 8)
However, it keeps getting this error mid-epoch:
Epoch 1/2
444/2005 [=====>........................] - ETA: 1s - loss: 1.1139 - categorical_accuracy: 0.4904 - false_negatives_6: 10143.0000 - prc: 0.5232
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
c:\Users\nrtc\OneDrive\Documentos\AI Summer School\Competition\Cópia_de_imbalanced_data.ipynb Cell 34' in <module>
----> 1 model.fit(x=train_features, y=train_labels, epochs=2)
File c:\Python39\lib\site-packages\keras\utils\traceback_utils.py:67, in filter_traceback.<locals>.error_handler(*args, **kwargs)
65 except Exception as e: # pylint: disable=broad-except
66 filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67 raise e.with_traceback(filtered_tb) from None
68 finally:
69 del filtered_tb
File ~\AppData\Roaming\Python\Python39\site-packages\tensorflow\python\eager\execute.py:54, in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
52 try:
53 ctx.ensure_initialized()
---> 54 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
55 inputs, attrs, num_outputs)
56 except core._NotOkStatusException as e:
57 if name is not None:
InvalidArgumentError: Graph execution error:
Detected at node 'assert_greater_equal/Assert/AssertGuard/Assert' defined at (most recent call last):
File "c:\Python39\lib\runpy.py", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
...
File "c:\Python39\lib\site-packages\keras\utils\metrics_utils.py", line 602, in update_confusion_matrix_variables
tf.debugging.assert_greater_equal(
Node: 'assert_greater_equal/Assert/AssertGuard/Assert'
assertion failed: [predictions must be >= 0] [Condition x >= y did not hold element-wise:] [x (model_6/dense_27/Softmax:0) = ] [[0.101199746 0.358387947 0.118633337...]...] [y (Cast_4/x:0) = ] [0]
[[{{node assert_greater_equal/Assert/AssertGuard/Assert}}]] [Op:__inference_train_function_10341]
Any idea what might be the error? I've seen other threads for the same error (*), but I do think that the last layer is correctly set up for 8 output labels.
*Other stack overflow threads for a similar problem
https://stackoverflow.com/questions/62606345/tensorflow-2-2-0-error-predictions-must-be-0-condition-x-y-did-not-hold
https://stackoverflow.com/questions/71153492/invalid-argument-error-graph-execution-error
Some labels in the training set are NaN, so plotting the labels does not make the error obvious.
train_df.dropna(inplace=True)
does the trick.
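A quick way to confirm the NaNs first — a sketch, assuming the features/labels come from the pandas DataFrame train_df:

import numpy as np

print(np.isnan(train_labels).any())   # True if any label entry is NaN
print(train_df.isna().sum())          # per-column NaN counts
train_df.dropna(inplace=True)         # then rebuild train_features / train_labels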

Keras ValueError: Dimensions must be equal, but are 2 and 32 for '{{node Equal}} with input shapes: [?,2], [?,32,32]

I was trying to train a simple Keras network for classification when I ran into the following error. I know there is something wrong with my inputs, but I couldn't figure out how to fix it. Here is my code.
My dataset shapes (dtype, min, max, shape):
x_train : float32 0.0 1.0 (2444, 64, 64, 1)
y_train : float32 0.0 1.0 (2444, 2)
x_test : float32 0.0 1.0 (9123, 64, 64, 1)
y_test : float32 0.0 1.0 (9123, 2)
The model:
inputs = keras.Input(shape=(64,64,1), dtype='float32')
x = keras.layers.Conv2D(12,(9,9), padding="same",input_shape=(64,64,1), dtype='float32',activation='relu')(inputs)
x = keras.layers.Conv2D(18,(7,7), padding="same", activation='relu')(x)
x = keras.layers.MaxPool2D(pool_size=(2,2))(x)
x = keras.layers.Dropout(0.25)(x)
x = keras.layers.Dense(50, activation='relu')(x)
x = keras.layers.Dropout(0.4)(x)
outputs = keras.layers.Dense(2, activation='softmax')(x)
model = keras.Model(inputs, outputs)
Model summary:
Model: "model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, 64, 64, 1)] 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 64, 64, 12) 984
_________________________________________________________________
conv2d_3 (Conv2D) (None, 64, 64, 18) 10602
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 32, 32, 18) 0
_________________________________________________________________
dropout_2 (Dropout) (None, 32, 32, 18) 0
_________________________________________________________________
dense_2 (Dense) (None, 32, 32, 50) 950
_________________________________________________________________
dropout_3 (Dropout) (None, 32, 32, 50) 0
_________________________________________________________________
dense_3 (Dense) (None, 32, 32, 2) 102
=================================================================
Total params: 12,638
Trainable params: 12,638
Non-trainable params: 0
________________________
Compile and fit calls (the error occurs when I fit the model):
model.compile(
    loss=keras.losses.SparseCategoricalCrossentropy(),
    optimizer=keras.optimizers.Adam(0.01),
    metrics=["acc"],
)
model.fit(x_train, y_train, batch_size=32, epochs=20, validation_split=0.3,
          callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)])
and finally the error:
ValueError Traceback (most recent call last)
<ipython-input-31-e4cade46a08c> in <module>()
1 model.fit(x_train, y_train, batch_size=32, epochs = 20, validation_split= 0.3,
----> 2 callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)])
9 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
992 except Exception as e: # pylint:disable=broad-except
993 if hasattr(e, "ag_error_metadata"):
--> 994 raise e.ag_error_metadata.to_exception(e)
995 else:
996 raise
ValueError: in user code:
/usr/local/lib/python3.7/dist-packages/keras/engine/training.py:853 train_function *
return step_function(self, iterator)
/usr/local/lib/python3.7/dist-packages/keras/engine/training.py:842 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py:1286 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py:2849 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py:3632 _call_for_each_replica
return fn(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/keras/engine/training.py:835 run_step **
outputs = model.train_step(data)
/usr/local/lib/python3.7/dist-packages/keras/engine/training.py:792 train_step
self.compiled_metrics.update_state(y, y_pred, sample_weight)
/usr/local/lib/python3.7/dist-packages/keras/engine/compile_utils.py:457 update_state
metric_obj.update_state(y_t, y_p, sample_weight=mask)
/usr/local/lib/python3.7/dist-packages/keras/utils/metrics_utils.py:73 decorated
update_op = update_state_fn(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/keras/metrics.py:177 update_state_fn
return ag_update_state(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/keras/metrics.py:681 update_state **
matches = ag_fn(y_true, y_pred, **self._fn_kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/dispatch.py:206 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/keras/metrics.py:3537 sparse_categorical_accuracy
return tf.cast(tf.equal(y_true, y_pred), backend.floatx())
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/dispatch.py:206 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/math_ops.py:1864 equal
return gen_math_ops.equal(x, y, name=name)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/gen_math_ops.py:3219 equal
name=name)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/op_def_library.py:750 _apply_op_helper
attrs=attr_protos, op_def=op_def)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py:601 _create_op_internal
compute_device)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py:3569 _create_op_internal
op_def=op_def)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py:2042 __init__
control_input_ops, op_def)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py:1883 _create_c_op
raise ValueError(str(e))
ValueError: Dimensions must be equal, but are 2 and 32 for '{{node Equal}} = Equal[T=DT_FLOAT, incompatible_shape_error=true](IteratorGetNext:1, Cast_1)' with input shapes: [?,2], [?,32,32].
As you can see in the model summary, the output shape of the model is (None, 32, 32, 2), while based on the target values it should be (None, 2). Try adding a Flatten layer before the Dense layers:
x = keras.layers.Dropout(0.25)(x)
x = keras.layers.Flatten()(x) # Add this
x = keras.layers.Dense(50, activation='relu')(x)
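Worth noting as well (an observation beyond the Flatten fix): y_train is one-hot encoded with shape (2444, 2), while SparseCategoricalCrossentropy expects integer class indices, so the loss/metric pair may still complain after flattening. A hedged sketch of the matching compile call:

model.compile(
    loss=keras.losses.CategoricalCrossentropy(),   # matches one-hot targets
    optimizer=keras.optimizers.Adam(0.01),
    metrics=["acc"],
)
# Or keep SparseCategoricalCrossentropy and pass integer labels instead:
# model.fit(x_train, y_train.argmax(axis=-1), ...)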

"Shapes of all inputs must match" error loss function when trying to do custom training with tf.GradientTape()

I'm using Python 3.7.7 and TensorFlow 2.1.0 with the Functional API and eager execution.
I'm trying to do custom training with an encoder extracted from a pretrained U-Net network:
I get the U-Net model without compiling it.
I have loaded the weights into the model.
I have extracted the encoder and decoder from that model.
Then I want to use the encoder with this summary:
Model: "encoder"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 200, 200, 1)] 0
_________________________________________________________________
conv1_1 (Conv2D) (None, 200, 200, 64) 1664
_________________________________________________________________
conv1_2 (Conv2D) (None, 200, 200, 64) 102464
_________________________________________________________________
pool1 (MaxPooling2D) (None, 100, 100, 64) 0
_________________________________________________________________
conv2_1 (Conv2D) (None, 100, 100, 96) 55392
_________________________________________________________________
conv2_2 (Conv2D) (None, 100, 100, 96) 83040
_________________________________________________________________
pool2 (MaxPooling2D) (None, 50, 50, 96) 0
_________________________________________________________________
conv3_1 (Conv2D) (None, 50, 50, 128) 110720
_________________________________________________________________
conv3_2 (Conv2D) (None, 50, 50, 128) 147584
_________________________________________________________________
pool3 (MaxPooling2D) (None, 25, 25, 128) 0
_________________________________________________________________
conv4_1 (Conv2D) (None, 25, 25, 256) 295168
_________________________________________________________________
conv4_2 (Conv2D) (None, 25, 25, 256) 1048832
_________________________________________________________________
pool4 (MaxPooling2D) (None, 12, 12, 256) 0
_________________________________________________________________
conv5_1 (Conv2D) (None, 12, 12, 512) 1180160
_________________________________________________________________
conv5_2 (Conv2D) (None, 12, 12, 512) 2359808
=================================================================
Total params: 5,384,832
Trainable params: 5,384,832
Non-trainable params: 0
_________________________________________________________________
I use this function to do the custom training:
def train_encoder_unet_custom(model, dataset):
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
    for episode in range(num_episodes):
        selected = np.random.permutation(no_of_samples)[:num_shot + num_query]
        # Create our support set.
        support_set = np.array(dataset[selected[:num_shot]])
        X_train = support_set[:, 0, :]
        y_train = support_set[:, 1, :]
        loss_value, grads = grad(model, X_train, y_train)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
The grad function is:
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

def loss(model, x, y, training):
    # training=training is needed only if there are layers with different
    # behavior during training versus inference (e.g. Dropout).
    y_ = model(x, training=training)
    return loss_object(y_true=y, y_pred=y_)

def grad(model, inputs, targets):
    with tf.GradientTape() as tape:
        loss_value = loss(model, inputs, targets, training=False)
    return loss_value, tape.gradient(loss_value, model.trainable_variables)
But when I try to run it I get the error:
InvalidArgumentError: Shapes of all inputs must match: values[0].shape = [5,12,12,512] != values[1].shape = [5,25,25,256] [Op:Pack] name: packed
In the loss function I have checked the values of the y_ variable. y_ is a list of 6 elements with these shapes:
(5, 12, 12, 512)
(5, 25, 25, 256)
(5, 50, 50, 128)
(5, 100, 100, 96)
(5, 200, 200, 64)
(5, 200, 200, 1)
Any idea about what is happening?
If you need more details, please ask me.
This is the full call stack:
<ipython-input-133-22827956a9f6> in train_encoder_unet_custom(model, dataset, feat_type, show)
22 y_valid = query_set[:,1,:]
23
---> 24 loss_value, grads = grad(model, X_train, y_train)
25
26 optimizer.apply_gradients(zip(grads, model.trainable_variables))
<ipython-input-143-58ff4de686d6> in grad(model, inputs, targets)
10 def grad(model, inputs, targets):
11 with tf.GradientTape() as tape:
---> 12 loss_value = loss(model, inputs, targets, training=False)
13 return loss_value, tape.gradient(loss_value, model.trainable_variables)
<ipython-input-143-58ff4de686d6> in loss(model, x, y, training)
6 y_ = model(x, training=training)
7
----> 8 return loss_object(y_true=y, y_pred=y_)
9
10 def grad(model, inputs, targets):
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/losses.py in __call__(self, y_true, y_pred, sample_weight)
147 with K.name_scope(self._name_scope), graph_ctx:
148 ag_call = autograph.tf_convert(self.call, ag_ctx.control_status_ctx())
--> 149 losses = ag_call(y_true, y_pred)
150 return losses_utils.compute_weighted_loss(
151 losses, sample_weight, reduction=self._get_reduction())
/usr/local/lib/python3.6/dist-packages/tensorflow/python/autograph/impl/api.py in wrapper(*args, **kwargs)
253 try:
254 with conversion_ctx:
--> 255 return converted_call(f, args, kwargs, options=options)
256 except Exception as e: # pylint:disable=broad-except
257 if hasattr(e, 'ag_error_metadata'):
/usr/local/lib/python3.6/dist-packages/tensorflow/python/autograph/impl/api.py in converted_call(f, args, kwargs, caller_fn_scope, options)
455 if conversion.is_in_whitelist_cache(f, options):
456 logging.log(2, 'Whitelisted %s: from cache', f)
--> 457 return _call_unconverted(f, args, kwargs, options, False)
458
459 if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
/usr/local/lib/python3.6/dist-packages/tensorflow/python/autograph/impl/api.py in _call_unconverted(f, args, kwargs, options, update_cache)
337
338 if kwargs is not None:
--> 339 return f(*args, **kwargs)
340 return f(*args)
341
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/losses.py in call(self, y_true, y_pred)
251 y_pred, y_true)
252 ag_fn = autograph.tf_convert(self.fn, ag_ctx.control_status_ctx())
--> 253 return ag_fn(y_true, y_pred, **self._fn_kwargs)
254
255 def get_config(self):
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
199 """Call target, and fall back on dispatchers if there is a TypeError."""
200 try:
--> 201 return target(*args, **kwargs)
202 except (TypeError, ValueError):
203 # Note: convert_to_eager_tensor currently raises a ValueError, not a
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/losses.py in sparse_categorical_crossentropy(y_true, y_pred, from_logits, axis)
1562 Sparse categorical crossentropy loss value.
1563 """
-> 1564 y_pred = ops.convert_to_tensor_v2(y_pred)
1565 y_true = math_ops.cast(y_true, y_pred.dtype)
1566 return K.sparse_categorical_crossentropy(
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in convert_to_tensor_v2(value, dtype, dtype_hint, name)
1380 name=name,
1381 preferred_dtype=dtype_hint,
-> 1382 as_ref=False)
1383
1384
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, dtype_hint, ctx, accepted_result_types)
1497
1498 if ret is None:
-> 1499 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
1500
1501 if ret is NotImplemented:
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/array_ops.py in _autopacking_conversion_function(v, dtype, name, as_ref)
1500 elif dtype != inferred_dtype:
1501 v = nest.map_structure(_cast_nested_seqs_to_dtype(dtype), v)
-> 1502 return _autopacking_helper(v, dtype, name or "packed")
1503
1504
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/array_ops.py in _autopacking_helper(list_or_tuple, dtype, name)
1406 # checking.
1407 if all(isinstance(elem, core.Tensor) for elem in list_or_tuple):
-> 1408 return gen_array_ops.pack(list_or_tuple, name=name)
1409 must_pack = False
1410 converted_elems = []
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_array_ops.py in pack(values, axis, name)
6457 return _result
6458 except _core._NotOkStatusException as e:
-> 6459 _ops.raise_from_not_ok_status(e, name)
6460 except _core._FallbackException:
6461 pass
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in raise_from_not_ok_status(e, name)
6841 message = e.message + (" name: " + name if name is not None else "")
6842 # pylint: disable=protected-access
-> 6843 six.raise_from(core._status_to_exception(e.code, message), None)
6844 # pylint: enable=protected-access
6845
/usr/local/lib/python3.6/dist-packages/six.py in raise_from(value, from_value)
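A hedged reading of the shapes listed above: the extracted encoder apparently exposes all of its block outputs (six tensors), so model(x) returns a list, and the loss call then tries to pack that list into a single tensor (the Op:Pack in the error), which fails because the shapes differ. If only the deepest feature map is wanted, one sketch is to rebuild the sub-model with a single output — here unet stands for the pretrained model variable (hypothetical name), and the layer name comes from the summary above:

encoder = tf.keras.Model(inputs=unet.input,
                         outputs=unet.get_layer('conv5_2').output)
y_ = encoder(x, training=False)   # now a single (batch, 12, 12, 512) tensor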

Keras ValueError: Dimensions must be equal issue

Even after applying the suggestions in the answer and comments, it looks like the dimension mismatch issue persists. This is the exact code and data file to replicate it: https://drive.google.com/drive/folders/1q67s0VhB-O7J8OtIhU2jmj7Kc4LxL3sf?usp=sharing
How can this be corrected? The latest code, model summary, functions used, and the error I get are below.
type_ae=='dcor'
# Wrappers for Keras
def custom_loss1(y_true, y_pred):
    dcor = -1 * distance_correlation(y_true, encoded_layer)
    return dcor

def custom_loss2(y_true, y_pred):
    recon_loss = losses.categorical_crossentropy(y_true, y_pred)
    return recon_loss

input_layer = Input(shape=(64, 64, 1))
encoded_layer = Conv2D(filters=128, kernel_size=(5, 5), padding='same', activation='relu',
                       input_shape=(64, 64, 1))(input_layer)
encoded_layer = MaxPool2D(pool_size=(2, 2))(encoded_layer)
encoded_layer = Dropout(0.25)(encoded_layer)
encoded_layer = Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')(encoded_layer)
encoded_layer = MaxPool2D(pool_size=(2, 2))(encoded_layer)
encoded_layer = Dropout(0.25)(encoded_layer)
encoded_layer = Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')(encoded_layer)
encoded_layer = MaxPool2D(pool_size=(2, 2))(encoded_layer)
encoded_layer = Dropout(0.25)(encoded_layer)
encoded_layer = Conv2D(filters=1, kernel_size=(3, 3), padding='same', activation='relu',
                       input_shape=(64, 64, 1), strides=1)(encoded_layer)
encoded_layer = ZeroPadding2D(padding=(28, 28), data_format=None)(encoded_layer)
decoded_imag = Conv2D(8, (2, 2), activation='relu', padding='same')(encoded_layer)
decoded_imag = UpSampling2D((2, 2))(decoded_imag)
decoded_imag = Conv2D(8, (3, 3), activation='relu', padding='same')(decoded_imag)
decoded_imag = UpSampling2D((2, 2))(decoded_imag)
decoded_imag = Conv2D(16, (3, 3), activation='relu', padding='same')(decoded_imag)
decoded_imag = UpSampling2D((2, 2))(decoded_imag)
decoded_imag = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(decoded_imag)
flat_layer = Flatten()(decoded_imag)
dense_layer = Dense(256, activation="relu")(flat_layer)
dense_layer = Dense(64, activation="relu")(dense_layer)
dense_layer = Dense(32, activation="relu")(dense_layer)
output_layer = Dense(9, activation="softmax")(dense_layer)
autoencoder = Model(input_layer, [encoded_layer, output_layer])
autoencoder.summary()
autoencoder.compile(optimizer='adadelta', loss=[custom_loss1, custom_loss2])
autoencoder.fit(x_train, [x_train, y_train], batch_size=32, epochs=3, shuffle=True,
                validation_data=(x_val, [x_val, y_val]))
The data is of dimensions:
x_train.shape: (4000, 64, 64, 1)
x_val.shape: (1000, 64, 64, 1)
y_train.shape: (4000, 9)
y_val.shape: (1000, 9)
The losses look like:
def custom_loss1(y_true, y_pred):
    dcor = -1 * distance_correlation(y_true, encoded_layer)
    return dcor

def custom_loss2(y_true, y_pred):
    recon_loss = losses.categorical_crossentropy(y_true, y_pred)
    return recon_loss
The correlation function is based on tensors as follows:
def distance_correlation(y_true, y_pred):
    pred_r = tf.reduce_sum(y_pred * y_pred, 1)
    pred_r = tf.reshape(pred_r, [-1, 1])
    pred_d = pred_r - 2 * tf.matmul(y_pred, tf.transpose(y_pred)) + tf.transpose(pred_r)
    true_r = tf.reduce_sum(y_true * y_true, 1)
    true_r = tf.reshape(true_r, [-1, 1])
    true_d = true_r - 2 * tf.matmul(y_true, tf.transpose(y_true)) + tf.transpose(true_r)
    concord = 1 - tf.matmul(y_true, tf.transpose(y_true))
    #print(pred_d)
    #print(tf.reshape(tf.reduce_mean(pred_d,1),[-1,1]))
    #print(tf.reshape(tf.reduce_mean(pred_d,0),[1,-1]))
    #print(tf.reduce_mean(pred_d))
    tf.check_numerics(pred_d, 'pred_d has NaN')
    tf.check_numerics(true_d, 'true_d has NaN')
    A = pred_d - tf.reshape(tf.reduce_mean(pred_d, 1), [-1, 1]) - tf.reshape(tf.reduce_mean(pred_d, 0), [1, -1]) + tf.reduce_mean(pred_d)
    B = true_d - tf.reshape(tf.reduce_mean(true_d, 1), [-1, 1]) - tf.reshape(tf.reduce_mean(true_d, 0), [1, -1]) + tf.reduce_mean(true_d)
    #dcor = -tf.reduce_sum(concord*pred_d)/tf.reduce_sum((1-concord)*pred_d)
    dcor = -tf.log(tf.reduce_mean(A * B)) + tf.log(tf.sqrt(tf.reduce_mean(A * A) * tf.reduce_mean(B * B)))
    #print(dcor.shape)
    #tf.Print(dcor,[dcor])
    #dcor = tf.tile([dcor],batch_size)
    return dcor
The model summary looks like:
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_5 (InputLayer) (None, 64, 64, 1) 0
_________________________________________________________________
conv2d_30 (Conv2D) (None, 64, 64, 128) 3328
_________________________________________________________________
max_pooling2d_13 (MaxPooling (None, 32, 32, 128) 0
_________________________________________________________________
dropout_13 (Dropout) (None, 32, 32, 128) 0
_________________________________________________________________
conv2d_31 (Conv2D) (None, 32, 32, 64) 73792
_________________________________________________________________
max_pooling2d_14 (MaxPooling (None, 16, 16, 64) 0
_________________________________________________________________
dropout_14 (Dropout) (None, 16, 16, 64) 0
_________________________________________________________________
conv2d_32 (Conv2D) (None, 16, 16, 64) 36928
_________________________________________________________________
max_pooling2d_15 (MaxPooling (None, 8, 8, 64) 0
_________________________________________________________________
dropout_15 (Dropout) (None, 8, 8, 64) 0
_________________________________________________________________
conv2d_33 (Conv2D) (None, 8, 8, 1) 577
_________________________________________________________________
zero_padding2d_5 (ZeroPaddin (None, 64, 64, 1) 0
_________________________________________________________________
conv2d_34 (Conv2D) (None, 64, 64, 8) 40
_________________________________________________________________
up_sampling2d_10 (UpSampling (None, 128, 128, 8) 0
_________________________________________________________________
conv2d_35 (Conv2D) (None, 128, 128, 8) 584
_________________________________________________________________
up_sampling2d_11 (UpSampling (None, 256, 256, 8) 0
_________________________________________________________________
conv2d_36 (Conv2D) (None, 256, 256, 16) 1168
_________________________________________________________________
up_sampling2d_12 (UpSampling (None, 512, 512, 16) 0
_________________________________________________________________
conv2d_37 (Conv2D) (None, 512, 512, 1) 145
_________________________________________________________________
flatten_4 (Flatten) (None, 262144) 0
_________________________________________________________________
dense_13 (Dense) (None, 256) 67109120
_________________________________________________________________
dense_14 (Dense) (None, 64) 16448
_________________________________________________________________
dense_15 (Dense) (None, 32) 2080
_________________________________________________________________
dense_16 (Dense) (None, 9) 297
=================================================================
Total params: 67,244,507
Trainable params: 67,244,507
Non-trainable params: 0
_________________________________________________________________
This is the error:
InvalidArgumentError Traceback (most recent call last)
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in _create_c_op(graph, node_def, inputs, control_inputs)
1658 try:
-> 1659 c_op = c_api.TF_FinishOperation(op_desc)
1660 except errors.InvalidArgumentError as e:
InvalidArgumentError: Dimensions must be equal, but are 1 and 64 for 'loss_1/zero_padding2d_5_loss/MatMul' (op: 'BatchMatMul') with input shapes: [?,64,64,1], [1,64,64,?].
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-11-0e924885fc6b> in <module>
40 autoencoder = Model(input_layer, [encoded_layer,output_layer])
41 autoencoder.summary()
---> 42 autoencoder.compile(optimizer='adadelta', loss=[custom_loss1,custom_loss2])
43 autoencoder.fit(x_train,[x_train, y_train],batch_size=32,epochs=3,shuffle=True,
44 validation_data=(x_val, [x_val,y_val]))
~/anaconda3/lib/python3.6/site-packages/keras/engine/training.py in compile(self, optimizer, loss, metrics, loss_weights, sample_weight_mode, weighted_metrics, target_tensors, **kwargs)
340 with K.name_scope(self.output_names[i] + '_loss'):
341 output_loss = weighted_loss(y_true, y_pred,
--> 342 sample_weight, mask)
343 if len(self.outputs) > 1:
344 self.metrics_tensors.append(output_loss)
~/anaconda3/lib/python3.6/site-packages/keras/engine/training_utils.py in weighted(y_true, y_pred, weights, mask)
402 """
403 # score_array has ndim >= 2
--> 404 score_array = fn(y_true, y_pred)
405 if mask is not None:
406 # Cast the mask to floatX to avoid float64 upcasting in Theano
<ipython-input-11-0e924885fc6b> in custom_loss1(y_true, y_pred)
2 #Wrappers for keras
3 def custom_loss1(y_true,y_pred):
----> 4 dcor = -1*distance_correlation(y_true,encoded_layer)
5 return dcor
6
<ipython-input-6-f282528532cc> in distance_correlation(y_true, y_pred)
2 pred_r = tf.reduce_sum(y_pred*y_pred,1)
3 pred_r = tf.reshape(pred_r,[-1,1])
----> 4 pred_d = pred_r - 2*tf.matmul(y_pred,tf.transpose(y_pred))+tf.transpose(pred_r)
5 true_r = tf.reduce_sum(y_true*y_true,1)
6 true_r = tf.reshape(true_r,[-1,1])
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py in matmul(a, b, transpose_a, transpose_b, adjoint_a, adjoint_b, a_is_sparse, b_is_sparse, name)
2415 adjoint_b = True
2416 return gen_math_ops.batch_mat_mul(
-> 2417 a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name)
2418
2419 # Neither matmul nor sparse_matmul support adjoint, so we conjugate
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/ops/gen_math_ops.py in batch_mat_mul(x, y, adj_x, adj_y, name)
1421 adj_y = _execute.make_bool(adj_y, "adj_y")
1422 _, _, _op = _op_def_lib._apply_op_helper(
-> 1423 "BatchMatMul", x=x, y=y, adj_x=adj_x, adj_y=adj_y, name=name)
1424 _result = _op.outputs[:]
1425 _inputs_flat = _op.inputs
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py in _apply_op_helper(self, op_type_name, name, **keywords)
786 op = g.create_op(op_type_name, inputs, output_types, name=scope,
787 input_types=input_types, attrs=attr_protos,
--> 788 op_def=op_def)
789 return output_structure, op_def.is_stateful, op
790
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py in new_func(*args, **kwargs)
505 'in a future version' if date is None else ('after %s' % date),
506 instructions)
--> 507 return func(*args, **kwargs)
508
509 doc = _add_deprecated_arg_notice_to_docstring(
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in create_op(***failed resolving arguments***)
3298 input_types=input_types,
3299 original_op=self._default_original_op,
-> 3300 op_def=op_def)
3301 self._create_op_helper(ret, compute_device=compute_device)
3302 return ret
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in __init__(self, node_def, g, inputs, output_types, control_inputs, input_types, original_op, op_def)
1821 op_def, inputs, node_def.attr)
1822 self._c_op = _create_c_op(self._graph, node_def, grouped_inputs,
-> 1823 control_input_ops)
1824
1825 # Initialize self._outputs.
~/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in _create_c_op(graph, node_def, inputs, control_inputs)
1660 except errors.InvalidArgumentError as e:
1661 # Convert to ValueError for backwards compatibility.
-> 1662 raise ValueError(str(e))
1663
1664 return c_op
ValueError: Dimensions must be equal, but are 1 and 64 for 'loss_1/zero_padding2d_5_loss/MatMul' (op: 'BatchMatMul') with input shapes: [?,64,64,1], [1,64,64,?].
You have two loss functions, so you have to pass two sets of ground truths (y) to evaluate the losses against the predictions.
Your first prediction is the output of the layer encoded_layer, which has a size of (None, 8, 8, 128) as observed from the model.summary for conv2d_59 (Conv2D).
But what you are passing to fit for y is [x_train, y_train]. loss_1 expects input of size (None, 8, 8, 128), but you are passing x_train, which has a different size.
If you want loss_1 to find the correlation of the input image with the encoded image, then stack the convolutions such that their output has the same shape as your x_train images. Use model.summary to see the output shapes of the convolutions.
Now use the padding, strides, and kernel size of the convolution layers to get the desired output size. Use the formulas W2 = (W1 − F + 2P)/S + 1 and H2 = (H1 − F + 2P)/S + 1 to find the output width and height of a convolution. Check this reference.
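For example, a 64x64 input through a 3x3 kernel with padding P=1 and stride S=1 gives W2 = (64 − 3 + 2·1)/1 + 1 = 64 (size preserved, i.e. 'same' padding), while a 2x2 max pool with stride 2 gives (64 − 2 + 0)/2 + 1 = 32.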
There are two major issues with your approach.
Your loss function is checking the correlation between the encoded image and the actual image. The correct way to do it is to decode the image back from the encoded image and then check the correlation between the decoded image and the actual image (along the lines of an autoencoder).
Your loss 1 is using numpy arrays. For a loss function to be part of a computation graph, it should use tensor operations, not numpy operations.
Below is the working code. However, for loss 1 I am using the L2 norm of the two images. If you want to use correlation, then you have to somehow convert it into tensor operations (which is a different issue from this question).
def image_loss(y_true, y_pred):
    return tf.norm(y_true - y_pred)

def label_loss(y_true, y_pred):
    return categorical_crossentropy(y_true, y_pred)
input_img = Input(shape=(64, 64, 1))
enocded_imag = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
enocded_imag = MaxPooling2D((2, 2), padding='same')(enocded_imag)
enocded_imag = Conv2D(8, (3, 3), activation='relu', padding='same')(enocded_imag)
enocded_imag = MaxPooling2D((2, 2), padding='same')(enocded_imag)
enocded_imag = Conv2D(8, (3, 3), activation='relu', padding='same')(enocded_imag)
enocded_imag = MaxPooling2D((2, 2), padding='same')(enocded_imag)
decoded_imag = Conv2D(8, (2, 2), activation='relu', padding='same')(enocded_imag)
decoded_imag = UpSampling2D((2, 2))(decoded_imag)
decoded_imag = Conv2D(8, (3, 3), activation='relu', padding='same')(decoded_imag)
decoded_imag = UpSampling2D((2, 2))(decoded_imag)
decoded_imag = Conv2D(16, (3, 3), activation='relu', padding='same')(decoded_imag)
decoded_imag = UpSampling2D((2, 2))(decoded_imag)
decoded_imag = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(decoded_imag)
flat_layer = Flatten()(enocded_imag)
dense_layer = Dense(32,activation = "relu")(flat_layer)
output_layer = Dense(9, activation = "softmax")(dense_layer)
model = Model(input_img, [decoded_imag, output_layer])
model.compile(optimizer='adadelta', loss=[image_loss, label_loss])
images = np.random.randn(10,64,64,1)
model.fit(images, [images, np.random.randn(10,9)])
The loss function distance_correlation you have coded assumes that each row in y_true and y_pred represents an image. This works with Dense layers because a Dense layer outputs a batch of (row) vectors, where each vector represents an individual image. However, 2D convolutions operate on a batch of 2D tensors with multiple channels (you have only 1 channel). So to use the distance_correlation loss you have to reshape your tensors such that each row corresponds to an image. Add the two lines below to reshape your tensors:
def distance_correlation(y_true, y_pred):
    y_true = tf.reshape(tf.squeeze(y_true), [-1, 64*64])
    y_pred = tf.reshape(tf.squeeze(y_pred), [-1, 64*64])
    .... REST OF THE CODE ....
The intention is to use the original image in custom_loss1 and the scalar label values in custom_loss2. I think the working code by @mujjiga in his answer is almost correct. I suggest one slight modification.
In model.compile(), pass the input tensor to the loss which needs it; keep the other one the same. model.fit() then just passes the labels.
model.compile(optimizer='adadelta', loss=[custom_loss1(input_layer), custom_loss2])
model.fit(x_train, y_train)
Inside the custom loss functions:
def custom_loss1(input):
    def loss1(y_true, y_pred):
        return tf.norm(input - y_pred)  # use your custom loss 1
    return loss1

def custom_loss2(y_true, y_pred):
    return categorical_crossentropy(y_true, y_pred)  # use your custom loss 2
Try this with simple in-built Keras loss functions first. If that works well, look into your custom loss functions.

Shape problems in keras while trying to output categorical variable

I am using the following, fairly simple code to predict an output variable which may have 3 categories:
n_factors = 20
np.random.seed(42)

def embedding_input(name, n_in, n_out, reg):
    inp = Input(shape=(1,), dtype='int64', name=name)
    return inp, Embedding(n_in, n_out, input_length=1, W_regularizer=l2(reg))(inp)

user_in, u = embedding_input('user_in', n_users, n_factors, 1e-4)
artifact_in, a = embedding_input('artifact_in', n_artifacts, n_factors, 1e-4)

mt = Input(shape=(31,))
mr = Input(shape=(1,))
sub = Input(shape=(24,))

def onehot(featurename):
    onehot_encoder = OneHotEncoder(sparse=False)
    onehot_encoded = onehot_encoder.fit_transform(Modality_Durations[featurename].reshape(-1, 1))
    trn_onehot_encoded = onehot_encoded[msk]
    val_onehot_encoded = onehot_encoded[~msk]
    return trn_onehot_encoded, val_onehot_encoded

trn_onehot_encoded_mt, val_onehot_encoded_mt = onehot('modality_type')
trn_onehot_encoded_mr, val_onehot_encoded_mr = onehot('roleid')
trn_onehot_encoded_sub, val_onehot_encoded_sub = onehot('subject')
trn_onehot_encoded_quartile, val_onehot_encoded_quartile = onehot('quartile')

# Model
x = merge([u, a], mode='concat')
x = Flatten()(x)
x = merge([x, mt], mode='concat')
x = merge([x, mr], mode='concat')
x = merge([x, sub], mode='concat')
x = Dense(10, activation='relu')(x)
BatchNormalization()
x = Dense(3, activation='softmax')(x)
nn = Model([user_in, artifact_in, mt, mr, sub], x)
nn.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
nn.optimizer.lr = 0.001

nn.fit([trn.member_id, trn.artifact_id, trn_onehot_encoded_mt, trn_onehot_encoded_mr, trn_onehot_encoded_sub],
       trn_onehot_encoded_quartile,
       batch_size=256,
       epochs=2,
       validation_data=([val.member_id, val.artifact_id, val_onehot_encoded_mt, val_onehot_encoded_mr, val_onehot_encoded_sub],
                        val_onehot_encoded_quartile))
Here's the summary of the model:
____________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
====================================================================================================
user_in (InputLayer) (None, 1) 0
____________________________________________________________________________________________________
artifact_in (InputLayer) (None, 1) 0
____________________________________________________________________________________________________
embedding_9 (Embedding) (None, 1, 20) 5902380 user_in[0][0]
____________________________________________________________________________________________________
embedding_10 (Embedding) (None, 1, 20) 594200 artifact_in[0][0]
____________________________________________________________________________________________________
merge_25 (Merge) (None, 1, 40) 0 embedding_9[0][0]
embedding_10[0][0]
____________________________________________________________________________________________________
flatten_7 (Flatten) (None, 40) 0 merge_25[0][0]
____________________________________________________________________________________________________
input_13 (InputLayer) (None, 31) 0
____________________________________________________________________________________________________
merge_26 (Merge) (None, 71) 0 flatten_7[0][0]
input_13[0][0]
____________________________________________________________________________________________________
input_14 (InputLayer) (None, 1) 0
____________________________________________________________________________________________________
merge_27 (Merge) (None, 72) 0 merge_26[0][0]
input_14[0][0]
____________________________________________________________________________________________________
input_15 (InputLayer) (None, 24) 0
____________________________________________________________________________________________________
merge_28 (Merge) (None, 96) 0 merge_27[0][0]
input_15[0][0]
____________________________________________________________________________________________________
dense_13 (Dense) (None, 10) 970 merge_28[0][0]
____________________________________________________________________________________________________
dense_14 (Dense) (None, 3) 33 dense_13[0][0]
====================================================================================================
Total params: 6,497,583
Trainable params: 6,497,583
Non-trainable params: 0
_____________________________
But on the fit statement, I get the following error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-71-7de0782d7d5d> in <module>()
5 batch_size=256,
6 epochs=2,
----> 7 validation_data=([val.member_id, val.artifact_id, val_onehot_encoded_mt, val_onehot_encoded_mr, val_onehot_encoded_sub], val_onehot_encoded_quartile)
8 )
9 # nn.fit([trn.member_id, trn.artifact_id, trn_onehot_encoded_mt, trn_onehot_encoded_mr, trn_onehot_encoded_sub], trn.duration_new,
/home/prateek_dl/anaconda3/lib/python3.5/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
1520 class_weight=class_weight,
1521 check_batch_axis=False,
-> 1522 batch_size=batch_size)
1523 # Prepare validation data.
1524 do_validation = False
/home/prateek_dl/anaconda3/lib/python3.5/site-packages/keras/engine/training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, check_batch_axis, batch_size)
1380 output_shapes,
1381 check_batch_axis=False,
-> 1382 exception_prefix='target')
1383 sample_weights = _standardize_sample_weights(sample_weight,
1384 self._feed_output_names)
/home/prateek_dl/anaconda3/lib/python3.5/site-packages/keras/engine/training.py in _standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix)
142 ' to have shape ' + str(shapes[i]) +
143 ' but got array with shape ' +
--> 144 str(array.shape))
145 return arrays
146
ValueError: Error when checking target: expected dense_14 to have shape (None, 1) but got array with shape (1956554, 3)
How do I resolve this error? Why is the final layer expecting (None,1) when according to the summary() it has to output (None,3)?
Any help would be greatly appreciated.
I fixed the error by using categorical_crossentropy instead of sparse_categorical_crossentropy.
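For reference, a sketch of the two equivalent fixes (the exact Keras loss identifiers are 'categorical_crossentropy' and 'sparse_categorical_crossentropy'):

# Option 1: keep the one-hot targets and switch the loss.
nn.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Option 2: keep sparse_categorical_crossentropy and pass integer class
# indices instead of the one-hot matrix (same variable names as above).
nn.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
quartile_idx = trn_onehot_encoded_quartile.argmax(axis=1)  # integer labels for fit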
