I am using the code below to train a 1D CNN. My x_train shape is (10027, 5, 14) and my y_train shape is (10027, 4), but I am getting a shape-compatibility error (shown after the code).
model = keras.models.Sequential([
keras.layers.Conv1D(filters=20, kernel_size=4, strides=2, padding="valid",
input_shape=(n_timesteps,n_features))
])
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
verbose, epochs, batch_size = 0, 10, 5
model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, verbose=verbose)
The error:
ValueError: in user code:
/usr/local/lib/python3.8/dist-packages/tensorflow/python/keras/engine/training.py:805 train_function *
return step_function(self, iterator)
/usr/local/lib/python3.8/dist-packages/tensorflow/python/keras/engine/training.py:795 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
/usr/local/lib/python3.8/dist-packages/tensorflow/python/distribute/distribute_lib.py:1259 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/usr/local/lib/python3.8/dist-packages/tensorflow/python/distribute/distribute_lib.py:2730 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/usr/local/lib/python3.8/dist-packages/tensorflow/python/distribute/distribute_lib.py:3417 _call_for_each_replica
return fn(*args, **kwargs)
/usr/local/lib/python3.8/dist-packages/tensorflow/python/keras/engine/training.py:788 run_step **
outputs = model.train_step(data)
/usr/local/lib/python3.8/dist-packages/tensorflow/python/keras/engine/training.py:755 train_step
loss = self.compiled_loss(
/usr/local/lib/python3.8/dist-packages/tensorflow/python/keras/engine/compile_utils.py:203 __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
/usr/local/lib/python3.8/dist-packages/tensorflow/python/keras/losses.py:152 __call__
losses = call_fn(y_true, y_pred)
/usr/local/lib/python3.8/dist-packages/tensorflow/python/keras/losses.py:256 call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
/usr/local/lib/python3.8/dist-packages/tensorflow/python/util/dispatch.py:201 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.8/dist-packages/tensorflow/python/keras/losses.py:1537 categorical_crossentropy
return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)
/usr/local/lib/python3.8/dist-packages/tensorflow/python/util/dispatch.py:201 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.8/dist-packages/tensorflow/python/keras/backend.py:4833 categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
/usr/local/lib/python3.8/dist-packages/tensorflow/python/framework/tensor_shape.py:1134 assert_is_compatible_with
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
ValueError: Shapes (None, 4) and (None, 1, 20) are incompatible
The last layer of your network is a Conv1D with filters=20, so the output of your network has shape (batch, 1, 20), while you pass a Y of shape (batch, 4).
The loss doesn't know what to compare, since the two arrays don't match at all.
To get something working, the last layer must be compatible with your Y:
model = keras.models.Sequential([
keras.layers.Conv1D(filters=20, kernel_size=4, strides=2, padding="valid",
input_shape=(n_timesteps,n_features)),
keras.layers.Flatten(),
keras.layers.Dense(4, activation = 'softmax')
])
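For completeness, here is a minimal end-to-end sketch of that fix, assuming X_train has shape (10027, 5, 14) and Y_train has shape (10027, 4) as described (the dummy arrays are only there to make the snippet self-contained):
import numpy as np
from tensorflow import keras

n_timesteps, n_features, n_classes = 5, 14, 4

model = keras.models.Sequential([
    keras.layers.Conv1D(filters=20, kernel_size=4, strides=2, padding="valid",
                        input_shape=(n_timesteps, n_features)),
    keras.layers.Flatten(),                               # collapse (steps, filters) into a vector
    keras.layers.Dense(n_classes, activation='softmax')   # 4 outputs to match Y_train's 4 columns
])

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

# Dummy data with the shapes from the question, just to show that fit accepts them.
X_train = np.random.rand(10027, n_timesteps, n_features)
Y_train = keras.utils.to_categorical(np.random.randint(n_classes, size=10027), n_classes)
model.fit(X_train, Y_train, epochs=10, batch_size=5, verbose=0)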
Related
I hit this error message on my model:
'ValueError: Input 0 of layer sequential_5 is incompatible with the layer: expected axis -1 of input shape to have value 20 but received input with shape (None, 20, 637)'
I'm not sure how to fix this. Code below:
print(str(audio_train.shape)+' '+str(y_train.shape)+' '+str(audio_valid.shape))
(700, 20, 637) (700, 2) (236, 20, 637)
model=Sequential()
###first layer
model.add(Dense(100,input_shape=(20,)))
model.add(Activation('relu'))
model.add(Dropout(0.3))
###second layer
model.add(Dense(200))
model.add(Activation('relu'))
model.add(Dropout(0.3))
###third layer
model.add(Dense(100))
model.add(Activation('relu'))
model.add(Dropout(0.3))
###final layer
model.add(Dense(2))
model.add(Activation('softmax'))
model.summary()
adam = tf.keras.optimizers.Adam(learning_rate=0.0001)
model.compile(optimizer = adam, loss = 'categorical_crossentropy', metrics = ['accuracy'])
#fitting the model
Au_model = model.fit(audio_train, y_train, batch_size = 32, epochs = 10, validation_data = (audio_valid, y_valid), verbose=1)
Error message below:
ValueError: in user code:
C:\Users\seren\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:855 train_function *
return step_function(self, iterator)
C:\Users\seren\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:845 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
C:\Users\seren\anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1285 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
C:\Users\seren\anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2833 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
C:\Users\seren\anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:3608 _call_for_each_replica
return fn(*args, **kwargs)
C:\Users\seren\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:838 run_step **
outputs = model.train_step(data)
C:\Users\seren\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:795 train_step
y_pred = self(x, training=True)
C:\Users\seren\anaconda3\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:1013 __call__
input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
C:\Users\seren\anaconda3\lib\site-packages\tensorflow\python\keras\engine\input_spec.py:251 assert_input_compatibility
raise ValueError(
ValueError: Input 0 of layer sequential_5 is incompatible with the layer: expected axis -1 of input shape to have value 20 but received input with shape (None, 20, 637)
Appreciate your advice/suggestion.
Change
model.add(Dense(100,input_shape=(20,)))
To
model.add(Dense(100,input_shape=(20, 637)))
You can also use input_shape=(audio_train.shape[1], audio_train.shape[2]) instead if you want to be a bit more programmatic.
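As a minimal sketch of that change applied to the model above (one assumption on my part: because y_train has shape (700, 2), a Flatten before the final Dense(2) will likely also be needed, otherwise the model's output would be (None, 20, 2) instead of (None, 2)):
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten

model = Sequential()
###first layer - now declares the full per-sample shape (20, 637) instead of (20,)
model.add(Dense(100, input_shape=(audio_train.shape[1], audio_train.shape[2])))
model.add(Activation('relu'))
model.add(Dropout(0.3))
###second layer
model.add(Dense(200))
model.add(Activation('relu'))
model.add(Dropout(0.3))
###third layer
model.add(Dense(100))
model.add(Activation('relu'))
model.add(Dropout(0.3))
###final layer - assumed Flatten so the output matches y_train's (None, 2)
model.add(Flatten())
model.add(Dense(2))
model.add(Activation('softmax'))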
I am working on an image segmentation (TensorFlow) problem. The dataset has 50 training images and 51 testing images. While training my model I receive the following error:
ValueError: Shapes (None, 256, 256, 2) and (None, 256, 256, 1) are incompatible
The training code is:
model = build_model(input_shape)
model.compile(
loss="binary_crossentropy",
optimizer=tf.keras.optimizers.Adam(lr),
metrics=[
tf.keras.metrics.MeanIoU(num_classes=2),
tf.keras.metrics.Recall(),
tf.keras.metrics.Precision()
]
)
MODEL:
def build_model(shape):
inputs = Input(shape)
x, skip_1 = encoder1(inputs)
x = ASPP(x, 64)
x = decoder1(x, skip_1)
outputs1 = output_block(x)
x = inputs * outputs1
x, skip_2 = encoder2(x)
x = ASPP(x, 64)
x = decoder2(x, skip_1, skip_2)
outputs2 = output_block(x)
outputs = Concatenate()([outputs1, outputs2])
model = Model(inputs, outputs)
return model
if __name__ == "__main__":
model = build_model((256, 256, 3))
model.summary()
Trace error:
ValueError: in user code:
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py:855 train_function *
return step_function(self, iterator)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py:845 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py:1285 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py:2833 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py:3608 _call_for_each_replica
return fn(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py:838 run_step **
outputs = model.train_step(data)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py:800 train_step
self.compiled_metrics.update_state(y, y_pred, sample_weight)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/compile_utils.py:460 update_state
metric_obj.update_state(y_t, y_p, sample_weight=mask)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/utils/metrics_utils.py:86 decorated
update_op = update_state_fn(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/metrics.py:177 update_state_fn
return ag_update_state(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/metrics.py:1462 update_state **
sample_weight=sample_weight)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/utils/metrics_utils.py:366 update_confusion_matrix_variables
y_pred.shape.assert_is_compatible_with(y_true.shape)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/tensor_shape.py:1161 assert_is_compatible_with
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
ValueError: Shapes (None, 256, 256, 2) and (None, 256, 256, 1) are incompatible
I don't know what this error is referring to, or at what point it occurs.
I am developing an image classification model. My image input shape is (128, 128, 3), but when I run model.fit it gives an error.
My input data is
real_data = [f for f in os.listdir(data_dir+'/test') if f.endswith('.png')]
fake_data = [f for f in os.listdir(data_dir+'/test_f') if f.endswith('.png')]
print(real_data)
X = []
Y = []
for img in real_data:
X.append(img_to_array(load_img(data_dir+'/test/'+img)) / 255.0)
Y.append(1)
for img in fake_data:
X.append(img_to_array(load_img(data_dir+'/test_f/'+img)) / 255.0)
Y.append(0)
Y_val_org = Y
X = np.array(X)
Y = to_categorical(Y, 2)
print(X)
print(Y)
My model is
model = Sequential()
model.add(Conv2D(16, kernel_size=(3,3), activation='relu',input_shape=(128,128,3)))
model.add(Conv2D(16, kernel_size=(3,3), activation='relu'))
model.add(Dense(units=3, activation='softmax'))
model.compile(loss='binary_crossentropy',
optimizer=optimizers.Adam(lr=1e-5, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False),
metrics=['accuracy'])
#model.build(input_shape=(128,128,3))
model.summary()
And model summary is
Model: "sequential_80"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_892 (Conv2D) (None, 126, 126, 16) 448
_________________________________________________________________
conv2d_893 (Conv2D) (None, 124, 124, 16) 2320
_________________________________________________________________
dense_48 (Dense) (None, 124, 124, 3) 51
=================================================================
Total params: 2,819
Trainable params: 2,819
Non-trainable params: 0
_________________________________________________________________
When I am fitting the model through model.fit()
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=2, mode='auto')
EPOCHS = 20
BATCH_SIZE = 100
history = model.fit(X_train, Y_train, batch_size = BATCH_SIZE, epochs = EPOCHS, validation_data = (X_val, Y_val))
This is the error I am getting
Epoch 1/20
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-168-b3e2ed37ed88> in <module>()
2 EPOCHS = 20
3 BATCH_SIZE = 100
----> 4 history = model.fit(X_train, Y_train, batch_size = BATCH_SIZE, epochs = EPOCHS, validation_data = (X_val, Y_val))
9 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
975 except Exception as e: # pylint:disable=broad-except
976 if hasattr(e, "ag_error_metadata"):
--> 977 raise e.ag_error_metadata.to_exception(e)
978 else:
979 raise
ValueError: in user code:
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py:805 train_function *
return step_function(self, iterator)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py:795 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py:1259 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py:2730 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py:3417 _call_for_each_replica
return fn(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py:788 run_step **
outputs = model.train_step(data)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py:756 train_step
y, y_pred, sample_weight, regularization_losses=self.losses)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/compile_utils.py:203 __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/losses.py:152 __call__
losses = call_fn(y_true, y_pred)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/losses.py:256 call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/dispatch.py:201 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/losses.py:1608 binary_crossentropy
K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/dispatch.py:201 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/backend.py:4979 binary_crossentropy
return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/dispatch.py:201 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/ops/nn_impl.py:174 sigmoid_cross_entropy_with_logits
(logits.get_shape(), labels.get_shape()))
ValueError: logits and labels must have the same shape ((None, 124, 124, 3) vs (None, 2))
Change your model to:
model.add(Conv2D(16, kernel_size=(3,3), activation='relu'))
model.add(Flatten()) # added flatten before dense
model.add(Dense(units=2, activation='softmax'))
The last layer should have 2 units because you have 2 classes. Also change your loss to:
loss='categorical_crossentropy'
because you applied to_categorical().
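Putting both changes together, a minimal sketch of the corrected model (the first Conv2D and the compile settings are carried over from the question; learning_rate is used in place of the deprecated lr argument):
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Flatten, Dense
from tensorflow.keras import optimizers

model = Sequential()
model.add(Conv2D(16, kernel_size=(3,3), activation='relu', input_shape=(128,128,3)))
model.add(Conv2D(16, kernel_size=(3,3), activation='relu'))
model.add(Flatten())                              # flatten the feature maps to a vector
model.add(Dense(units=2, activation='softmax'))   # 2 units, one per class

# categorical_crossentropy matches the one-hot labels produced by to_categorical(Y, 2)
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.Adam(learning_rate=1e-5),
              metrics=['accuracy'])
model.summary()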
I am trying to create an image processing CNN. I am using VGG16 to speed up some of the learning process. The creation of my CNN below works to the point of training and saving the model & weights. The issue occurs when I try to run a predict function after loading in the model.
image_gen = ImageDataGenerator()
train = image_gen.flow_from_directory('./data/train', class_mode='categorical', shuffle=False, batch_size=10, target_size=(151, 136))
val = image_gen.flow_from_directory('./data/validate', class_mode='categorical', shuffle=False, batch_size=10, target_size=(151, 136))
pretrained_model = VGG16(include_top=False, input_shape=(151, 136, 3), weights='imagenet')
pretrained_model.summary()
vgg_features_train = pretrained_model.predict(train)
vgg_features_val = pretrained_model.predict(val)
train_target = to_categorical(train.labels)
val_target = to_categorical(val.labels)
model = Sequential()
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(2, activation='softmax'))
model.compile(optimizer='rmsprop', metrics=['accuracy'], loss='categorical_crossentropy')
target_dir = './models/weights-improvement'
if not os.path.exists(target_dir):
os.mkdir(target_dir)
checkpoint = ModelCheckpoint(filepath=target_dir + 'weights-improvement-{epoch:02d}-{val_accuracy:.2f}.hdf5', monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
model.fit(vgg_features_train, train_target, epochs=100, batch_size=8, validation_data=(vgg_features_val, val_target), callbacks=callbacks_list)
model.save('./models/model')
model.save_weights('./models/weights')
I have this predict function, which should load an image and return the categorisation the model gives it.
from keras.preprocessing.image import load_img, img_to_array
def predict(file):
x = load_img(file, target_size=(151,136,3))
x = img_to_array(x)
print(x.shape)
print(x.shape)
x = np.expand_dims(x, axis=0)
array = model.predict(x)
result = array[0]
if result[0] > result[1]:
if result[0] > 0.9:
print("Predicted answer: Buy")
answer = 'buy'
print(result)
print(array)
else:
print("Predicted answer: Not confident")
answer = 'n/a'
print(result)
else:
if result[1] > 0.9:
print("Predicted answer: Sell")
answer = 'sell'
print(result)
else:
print("Predicted answer: Not confident")
answer = 'n/a'
print(result)
return answer
The issue is that when I run this predict function, I get the following error.
File "predict-binary.py", line 24, in predict
array = model.predict(x)
File ".venv\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1629, in predict
tmp_batch_outputs = self.predict_function(iterator)
File ".venv\lib\site-packages\tensorflow\python\eager\def_function.py", line 828, in __call__
result = self._call(*args, **kwds)
File ".venv\lib\site-packages\tensorflow\python\eager\def_function.py", line 871, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File ".venv\lib\site-packages\tensorflow\python\eager\def_function.py", line 725, in _initialize
self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
File ".venv\lib\site-packages\tensorflow\python\eager\function.py", line 2969, in _get_concrete_function_internal_garbage_collected
graph_function, _ = self._maybe_define_function(args, kwargs)
File ".venv\lib\site-packages\tensorflow\python\eager\function.py", line 3361, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File ".venv\lib\site-packages\tensorflow\python\eager\function.py", line 3196, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File ".venv\lib\site-packages\tensorflow\python\framework\func_graph.py", line 990, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File ".venv\lib\site-packages\tensorflow\python\eager\def_function.py", line 634, in wrapped_fn
out = weak_wrapped_fn().__wrapped__(*args, **kwds)
File ".venv\lib\site-packages\tensorflow\python\framework\func_graph.py", line 977, in wrapper
raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:
.venv\lib\site-packages\tensorflow\python\keras\engine\training.py:1478 predict_function *
return step_function(self, iterator)
.venv\lib\site-packages\tensorflow\python\keras\engine\training.py:1468 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
.venv\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1259 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
.venv\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2730 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
.venv\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:3417 _call_for_each_replica
return fn(*args, **kwargs)
.venv\lib\site-packages\tensorflow\python\keras\engine\training.py:1461 run_step **
outputs = model.predict_step(data)
.venv\lib\site-packages\tensorflow\python\keras\engine\training.py:1434 predict_step
return self(x, training=False)
.venv\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:1012 __call__
outputs = call_fn(inputs, *args, **kwargs)
.venv\lib\site-packages\tensorflow\python\keras\engine\sequential.py:375 call
return super(Sequential, self).call(inputs, training=training, mask=mask)
.venv\lib\site-packages\tensorflow\python\keras\engine\functional.py:424 call
return self._run_internal_graph(
.venv\lib\site-packages\tensorflow\python\keras\engine\functional.py:560 _run_internal_graph
outputs = node.layer(*args, **kwargs)
.venv\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:998 __call__
input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
.venv\lib\site-packages\tensorflow\python\keras\engine\input_spec.py:255 assert_input_compatibility
raise ValueError(
ValueError: Input 0 of layer dense is incompatible with the layer: expected axis -1 of input shape to have value 8192 but received input with shape (None, 61608)
I'm assuming I need to change something between the Flatten() and Dense() layers of my model, but I'm not sure what. I tried adding model.add(Dense(61608, activation='relu')) between these two, as that seemed to be what was suggested in another post I saw (I cannot find the link now), but it led to the same error. (I tried it with 8192 instead of 61608 as well.) Any help is appreciated, thanks.
EDIT #1:
I changed the model creation/training code to what I think Gerry P suggested:
img_shape = (151,136,3)
base_model=VGG19( include_top=False, input_shape=img_shape, pooling='max', weights='imagenet' )
x=base_model.output
x=Dense(100, activation='relu')(x)
x=Dropout(0.5)(x)
x=BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
output=Dense(2, activation='softmax')(x)
model=Model(inputs=base_model.input, outputs=output)
image_gen = ImageDataGenerator()
train = image_gen.flow_from_directory('./data/train', class_mode='categorical', shuffle=False, batch_size=10, target_size=(151, 136))
val = image_gen.flow_from_directory('./data/validate', class_mode='categorical', shuffle=False, batch_size=10, target_size=(151, 136))
vgg_features_train = base_model.predict(train)
vgg_features_val = base_model.predict(val)
train_target = to_categorical(train.labels)
val_target = to_categorical(val.labels)
model.compile(optimizer='rmsprop', metrics=['accuracy'], loss='categorical_crossentropy')
model.fit(vgg_features_train, train_target, epochs=100, batch_size=8, validation_data=(vgg_features_val, val_target), callbacks=callbacks_list)
This resulted in a different input shape error:
File "train-binary.py", line 37, in <module>
model.fit(vgg_features_train, train_target, epochs=100, batch_size=8, validation_data=(vgg_features_val, val_target), callbacks=callbacks_list)
ValueError: Input 0 is incompatible with layer model: expected shape=(None, 151, 136, 3), found shape=(None, 512)
Your model expects the input to model.predict to have the same dimensions as the data it was trained on; in this case that is the dimensions of vgg_features_train. The input you are generating for model.predict is instead sized for the input of the VGG model. You are essentially trying to do transfer learning, so I suggest you proceed as below:
base_model=tf.keras.applications.VGG19( include_top=False, input_shape=img_shape, pooling='max', weights='imagenet' )
x=base_model.output
x=Dense(100, activation='relu')(x)
x=Dropout(0.5)(x)
x=BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
output=Dense(2, activation='softmax')(x)
model=Model(inputs=base_model.input, outputs=output)
model.fit( train, epochs=100, batch_size=8, validation_data=val, callbacks=callbacks_list)
Now, for prediction, you can use the same dimensions as you used to train the model.
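As a sketch, prediction could then look something like this (the threshold logic from the question is omitted; the (151, 136) target size matches the generators used for training, and model refers to the full model built above, which takes raw images rather than pre-extracted VGG features):
import numpy as np
from tensorflow.keras.preprocessing.image import load_img, img_to_array

def predict(file):
    # Load one image at the training resolution and add a batch dimension.
    x = img_to_array(load_img(file, target_size=(151, 136)))
    x = np.expand_dims(x, axis=0)            # shape (1, 151, 136, 3)
    probs = model.predict(x)[0]              # softmax over the 2 classes
    return 'buy' if probs[0] > probs[1] else 'sell'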
I'm trying to construct a 2D CNN, following this code: https://kgptalkie.com/human-activity-recognition-using-accelerometer-data/. I have four attributes and nine classes.
X_train[0].shape, X_test[0].shape
((200, 4), (200, 4))
X_train = X_train.reshape(3104, 200, 4, 1)
X_test = X_test.reshape(776, 200, 4, 1)
X_train[0].shape, X_test[0].shape
((200, 4, 1), (200, 4, 1))
model = Sequential()
model.add(Conv2D(16, (2, 2), activation = 'relu', input_shape = X_train[0].shape))
model.add(Dropout(0.1))
model.add(Conv2D(32, (2, 2), activation='relu'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(64, activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(9, activation='softmax'))
model.compile(optimizer=Adam(learning_rate = 0.001), loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'])
history = model.fit(X_train, y_train, epochs = 10, validation_data= (X_test, y_test), verbose=1)
I'm getting this error:
ValueError Traceback (most recent call last)
<ipython-input-42-d7e8ba9ba93b> in <module>()
1 model.compile(optimizer=Adam(learning_rate = 0.001), loss = 'sparse_categorical_crossentropy', metrics = ['accuracy'])
----> 2 history = model.fit(X_train, y_train, epochs = 10, validation_data= (X_test, y_test), verbose=1)
10 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
ValueError: in user code:
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:806 train_function *
return step_function(self, iterator)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:796 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:1211 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:2585 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:2945 _call_for_each_replica
return fn(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:789 run_step **
outputs = model.train_step(data)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:749 train_step
y, y_pred, sample_weight, regularization_losses=self.losses)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/compile_utils.py:204 __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/losses.py:149 __call__
losses = ag_call(y_true, y_pred)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/losses.py:253 call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py:201 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/losses.py:1567 sparse_categorical_crossentropy
y_true, y_pred, from_logits=from_logits, axis=axis)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py:201 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/backend.py:4783 sparse_categorical_crossentropy
labels=target, logits=output)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py:201 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/nn_ops.py:4176 sparse_softmax_cross_entropy_with_logits_v2
labels=labels, logits=logits, name=name)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py:201 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/nn_ops.py:4091 sparse_softmax_cross_entropy_with_logits
logits.get_shape()))
ValueError: Shape mismatch: The shape of labels (received (288,)) should equal the shape of logits except for the last dimension (received (32, 6)).
Could you help me please?
I resolved the problem by changing the loss from 'sparse_categorical_crossentropy' to loss=tf.keras.losses.KLDivergence().
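For reference, that change amounts to swapping only the loss in the compile call (a sketch; note that KLDivergence, like categorical_crossentropy, expects y_train to contain one-hot/probability rows rather than integer class indices):
import tensorflow as tf
from tensorflow.keras.optimizers import Adam

# Same model as above; only the loss argument changes.
model.compile(optimizer=Adam(learning_rate=0.001),
              loss=tf.keras.losses.KLDivergence(),
              metrics=['accuracy'])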