I am building a TensorFlow model to predict ranks from text data.
I have attached the vectorizer to the model as a layer, and based on that I specified the input shape as (1,) and the data type as string.
Below is the entire model structure, but my main focus is on the first model (the full RNN model).
vec = tf.keras.layers.TextVectorization(max_tokens=MAX_VOCAB_LENGTH // 2,
                                        standardize='lower',
                                        pad_to_max_tokens=True,
                                        output_sequence_length=MAX_LENGTH,
                                        name='source_vec_layer')  # ngrams=NGRAMS,
vec.adapt(df.source.to_numpy())
# full RNN model
inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string)
hidden = vec(inputs)
hidden = tf.keras.layers.Embedding(
    input_dim=MAX_VOCAB_LENGTH // 2,
    output_dim=EMMPIDING_OUTPUT_DIM,
    input_length=MAX_LENGTH,
    embeddings_initializer=tf.keras.initializers.HeNormal(RANDOM_SEED),
    name='rnn_emmbd_layer'
)(hidden)
hidden = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(256, return_sequences=True))(hidden)
hidden = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(256, return_sequences=True))(hidden)
hidden = tf.keras.layers.Dropout(0.3)(hidden)
hidden = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(256, return_sequences=True))(hidden)
hidden = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(256, return_sequences=True))(hidden)
hidden = tf.keras.layers.Dropout(0.3)(hidden)
hidden = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(256, return_sequences=True))(hidden)
hidden = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(256, return_sequences=True))(hidden)
hidden = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(128))(hidden)
hidden = tf.keras.layers.Dropout(0.3)(hidden)
outputs = tf.keras.layers.Dense(128, activation='tanh')(hidden)
source_model = tf.keras.Model(inputs=inputs, outputs=outputs)
# second fully connected model on the generated columns
inputs = tf.keras.layers.Input(shape=(GENERATED_COLUMNS_COUNT,), dtype=tf.float16)
hidden = tf.keras.layers.Dense(128, activation='tanh')(inputs)
hidden = tf.keras.layers.Dense(128, activation='tanh')(hidden)
hidden = tf.keras.layers.BatchNormalization()(hidden)
hidden = tf.keras.layers.Dropout(0.2)(hidden)
hidden = tf.keras.layers.Dense(128, activation='tanh')(hidden)
hidden = tf.keras.layers.Dense(128, activation='tanh')(hidden)
hidden = tf.keras.layers.BatchNormalization()(hidden)
hidden = tf.keras.layers.Dropout(0.2)(hidden)
outputs = tf.keras.layers.Dense(128, activation='tanh')(hidden)
dense_model = tf.keras.Model(inputs=inputs, outputs=outputs)
concat = tf.keras.layers.Concatenate()([source_model.output, dense_model.output])
concat = tf.squeeze(tf.keras.layers.Dense(1)(concat), axis=-1)
model = tf.keras.Model(
    inputs=[source_model.input, dense_model.input],
    outputs=concat
)
model.compile(
    optimizer=tf.keras.optimizers.Nadam(learning_rate=0.01),
    loss=tfr.keras.losses.SoftmaxLoss(),
    metrics=[
        'mse',
        'mae',
        tfr.keras.metrics.get("ndcg", name="ndcg"),
        tfr.keras.metrics.get(key="mrr", name="mrr"),
        tfa.metrics.KendallsTau(actual_max=MAX_NB_CELL_COUNT),  # preds_max=MAX_NB_CELL_COUNT,
    ]
)
Then I created the TensorFlow datasets to load the data into the model:
splitter = GroupShuffleSplit(n_splits=1, test_size=VALIDATION_RATIO, random_state=RANDOM_SEED)
# Split, keeping notebooks with a common origin (ancestor_id) together
ids = df.index.unique('id')
ancestors = df_ancestors.loc[ids, 'ancestor_id']
ids_train, ids_valid = next(splitter.split(ids, groups=ancestors))
ids_train, ids_valid = ids[ids_train], ids[ids_valid]
df_train = df.loc[ids_train, :].reset_index(drop=True)
df_valid = df.loc[ids_valid, :].reset_index(drop=True)
xtrain = tf.data.Dataset.from_tensor_slices((df_train['source'].astype(str), df_train.drop(['rank', 'source'], axis=1)))
ytrain = tf.data.Dataset.from_tensor_slices(df_train['rank'].astype(float))
print(xtrain, ytrain, '\n')
train_dataset = tf.data.Dataset.zip((xtrain, ytrain)).shuffle(SHUFFLE_SIZE).batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE)
print(train_dataset, '\n\n')
xvalid = tf.data.Dataset.from_tensor_slices((df_valid['source'].astype(str), df_valid.drop(['rank', 'source'], axis=1)))
yvalid = tf.data.Dataset.from_tensor_slices(df_valid['rank'].astype(float))
print(xvalid, yvalid, '\n')
validation_dataset = tf.data.Dataset.zip((xvalid, yvalid)).shuffle(SHUFFLE_SIZE).batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE)
print(validation_dataset, '\n\n')
callbacks = [tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_delta=0.001, min_lr=0.0001, mode='min'),]
# callbacks += [tf.keras.callbacks.ModelCheckpoint('best_model', monitor='val_loss', verbose=0, save_best_only=True, mode='min', save_format='tf')]
# callbacks += [tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.001, patience=10, restore_best_weights=True, mode='min')]
history = model.fit(
    train_dataset,
    validation_data=validation_dataset,
    epochs=EPOCHS,
    batch_size=BATCH_SIZE,
    callbacks=callbacks,
    use_multiprocessing=True,
    workers=PROCESSORS_COUNT,
    max_queue_size=PROCESSORS_COUNT * 3,
)
When I started the fitting process, the error below was raised.
<TensorSliceDataset element_spec=(TensorSpec(shape=(), dtype=tf.string, name=None), TensorSpec(shape=(13,), dtype=tf.int64, name=None))> <TensorSliceDataset element_spec=TensorSpec(shape=(), dtype=tf.float64, name=None)>
<PrefetchDataset element_spec=((TensorSpec(shape=(None,), dtype=tf.string, name=None), TensorSpec(shape=(None, 13), dtype=tf.int64, name=None)), TensorSpec(shape=(None,), dtype=tf.float64, name=None))>
<TensorSliceDataset element_spec=(TensorSpec(shape=(), dtype=tf.string, name=None), TensorSpec(shape=(13,), dtype=tf.int64, name=None))> <TensorSliceDataset element_spec=TensorSpec(shape=(), dtype=tf.float64, name=None)>
<PrefetchDataset element_spec=((TensorSpec(shape=(None,), dtype=tf.string, name=None), TensorSpec(shape=(None, 13), dtype=tf.int64, name=None)), TensorSpec(shape=(None,), dtype=tf.float64, name=None))>
Epoch 1/10
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Input In [56], in <cell line: 31>()
27 callbacks = [tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_delta=0.001, min_lr=0.0001, mode='min'),]
28 # callbacks += [tf.keras.callbacks.ModelCheckpoint('best_model', monitor='val_loss', verbose=0, save_best_only=True, mode='min', save_format='tf')]
29 # callbacks += [tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.001, patience=10, restore_best_weights=True, mode='min')]
---> 31 history = model.fit(
32 train_dataset,
33 validation_data=validation_dataset,
34 epochs=EPOCHS,
35 batch_size=BATCH_SIZE,
36 callbacks=callbacks,
37 use_multiprocessing=True,
38 workers=PROCESSORS_COUNT,
39 max_queue_size= PROCESSORS_COUNT*3,
40 )
42 pd.DataFrame(history.history).plot()
43 plt.show()
File ~\AppData\Local\Programs\Python\Python39\lib\site-packages\keras\utils\traceback_utils.py:67, in filter_traceback.<locals>.error_handler(*args, **kwargs)
65 except Exception as e: # pylint: disable=broad-except
66 filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67 raise e.with_traceback(filtered_tb) from None
68 finally:
69 del filtered_tb
File ~\AppData\Local\Temp\__autograph_generated_fileunnzlwaq.py:15, in outer_factory.<locals>.inner_factory.<locals>.tf__train_function(iterator)
13 try:
14 do_return = True
---> 15 retval_ = ag__.converted_call(ag__.ld(step_function), (ag__.ld(self), ag__.ld(iterator)), None, fscope)
16 except:
17 do_return = False
File ~\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow_ranking\python\keras\losses.py:557, in SoftmaxLoss.__call__(self, y_true, y_pred, sample_weight)
555 def __call__(self, y_true, y_pred, sample_weight=None):
556 """See _RankingLoss."""
--> 557 losses, sample_weight = self._loss.compute_per_list(
558 y_true, y_pred, sample_weight)
559 return losses_utils.compute_weighted_loss(
560 losses, sample_weight, reduction=self._get_reduction())
File ~\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow_ranking\python\losses_impl.py:1045, in SoftmaxLoss.compute_per_list(self, labels, logits, weights, mask)
1042 # As opposed to the other listwise losses, SoftmaxLoss returns already
1043 # squeezed losses, which can be returned directly.
1044 logits = self.get_logits(logits)
-> 1045 labels, logits = self.precompute(labels, logits, weights, mask)
1046 return self._compute_unreduced_loss_impl(labels, logits, mask)
File ~\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow_ranking\python\losses_impl.py:987, in SoftmaxLoss.precompute(self, labels, logits, weights, mask)
985 if mask is None:
986 mask = utils.is_label_valid(labels)
--> 987 ranks = _compute_ranks(logits, mask)
988 # Reset the masked labels to 0 and reset the masked logits to a logit with
989 # ~= 0 contribution in softmax.
990 labels = tf.compat.v1.where(mask, labels, tf.zeros_like(labels))
File ~\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow_ranking\python\losses_impl.py:392, in _compute_ranks(logits, is_valid)
380 def _compute_ranks(logits, is_valid):
381 """Computes ranks by sorting valid logits.
382
383 Args:
(...)
390 The `ranks` Tensor.
391 """
--> 392 _check_tensor_shapes([logits, is_valid])
393 # Only sort entries with is_valid = True.
394 scores = tf.compat.v1.where(
395 is_valid, logits, -1e-6 * tf.ones_like(logits) +
396 tf.reduce_min(input_tensor=logits, axis=1, keepdims=True))
File ~\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow_ranking\python\losses_impl.py:40, in _check_tensor_shapes(tensors)
38 for tensor in tensors:
39 tensor = tf.convert_to_tensor(value=tensor)
---> 40 tensor.get_shape().assert_has_rank(2)
41 tensor.get_shape().assert_is_compatible_with(
42 tf.convert_to_tensor(value=tensors[0]).get_shape())
ValueError: in user code:
File "C:\Users\yazee\AppData\Local\Programs\Python\Python39\lib\site-packages\keras\engine\training.py", line 1051, in train_function *
return step_function(self, iterator)
File "C:\Users\yazee\AppData\Local\Programs\Python\Python39\lib\site-packages\keras\engine\training.py", line 1040, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\Users\yazee\AppData\Local\Programs\Python\Python39\lib\site-packages\keras\engine\training.py", line 1030, in run_step **
outputs = model.train_step(data)
File "C:\Users\yazee\AppData\Local\Programs\Python\Python39\lib\site-packages\keras\engine\training.py", line 890, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File "C:\Users\yazee\AppData\Local\Programs\Python\Python39\lib\site-packages\keras\engine\training.py", line 948, in compute_loss
return self.compiled_loss(
File "C:\Users\yazee\AppData\Local\Programs\Python\Python39\lib\site-packages\keras\engine\compile_utils.py", line 201, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "C:\Users\yazee\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow_ranking\python\keras\losses.py", line 557, in __call__
losses, sample_weight = self._loss.compute_per_list(
File "C:\Users\yazee\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow_ranking\python\losses_impl.py", line 1045, in compute_per_list
labels, logits = self.precompute(labels, logits, weights, mask)
File "C:\Users\yazee\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow_ranking\python\losses_impl.py", line 987, in precompute
ranks = _compute_ranks(logits, mask)
File "C:\Users\yazee\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow_ranking\python\losses_impl.py", line 392, in _compute_ranks
_check_tensor_shapes([logits, is_valid])
File "C:\Users\yazee\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow_ranking\python\losses_impl.py", line 40, in _check_tensor_shapes
tensor.get_shape().assert_has_rank(2)
ValueError: Shape (None,) must have rank 2
One thing to point out: I started working on this as a prediction (regression) problem with MAE as the loss, and the code ran fine. However, when I moved to ranking I changed the loss to tfr.keras.losses.SoftmaxLoss().
The tf-dataset element spec for the source column clearly shows that each source element is a scalar, so the batched data is only one-dimensional. I have tried converting it to a DataFrame and expanding dimensions, but I am stuck with the same error every time.
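For context on the error: tfr.keras.losses.SoftmaxLoss is a listwise loss, so it expects labels and logits as rank-2 tensors of shape [batch_size, list_size], one ranked list per row; with per-item dataset elements like the ones above, the loss only ever sees rank-1 batches, which is exactly what trips assert_has_rank(2). A minimal standalone sketch of the shape requirement (toy values, not the asker's code):
import tensorflow as tf
import tensorflow_ranking as tfr

loss = tfr.keras.losses.SoftmaxLoss()

# Rank 2: a batch of one list with three items -> computes fine.
labels = tf.constant([[0.0, 1.0, 2.0]])
logits = tf.constant([[0.1, 0.4, 0.2]])
print(loss(labels, logits))

# Rank 1: a batch of independent scalars, as produced by the pipeline above.
# loss(tf.constant([0.0, 1.0, 2.0]), tf.constant([0.1, 0.4, 0.2]))
# -> ValueError: Shape (3,) must have rank 2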
I am trying to perform semantic segmentation by using the U-Net architecture.
When I fit the model:
history = model.fit(imgs_train,
                    masks_train,
                    batch_size=5,
                    epochs=5)
I keep getting the error below.
imgs_train.shape: (1500, 256, 256, 3)
masks_train.shape: (1500, 256, 256, 3)
# Building Unet using encoder and decoder blocks
from keras.models import Model
from keras.layers import (Input, Conv2D, MaxPooling2D, concatenate, Conv2DTranspose,
                          BatchNormalization, Dropout, Lambda)
from keras.layers import Activation, MaxPool2D, Concatenate
def conv_block(input, num_filters=64):
    # first conv layer
    x = Conv2D(num_filters, kernel_size=(3, 3), padding='same')(input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # second conv layer
    x = Conv2D(num_filters, kernel_size=(3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x

def encoder_block(input, num_filters=64):
    # conv block
    x = conv_block(input, num_filters)
    # maxpooling
    p = MaxPool2D(strides=(2, 2))(x)
    p = Dropout(0.4)(p)
    return x, p

def decoder_block(input, skip_features, num_filters=64):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding='same')(input)
    x = Concatenate()([x, skip_features])
    x = conv_block(x, num_filters)
    return x
num_classes = 7

def unet_architect(input_shape=(256, 256, 3)):
    """ Input Layer """
    inputs = Input(input_shape)
    """ Encoder """
    s1, p1 = encoder_block(inputs, 64)
    s2, p2 = encoder_block(p1, 128)
    s3, p3 = encoder_block(p2, 256)
    s4, p4 = encoder_block(p3, 512)
    """ Bridge """
    b1 = conv_block(p4, 1024)
    """ Decoder """
    d1 = decoder_block(b1, s4, 512)
    d2 = decoder_block(d1, s3, 256)
    d3 = decoder_block(d2, s2, 128)
    d4 = decoder_block(d3, s1, 64)
    """ Output Layer """
    outputs = Conv2D(num_classes, (1, 1), padding='same', activation='softmax')(d4)
    model = Model(inputs, outputs, name='U-Net')
    return model
model = unet_architect()
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
I tried changing sparse_categorical_crossentropy to categorical_crossentropy, but then another error shows up.
And when I change the batch size while fitting the model, logits.shape and labels.shape change accordingly.
ERROR
Epoch 1/5
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-35-4c0704d8f65d> in <module>()
2 masks_train,
3 batch_size= 10,
----> 4 epochs = 5)
1 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py in autograph_handler(*args, **kwargs)
1145 except Exception as e: # pylint:disable=broad-except
1146 if hasattr(e, "ag_error_metadata"):
-> 1147 raise e.ag_error_metadata.to_exception(e)
1148 else:
1149 raise
ValueError: in user code:
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1021, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1010, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1000, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 860, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 919, in compute_loss
y, y_pred, sample_weight, regularization_losses=self.losses)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/compile_utils.py", line 201, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 141, in __call__
losses = call_fn(y_true, y_pred)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 245, in call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 1863, in sparse_categorical_crossentropy
y_true, y_pred, from_logits=from_logits, axis=axis)
File "/usr/local/lib/python3.7/dist-packages/keras/backend.py", line 5203, in sparse_categorical_crossentropy
labels=target, logits=output)
ValueError: `labels.shape` must equal `logits.shape` except for the last dimension. Received: labels.shape=(1966080,) and logits.shape=(655360, 7)
NOTEBOOK LINK: https://github.com/Tamimi123600/Deep-Learning/blob/main/Image_Segmentation1.ipynb
Thanks in advance
Why are your mask images (GT targets) of shape (1500, 256, 256, 3) and not (1500, 256, 256)? You have num_classes=7, so your GT images should have a single channel whose values {0...6} represent the class of each pixel.
Please check how you load and process your target images; the issue is there.
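That also explains the numbers in the traceback: with batch_size=10, the logits flatten to 10 * 256 * 256 = 655,360 pixel predictions, while the 3-channel masks flatten to 10 * 256 * 256 * 3 = 1,966,080 labels. Below is a minimal sketch of the conversion, assuming each pixel stores the same class id in all three channels (if the masks are color-coded instead, a color-to-class lookup is needed):
import numpy as np

# Assumption: the class id is repeated identically across the three channels,
# so keeping one channel recovers the (N, H, W) integer mask.
masks_train_1ch = masks_train[..., 0].astype(np.uint8)
print(masks_train_1ch.shape)        # (1500, 256, 256)
print(np.unique(masks_train_1ch))   # should be a subset of {0, ..., 6}

history = model.fit(imgs_train, masks_train_1ch, batch_size=5, epochs=5)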
I am trying to create an autoencoder for 3d images and here is the model:
def create_encoder(width, height, depth):
    x = Input(shape=(height, width, depth))
    # Encoder
    e_conv1 = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
    pool1 = MaxPooling2D((2, 2), padding='same')(e_conv1)
    batchnorm_1 = BatchNormalization()(pool1)
    e_conv2 = Conv2D(32, (3, 3), activation='relu', padding='same')(batchnorm_1)
    pool2 = MaxPooling2D((2, 2), padding='same')(e_conv2)
    batchnorm_2 = BatchNormalization()(pool2)
    e_conv3 = Conv2D(16, (3, 3), activation='relu', padding='same')(batchnorm_2)
    h = MaxPooling2D((2, 2), padding='same')(e_conv3)
    # Decoder
    d_conv1 = Conv2D(64, (3, 3), activation='relu', padding='same')(h)
    up1 = UpSampling2D((2, 2))(d_conv1)
    d_conv2 = Conv2D(32, (3, 3), activation='relu', padding='same')(up1)
    up2 = UpSampling2D((2, 2))(d_conv2)
    d_conv3 = Conv2D(16, (3, 3), activation='relu')(up2)
    up3 = UpSampling2D((2, 2))(d_conv3)
    r = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(up3)
    model = Model(x, r)
    model.compile(optimizer='adam', loss='mse')
    return model
But whenever I try to run the code:
width = 512
height = 512
depth = 3
EPOCHS = 100
BS = 128
autoencoder = create_encoder(width, height, depth)
earlystop = EarlyStopping(monitor='loss', patience=3)
H = autoencoder.fit(trainXNoisy, trainX, validation_data=(testXNoisy, testX), epochs=EPOCHS, batch_size=BS, callbacks=[earlystop])
I get this error:
Epoch 1/100
/usr/local/lib/python3.7/dist-packages/keras/optimizer_v2/adam.py:105: UserWarning: The `lr` argument is deprecated, use `learning_rate` instead.
super(Adam, self).__init__(name, **kwargs)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-91-48e3c96bc5fd> in <module>()
12 earlystop = EarlyStopping(monitor='loss', patience=3)
13 # H = autoencoder.fit(trainXNoisy, trainX, validation_data=(testXNoisy, testX), epochs=EPOCHS, batch_size=BS, callbacks=[earlystop])
---> 14 H = autoencoder.fit(trainXNoisy, trainX, epochs=EPOCHS, batch_size=BS, callbacks=[earlystop])
1 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py in autograph_handler(*args, **kwargs)
1127 except Exception as e: # pylint:disable=broad-except
1128 if hasattr(e, "ag_error_metadata"):
-> 1129 raise e.ag_error_metadata.to_exception(e)
1130 else:
1131 raise
ValueError: in user code:
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 878, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 867, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 860, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 810, in train_step
y, y_pred, sample_weight, regularization_losses=self.losses)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/compile_utils.py", line 201, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 141, in __call__
losses = call_fn(y_true, y_pred)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 245, in call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 1204, in mean_squared_error
return backend.mean(tf.math.squared_difference(y_pred, y_true), axis=-1)
ValueError: Dimensions must be equal, but are 508 and 512 for '{{node mean_squared_error/SquaredDifference}} = SquaredDifference[T=DT_FLOAT](model_8/conv2d_85/Sigmoid, IteratorGetNext:1)' with input shapes: [?,508,508,3], [?,512,512,3].
I am a newbie to autoencoders (maybe to the whole machine learning world), but I tried to get information from other similar problems and couldn't. If anyone could provide a better version, it would be much appreciated.
Thanks,
The line
d_conv3 = Conv2D(16, (3, 3), activation='relu')(up2)
is missing the padding argument. Therefore, instead of a 256 x 256 x 16 output you get 254 x 254 x 16, which becomes 508 x 508 x 16 after the upsampling, and finally 508 x 508 x 3 after the last Conv2D.
The MSE loss needs to compare two images of the same size, and the input is 512 x 512 x 3. Just add the padding argument as you do in the other Conv2D layers and it should work just fine:
d_conv3 = Conv2D(16, (3, 3), activation='relu', padding='same')(up2)
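A quick standalone check of the size arithmetic (not the asker's model, assuming a TF 2.x environment):
import tensorflow as tf

x = tf.zeros((1, 256, 256, 16))
# 'valid' padding (the default) shrinks each spatial side by kernel_size - 1 = 2.
print(tf.keras.layers.Conv2D(16, (3, 3))(x).shape)                  # (1, 254, 254, 16)
print(tf.keras.layers.Conv2D(16, (3, 3), padding='same')(x).shape)  # (1, 256, 256, 16)
# 254 doubles to 508 after UpSampling2D((2, 2)), which is the 508 in the error.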
OS: Manjaro Linux x64
CUDA: 11.0.3
TF/Keras: 2.4(.1)
Py: 3.8
Hello,
I'm trying to build some kind of W-VAE-GAN. I keep running into the very same error over and over again, and I already had that problem with Keras 2.3, though interestingly NOT with TF/K 2.2. Unfortunately I need to use Keras 2.4 because I'm supposed to run my code on our university server with these exact TF/K and CUDA versions. At this point I'm just trying to make sure my code works as intended.
The error I get is the following, with the exact lines commented in my code:
...
Epoch 1/20
Traceback (most recent call last):
File "/home/peer/Programmierkram/BA/Metal_GAN/wvaegan.py", line 282, in <module>
gan.fit(gen_trans, batch_size=batch_size, epochs=epochs, callbacks=[callback])
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py", line 1100, in fit
tmp_logs = self.train_function(iterator)
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py", line 828, in __call__
result = self._call(*args, **kwds)
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py", line 871, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py", line 725, in _initialize
self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 2969, in _get_concrete_function_internal_garbage_collected
graph_function, _ = self._maybe_define_function(args, kwargs)
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 3361, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 3196, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py", line 990, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py", line 634, in wrapped_fn
out = weak_wrapped_fn().__wrapped__(*args, **kwds)
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py", line 977, in wrapper
raise e.ag_error_metadata.to_exception(e)
tensorflow.python.framework.errors_impl.OperatorNotAllowedInGraphError: in user code:
/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:805 train_function *
return step_function(self, iterator)
/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:795 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:1259 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2730 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:3417 _call_for_each_replica
return fn(*args, **kwargs)
/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:788 run_step **
outputs = model.train_step(data)
/home/peer/Programmierkram/BA/Metal_GAN/wvaegan.py:190 train_step
z_mean, z_log_var, z = self.encoder(clip_img) # <------ WHY NO ERROR HERE?
/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:505 __iter__
self._disallow_iteration()
/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:498 _disallow_iteration
self._disallow_when_autograph_enabled("iterating over `tf.Tensor`")
/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:474 _disallow_when_autograph_enabled
raise errors.OperatorNotAllowedInGraphError(
OperatorNotAllowedInGraphError: iterating over `tf.Tensor` is not allowed: AutoGraph did convert this function. This might indicate you are trying to use an unsupported feature.
What I do not understand is how this error comes about in the first place, as I successfully executed the VAE part using TF 2.2 without any error like this, and, more importantly, there is no iteration over any tensor obvious to me. Even if I comment out the for-loop in my train_step, the same error occurs a few lines later in the same context. I have also tried decorating train_step() with @tf.function, yet nothing changed.
The code I used is the following:
import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow.keras.backend as K
from keras.preprocessing.image import ImageDataGenerator
import itertools
import scipy.io
import matplotlib.pyplot as plt
import matplotlib.image as PIL
runOnGPU = 0

if runOnGPU == 1:
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
        except RuntimeError as e:
            print(e)
else:
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
path_clipped_train = os.path.join('./sinograms/clip')
path_transparent_train = os.path.join('./sinograms/transparent')
img_width, img_height = 512, 512
bottleneck = 1024 * 2
filters = (1024, 512, 256, 64)
filter_size = (3, 3, 3, 3)
batch_size = 4
epochs = 20
dsc_steps = 1
gp_w = 10.0
beta_v = 2
learning_rate = 125e-5
latent_dim = 2
input_shape = (1, img_width, img_height, 1)
dataset_gen1 = ImageDataGenerator(rescale=1 / 255, dtype="float32")
dataset_gen2 = ImageDataGenerator(rescale=1 / 255, dtype="float32")
gen_trans = dataset_gen1.flow_from_directory(path_transparent_train,
                                             target_size=(img_width, img_height),
                                             color_mode='grayscale',
                                             classes=[''],
                                             class_mode=None,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             )
gen_clip = dataset_gen2.flow_from_directory(path_clipped_train,
                                            target_size=(img_width, img_height),
                                            color_mode='grayscale',
                                            classes=[''],
                                            class_mode=None,
                                            batch_size=batch_size,
                                            shuffle=False,
                                            )
class Sampling(layers.Layer):
    def call(self, inputs):
        z_mean, z_log_var = inputs
        batch = tf.shape(z_mean)[0]
        dim = tf.shape(z_mean)[1]
        epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
        return z_mean + tf.exp(0.5 * z_log_var) * epsilon

def get_encoder():
    encoder_inputs = keras.Input(shape=input_shape[1:], name="encoder_input")
    enc = encoder_inputs
    for (numFilters, szFilters) in zip(filters, filter_size):
        enc = layers.Conv2D(numFilters, szFilters, activation='relu', strides=2, padding='same')(enc)
        enc = layers.BatchNormalization()(enc)
        enc = layers.Dropout(0.2)(enc)
    conv_shape = K.int_shape(enc)[1:]
    enc = layers.Flatten()(enc)
    enc = layers.Dense(bottleneck, activation='relu', name="bottleneck")(enc)
    enc = layers.BatchNormalization()(enc)
    z_mean = layers.Dense(latent_dim, name="z_mean")(enc)
    z_log_var = layers.Dense(latent_dim, name="z_log_var")(enc)
    latent_z = Sampling()([z_mean, z_log_var])
    encoder_model = keras.models.Model(encoder_inputs, latent_z, name="encoder")
    return encoder_model, conv_shape
enc_model, conv_shape = get_encoder()
enc_model.summary()
def get_decoder():
    latent_input = keras.Input(shape=(latent_dim,))
    dec = layers.Dense(conv_shape[0] * conv_shape[1] * conv_shape[2], activation='relu')(latent_input)
    dec = layers.Reshape(conv_shape)(dec)
    for (numFilters, szFilters) in zip(reversed(filters), reversed(filter_size)):
        dec = layers.Conv2DTranspose(numFilters, szFilters, activation='relu', strides=2, padding='same')(dec)
        dec = layers.BatchNormalization()(dec)
        dec = layers.Dropout(0.2)(dec)
    decoder_outputs = layers.Conv2DTranspose(1, 3, activation='relu', padding='same')(dec)
    decoder_model = keras.models.Model(latent_input, decoder_outputs, name="decoder")
    return decoder_model
dec_model = get_decoder()
dec_model.summary()
def get_discriminator():
    dscr_input = keras.Input(shape=input_shape[1:])
    dscr = dscr_input
    for numFilters in filters:
        dscr = layers.Conv2D(numFilters, kernel_size=5, activation='relu', strides=2, padding='same')(dscr)
    dscr = layers.Flatten()(dscr)
    dscr = layers.Dense(1, activation="relu", name="dsc_end")(dscr)
    discriminator_model = keras.models.Model(dscr_input, dscr, name="discriminator")
    return discriminator_model
dsc_model = get_discriminator()
dsc_model.summary()
class GAN(keras.Model):
    def __init__(self,
                 discriminator,
                 encoder,
                 decoder,
                 latent_dim,
                 dsc_steps=dsc_steps,
                 gp_w=gp_w,
                 ):
        super(GAN, self).__init__()
        self.discriminator = discriminator
        self.encoder = encoder
        self.decoder = decoder
        self.latent_dim = latent_dim
        self.dsc_steps = dsc_steps
        self.gp_w = gp_w

    def compile(self,
                dsc_optimizer, enc_optimizer, dec_optimizer,
                dsc_loss_fn, enc_loss_fn, dec_loss_fn):
        super(GAN, self).compile()
        self.dsc_optimizer = dsc_optimizer
        self.enc_optimizer = enc_optimizer
        self.dec_optimizer = dec_optimizer
        self.dsc_loss_fn = dsc_loss_fn
        self.enc_loss_fn = enc_loss_fn
        self.dec_loss_fn = dec_loss_fn

    def call(self, data):
        ds = self.discriminator(data)
        e = self.encoder(data)
        d = self.decoder(e)

    def gradient_penalty(self, batch_size, ref_img, gen_img):
        alpha = tf.random.normal([batch_size, 1, 1, 1], 0.0, 1.0)
        diff = gen_img - ref_img
        interpolated = ref_img + alpha * diff
        with tf.GradientTape() as gp_tape:
            gp_tape.watch(interpolated)
            pred = self.discriminator(interpolated, training=True)
        grads = gp_tape.gradient(pred, [interpolated])[0]
        norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
        gp = tf.reduce_mean((norm - 1.0) ** 2)
        return gp

    # @tf.function  # doesn't make any difference if decorating with that
    def train_step(self, data):
        trans_img = data
        clip_img = data
        batch_size = tf.shape(trans_img)[0]
        for i in range(self.dsc_steps):
            with tf.GradientTape() as tape:
                z_mean, z_log_var, z = self.encoder(clip_img)  # <------ ERROR HERE
                gen_img = self.decoder(z)
                gen_logits = self.discriminator(gen_img)
                ref_logits = self.discriminator(trans_img)
                dsc_cost = self.dsc_loss_fn(ref_img=ref_logits, gen_img=gen_logits)
                gp = self.gradient_penalty(batch_size, trans_img, gen_img)
                dsc_loss = dsc_cost + gp * self.gp_w
            dsc_gradient = tape.gradient(dsc_loss, self.discriminator.trainable_variables)
            self.dsc_optimizer.apply_gradients(zip(dsc_gradient, self.discriminator.trainable_variables))
        with tf.GradientTape() as tape:
            z_mean, z_log_var, z = self.encoder(clip_img)  # <------ ERROR ALSO HERE IF dsc_steps = 0
            gen_img = self.decoder(z)
            gen_img_logits = self.discriminator(gen_img)
            dec_loss = self.dec_loss_fn(gen_img_logits)
            kl_loss = self.kl_loss(z_mean, z_log_var)
        enc_gradient = tape.gradient(kl_loss, self.encoder.trainable_variables)
        self.enc_optimizer.apply_gradients(zip(enc_gradient, self.encoder.trainable_variables))
        dec_gradient = tape.gradient(dec_loss, self.decoder.trainable_variables)
        self.dec_optimizer.apply_gradients(zip(dec_gradient, self.decoder.trainable_variables))
        return {"dsc_loss": dsc_loss, "KL-Loss": kl_loss, "dec_loss": dec_loss}
class GANMonitor(keras.callbacks.Callback):
    def __init__(self, num_img=6, latent_dim=latent_dim):
        self.num_img = num_img
        self.latent_dim = latent_dim

    def on_epoch_end(self, epoch, logs=None):
        generated_images = self.model.decoder()
        generated_images = (generated_images * 127.5) + 127.5
        for i in range(self.num_img):
            img = generated_images[i].numpy()
            img = keras.preprocessing.image.array_to_img(img)
            img.save("generated_img_{i}_{epoch}.png".format(i=i, epoch=epoch))
encoder_optimizer = keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5, beta_2=0.9)
decoder_optimizer = keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5, beta_2=0.9)
discriminator_optimizer = keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5, beta_2=0.9)
def discriminator_loss(real_img, fake_img):
    real_loss = tf.reduce_mean(real_img)
    fake_loss = tf.reduce_mean(fake_img)
    return fake_loss - real_loss

def generator_loss(fake_img):
    return -tf.reduce_mean(fake_img)

def kl_loss(z_mean, z_log_var):
    kl_loss = -0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))
    kl_loss = tf.reduce_mean(tf.reduce_sum(kl_loss, axis=1))
    return beta_v * kl_loss

def reconstruction_loss(data, reconstruction):
    rec_loss = tf.reduce_mean(
        tf.reduce_sum(keras.losses.mse(data, reconstruction), axis=(1, 2))
    )
    return rec_loss
callback = GANMonitor(num_img=3, latent_dim=latent_dim)
gan = GAN(
    discriminator=dsc_model,
    encoder=enc_model,
    decoder=dec_model,
    latent_dim=latent_dim,
    dsc_steps=dsc_steps,
)
gan.compile(
    dsc_optimizer=discriminator_optimizer,
    enc_optimizer=encoder_optimizer,
    dec_optimizer=decoder_optimizer,
    dsc_loss_fn=discriminator_loss,
    enc_loss_fn=kl_loss,
    dec_loss_fn=generator_loss,
)
gan.fit(gen_trans, batch_size=batch_size, epochs=epochs, callbacks=[callback])
I'd be very thankful for any help, because I haven't been able to find or work out any solution to this. I've read that in TF 2.5 some errors like this shouldn't occur anymore, but using TF 2.5 is not an option.
I found the problem.
In the original Keras example for a VAE, the encoder ends in three layers, namely z_mean, z_log_var and latent_z. While it was possible in TF 2.2 to access all terminal layers of a model, as I did in my train_step
z_mean, z_log_var, z = encoder(data)
here only (latent_)z is returned, as defined in the encoder model initialization.
By defining the model with
encoder_model = keras.models.Model(encoder_inputs, [z_mean, z_log_var, latent_z], name="encoder")
with a list of output layers, all z* are accessible.
I assume that getting multiple variables from a single tensor with a singular output
x1, x2, x3 = model(data)
results in a loop that might look something like:
for i in x:
x{i} = model(data)
which is the only explanation for an iteration over a tensor I can think of.
However, reading code might be useful, I'll try to remember that.
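To make the difference concrete, here is a small standalone sketch (toy shapes, not the model above): a Keras model built with a list of output tensors returns a list, which tuple unpacking handles even inside a compiled train_step, while a single-output model returns one tensor, and unpacking that means iterating over a symbolic tensor.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

inp = keras.Input(shape=(8,))
z_mean = layers.Dense(2, name="z_mean")(inp)
z_log_var = layers.Dense(2, name="z_log_var")(inp)
z = layers.Dense(2, name="z")(inp)

multi_out = keras.Model(inp, [z_mean, z_log_var, z])  # three output tensors
m, lv, zz = multi_out(tf.zeros((4, 8)))               # unpacks the list: fine

single_out = keras.Model(inp, z)                      # one output tensor
# m, lv, zz = single_out(tf.zeros((4, 8)))  # iterates over one tensor; inside a
#                                           # graph this raises OperatorNotAllowedInGraphError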
I am trying to create an image processing CNN. I am using VGG16 to speed up some of the learning process. The creation of my CNN below works to the point of training and saving the model & weights. The issue occurs when I try to run a predict function after loading in the model.
image_gen = ImageDataGenerator()
train = image_gen.flow_from_directory('./data/train', class_mode='categorical', shuffle=False, batch_size=10, target_size=(151, 136))
val = image_gen.flow_from_directory('./data/validate', class_mode='categorical', shuffle=False, batch_size=10, target_size=(151, 136))
pretrained_model = VGG16(include_top=False, input_shape=(151, 136, 3), weights='imagenet')
pretrained_model.summary()
vgg_features_train = pretrained_model.predict(train)
vgg_features_val = pretrained_model.predict(val)
train_target = to_categorical(train.labels)
val_target = to_categorical(val.labels)
model = Sequential()
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(2, activation='softmax'))
model.compile(optimizer='rmsprop', metrics=['accuracy'], loss='categorical_crossentropy')
target_dir = './models/weights-improvement'
if not os.path.exists(target_dir):
os.mkdir(target_dir)
checkpoint = ModelCheckpoint(filepath=target_dir + 'weights-improvement-{epoch:02d}-{val_accuracy:.2f}.hdf5', monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
model.fit(vgg_features_train, train_target, epochs=100, batch_size=8, validation_data=(vgg_features_val, val_target), callbacks=callbacks_list)
model.save('./models/model')
model.save_weights('./models/weights')
I have this predict function that loads an image and returns the categorisation of that image given by the model.
from keras.preprocessing.image import load_img, img_to_array

def predict(file):
    x = load_img(file, target_size=(151, 136, 3))
    x = img_to_array(x)
    print(x.shape)
    x = np.expand_dims(x, axis=0)
    array = model.predict(x)
    result = array[0]
    if result[0] > result[1]:
        if result[0] > 0.9:
            print("Predicted answer: Buy")
            answer = 'buy'
            print(result)
            print(array)
        else:
            print("Predicted answer: Not confident")
            answer = 'n/a'
            print(result)
    else:
        if result[1] > 0.9:
            print("Predicted answer: Sell")
            answer = 'sell'
            print(result)
        else:
            print("Predicted answer: Not confident")
            answer = 'n/a'
            print(result)
    return answer
The issue I am experiencing is that when I run this predict function, I get the following error.
File "predict-binary.py", line 24, in predict
array = model.predict(x)
File ".venv\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1629, in predict
tmp_batch_outputs = self.predict_function(iterator)
File ".venv\lib\site-packages\tensorflow\python\eager\def_function.py", line 828, in __call__
result = self._call(*args, **kwds)
File ".venv\lib\site-packages\tensorflow\python\eager\def_function.py", line 871, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File ".venv\lib\site-packages\tensorflow\python\eager\def_function.py", line 725, in _initialize
self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
File ".venv\lib\site-packages\tensorflow\python\eager\function.py", line 2969, in _get_concrete_function_internal_garbage_collected
graph_function, _ = self._maybe_define_function(args, kwargs)
File ".venv\lib\site-packages\tensorflow\python\eager\function.py", line 3361, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File ".venv\lib\site-packages\tensorflow\python\eager\function.py", line 3196, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File ".venv\lib\site-packages\tensorflow\python\framework\func_graph.py", line 990, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File ".venv\lib\site-packages\tensorflow\python\eager\def_function.py", line 634, in wrapped_fn
out = weak_wrapped_fn().__wrapped__(*args, **kwds)
File ".venv\lib\site-packages\tensorflow\python\framework\func_graph.py", line 977, in wrapper
raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:
.venv\lib\site-packages\tensorflow\python\keras\engine\training.py:1478 predict_function *
return step_function(self, iterator)
.venv\lib\site-packages\tensorflow\python\keras\engine\training.py:1468 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
.venv\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1259 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
.venv\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2730 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
.venv\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:3417 _call_for_each_replica
return fn(*args, **kwargs)
.venv\lib\site-packages\tensorflow\python\keras\engine\training.py:1461 run_step **
outputs = model.predict_step(data)
.venv\lib\site-packages\tensorflow\python\keras\engine\training.py:1434 predict_step
return self(x, training=False)
.venv\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:1012 __call__
outputs = call_fn(inputs, *args, **kwargs)
.venv\lib\site-packages\tensorflow\python\keras\engine\sequential.py:375 call
return super(Sequential, self).call(inputs, training=training, mask=mask)
.venv\lib\site-packages\tensorflow\python\keras\engine\functional.py:424 call
return self._run_internal_graph(
.venv\lib\site-packages\tensorflow\python\keras\engine\functional.py:560 _run_internal_graph
outputs = node.layer(*args, **kwargs)
.venv\lib\site-packages\tensorflow\python\keras\engine\base_layer.py:998 __call__
input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
.venv\lib\site-packages\tensorflow\python\keras\engine\input_spec.py:255 assert_input_compatibility
raise ValueError(
ValueError: Input 0 of layer dense is incompatible with the layer: expected axis -1 of input shape to have value 8192 but received input with shape (None, 61608)
I'm assuming I need to change something between the Flatten() and Dense() layers of my model, but I'm not sure what. I attempted to add model.add(Dense(61608, activation='relu')) between these two, as that seemed to be what was suggested in another post I saw (cannot find the link now), but it led to the same error. (I tried it with 8192 instead of 61608 as well.) Any help is appreciated, thanks.
EDIT #1:
Changing the model creation/training code to what I believe Gerry P suggested:
img_shape = (151, 136, 3)
base_model = VGG19(include_top=False, input_shape=img_shape, pooling='max', weights='imagenet')
x = base_model.output
x = Dense(100, activation='relu')(x)
x = Dropout(0.5)(x)
x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
output = Dense(2, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=output)
image_gen = ImageDataGenerator()
train = image_gen.flow_from_directory('./data/train', class_mode='categorical', shuffle=False, batch_size=10, target_size=(151, 136))
val = image_gen.flow_from_directory('./data/validate', class_mode='categorical', shuffle=False, batch_size=10, target_size=(151, 136))
vgg_features_train = base_model.predict(train)
vgg_features_val = base_model.predict(val)
train_target = to_categorical(train.labels)
val_target = to_categorical(val.labels)
model.compile(optimizer='rmsprop', metrics=['accuracy'], loss='categorical_crossentropy')
model.fit(vgg_features_train, train_target, epochs=100, batch_size=8, validation_data=(vgg_features_val, val_target), callbacks=callbacks_list)
This resulted in a different input shape error:
File "train-binary.py", line 37, in <module> model.fit(vgg_features_train, train_target, epochs=100, batch_size=8, validation_data=(vgg_features_val, val_target), callbacks=callbacks_list)
ValueError: Input 0 is incompatible with layer model: expected shape=(None, 151, 136, 3), found shape=(None, 512)
Your model expects the input to model.predict to have the same dimensions as the data it was trained on; in this case, the dimensions of vgg_features_train. The input to model.predict that you are generating instead matches the input of the VGG model. You are essentially trying to do transfer learning, so I suggest you proceed as below:
base_model = tf.keras.applications.VGG19(include_top=False, input_shape=img_shape, pooling='max', weights='imagenet')
x = base_model.output
x = Dense(100, activation='relu')(x)
x = Dropout(0.5)(x)
x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
output = Dense(2, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=output)
model.fit(train, epochs=100, batch_size=8, validation_data=val, callbacks=callbacks_list)
Now, for prediction, you can use the same dimensions that you used to train the model.
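As a follow-up, a hedged sketch of what single-image prediction could look like once the combined model is trained end to end (predict_image is an illustrative name, and it assumes the same unscaled pixel values the ImageDataGenerator above produces):
import numpy as np
from keras.preprocessing.image import load_img, img_to_array

def predict_image(file, model):
    # The full model now takes raw images, so one image only needs the
    # training-time spatial size plus a batch dimension.
    x = img_to_array(load_img(file, target_size=(151, 136)))  # (151, 136, 3)
    x = np.expand_dims(x, axis=0)                             # (1, 151, 136, 3)
    return model.predict(x)[0]                                # e.g. [p_class0, p_class1]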