I am trying to pass 3 images to my triplet network using my data generator. I load the different pairs and stack them into batches, but I don't know how to return them as 3 separate arrays. I tried appending them to a list, but that didn't work either. How can I make the data generator return them this way?
class DataGenerator(keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, batch_size=16, dim=(244,244,3), n_channels=3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        # Find list of IDs
        list_IDs_temp = [self.list_IDs[k] for k in indexes]
        # Generate data
        X, Z, y = self.__data_generation(list_IDs_temp)
        return X, Z, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)
        # V = np.stack((X, Z), axis=-1)
        # F = np.stack((V, y), axis=-1)

    def __data_generation(self, list_IDs_temp):
        'Generates data containing batch_size samples'  # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.empty((self.batch_size, *self.dim))
        Z = np.empty((self.batch_size, *self.dim))
        y = np.empty((self.batch_size, *self.dim))
        # Generate data
        for i, ID in enumerate(list_IDs_temp):
            # Store sample
            image = plt.imread(os.path.join(IMAGE_DIR, ID[0])).astype(np.float32)
            image = imresize(image, (IM_SIZE, IM_SIZE))
            image1 = plt.imread(os.path.join(IMAGE_DIR, ID[1])).astype(np.float32)
            image1 = imresize(image1, (IM_SIZE, IM_SIZE))
            image2 = plt.imread(os.path.join(IMAGE_DIR, ID[2])).astype(np.float32)
            image2 = imresize(image2, (IM_SIZE, IM_SIZE))
            X[i,] = image
            Z[i,] = image1
            y[i,] = image2
        return X, Z, y
input_a = Input(shape=(224,224,3))
input_b = Input(shape=(224,224,3))
input_c = Input(shape=(224,224,3))

conv = Sequential([
    Conv2D(24, (7, 7), strides=(1,1), input_shape=(224,224,3)),
    BatchNormalization(epsilon=1e-06, axis=1, momentum=0.9),
    MaxPooling2D((3,3), strides=(2, 2)),
    Activation('relu'),
    Dropout(0.2),
    ZeroPadding2D((2, 2)),
    Conv2D(64, (5, 5), padding='same', strides=(1,1), kernel_initializer='glorot_uniform'),
    BatchNormalization(epsilon=1e-06, axis=1, momentum=0.9),
    MaxPooling2D((3,3), strides=(2, 2)),
    Activation('relu'),
    Dropout(0.2),
    ZeroPadding2D((1, 1)),
    Conv2D(96, (3,3), padding='same', strides=(1,1), kernel_initializer='glorot_uniform'),
    BatchNormalization(epsilon=1e-06, axis=1, momentum=0.9),
    MaxPool2D(pool_size=(2,2), strides=(2,2)),
    Activation('relu'),
    Dropout(0.2),
    ZeroPadding2D((1, 1)),
    Conv2D(96, (3,3), padding='same', strides=(1,1), kernel_initializer='glorot_uniform'),
    BatchNormalization(epsilon=1e-06, axis=1, momentum=0.9),
    Activation('relu'),
    MaxPool2D(pool_size=(2,2), strides=(2,2)),
    Dropout(0.2),
    ZeroPadding2D((1, 1)),
    Conv2D(64, (5, 5), padding='same', strides=(1,1), kernel_initializer='glorot_uniform'),
    BatchNormalization(epsilon=1e-06, axis=1, momentum=0.9),
    Activation('relu', name="activation_1_5"),
    MaxPooling2D((3,3), strides=(2, 2)),
    Dropout(0.2),
    Dense(256, activation='relu'),
    Flatten()
])

net1 = conv(input_a)
net2 = conv(input_b)
net3 = conv(input_c)

d1 = subtract(net1, net2)
d2 = subtract(net1, net3)
n1 = norm(d1)
n2 = norm(d2)
out = Activation('sigmoid')(subtract(n2, n1))

model = Model(inputs=[input_a, input_b, input_c], outputs=out)
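Note that subtract and norm are never defined in the post; presumably they are small custom helpers. A hypothetical reconstruction consistent with how they are called (not the asker's actual code):

from keras.layers import Lambda
import keras.backend as K

def subtract(a, b):
    # hypothetical helper: element-wise difference of two embedding tensors
    return Lambda(lambda t: t[0] - t[1])([a, b])

def norm(t):
    # hypothetical helper: Euclidean norm per sample, kept 2-D as (batch, 1)
    return Lambda(lambda x: K.sqrt(K.sum(K.square(x), axis=1, keepdims=True)))(t)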
params = {'dim': (224,224,3),
          'batch_size': BATCH_SIZE,
          'n_channels': 3,
          'shuffle': False}

paramsv = {'dim': (224,224,3),
           'batch_size': BATCH_SIZE,
           'n_channels': 3,
           'shuffle': True}

training_generator = DataGenerator(partition_image['train'], **params)
validation_generator = DataGenerator(partition_image['validation'], **paramsv)

opt = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, decay=1e-6)

filepath = 'weights/weights.{epoch:02d}-{val_loss:.2f}.hdf5'
cpkt1 = ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=True, mode='auto', period=1)
cpkt2 = TensorBoard(log_dir='tensorboard/', histogram_freq=0, write_graph=True, write_images=True)
cpkt3 = EarlyStopping(monitor='val_loss', min_delta=0, patience=4, verbose=0, mode='auto')

model.compile(loss="binary_crossentropy", optimizer=opt, metrics=['accuracy'])
model.fit_generator(generator=training_generator,
                    validation_data=validation_generator,
                    steps_per_epoch=int(np.ceil(len(partition_image['train']) / BATCH_SIZE)),
                    validation_steps=int(np.ceil(len(partition_image['validation']) / BATCH_SIZE)),
                    epochs=EPOCHS,
                    shuffle=True,
                    verbose=1, callbacks=[cpkt1, cpkt2, cpkt3])
ValueError: Error when checking model input: the list of Numpy arrays that you are passing to your model is not the size the model expected. Expected to see 3 array(s), but instead got the following list of 1 arrays: [array([[[[180., 189., 194.],
[...
There might be other solutions, but what I do is name my input layers and then pass the inputs as a dictionary with the same names.
So in your model you should name your inputs:
input_a = Input(shape=(224,224,3), name="input_a")
input_b = Input(shape=(224,224,3), name="input_b")
input_c = Input(shape=(224,224,3), name="input_c")
Then the generator must return something like this:
inputs = {"input_a": X,
          "input_b": Z,
          "input_c": y}
outputs = {"output": o}
return inputs, outputs
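Applied to the generator above, __getitem__ inside DataGenerator could then look roughly like this (a sketch: the post never shows the training targets, so the labels array below is a placeholder to replace with the real per-triplet labels):

def __getitem__(self, index):
    indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
    list_IDs_temp = [self.list_IDs[k] for k in indexes]
    X, Z, y = self.__data_generation(list_IDs_temp)
    inputs = {"input_a": X, "input_b": Z, "input_c": y}
    labels = np.zeros((self.batch_size, 1))  # placeholder targets
    return inputs, labels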
You can find an example of a generator with multiple inputs in this Keras example.
Related
I have a dataset for motion blur: 29,216 sharp images and 29,216 blurry images, with a 20% test split. Initially I was only using a dataset of 2,000 images in total, so my autoencoder looked like this:
# Below is a custom data loader.
def load_image(file, target_size):
    image = tf.keras.preprocessing.image.load_img(file, target_size=target_size)
    image = tf.keras.preprocessing.image.img_to_array(image).astype('float32') / 255
    return image

clean_frames = []
blurry_frames = []
extensions = ['.jpg', 'jpeg', '.png']

for file in tqdm(sorted(os.listdir(good_frames))):
    if any(extension in file for extension in extensions):
        file_path = os.path.join(good_frames, file)
        clean_frames.append(load_image(file_path, (128,128)))
clean_frames = np.array(clean_frames)

for file in tqdm(sorted(os.listdir(bad_frames))):
    if any(extension in file for extension in extensions):
        file_path = os.path.join(bad_frames, file)
        blurry_frames.append(load_image(file_path, (128,128)))
blurry_frames = np.array(blurry_frames)

print('number of clean frames: ', len(clean_frames))
print('number of blurry frames: ', len(blurry_frames))
# Train test split
x_train, x_test, y_train, y_test = train_test_split(clean_frames, blurry_frames, test_size=0.2, random_state=42)

# Network parameters
input_shape = (128, 128, 3)
batch_size = 32
kernel_size = 3
latent_dim = 256

inputs = Input(shape=input_shape, name='encoder_input')
x = inputs

# Layers of the encoder
x = Conv2D(filters=64, kernel_size=kernel_size, strides=2, activation='relu', padding='same')(x)
x = Conv2D(filters=128, kernel_size=kernel_size, strides=2, activation='relu', padding='same')(x)
x = Conv2D(filters=256, kernel_size=kernel_size, strides=2, activation='relu', padding='same')(x)
shape = K.int_shape(x)
x = Flatten()(x)
latent = Dense(latent_dim, name='latent_vector')(x)
encoder = Model(inputs, latent, name='encoder')
encoder.summary()

# Layers of the decoder
latent_inputs = Input(shape=(latent_dim,), name='decoder_input')
x = Dense(shape[1]*shape[2]*shape[3])(latent_inputs)
x = Reshape((shape[1], shape[2], shape[3]))(x)
x = Conv2DTranspose(filters=256, kernel_size=kernel_size, strides=2, activation='relu', padding='same')(x)
x = Conv2DTranspose(filters=128, kernel_size=kernel_size, strides=2, activation='relu', padding='same')(x)
x = Conv2DTranspose(filters=64, kernel_size=kernel_size, strides=2, activation='relu', padding='same')(x)
outputs = Conv2DTranspose(filters=3, kernel_size=kernel_size, activation='sigmoid', padding='same', name='decoder_output')(x)
decoder = Model(latent_inputs, outputs, name='decoder')

autoencoder = Model(inputs, decoder(encoder(inputs)), name='autoencoder')
autoencoder.compile(loss='mse', optimizer='adam', metrics=["acc"])

# Automated learning-rate reducer
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                               cooldown=0,
                               patience=5,
                               verbose=1,
                               min_lr=0.5e-6)
callbacks = [lr_reducer]

# Begin training
history = autoencoder.fit(blurry_frames,
                          clean_frames,
                          validation_data=(blurry_frames, clean_frames),
                          epochs=100,
                          batch_size=batch_size,
                          callbacks=callbacks)
This would yield an accuracy of 82% after 100 epochs.
Now that I have more data, I believe I can get better performance. How should I adjust my layers and parameters to make the most of the data I have?
I am tuning a Conv1D autoencoder with Optuna, and I don't know how to solve the error below.
def create_model(time_steps, num_layer, num_filters, kernel_size, strides, dropout_rate, activation):
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.InputLayer(input_shape=(time_steps, 1)))
    for i in range(num_layer):
        filters = int(num_filters / (i+1))
        model.add(
            tf.keras.layers.Conv1D(
                filters=filters, kernel_size=kernel_size, padding="same", strides=strides, activation=activation
            )
        )
        if i < (num_layer - 1):
            model.add(tf.keras.layers.Dropout(rate=dropout_rate))
    for i in reversed(range(num_layer)):
        filters = int(num_filters / (i+1))
        model.add(
            tf.keras.layers.Conv1DTranspose(
                filters=filters, kernel_size=kernel_size, padding="same", strides=strides, activation=activation
            )
        )
        if i != 0:
            model.add(tf.keras.layers.Dropout(rate=dropout_rate))
    model.add(
        tf.keras.layers.Conv1DTranspose(
            filters=1, kernel_size=kernel_size, padding="same"
        )
    )
    return model

def objective(trial):
    num_layer = trial.suggest_int("num_layer", 1, 3)
    num_filters = int(trial.suggest_categorical("num_filters", [16, 32, 64]))
    kernel_size = trial.suggest_int("kernel_size", 1, 5, 2)
    strides = trial.suggest_int("strides", 2, 4, 2)
    dropout_rate = trial.suggest_uniform('dropout_rate', 0.0, 0.5)
    activation = trial.suggest_categorical("activation", ["relu", "sigmoid", "tanh"])
    optimizer = trial.suggest_categorical("optimizer", ["sgd", "adam"])

    model = create_model(TIME_STEPS, num_layer, num_filters, kernel_size, strides, dropout_rate, activation)
    model.compile(
        optimizer=optimizer,
        loss="mse"
    )
    model.summary()

    history = model.fit(
        x_train,
        x_train,
        epochs=50,
        batch_size=128,
        validation_split=0.1,
        callbacks=[
            tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=5, mode="min")
        ],
    )
    return history.history["val_loss"][-1]

study = optuna.create_study()
study.optimize(objective, n_trials=50)
The error is raised from this line:
study.optimize(objective, n_trials=50)
Error message:
ValueError: Dimensions must be equal, but are 32 and 20 for '{{node mean_squared_error/SquaredDifference}} = SquaredDifference[T=DT_FLOAT](mean_squared_error/remove_squeezable_dimensions/Squeeze, IteratorGetNext:1)' with input shapes: [?,32], [?,20].
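For what it's worth, the mismatch is consistent with how the strided layers round lengths: with padding="same", a Conv1D outputs ceil(L / strides) steps, while a Conv1DTranspose outputs L * strides, so the decoder does not always undo the encoder exactly. A worked example, assuming TIME_STEPS = 20 (which matches the error):

# num_layer = 2, strides = 4 -- one of the combinations Optuna can suggest
# encoder: 20 -> ceil(20/4) = 5 -> ceil(5/4) = 2
# decoder: 2 -> 2*4 = 8 -> 8*4 = 32   # final stride-1 layer keeps 32, vs. the [?, 20] input

One remedy is to constrain the search so that TIME_STEPS is divisible by strides ** num_layer, or to crop/pad the decoder output back to time_steps.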
I want to build a VAE-CNN, but I don't know why it throws the error below.
train_datagen = ImageDataGenerator(rescale=1. / 255)
validation_datagen = ImageDataGenerator(rescale=1. / 255)

train_gen = train_datagen.flow_from_directory(
    './train for dataset/',
    target_size=(80, 24),
    color_mode='grayscale',
    batch_size=32,
    class_mode='input',
    shuffle=True,
    seed=42
)
validation_gen = validation_datagen.flow_from_directory(
    './test/',
    target_size=(80, 24),
    color_mode='grayscale',
    batch_size=32,
    class_mode='input',
    shuffle=False,
    seed=42
)

# VAE-CNN
filter1_V = 64
filter2_V = 88
latent_dim_V = 20

original_inputs = keras.Input(shape=(80,24,1))
init = tf.keras.initializers.VarianceScaling(scale=0.3, mode='fan_in', distribution='uniform')
layer1_v = layers.Conv2D(filter1_V, kernel_size=3, activation='relu', kernel_initializer=init, padding='same', strides=2)(original_inputs)
layer1_v = layers.MaxPool2D(pool_size=(2,2))(layer1_v)
# strides defaults to pool_size, i.e. (2, 2)
layer2_v = layers.Conv2D(filter2_V, kernel_size=3, activation='relu', kernel_initializer=init, padding='same', strides=2)(layer1_v)
layer2_v = layers.MaxPool2D(pool_size=(2,2))(layer2_v)
layer3_v = layers.Flatten()(layer2_v)

# Core part: mean and variance
# get the mean
layer_mean = layers.Dense(latent_dim_V)(layer3_v)
# get the log variance; it can range from negative to positive, whereas a plain variance is only positive
log_var = layers.Dense(latent_dim_V)(layer3_v)

# To backpropagate through the sampling step, add a noise parameter drawn from normal(0, 1) (the reparameterization trick)
def sampling(args):
    layer_mean, log_var = args
    eps = K.random_normal(shape=(K.shape(log_var)[0], latent_dim_V), mean=0., stddev=1.0)
    # reparameterize: the standard deviation is what we want
    std = K.exp(log_var)**0.5
    return layer_mean + std * eps

z = layers.Lambda(sampling, output_shape=(latent_dim_V,))([layer_mean, log_var])

# decoder part
dec1_v = layers.Dense(layer3_v.shape[1], activation='relu')(z)
dec2_v = layers.Reshape((layer2_v.shape[1], layer2_v.shape[2], layer2_v.shape[3]))(dec1_v)
dec3_v = layers.Conv2DTranspose(filter2_V, kernel_size=3, output_padding=(1,2), activation='relu', kernel_initializer=init, padding='same', strides=(2,3))(dec2_v)
dec4_v = layers.Conv2DTranspose(filter1_V, kernel_size=3, activation='relu', kernel_initializer=init, padding='same', strides=2)(dec3_v)
dec5_v = layers.Conv2DTranspose(filter1_V, kernel_size=3, activation="relu", kernel_initializer=init, padding='same', strides=2)(dec4_v)
dec_v_outputs = layers.Conv2DTranspose(1, kernel_size=3, activation="relu", kernel_initializer=init, padding='same', strides=2)(dec5_v)

encoder_v = keras.Model(inputs=original_inputs, outputs=[z, layer_mean, log_var], name='encoder')
decoder_v = keras.Model(inputs=z, outputs=dec_v_outputs, name='decoder')
outputs = decoder_v(encoder_v(original_inputs)[0])
vae_model = keras.Model(inputs=original_inputs, outputs=outputs, name='vae_model')
vae_model.summary()

kl_loss = -0.5 * K.sum(log_var + 1 - layer_mean**2 - K.exp(log_var), axis=-1)
kl_loss = K.mean(kl_loss)/1920.
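# For reference: the two lines above are the closed-form KL divergence between
# N(mean, exp(log_var)) and N(0, 1), KL = -0.5 * sum(1 + log_var - mean^2 - exp(log_var));
# dividing by 1920 presumably rescales it per pixel, since each input has 80*24 = 1920 pixels.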
lr = 1e-3
optimizer = keras.optimizers.Adam(learning_rate=lr)
vae_model.add_loss(kl_loss)
vae_model.compile(optimizer, loss="binary_crossentropy")

history = vae_model.fit(train_gen, train_gen, epochs=4, batch_size=32, validation_data=(validation_gen, validation_gen))
I want to get a VAE-CNN working, but there is an error:
ValueError: Graph disconnected: cannot obtain value for tensor Tensor("input_1:0", shape=(None, 80, 24, 1), dtype=float32) at layer "input_1". The following previous layers were accessed without issue: []
Why does this happen, and how can I solve it?
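A likely cause (an educated guess, not stated in the post): decoder_v is built with keras.Model(inputs=z, ...), but z is an intermediate Lambda output whose history traces back to original_inputs, so when Keras walks the decoder's graph it reaches input_1 without it being declared as an input. The usual pattern is to give the decoder its own Input layer, roughly like this sketch:

# Hypothetical restructuring: the decoder gets a standalone latent Input
latent_inputs = keras.Input(shape=(latent_dim_V,), name='latent_inputs')
dec1_v = layers.Dense(layer3_v.shape[1], activation='relu')(latent_inputs)
# ... rebuild dec2_v through dec_v_outputs from dec1_v exactly as before ...
decoder_v = keras.Model(inputs=latent_inputs, outputs=dec_v_outputs, name='decoder')
outputs = decoder_v(z)  # call the decoder on the sampled z
vae_model = keras.Model(inputs=original_inputs, outputs=outputs, name='vae_model')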
I am very new to machine learning and have built a VAE from the Keras VAE code example. I only changed a few layers in the model. I trained the model on the Kaggle cats and dogs dataset and then tried to reconstruct a few images. All the reconstructed images looked the same, just like these Reconstructed Images. What could be the cause of this? Is it due to a bad model, a short training time, or a mistake in how I reconstruct the images?
The encoder model:
latent_dim = 2
encoder_inputs = keras.Input(shape=(328, 328, 3))
x = layers.Conv2D(32, 3, strides=2, padding="same")(encoder_inputs)
x = layers.Activation("relu")(x)
x = layers.BatchNormalization()(x)
x = layers.Conv2D(64, 3,strides=2, padding="same")(x)
x = layers.Activation("relu")(x)
x = layers.BatchNormalization()(x)
x = layers.Conv2D(128, 3, strides=2, padding="same")(x)  # new
x = layers.Activation("relu")(x)
x = layers.BatchNormalization()(x)
x = layers.Flatten()(x)
x = layers.Dense(16, activation="relu")(x)
z_mean = layers.Dense(latent_dim, name="z_mean")(x)
z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
z = Sampling()([z_mean, z_log_var])
encoder = keras.Model(encoder_inputs, [z_mean, z_log_var, z], name="encoder")
encoder.summary()
The decoder model:
latent_inputs = keras.Input(shape=(latent_dim,))  # latent input, as in the Keras VAE example
x = layers.Dense(41 * 41 * 128, activation="relu")(latent_inputs)
x = layers.Reshape((41, 41, 128))(x)
x = layers.Conv2DTranspose(128, 3, activation="relu", strides=2, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(x)
x = layers.BatchNormalization()(x)
decoder_outputs = layers.Conv2DTranspose(3, 3, activation="sigmoid", padding="same")(x)
decoder = keras.Model(latent_inputs, decoder_outputs, name="decoder")
decoder.summary()
The training:
train_data_dir ='/content/PetImages'
nb_train_samples = 200
nb_epoch = 50
batch_size = 32
img_width = 328
img_height = 328
def fixed_generator(generator):
    # class_mode=None makes flow_from_directory yield only image batches,
    # so pair each batch with itself as the autoencoder target
    for batch in generator:
        yield (batch, batch)
train_datagen = ImageDataGenerator(
    rescale=1./255,
)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode=None)

vae = VAE(encoder, decoder)
vae.compile(optimizer=keras.optimizers.Adam())
vae.fit(
    fixed_generator(train_generator),
    steps_per_epoch=nb_train_samples,
    epochs=nb_epoch,
)
And reconstructing the images:
import matplotlib.pyplot as plt
test2_datagen = ImageDataGenerator(rescale=1./255)
test2_generator = test2_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=10,
    class_mode=None)

sample_img = next(test2_generator)
z_points = vae.encoder.predict(sample_img)
reconst_images = vae.decoder.predict(z_points)

fig = plt.figure(figsize=(10, 8))
fig.subplots_adjust(hspace=0.1, wspace=0.1)
n_to_show = 2

for i in range(n_to_show):
    img = sample_img[i].squeeze()
    sub = fig.add_subplot(2, n_to_show, i+1)
    sub.axis('off')
    sub.imshow(img)

for i in range(n_to_show):
    img = reconst_images[i].squeeze()
    sub = fig.add_subplot(2, n_to_show, i+n_to_show+1)
    sub.axis('off')
    sub.imshow(img)
Getting the following error message when setting up a 3D-GAN for ModelNet10:
InvalidArgumentError: Input to reshape is a tensor with 27000 values, but the requested shape has 810000 [Op:Reshape]
In my opinion the batch is not properly created, and thereby the shape of the tensor is invalid. I have tried different things but can't get the batch set up.
I would be more than thankful for any hints on how to clean up my code!
Thanks in advance!
import time
import numpy as np
import tensorflow as tf
np.random.seed(1)
from tensorflow.keras import layers
from IPython import display
# Load the data
modelnet_path = '/modelnet10.npz'
data = np.load(modelnet_path)
X, Y = data['X_train'], data['y_train']
X_test, Y_test = data['X_test'], data['y_test']
X = X.reshape(X.shape[0], 30, 30, 30, 1).astype('float32')
#Hyperparameters
BUFFER_SIZE = 3991
BATCH_SIZE = 30
LEARNING_RATE = 4e-4
BETA_1 = 5e-1
EPOCHS = 100
#Random seed for image generation
n_examples = 16
noise_dim = 100
seed = tf.random.normal([n_examples, noise_dim])
train_dataset = tf.data.Dataset.from_tensor_slices(X).batch(BATCH_SIZE)
# Build the network
def make_discriminator_model():
    model = tf.keras.Sequential()
    model.add(layers.Reshape((30, 30, 30, 1), input_shape=(30, 30, 30)))
    model.add(layers.Conv3D(16, 6, strides=2, activation='relu'))
    model.add(layers.Conv3D(64, 5, strides=2, activation='relu'))
    model.add(layers.Conv3D(64, 5, strides=2, activation='relu'))
    model.add(layers.Flatten())
    model.add(layers.Dense(10))
    return model
discriminator = make_discriminator_model()
def make_generator_model():
    model = tf.keras.Sequential()
    model.add(layers.Dense(15*15*15*128, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.ReLU())
    model.add(layers.Reshape((15,15,15,128)))
    model.add(layers.Conv3DTranspose(64, (5,5,5), strides=(1,1,1), padding='valid', use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.ReLU())
    model.add(layers.Conv3DTranspose(32, (5,5,5), strides=(2,2,2), padding='valid', use_bias=False, activation='tanh'))
    return model
generator = make_generator_model()
#Optimizer & Loss function
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def discriminator_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    total_loss = real_loss + fake_loss
    return total_loss

def generator_loss(fake_output):
    return cross_entropy(tf.ones_like(fake_output), fake_output)
optimizer = tf.keras.optimizers.Adam(lr=LEARNING_RATE, beta_1=BETA_1)
#Training
def train_step(shapes):
    noise = tf.random.normal([BATCH_SIZE, noise_dim])
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_shapes = generator(noise, training=True)
        real_output = discriminator(shapes, training=True)
        fake_output = discriminator(generated_shapes, training=True)
        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)
    gen_gradients = gen_tape.gradient(gen_loss, generator.trainable_variables)
    disc_gradients = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    optimizer.apply_gradients(zip(gen_gradients, generator.trainable_variables))
    optimizer.apply_gradients(zip(disc_gradients, discriminator.trainable_variables))

def train(dataset, epochs):
    for epoch in range(epochs):
        start = time.time()
        for shape_batch in dataset:
            train_step(shape_batch)
        display.clear_output(wait=True)
        print('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))
    display.clear_output(wait=True)

train(X_test, EPOCHS)
X_test is just a plain array, not a batched dataset, so in your training loop each iteration feeds a single sample (30*30*30 = 27,000 values) into the model, while the model itself expects a full batch (30 (batch size) * 30 * 30 * 30 = 810,000 values).
modelnet_path = '/modelnet10.npz'
data = np.load(modelnet_path)
X, Y = data['X_train'], data['y_train']
X_test, Y_test = data['X_test'], data['y_test']
X = X.reshape(X.shape[0], 30, 30, 30, 1).astype('float32')
...
train_dataset = tf.data.Dataset.from_tensor_slices(X).batch(BATCH_SIZE)
...
def train(dataset, epochs):
    for epoch in range(epochs):
        start = time.time()
        for shape_batch in dataset:
            train_step(shape_batch)
        display.clear_output(wait=True)
        print('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))
    display.clear_output(wait=True)

train(X_test, EPOCHS)
Consider training with the train_dataset you created, or build a tf.data.Dataset from X_test as well.
train(train_dataset, EPOCHS)
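If you also want to run the loop over the test split, a minimal sketch (assuming X_test uses the same 30x30x30 voxel layout as X_train):

# Hypothetical: batch the test split the same way as the training data
X_test = X_test.reshape(X_test.shape[0], 30, 30, 30, 1).astype('float32')
test_dataset = tf.data.Dataset.from_tensor_slices(X_test).batch(BATCH_SIZE)
train(test_dataset, EPOCHS)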