I'm trying to build a neural network based on the Inception architecture used for images, but for 1D vectors.
I based my model on this example from the Keras getting-started guide: https://keras.io/getting-started/functional-api-guide/
tf.keras.backend.clear_session()
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
input_vector = Input(shape=(71276,1),)
tower_1 = tf.keras.layers.Conv1D(filters=64, kernel_size=1, padding='same', activation='relu', name='conv_1')(input_vector)
tower_1 = tf.keras.layers.Conv1D(filters=64, kernel_size=3, padding='same', activation='relu', name='conv_2')(tower_1)
tower_2 = tf.keras.layers.Conv1D(filters=64, kernel_size=1, padding='same', activation='relu', name='conv_3')(input_vector)
tower_2 = tf.keras.layers.Conv1D(filters=64, kernel_size=1, padding='same', activation='relu', name='conv_4')(tower_2)
tower_3 = tf.keras.layers.MaxPooling1D(pool_size=3, strides=1, padding='same')(input_vector)
tower_3 = tf.keras.layers.Conv1D(filters=64, kernel_size=1, padding='same', activation='relu', name='conv_4')(tower_3)
output = tf.keras.layers.concatenate([tower_1, tower_2, tower_3])
model = tf.keras.models.Model(inputs=input_vector, outputs=output)
model.compile(loss='mse',
              optimizer=tf.keras.optimizers.Adam(lr=0.001),
              metrics=['mae'])
model.summary()
This is my code:
from keras.layers import Conv1D, MaxPooling1D, Input
from keras.models import Model
tf.keras.backend.clear_session()
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
input_vector = Input(shape=(71276,1),)
tower_1 = Conv1D(filters=64, kernel_size=1, padding='same', activation='relu', name='conv_1')(input_vector)
tower_1 = Conv1D(filters=64, kernel_size=3, padding='same', activation='relu', name='conv_1')(tower_1)
tower_2 = Conv1D(filters=64, kernel_size=1, padding='same', activation='relu', name='conv_1')(input_vector)
tower_2 = Conv1D(filters=64, kernel_size=1, padding='same', activation='relu', name='conv_1')(tower_2)
tower_3 = MaxPooling1D(pool_size=3, strides=1, padding='same')(input_vector)
tower_3 = Conv1D(filters=64, kernel_size=1, padding='same', activation='relu', name='conv_1')(tower_3)
output = tf.keras.layers.concatenate([tower_1, tower_2, tower_3])
model = Model(inputs=input_vector, outputs=output)
model.compile(loss='mse',
              optimizer=tf.keras.optimizers.Adam(lr=0.001),
              metrics=['mae'])
model.summary()
When executing, I'm getting the following error, and don't really understand why:
AttributeError Traceback (most recent call last)
<ipython-input-9-2931ae837421> in <module>()
6 input_vector = Input(shape=(71276,1),)
7
----> 8 tower_1 = tf.keras.layers.Conv1D(filters=64, kernel_size=1, padding='same', activation='relu', name='conv_1')(input_vector)
9 tower_1 = tf.keras.layers.Conv1D(filters=64, kernel_size=3, padding='same', activation='relu', name='conv_2')(tower_1)
10
5 frames
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/base_layer.py in <lambda>(t)
2056 `call` method of the layer at the call that created the node.
2057 """
-> 2058 inbound_layers = nest.map_structure(lambda t: t._keras_history.layer,
2059 input_tensors)
2060 node_indices = nest.map_structure(lambda t: t._keras_history.node_index,
AttributeError: 'tuple' object has no attribute 'layer'
I don't have a lot of experience with convolutional layers, so it's very possible I've made an obvious mistake. Searching online, I haven't been able to find anyone else with the same problem.
I'm running this on Google Colaboratory, in a python 3 runtime.
Any help would be appreciated, thank you!
A few things:
All your layers have the same name ('conv_1'); I bet that could cause lots of strange bugs.
tower_3 may not end up with the same shape as the other two towers, in which case the concatenation would fail. (You're using a MaxPooling1D; check the summary to confirm.)
You are mixing keras and tf.keras, and that is certainly a huge problem. Choose only one and use it consistently (a sketch follows below).
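For reference, here is a minimal sketch of the same three-tower model written entirely with tf.keras and with unique layer names. This is only an illustration of the "use one API consistently" point, assuming TensorFlow 2.x; the filter counts and kernel sizes simply mirror the code above:

import tensorflow as tf

inp = tf.keras.Input(shape=(71276, 1))

# Tower 1: 1-wide conv followed by a 3-wide conv
t1 = tf.keras.layers.Conv1D(64, 1, padding='same', activation='relu', name='t1_conv1')(inp)
t1 = tf.keras.layers.Conv1D(64, 3, padding='same', activation='relu', name='t1_conv3')(t1)

# Tower 2: two 1-wide convs
t2 = tf.keras.layers.Conv1D(64, 1, padding='same', activation='relu', name='t2_conv1')(inp)
t2 = tf.keras.layers.Conv1D(64, 1, padding='same', activation='relu', name='t2_conv2')(t2)

# Tower 3: max-pool (stride 1 with 'same' padding keeps the length) then a 1-wide conv
t3 = tf.keras.layers.MaxPooling1D(pool_size=3, strides=1, padding='same', name='t3_pool')(inp)
t3 = tf.keras.layers.Conv1D(64, 1, padding='same', activation='relu', name='t3_conv1')(t3)

out = tf.keras.layers.concatenate([t1, t2, t3], name='concat')

model = tf.keras.Model(inputs=inp, outputs=out)
model.compile(loss='mse',
              optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
              metrics=['mae'])
model.summary()

With everything coming from tf.keras, the Input tensor and the layers share the same Keras implementation, so the 'tuple' object has no attribute 'layer' error should go away.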
Related
I want to train a VAE on a huge dataset, and decided to start from a VAE written for Fashion-MNIST plus some popular modifications for batch loading by filename that I found on GitHub. My research Colab notebook is here, along with a sample section of the dataset.
But the way the VAE class is written, it does not have a call method, which should be there according to the Keras documentation. I am getting the error NotImplementedError: When subclassing the Model class, you should implement a call method.
class VAE(tf.keras.Model):
    """A basic VAE class for TensorFlow.

    Extends:
        tf.keras.Model
    """
    def __init__(self, **kwargs):
        super(VAE, self).__init__()
        self.__dict__.update(kwargs)
        self.enc = tf.keras.Sequential(self.enc)
        self.dec = tf.keras.Sequential(self.dec)

    def encode(self, x):
        mu, sigma = tf.split(self.enc(x), num_or_size_splits=2, axis=1)
        return ds.MultivariateNormalDiag(loc=mu, scale_diag=sigma)

    def reparameterize(self, mean, logvar):
        eps = tf.random.normal(shape=mean.shape)
        return eps * tf.exp(logvar * 0.5) + mean

    def reconstruct(self, x):
        mu, _ = tf.split(self.enc(x), num_or_size_splits=2, axis=1)
        return self.decode(mu)

    def decode(self, z):
        return self.dec(z)

    def compute_loss(self, x):
        q_z = self.encode(x)
        z = q_z.sample()
        x_recon = self.decode(z)
        p_z = ds.MultivariateNormalDiag(
            loc=[0.] * z.shape[-1], scale_diag=[1.] * z.shape[-1]
        )
        kl_div = ds.kl_divergence(q_z, p_z)
        latent_loss = tf.reduce_mean(tf.maximum(kl_div, 0))
        recon_loss = tf.reduce_mean(tf.reduce_sum(tf.math.square(x - x_recon), axis=0))
        return recon_loss, latent_loss

    def compute_gradients(self, x):
        with tf.GradientTape() as tape:
            loss = self.compute_loss(x)
        return tape.gradient(loss, self.trainable_variables)

    @tf.function
    def train(self, train_x):
        gradients = self.compute_gradients(train_x)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
The encoder and decoder are defined separately, as lists of layers, and the model is built and compiled as:
N_Z = 8
filt_base = 32
DIMS = (128,128,3)
encoder = [
    tf.keras.layers.InputLayer(input_shape=DIMS),
    tf.keras.layers.Conv2D(filters=filt_base, kernel_size=3, strides=(1, 1), activation="relu", padding="same"),
    tf.keras.layers.Conv2D(filters=filt_base, kernel_size=3, strides=(2, 2), activation="relu", padding="same"),
    tf.keras.layers.Conv2D(filters=filt_base*2, kernel_size=3, strides=(1, 1), activation="relu", padding="same"),
    tf.keras.layers.Conv2D(filters=filt_base*2, kernel_size=3, strides=(2, 2), activation="relu", padding="same"),
    tf.keras.layers.Conv2D(filters=filt_base*3, kernel_size=3, strides=(1, 1), activation="relu", padding="same"),
    tf.keras.layers.Conv2D(filters=filt_base*3, kernel_size=3, strides=(2, 2), activation="relu", padding="same"),
    tf.keras.layers.Conv2D(filters=filt_base*4, kernel_size=3, strides=(1, 1), activation="relu", padding="same"),
    tf.keras.layers.Conv2D(filters=filt_base*4, kernel_size=3, strides=(2, 2), activation="relu", padding="same"),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(units=N_Z*2),
]

decoder = [
    tf.keras.layers.Dense(units=8 * 8 * 128, activation="relu"),
    tf.keras.layers.Reshape(target_shape=(8, 8, 128)),
    tf.keras.layers.Conv2DTranspose(filters=filt_base*4, kernel_size=3, strides=(2, 2), padding="SAME", activation="relu"),
    tf.keras.layers.Conv2DTranspose(filters=filt_base*4, kernel_size=3, strides=(1, 1), padding="SAME", activation="relu"),
    tf.keras.layers.Conv2DTranspose(filters=filt_base*3, kernel_size=3, strides=(2, 2), padding="SAME", activation="relu"),
    tf.keras.layers.Conv2DTranspose(filters=filt_base*3, kernel_size=3, strides=(1, 1), padding="SAME", activation="relu"),
    tf.keras.layers.Conv2DTranspose(filters=filt_base*2, kernel_size=3, strides=(2, 2), padding="SAME", activation="relu"),
    tf.keras.layers.Conv2DTranspose(filters=filt_base*2, kernel_size=3, strides=(1, 1), padding="SAME", activation="relu"),
    tf.keras.layers.Conv2DTranspose(filters=filt_base, kernel_size=3, strides=(2, 2), padding="SAME", activation="relu"),
    tf.keras.layers.Conv2DTranspose(filters=1, kernel_size=3, strides=(1, 1), padding="SAME", activation="sigmoid"),
]

optimizer = tf.keras.optimizers.Adam(1e-3)

model = VAE(
    enc=encoder,
    dec=decoder,
    optimizer=optimizer,
)
model.compile(optimizer=optimizer)
and I am trying to train the model using the fit_generator function:
num_epochs = 50
model.fit_generator(generator=my_training_batch_generator,
                    steps_per_epoch=(num_training_samples // batch_size),
                    epochs=num_epochs,
                    verbose=1,
                    validation_data=my_validation_batch_generator,
                    validation_steps=(num_validation_samples // batch_size),
                    use_multiprocessing=True,
                    workers=16,
                    max_queue_size=32)
I am new to machine learning, and any help resolving the issue would be appreciated. I think the issue is with the def train line in the VAE class.
As an optional extra, it would be great if training could be set up so that I can see a reconstruction after each epoch; I already have a plot_reconstruction function in the Colab notebook for this purpose that just needs to be called.
APaul31,
Specifically, in your code I suggest adding a call() method to the VAE class:
def call(self, x):
    q_z = self.encode(x)
    z = q_z.sample()
    x_recon = self.decode(z)
    return x_recon
I also suggest using a more standard approach to your task, especially as a beginner:
Use tf.keras.preprocessing.image_dataset_from_directory() for image loading. Tutorial here.
Use a custom Model.train_step() to calculate the VAE losses instead of the multiple functions in your VAE class. Example here; a sketch follows below.
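To illustrate, a minimal sketch of what such a train_step could look like for this VAE. This is an assumption on my part rather than your exact code: it reuses the compute_loss method above and the optimizer passed to compile, and it requires TF 2.2 or later:

class VAE(tf.keras.Model):
    # ... __init__, encode, decode, compute_loss as defined above ...

    def train_step(self, data):
        # fit() may hand over an (x, y) tuple; an autoencoder only needs x
        x = data[0] if isinstance(data, tuple) else data
        with tf.GradientTape() as tape:
            recon_loss, latent_loss = self.compute_loss(x)
            total_loss = recon_loss + latent_loss
        grads = tape.gradient(total_loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.trainable_variables))
        return {"loss": total_loss, "recon": recon_loss, "kl": latent_loss}

With train_step defined, model.fit(...) drives the custom loop, and a simple tf.keras.callbacks.LambdaCallback(on_epoch_end=...) could call your plot_reconstruction function after every epoch.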
Currently I am getting the error TypeError: 'NoneType' object is not callable.
The problem is with the fit method: when you are passing a data generator, use fit_generator instead of fit. In the Colab it's calling fit.
Also, note that you can use the flow_from_directory method instead of image_dataset_from_directory to lazily generate batches; it won't load the whole dataset into memory:
https://keras.io/api/preprocessing/image/#flowfromdirectory-method.
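As a rough sketch of that idea (the directory path, image size, and batch size below are assumptions, not taken from your notebook):

from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Hypothetical layout: data/train/<class_name>/*.png
datagen = ImageDataGenerator(rescale=1.0 / 255, validation_split=0.2)

train_gen = datagen.flow_from_directory(
    "data/train",             # assumed path
    target_size=(128, 128),   # matches DIMS above
    batch_size=32,
    class_mode="input",       # yields (x, x) pairs, convenient for autoencoders
    subset="training",
)
val_gen = datagen.flow_from_directory(
    "data/train",
    target_size=(128, 128),
    batch_size=32,
    class_mode="input",
    subset="validation",
)

Batches are read from disk on the fly, so only one batch at a time needs to sit in memory.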
I want to use subclassing/inheritance of the Keras Model class, but when I try to compile my model it fails.
I started with Keras recently, but I used PyTorch a lot before.
I currently run TensorFlow and Keras on versions 1.10 and 2.16 respectively, and I really don't know why I can't compile the model. I tried updating TF to version 1.13 but nothing changed.
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from keras.layers import Input,Conv2D,MaxPooling2D,UpSampling2D,BatchNormalization
from keras import Model, layers
print(tf.VERSION)
print(tf.keras.__version__)
batch_size = 128
epochs = 50
inChannel = 1
img_width, img_height = 64, 64
input_shape = (img_width, img_height, 1)
class AE_64x64(Model):
    def __init__(self):
        super(AE_64x64, self).__init__()
        '''
        data_format: channels last
        '''
        self.conv1 = Conv2D(filters=30, kernel_size=(7,7), activation='relu', padding='same', strides=2)(Input(shape=input_shape))
        self.conv2 = Conv2D(filters=40, kernel_size=(5,5), activation='relu', padding='same', strides=2)
        self.batchnorm = BatchNormalization(axis=2)
        self.max_pool = MaxPooling2D((3,3), padding='same')
        self.conv3 = Conv2D(filters=50, kernel_size=(3,3), activation='relu', padding='same', strides=2)
        self.conv4 = Conv2D(filters=60, kernel_size=(3,3), activation='relu')
        self.b1 = Conv2D(filters=80, kernel_size=(3,3), activation='relu')
        self.b2 = Conv2D(filters=99, kernel_size=(3,3), activation='relu')
        self.conv6 = Conv2D(filters=50, kernel_size=(3,3), activation='relu')
        self.conv7 = Conv2D(filters=40, kernel_size=(3,3), activation='relu')
        self.conv8 = Conv2D(filters=30, kernel_size=(3,3), activation='relu')
        self.conv9 = Conv2D(filters=1, kernel_size=(3,3), activation='relu')

    def call(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.batchnorm(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.max_pool(x)
        x = self.batchnorm(x)
        x = self.b1(x)
        x = self.b2(x)
        x = self.batchnorm(x)
        x = self.conv5(x)
        x = self.conv6(x)
        x = self.batchnorm(x)
        x = self.conv7(x)
        x = self.conv8(x)
        x = self.batchnorm(x)
        x = self.conv9(x)
        return x
AE_Model = AE_64x64()
AE_Model.compile(loss='mean_squared_error',optimizer=tf.train.AdamOptimizer(),metrics= ['mean_squared_error'])
AE_Model.summary()
I expected a summary output but instead I received this error message:
RuntimeError: You must compile your model before using it.
Is there a logical mistake in the code, or is it a hardware/version problem?
You at least have to build your model, or fit it on some data, before summary() can work.
Anyway, when I run your code without data and add a build call, I get this result:
AE_Model.compile(loss='mean_squared_error',optimizer=tf.train.AdamOptimizer(),metrics= ['mean_squared_error'])
AE_Model.build(input_shape)
AE_Model.summary()
1.13.1
2.2.4-tf
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
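For completeness, a small sketch of the two usual ways to give a subclassed model its input shape so summary() works. This is a generic illustration, not your exact code, and it assumes call() can actually run; in the class above that would first require defining self.conv5 and making self.conv1 a layer rather than the tensor it currently is:

import numpy as np

# Option 1: build with an explicit batch dimension
AE_Model.build(input_shape=(None, img_width, img_height, 1))

# Option 2: trace the model once on a dummy batch so Keras can infer all shapes
dummy = np.zeros((1, img_width, img_height, 1), dtype="float32")
_ = AE_Model(dummy)

AE_Model.summary()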
I am working on a sentiment analysis task and I want to add an SVM layer on top of the CNN as the final classifier. How can I do that without using hinge loss?
tweet_input = Input(shape=(seq_len,), dtype='int32')
tweet_encoder = Embedding(vocabulary_size, EMBEDDING_DIM,
                          input_length=seq_len, trainable=True)(tweet_input)

bigram_branch = Conv1D(filters=64, kernel_size=2, padding='same',
                       activation='relu', strides=1)(tweet_encoder)
bigram_branch = GlobalMaxPooling1D()(bigram_branch)

trigram_branch = Conv1D(filters=32, kernel_size=3, padding='same',
                        activation='relu', strides=1)(tweet_encoder)
trigram_branch = GlobalMaxPooling1D()(trigram_branch)

fourgram_branch = Conv1D(filters=16, kernel_size=4, padding='same',
                         activation='relu', strides=1)(tweet_encoder)
fourgram_branch = GlobalMaxPooling1D()(fourgram_branch)

merged = concatenate([bigram_branch, trigram_branch, fourgram_branch], axis=1)

merged = Dense(512, activation='softmax')(merged)
merged = Dropout(0.8)(merged)
merged = Dense(2)(merged)
output = Activation('sigmoid')(merged)

model = Model(inputs=[tweet_input], outputs=[output])
adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model.compile(loss='hinge',
              optimizer=adam,
              metrics=['accuracy'])
model.summary()
I'm just getting started with Keras and deep learning, so the answer to my question may be obvious to some, but it isn't to me.
I made a model to colorize black-and-white photos, following the article on FloydHub (where I'm training it), and it works just fine when I train it on similar pictures (such as human faces). But as soon as I use a larger dataset with more varied pictures as input, the loss just stays flat and doesn't improve.
I've tried different learning rates and optimizers but just cannot get a good result.
What could I change to get a better result?
This is the code (thanks to Emil Wallner for the article on FloydHub):
# Get images
X = []
for filename in os.listdir('/data/images/Train/'):
    X.append(img_to_array(load_img('/data/images/Train/'+filename)))
X = np.array(X, dtype=float)
Xtrain = 1.0/255*X
#Load weights
inception = InceptionResNetV2(weights=None, include_top=True)
inception.load_weights('/data/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5')
inception.graph = tf.get_default_graph()
embed_input = Input(shape=(1000,))
#Encoder
encoder_input = Input(shape=(256, 256, 1,))
encoder_output = Conv2D(64, (3,3), activation='relu', padding='same', strides=2)(encoder_input)
encoder_output = Conv2D(128, (3,3), activation='relu', padding='same')(encoder_output)
encoder_output = Conv2D(128, (3,3), activation='relu', padding='same', strides=2)(encoder_output)
encoder_output = Conv2D(256, (3,3), activation='relu', padding='same')(encoder_output)
encoder_output = Conv2D(256, (3,3), activation='relu', padding='same', strides=2)(encoder_output)
encoder_output = Conv2D(512, (3,3), activation='relu', padding='same')(encoder_output)
encoder_output = Conv2D(512, (3,3), activation='relu', padding='same')(encoder_output)
encoder_output = Conv2D(256, (3,3), activation='relu', padding='same')(encoder_output)
#Fusion
fusion_output = RepeatVector(32 * 32)(embed_input)
fusion_output = Reshape(([32, 32, 1000]))(fusion_output)
fusion_output = concatenate([encoder_output, fusion_output], axis=3)
fusion_output = Conv2D(256, (1, 1), activation='relu', padding='same')(fusion_output)
#Decoder
decoder_output = Conv2D(128, (3,3), activation='relu', padding='same')(fusion_output)
decoder_output = UpSampling2D((2, 2))(decoder_output)
decoder_output = Conv2D(64, (3,3), activation='relu', padding='same')(decoder_output)
decoder_output = UpSampling2D((2, 2))(decoder_output)
decoder_output = Conv2D(32, (3,3), activation='relu', padding='same')(decoder_output)
decoder_output = Conv2D(16, (3,3), activation='relu', padding='same')(decoder_output)
decoder_output = Conv2D(2, (3, 3), activation='tanh', padding='same')(decoder_output)
decoder_output = UpSampling2D((2, 2))(decoder_output)
model = Model(inputs=[encoder_input, embed_input], outputs=decoder_output)
#Create embedding
def create_inception_embedding(grayscaled_rgb):
    grayscaled_rgb_resized = []
    for i in grayscaled_rgb:
        i = resize(i, (299, 299, 3), mode='constant')
        grayscaled_rgb_resized.append(i)
    grayscaled_rgb_resized = np.array(grayscaled_rgb_resized)
    grayscaled_rgb_resized = preprocess_input(grayscaled_rgb_resized)
    with inception.graph.as_default():
        embed = inception.predict(grayscaled_rgb_resized)
    return embed
# Image transformer
datagen = ImageDataGenerator(
        shear_range=0.4,
        zoom_range=0.4,
        rotation_range=40,
        horizontal_flip=True)
#Generate training data
batch_size = 20
def image_a_b_gen(batch_size):
    for batch in datagen.flow(Xtrain, batch_size=batch_size):
        grayscaled_rgb = gray2rgb(rgb2gray(batch))
        embed = create_inception_embedding(grayscaled_rgb)
        lab_batch = rgb2lab(batch)
        X_batch = lab_batch[:,:,:,0]
        X_batch = X_batch.reshape(X_batch.shape+(1,))
        Y_batch = lab_batch[:,:,:,1:] / 128
        yield ([X_batch, create_inception_embedding(grayscaled_rgb)], Y_batch)
#Train model
tensorboard = TensorBoard(log_dir="/output")
model.compile(optimizer='adam', loss='mse')
model.fit_generator(image_a_b_gen(batch_size), callbacks=[tensorboard], epochs=1000, steps_per_epoch=20)
#Make a prediction on the unseen images
color_me = []
for filename in os.listdir('../Test/'):
    color_me.append(img_to_array(load_img('../Test/'+filename)))
color_me = np.array(color_me, dtype=float)
color_me = 1.0/255*color_me
color_me = gray2rgb(rgb2gray(color_me))
color_me_embed = create_inception_embedding(color_me)
color_me = rgb2lab(color_me)[:,:,:,0]
color_me = color_me.reshape(color_me.shape+(1,))
# Test model
output = model.predict([color_me, color_me_embed])
output = output * 128
# Output colorizations
for i in range(len(output)):
    cur = np.zeros((256, 256, 3))
    cur[:,:,0] = color_me[i][:,:,0]
    cur[:,:,1:] = output[i]
    imsave("result/img_"+str(i)+".png", lab2rgb(cur))
You can check this question for the problem description. I create 7 models in a for loop and then train them (the training process isn't shown in the code):
for i in range(1, 8):
    j = str(i)
    img_input = Input(shape=(img_width, img_height, 1), name='input')
    x = Conv2D(32, (3,3), activation='relu', padding='same', name='conv1-'+j)(img_input)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1-'+j)(x)
    x = Conv2D(64, (3,3), activation='relu', padding='same', name='conv2-'+j)(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2-'+j)(x)
    x = Conv2D(128, (3,3), activation='relu', padding='same', name='conv3-'+j)(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool3-'+j)(x)
    x = Flatten()(x)
    x = Dense(512, name='dense1-'+j)(x)
    x = Dense(512, name='dense2-'+j)(x)
    predictions = Dense(6, activation='softmax', name='predictions-'+j)(x)
    model = Model(inputs=img_input, outputs=predictions)
    model.compile(optimizer='Adam', loss='binary_crossentropy',
                  metrics=['accuracy'])
I then pop the last softmax layer of each model and save the rest of each model into a list named models[]:
inputTensor = Input(shape=(img_width, img_height, 1), name='inputs')

for i in range(1, 8):                                    # save models into a models[] list
    j = str(i)
    models[i] = load_model(path+j+"/weights.best.hdf5")  # load models from disk
    models[i].layers.pop()                               # pop last layer
    models[i].outputs = [models[i].layers[-1].output]    # fix layer-pop bug
    models[i].layers[-1].outbound_nodes = []             # fix layer-pop bug
    for layer in models[i].layers:                       # make intermediate
        layer.trainable = False                          # layers untrainable
Then I concatenate the outputs of all of these models and try to save the final model:
outputTensors = [models[m](inputTensor) for m in models]
output = Concatenate()(outputTensors)
predictions = Dense(6, activation='softmax', name='predictionss')(output)
model = Model(inputTensor, predictions)
model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['accuracy'])
model.save('model')
But when saving, it throws an error:
Traceback (most recent call last):
  File "<ipython-input-43-6221fb2664c1>", line 10, in <module>
    model.save('model')
  File "C:\Anaconda3\envs\tensorflow\lib\site-packages\keras\engine\topology.py", line 2553, in save
    save_model(self, filepath, overwrite, include_optimizer)
  File "C:\Anaconda3\envs\tensorflow\lib\site-packages\keras\models.py", line 107, in save_model
    'config': model.get_config()
  File "C:\Anaconda3\envs\tensorflow\lib\site-packages\keras\engine\topology.py", line 2326, in get_config
    layer_config = layer.get_config()
  File "C:\Anaconda3\envs\tensorflow\lib\site-packages\keras\engine\topology.py", line 2390, in get_config
    new_node_index = node_conversion_map[node_key]
KeyError: 'predictions-1_ib-0
And I can't get past this error.
I'd appreciate your help.