Implementing a custom convolutional layer in Keras - error when loading model - python

I have implemented a minimal example of Wavenet, closely following the steps from here - https://github.com/basveeling/wavenet.
The issue is that the model uses a custom layer, which works fine during training, but once the model is reloaded, Keras cannot find the CausalConv1D layer, even though I am passing custom_objects.
I am using TensorFlow 1.13 and Keras 2.2.4.
Here is an example of the first three key/value pairs for objects.
objects = {'initial_causal_conv': <class 'wavenet_utils.CausalConv1D'>,
           'dilated_conv_1_tanh_s0': <class 'wavenet_utils.CausalConv1D'>,
           'dilated_conv_1_sigm_s0': <class 'wavenet_utils.CausalConv1D'>,
           '...': <class 'wavenet_utils.CausalConv1D'>,
           '...': <class 'wavenet_utils.CausalConv1D'>}
model.fit(x=[x_tr1, x_tr2],
          y=y_tr1,
          epochs=epochs,
          batch_size=batch_size,
          validation_data=([x_vl1, x_vl2], y_vl1),
          callbacks=[checkpoint, early_stopping],
          verbose=verbose,
          shuffle=True,
          class_weight=class_weight)

model = load_model('model.h5', custom_objects=objects)
Which then returns this error:
Traceback (most recent call last):
  File "/home/xxx/PycharmProjects/WAVE/DATA_NN.py", line 48, in <module>
    objects=objects)
  File "/home/xxx/PycharmProjects/WAVE/functions.py", line 572, in run_neural_net
    model = load_model('model_conv.h5', custom_objects=objects)
  File "/home/xxx/PycharmProjects/WAVE/venv/lib/python3.6/site-packages/keras/engine/saving.py", line 419, in load_model
    model = _deserialize_model(f, custom_objects, compile)
  File "/home/xxx/PycharmProjects/WAVE/venv/lib/python3.6/site-packages/keras/engine/saving.py", line 225, in _deserialize_model
    model = model_from_config(model_config, custom_objects=custom_objects)
  File "/home/xxx/PycharmProjects/WAVE/venv/lib/python3.6/site-packages/keras/engine/saving.py", line 458, in model_from_config
    return deserialize(config, custom_objects=custom_objects)
  File "/home/xxx/PycharmProjects/WAVE/venv/lib/python3.6/site-packages/keras/layers/__init__.py", line 55, in deserialize
    printable_module_name='layer')
  File "/home/xxx/PycharmProjects/WAVE/venv/lib/python3.6/site-packages/keras/utils/generic_utils.py", line 145, in deserialize_keras_object
    list(custom_objects.items())))
  File "/home/xxx/PycharmProjects/WAVE/venv/lib/python3.6/site-packages/keras/engine/network.py", line 1022, in from_config
    process_layer(layer_data)
  File "/home/xxx/PycharmProjects/WAVE/venv/lib/python3.6/site-packages/keras/engine/network.py", line 1008, in process_layer
    custom_objects=custom_objects)
  File "/home/xxx/PycharmProjects/WAVE/venv/lib/python3.6/site-packages/keras/layers/__init__.py", line 55, in deserialize
    printable_module_name='layer')
  File "/home/xxx/PycharmProjects/WAVE/venv/lib/python3.6/site-packages/keras/utils/generic_utils.py", line 138, in deserialize_keras_object
    ': ' + class_name)
ValueError: Unknown layer: CausalConv1D
When building the model, CausalConv1D must be imported from wavenet_utils.py.
Here is wavenet_utils.py, containing the class CausalConv1D:
from keras.layers import Conv1D
from keras.utils.conv_utils import conv_output_length
import tensorflow as tf


class CausalConv1D(Conv1D):
    def __init__(self, filters, kernel_size, init='glorot_uniform', activation=None,
                 padding='valid', strides=1, dilation_rate=1, bias_regularizer=None,
                 activity_regularizer=None, kernel_constraint=None, bias_constraint=None,
                 use_bias=True, causal=False, output_dim=1,
                 **kwargs):
        self.output_dim = output_dim
        super(CausalConv1D, self).__init__(filters,
                                           kernel_size=kernel_size,
                                           strides=strides,
                                           padding=padding,
                                           dilation_rate=dilation_rate,
                                           activation=activation,
                                           use_bias=use_bias,
                                           kernel_initializer=init,
                                           activity_regularizer=activity_regularizer,
                                           bias_regularizer=bias_regularizer,
                                           kernel_constraint=kernel_constraint,
                                           bias_constraint=bias_constraint,
                                           **kwargs)
        self.causal = causal
        if self.causal and padding != 'valid':
            raise ValueError("Causal mode dictates border_mode=valid.")

    def build(self, input_shape):
        super(CausalConv1D, self).build(input_shape)

    def call(self, x):
        if self.causal:
            def asymmetric_temporal_padding(x, left_pad=1, right_pad=1):
                pattern = [[0, 0], [left_pad, right_pad], [0, 0]]
                return tf.pad(x, pattern)
            x = asymmetric_temporal_padding(x, self.dilation_rate[0] * (self.kernel_size[0] - 1), 0)
        return super(CausalConv1D, self).call(x)

    def compute_output_shape(self, input_shape):
        input_length = input_shape[1]
        if self.causal:
            input_length += self.dilation_rate[0] * (self.kernel_size[0] - 1)
        length = conv_output_length(input_length,
                                    self.kernel_size[0],
                                    self.padding,
                                    self.strides[0],
                                    dilation=self.dilation_rate[0])
        shape = tf.TensorShape(input_shape).as_list()
        shape[-1] = self.output_dim
        return (input_shape[0], length, self.filters)

    def get_config(self):
        base_config = super(CausalConv1D, self).get_config()
        base_config['output_dim'] = self.output_dim
        return base_config
EDIT:
I have tried this approach before as well.
objects = {'CausalConv1D': <class 'wavenet_utils.CausalConv1D'>}
model.fit(x=[x_tr1, x_tr2],
          y=y_tr1,
          epochs=epochs,
          batch_size=batch_size,
          validation_data=([x_vl1, x_vl2], y_vl1),
          callbacks=[checkpoint, early_stopping],
          verbose=verbose,
          shuffle=True,
          class_weight=class_weight)

model = load_model('model.h5', custom_objects=objects)
Which then returns this error:
Traceback (most recent call last):
  File "/home/xxx/PycharmProjects/WAVE/DATA_NN.py", line 47, in <module>
    objects=objects)
  File "/home/xxx/PycharmProjects/WAVE/functions.py", line 574, in run_neural_net
    model = load_model('model.h5', custom_objects=objects)
  File "/home/xxx/PycharmProjects/WAVE/venv/lib/python3.6/site-packages/keras/engine/saving.py", line 419, in load_model
    model = _deserialize_model(f, custom_objects, compile)
  File "/home/xxx/PycharmProjects/WAVE/venv/lib/python3.6/site-packages/keras/engine/saving.py", line 225, in _deserialize_model
    model = model_from_config(model_config, custom_objects=custom_objects)
  File "/home/xxx/PycharmProjects/WAVE/venv/lib/python3.6/site-packages/keras/engine/saving.py", line 458, in model_from_config
    return deserialize(config, custom_objects=custom_objects)
  File "/home/xxx/PycharmProjects/WAVE/venv/lib/python3.6/site-packages/keras/layers/__init__.py", line 55, in deserialize
    printable_module_name='layer')
  File "/home/xxx/PycharmProjects/WAVE/venv/lib/python3.6/site-packages/keras/utils/generic_utils.py", line 145, in deserialize_keras_object
    list(custom_objects.items())))
  File "/home/xxx/PycharmProjects/WAVE/venv/lib/python3.6/site-packages/keras/engine/network.py", line 1022, in from_config
    process_layer(layer_data)
  File "/home/xxx/PycharmProjects/WAVE/venv/lib/python3.6/site-packages/keras/engine/network.py", line 1008, in process_layer
    custom_objects=custom_objects)
  File "/home/xxx/PycharmProjects/WAVE/venv/lib/python3.6/site-packages/keras/layers/__init__.py", line 55, in deserialize
    printable_module_name='layer')
  File "/home/xxx/PycharmProjects/WAVE/venv/lib/python3.6/site-packages/keras/utils/generic_utils.py", line 147, in deserialize_keras_object
    return cls.from_config(config['config'])
  File "/home/xxx/PycharmProjects/WAVE/venv/lib/python3.6/site-packages/keras/engine/base_layer.py", line 1109, in from_config
    return cls(**config)
  File "/home/xxx/PycharmProjects/WAVE/wavenet_utils.py", line 26, in __init__
    **kwargs)
TypeError: __init__() got multiple values for keyword argument 'kernel_initializer'
Could this be the issue mentioned here https://github.com/keras-team/keras/issues/12316?
And if so, is there any way around it?

There is only one custom object, which is CausalConv1D:
objects = {'CausalConv1D': wavenet_utils.CausalConv1D}
Now you must make sure that your get_config method is correct and contains everything needed by your layer's __init__ method.
Yours misses the causal property, and the base class's config contains a kernel_initializer entry that your __init__ method does not accept.
Let's list every property you need, then check which ones are in the base config:

filters: in base
kernel_size: in base
init: not in base, but there is kernel_initializer in base!
    kernel_initializer is a config item that your __init__ method doesn't accept:
    rename your init parameter to kernel_initializer
activation: in base
padding: in base
strides: in base
dilation_rate: in base
bias_regularizer: in base
activity_regularizer: in base
kernel_constraint: in base
bias_constraint: in base
use_bias: in base
causal: not in base!
    you must add this to your config (or the model will always use the default value)
output_dim: not in base!
**kwargs: in base
The layer's __init__:

def __init__(self, filters, kernel_size,
             ############## here:
             kernel_initializer='glorot_uniform',
             ##############
             activation=None,
             padding='valid', strides=1, dilation_rate=1, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None, bias_constraint=None,
             use_bias=True, causal=False, output_dim=1,
             **kwargs):
The layer's get_config:
It must contain all the __init__ params that are not in the base class:

def get_config(self):
    base_config = super(CausalConv1D, self).get_config()
    base_config['causal'] = self.causal
    base_config['output_dim'] = self.output_dim
    return base_config
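Putting both fixes together, here is a minimal sketch of the corrected layer (assuming the build, call, and compute_output_shape bodies stay exactly as in the question):

from keras.layers import Conv1D


class CausalConv1D(Conv1D):
    def __init__(self, filters, kernel_size, kernel_initializer='glorot_uniform',
                 activation=None, padding='valid', strides=1, dilation_rate=1,
                 bias_regularizer=None, activity_regularizer=None,
                 kernel_constraint=None, bias_constraint=None, use_bias=True,
                 causal=False, output_dim=1, **kwargs):
        self.output_dim = output_dim
        # pass the initializer through under its real name, so that
        # cls(**config) during deserialization no longer collides with it
        super(CausalConv1D, self).__init__(filters,
                                           kernel_size=kernel_size,
                                           strides=strides,
                                           padding=padding,
                                           dilation_rate=dilation_rate,
                                           activation=activation,
                                           use_bias=use_bias,
                                           kernel_initializer=kernel_initializer,
                                           activity_regularizer=activity_regularizer,
                                           bias_regularizer=bias_regularizer,
                                           kernel_constraint=kernel_constraint,
                                           bias_constraint=bias_constraint,
                                           **kwargs)
        self.causal = causal
        if self.causal and padding != 'valid':
            raise ValueError("Causal mode dictates border_mode=valid.")

    # build, call and compute_output_shape unchanged from the question

    def get_config(self):
        base_config = super(CausalConv1D, self).get_config()
        base_config['causal'] = self.causal          # now serialized
        base_config['output_dim'] = self.output_dim
        return base_config

With this in place, model = load_model('model.h5', custom_objects={'CausalConv1D': CausalConv1D}) should be able to reconstruct every layer.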

Somehow, no approach I've tried so far has been able to correctly load the model with load_model. Below is a simple workaround: it saves only the weights, deletes the existing model, builds a new one and compiles it again, and then loads the saved weights, which do save correctly even with custom layers present.
model = build_model()
checkpoint = ModelCheckpoint('model.h5', monitor='val_acc', verbose=1,
                             save_best_only=True, save_weights_only=True, mode='max')
model.fit(x, y)

del model
model = build_model()
model.load_weights('model.h5')
model.predict(x_test)
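As an aside, a sketch of another registration route in Keras 2.2.4 is get_custom_objects, which registers the class globally so load_model can resolve it without a custom_objects argument at every call site; note this still requires a correct get_config, as described in the answer above:

from keras.utils.generic_utils import get_custom_objects
from keras.models import load_model
from wavenet_utils import CausalConv1D

# register once per process; later load_model calls can then find the class
get_custom_objects().update({'CausalConv1D': CausalConv1D})
model = load_model('model.h5')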

Related

Error with ELMO embeddings - TensorArray has size zero, but element shape [?,256] is not fully defined. Currently only static shapes are supported

I am trying to implement ELMO embeddings via TensorFlow in a neural network. Here is a code snippet of my network:
def get_elmo_embeds_model():
    input_text = tf.keras.layers.Input(shape=(1,), dtype=tf.string)
    embedding = tf.keras.layers.Lambda(ELMoEmbedding, output_shape=(1024,))(input_text)
    print(embedding.shape)
    conv_1d_layer = tf.keras.layers.Conv1D(256, 5, activation='relu')(embedding)
    max_pool_1 = tf.keras.layers.MaxPooling1D(5)(conv_1d_layer)
    x = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(256, name="BiLSTM"))(max_pool_1)
    dropout_2 = tf.keras.layers.Dropout(0.2)(x)
    flatten_1 = tf.keras.layers.Flatten()(dropout_2)
    pred = tf.keras.layers.Dense(1, activation='sigmoid')(flatten_1)
    model = tf.keras.models.Model(inputs=[input_text], outputs=pred)
    return model

text_only_model = get_elmo_embeds_model()
text_only_model.compile(loss='binary_crossentropy', optimizer='adam',
                        metrics=['accuracy', precision_mat, recall_mat, f1_mat])
text_only_model.summary()

with tf.Session() as session:
    K.set_session(session)
    session.run(tf.global_variables_initializer())
    session.run(tf.tables_initializer())
    history = text_only_model.fit_generator(text_elmo_train, epochs=EPOCHS,
                                            validation_steps=VALIDATION_STEPS,
                                            steps_per_epoch=STEPS_PER_EPOCH,
                                            validation_data=text_elmo_valid)
When running this model, I am getting the following error:
    steps_per_epoch=STEPS_PER_EPOCH,validation_data = text_elmo_valid)
  File "/home/.local/lib/python3.7/site-packages/keras/engine/training_v1.py", line 796, in fit
    use_multiprocessing=use_multiprocessing)
  File "/home/.local/lib/python3.7/site-packages/keras/engine/training_generator_v1.py", line 586, in fit
    steps_name='steps_per_epoch')
  File "/home/.local/lib/python3.7/site-packages/keras/engine/training_generator_v1.py", line 306, in model_iteration
    steps_name='validation_steps')
  File "/home/.local/lib/python3.7/site-packages/keras/engine/training_generator_v1.py", line 252, in model_iteration
    batch_outs = batch_function(*batch_data)
  File "/home/.local/lib/python3.7/site-packages/keras/engine/training_v1.py", line 1152, in test_on_batch
    outputs = self.test_function(inputs)  # pylint: disable=not-callable
  File "/home/.local/lib/python3.7/site-packages/keras/backend.py", line 4187, in __call__
    run_metadata=self.run_metadata)
  File "/home/.conda/envs/test_multimod/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 1485, in __call__
    run_metadata_ptr)
tensorflow.python.framework.errors_impl.UnimplementedError: TensorArray has size zero, but element shape [?,256] is not fully defined. Currently only static shapes are supported when packing zero-size TensorArrays.
I have checked the data being passed in and it contains no null values, but this error still occurs when running this function.

Input_shape is None in custom layer

I'm building my own layer in TensorFlow 2.1 and using it in a custom model. However, when I try to train it, the layer tries to build itself when called for the first time, and it needs input_shape to do so. As far as I know, it should be able to compute it because it's getting an actual input, yet input_shape turns out to be None.
My question is: what did I do wrong, and how can I correct it?
Below I'm attaching an example that reproduces the problem.
My code (MinimalRNNCell is copied from the TensorFlow website https://www.tensorflow.org/api_docs/python/tf/keras/layers/RNN):
import tensorflow as tf
from tensorflow.keras.layers import Layer
from tensorflow.keras import Model
import numpy as np


class MinimalRNNCell(Layer):
    def __init__(self, units, **kwargs):
        self.units = units
        self.state_size = units
        super(MinimalRNNCell, self).__init__(**kwargs)

    def build(self, input_shape):
        self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
                                      initializer='uniform',
                                      name='kernel')
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            initializer='uniform',
            name='recurrent_kernel')
        self.built = True

    def call(self, inputs, states):
        prev_output = states[0]
        h = K.dot(inputs, self.kernel)
        output = h + K.dot(prev_output, self.recurrent_kernel)
        return output, [output]


class RNNXModel(Model):
    def __init__(self, size):
        super(RNNXModel, self).__init__()
        self.minimalrnn = MinimalRNNCell(size)

    def call(self, inputs):
        out = self.minimalrnn(input)
        return out


x = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
              [[10, 11, 12], [13, 14, 15], [16, 17, 18]]])
y = np.array([[1, 2, 3], [10, 11, 12]])

model = RNNXModel(3)
model.compile(optimizer='sgd', loss='mse')
model.fit(x, y, epochs=10, batch_size=1)
Error I'm getting:
Traceback (most recent call last):
  File "/home/.../test.py", line 64, in <module>
    model.fit(x,y,epochs=10, batch_size=1)
  File "/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training.py", line 819, in fit
    use_multiprocessing=use_multiprocessing)
  File "/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py", line 235, in fit
    use_multiprocessing=use_multiprocessing)
  File "/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py", line 593, in _process_training_inputs
    use_multiprocessing=use_multiprocessing)
  File "/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training_v2.py", line 646, in _process_inputs
    x, y, sample_weight=sample_weights)
  File "/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training.py", line 2346, in _standardize_user_data
    all_inputs, y_input, dict_inputs = self._build_model_with_inputs(x, y)
  File "/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training.py", line 2572, in _build_model_with_inputs
    self._set_inputs(cast_inputs)
  File "/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training.py", line 2659, in _set_inputs
    outputs = self(inputs, **kwargs)
  File "/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
    outputs = call_fn(cast_inputs, *args, **kwargs)
  File "/home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/autograph/impl/api.py", line 237, in wrapper
    raise e.ag_error_metadata.to_exception(e)
TypeError: in converted code:

    /home/.../test.py:36 call  *
        out=self.minimalrnn(input)
    /home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py:818 __call__
        self._maybe_build(inputs)
    /home/.../.venv/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py:2116 _maybe_build
        self.build(input_shapes)
    /home/.../test.py:14 build
        self.kernel = self.add_weight(shape=(input_shape[-1], self.units),

    TypeError: 'NoneType' object is not subscriptable
There is a typo: input should be inputs. input is a built-in Python function (documentation), so calling the layer on it gives the build method no usable shape.
class RNNXModel(Model):
    def __init__(self, size):
        super(RNNXModel, self).__init__()
        self.minimalrnn = MinimalRNNCell(size)

    def call(self, inputs):
        out = self.minimalrnn(inputs)  # changed from `input`
        return out
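Note also that MinimalRNNCell is a cell: its call signature takes states, so in the linked TensorFlow documentation it is wrapped in tf.keras.layers.RNN, which manages the states and loops the cell over the time dimension. A minimal sketch of that usage, assuming MinimalRNNCell is defined as above (with from tensorflow.keras import backend as K imported for K.dot):

import numpy as np
import tensorflow as tf

cell = MinimalRNNCell(3)
rnn = tf.keras.layers.RNN(cell)  # handles initial states and the time loop

x = np.random.random((2, 3, 3)).astype('float32')  # (batch, timesteps, features)
out = rnn(x)  # shape (2, 3)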

ValueError: Passing a dictionary input to a Sequential Model which doesn't have FeatureLayer as the first layer is an error

I've tried running the following code, but got this error:
  File "C:\Users\TomerK\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 819, in fit
    use_multiprocessing=use_multiprocessing)
  File "C:\Users\TomerK\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 235, in fit
    use_multiprocessing=use_multiprocessing)
  File "C:\Users\TomerK\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 593, in _process_training_inputs
    use_multiprocessing=use_multiprocessing)
  File "C:\Users\TomerK\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 706, in _process_inputs
    use_multiprocessing=use_multiprocessing)
  File "C:\Users\TomerK\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\engine\data_adapter.py", line 702, in init
    x = standardize_function(x)
  File "C:\Users\TomerK\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 660, in standardize_function
    standardize(dataset, extract_tensors_from_dataset=False)
  File "C:\Users\TomerK\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 2346, in _standardize_user_data
    all_inputs, y_input, dict_inputs = self._build_model_with_inputs(x, y)
  File "C:\Users\TomerK\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 2572, in _build_model_with_inputs
    self._set_inputs(cast_inputs)
  File "C:\Users\TomerK\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 2647, in _set_inputs
    inputs = self._set_input_attrs(inputs)
  File "C:\Users\TomerK\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\training\tracking\base.py", line 457, in _method_wrapper
    result = method(self, *args, **kwargs)
  File "C:\Users\TomerK\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 2681, in _set_input_attrs
    raise ValueError('Passing a dictionary input to a Sequential Model '
ValueError: Passing a dictionary input to a Sequential Model which doesn't have FeatureLayer as the first layer is an error.
Code:
# -*- coding: utf-8 -*-
import os
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import tensorflow_datasets as tfds

try:
    model = keras.models.load_model("passrockmodel.h5")
except:
    print('\nDownloading Train Dataset...\n')
    train_dataset = tfds.load(name="rock_you", split="train[:75%]")
    assert isinstance(train_dataset, tf.data.Dataset)

    print('\nDownloading Test Dataset...\n')
    test_dataset = tfds.load("rock_you", split='train[-25%:]')
    assert isinstance(test_dataset, tf.data.Dataset)

    model = tf.keras.Sequential([
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid'),
    ])

    model.compile(
        loss='binary_crossentropy',
        optimizer='adam',
        metrics=['accuracy'])

    model.fit(train_dataset, epochs=20)
    model.save("passrockmodel.h5")

test_loss, test_accuracy = model.evaluate(test_dataset)

print('\nPredicting...\n')
predictions = model.predict(test_dataset)
print(predictions[0])
I had your problem yesterday. Here's what solved it for me:
Your first layer should be of type tf.keras.layers.DenseFeatures.
This first layer must be instantiated with an array of tf.feature_column objects. It happens that all my columns were numeric, so my array was:

featureColumns = [tf.feature_column.numeric_column(columnNames[i],
                                                   normalizer_fn=lambda x, i=i: (x - mean[i]) / std[i])
                  for i in range(len(columnNames[:-1]))]

(The i=i default argument makes each lambda capture its own column index rather than the loop's final value.)
Note: the normalizer_fn arg is very useful, as you can see. It can eliminate the need for an additional normalization preprocessing layer, should you need one.
And so my layer became:

layers.DenseFeatures(feature_columns=featureColumns, trainable=True)

I believe this should solve the error mentioned in your question, which is quoted as:
ValueError: Passing a dictionary input to a Sequential Model which doesn't have FeatureLayer as the first layer is an error.
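For illustration, a minimal end-to-end sketch of this idea; the column names, mean, and std below are hypothetical placeholders, not taken from the question:

import tensorflow as tf

# hypothetical feature columns; substitute your dataset's names and statistics
columnNames = ['f1', 'f2', 'label']
mean = [0.0, 0.0]
std = [1.0, 1.0]

featureColumns = [tf.feature_column.numeric_column(columnNames[i],
                                                   normalizer_fn=lambda x, i=i: (x - mean[i]) / std[i])
                  for i in range(len(columnNames[:-1]))]

model = tf.keras.Sequential([
    tf.keras.layers.DenseFeatures(feature_columns=featureColumns, trainable=True),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# model.fit can now take dictionary inputs keyed by the feature column names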

Value passed to parameter 'shape' has DataType float32 not in list of allowed values: int32, int64

def build_model(dropout=0.2, lstm_units=200, fc_hidden=100):
    # prepare data
    train_x, train_y = to_supervised(train, n_input)
    # define parameters
    verbose, epochs, batch_size = 0, 30, 24
    n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1]
    # reshape output into [samples, timesteps, features]
    train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1))
    # define model
    model = Sequential()
    model.add(LSTM(lstm_units, activation='relu', input_shape=(n_timesteps, n_features)))
    model.add(Dropout(dropout))
    model.add(RepeatVector(n_outputs))
    model.add(LSTM(lstm_units, activation='relu', return_sequences=True))
    model.add(Dropout(dropout))
    model.add(TimeDistributed(Dense(fc_hidden, activation='relu')))
    model.add(TimeDistributed(Dense(1)))
    model.compile(loss='mse', optimizer='adam')
    # early_stop = EarlyStopping(monitor='loss', patience=10, verbose=0)
    # fit network
    history = model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size,
                        verbose=verbose, validation_split=0.1, shuffle=False)
    return history.history['loss'][-1]


def bayesian_opt():
    optimizer = BayesianOptimization(
        f=build_model,
        pbounds={'dropout': (0.0, 0.5), 'lstm_units': (32, 250), 'fc_hidden': (32, 256)},
    )
    optimizer.maximize(
        init_points=10,
        n_iter=30,
    )


bayesian_opt()
Traceback (most recent call last):
  File "<ipython-input-75-7ba49e57a6c9>", line 13, in <module>
    bayesian_opt()
  File "<ipython-input-75-7ba49e57a6c9>", line 9, in bayesian_opt
    n_iter=30,
  File "C:\Users\ASUS\Anaconda3\lib\site-packages\bayes_opt\bayesian_optimization.py", line 174, in maximize
    self.probe(x_probe, lazy=False)
  File "C:\Users\ASUS\Anaconda3\lib\site-packages\bayes_opt\bayesian_optimization.py", line 112, in probe
    self._space.probe(params)
  File "C:\Users\ASUS\Anaconda3\lib\site-packages\bayes_opt\target_space.py", line 194, in probe
    target = self.target_func(**params)
  File "<ipython-input-74-db3bb0c0b2de>", line 11, in build_model
    model.add(LSTM(lstm_units, activation='relu', input_shape=(n_timesteps, n_features)))
  File "C:\Users\ASUS\Anaconda3\lib\site-packages\keras\engine\sequential.py", line 166, in add
    layer(x)
  File "C:\Users\ASUS\Anaconda3\lib\site-packages\keras\layers\recurrent.py", line 536, in __call__
    return super(RNN, self).__call__(inputs, **kwargs)
  File "C:\Users\ASUS\Anaconda3\lib\site-packages\keras\engine\base_layer.py", line 463, in __call__
    self.build(unpack_singleton(input_shapes))
  File "C:\Users\ASUS\Anaconda3\lib\site-packages\keras\layers\recurrent.py", line 497, in build
    self.cell.build(step_input_shape)
  File "C:\Users\ASUS\Anaconda3\lib\site-packages\keras\layers\recurrent.py", line 1914, in build
    constraint=self.kernel_constraint)
  File "C:\Users\ASUS\Anaconda3\lib\site-packages\keras\engine\base_layer.py", line 279, in add_weight
    weight = K.variable(initializer(shape, dtype=dtype),
  File "C:\Users\ASUS\Anaconda3\lib\site-packages\keras\initializers.py", line 227, in __call__
    dtype=dtype, seed=self.seed)
  File "C:\Users\ASUS\Anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py", line 4357, in random_uniform
    shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
  File "C:\Users\ASUS\Anaconda3\lib\site-packages\tensorflow\python\keras\backend.py", line 5253, in random_uniform
    shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
  File "C:\Users\ASUS\Anaconda3\lib\site-packages\tensorflow\python\ops\random_ops.py", line 247, in random_uniform
    rnd = gen_random_ops.random_uniform(shape, dtype, seed=seed1, seed2=seed2)
  File "C:\Users\ASUS\Anaconda3\lib\site-packages\tensorflow\python\ops\gen_random_ops.py", line 858, in random_uniform
    name=name)
  File "C:\Users\ASUS\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 626, in _apply_op_helper
    param_name=input_name)
  File "C:\Users\ASUS\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 60, in _SatisfiesTypeConstraint
    ", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
TypeError: Value passed to parameter 'shape' has DataType float32 not in list of allowed values: int32, int64
train and n_input are global variables.
Value passed to parameter 'shape' has DataType float32 not in list of allowed values: int32, int64
I have tried casting everything shape-related to int, but it didn't work.
What's wrong with the shape?
def _SatisfiesTypeConstraint(dtype, attr_def, param_name):
    if attr_def.HasField("allowed_values"):
        allowed_list = attr_def.allowed_values.list.type
        if dtype not in allowed_list:
            raise TypeError(
                "Value passed to parameter '%s' has DataType %s not in list of "
                "allowed values: %s" %
                (param_name, dtypes.as_dtype(dtype).name,
                 ", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
This is _SatisfiesTypeConstraint(dtype, attr_def, param_name).
It seems that there is a problem with the datatypes.
In dealing with these problems, you might want to use tf.cast to convert the float to an integer.
Kindly refer to the code below:

x = tf.constant([1.8, 2.2], dtype=tf.float32)
tf.dtypes.cast(x, tf.int32)  # [1, 2], dtype=tf.int32

You could also refer to the TensorFlow official documentation here.
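In this particular traceback the offending values are lstm_units and fc_hidden, which BayesianOptimization samples as Python floats, so a sketch of the same idea applied here is to convert them to integers before they reach the layer constructors:

def build_model(dropout=0.2, lstm_units=200, fc_hidden=100):
    # BayesianOptimization passes floats; Keras layer sizes must be integers
    lstm_units = int(lstm_units)
    fc_hidden = int(fc_hidden)
    # ... rest of build_model unchanged ...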

keras model does not work when it is part of the class

So I have a class:

class Trainer:
    def __init__(self, episodes):
        self.factorModel()

    def factorModel(self):
        self.model = Sequential()
        self.model.add(Conv2D(50, (3, 3), activation='relu', input_shape=(3, 200, 200), dim_ordering="th", strides=4))
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        self.model.add(Conv2D(64, (5, 5), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Dense(1000, activation='relu'))
        self.model.add(Flatten())
        self.model.add(Dense(4, activation='softmax'))
        self.model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.01), metrics=['accuracy'])

    def do(self, state):
        self.model.predict(np.array(state))[0]
When I try to call do, I get an error like ValueError: Tensor Tensor("dense_2/Softmax:0", shape=(?, 4), dtype=float32) is not an element of this graph. The problem occurs when I run the do function as a thread; when I use the same model and the same config but do not run do as a thread, everything works fine.
Full error message:
File "/usr/lib/python2.7/threading.py", line 801, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/threading.py", line 754, in run
self.__target(*self.__args, **self.__kwargs)
File "path", line 141, in do
self.model.predict_classes(state)[0]
File "path/.local/lib/python2.7/site-packages/keras/engine/sequential.py", line 268, in predict_classes
proba = self.predict(x, batch_size=batch_size, verbose=verbose)
File "path/.local/lib/python2.7/site-packages/keras/engine/training.py", line 1456, in predict
self._make_predict_function()
File "path/.local/lib/python2.7/site-packages/keras/engine/training.py", line 378, in _make_predict_function
**kwargs)
File "path/.local/lib/python2.7/site-packages/keras/backend/tensorflow_backend.py", line 3009, in function
**kwargs)
File "path/.local/lib/python2.7/site-packages/tensorflow/python/keras/backend.py", line 3479, in function
return GraphExecutionFunction(inputs, outputs, updates=updates, **kwargs)
File "path/.local/lib/python2.7/site-packages/tensorflow/python/keras/backend.py", line 3142, in __init__
with ops.control_dependencies([self.outputs[0]]):
File "path/.local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 5426, in control_dependencies
return get_default_graph().control_dependencies(control_inputs)
File "path/.local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 4867, in control_dependencies
c = self.as_graph_element(c)
File "path/.local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3796, in as_graph_element
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
File "path/.local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3875, in _as_graph_element_locked
raise ValueError("Tensor %s is not an element of this graph." % obj)
ValueError: Tensor Tensor("dense_2/Softmax:0", shape=(?, 4), dtype=float32) is not an element of this graph.
I tried the solution from this question link, so I called self.model._make_predict_function() after self.factorModel(), but as a result I got this error:
InvalidArgumentError: Tensor conv2d_1_input:0, specified in either feed_devices or fetch_devices was not found in the Graph
OK, I found this question link, so probably there is no way to make a prediction in a thread.
So I made some changes to the code according to the suggestions, and now it looks like this:
class Trainer:
    def __init__(self, episodes):
        self.factorModel()
        self.graph = tf.get_default_graph()

    def factorModel(self):
        self.model = Sequential()
        self.model.add(Conv2D(50, (3, 3), activation='relu', input_shape=(3, 200, 200), dim_ordering="th", strides=4))
        self.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        self.model.add(Conv2D(64, (5, 5), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Dense(1000, activation='relu'))
        self.model.add(Flatten())
        self.model.add(Dense(4, activation='softmax'))
        self.model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.01), metrics=['accuracy'])

    def do(self, state):
        with self.graph.as_default():
            self.model.predict(np.array(state))[0]
and as a result I got the following error:
Exception in thread Thread-1:
Traceback (most recent call last):
  File "/usr/lib/python2.7/threading.py", line 801, in __bootstrap_inner
    self.run()
  File "/usr/lib/python2.7/threading.py", line 754, in run
    self.__target(*self.__args, **self.__kwargs)
  File "path/Desktop/marioQProject/new_class_trainer.py", line 151, in do
    self.model.predict_classes(state)[0]
  File "path/.local/lib/python2.7/site-packages/keras/engine/sequential.py", line 268, in predict_classes
    proba = self.predict(x, batch_size=batch_size, verbose=verbose)
  File "path/.local/lib/python2.7/site-packages/keras/engine/training.py", line 1462, in predict
    callbacks=callbacks)
  File "path/.local/lib/python2.7/site-packages/keras/engine/training_arrays.py", line 324, in predict_loop
    batch_outs = f(ins_batch)
  File "patha/.local/lib/python2.7/site-packages/tensorflow/python/keras/backend.py", line 3292, in __call__
    run_metadata=self.run_metadata)
  File "path/.local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1458, in __call__
    run_metadata_ptr)
FailedPreconditionError: Error while reading resource variable conv2d_1/bias from Container: localhost. This could mean that the variable was uninitialized. Not found: Resource localhost/conv2d_1/bias/N10tensorflow3VarE does not exist.
    [[{{node conv2d_1/Reshape/ReadVariableOp}}]]
TensorFlow is not really friendly with multithreading, but there's a workaround.
Do this:
class Trainer:
    def __init__(self):
        self.factorModel()
        self.graph = tf.get_default_graph()  # [1]

    def do(self, state):
        with self.graph.as_default():  # [2]
            return self.model.predict(np.array(state))[0]

    def factorModel(self):
        self.model = Sequential()
        self.model.add(Conv2D(50, (3, 3), activation='relu', input_shape=(10, 10, 3), strides=4))
        self.model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])


t = Trainer()

def fn():
    t.do(np.zeros((1, 10, 10, 3)))

if __name__ == '__main__':
    thread_one = threading.Thread(target=fn)
    thread_two = threading.Thread(target=fn)
    thread_one.start()
    thread_two.start()
BTW, if you don't specifically need channels-first ordering, then I recommend you use the TF-standard channels-last instead. Whether you get images directly with OpenCV or convert Pillow images to ndarray using numpy, you'll get channels-last by default.
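For example, a quick sketch of converting an existing channels-first batch to channels-last with numpy:

import numpy as np

batch_cf = np.zeros((1, 3, 200, 200))            # (batch, channels, height, width)
batch_cl = np.transpose(batch_cf, (0, 2, 3, 1))  # (batch, height, width, channels)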
Edit
Have you tried making sure the model works before sending it to the thread, like this:
class Trainer:
    def __init__(self, episodes, model, graph):
        self.graph = graph
        self.model = model

model = Sequential()
model.add(Conv2D(...))
.
.
.
# make sure it runs here
model.predict(np.zeros((1, 3, 200, 200)))
# if you don't need to train then try not compiling first
graph = tf.get_default_graph()
trainer = Trainer(episodes, model, graph)
Also, try the functional Model instead of Sequential, like:
from keras import models, layers

inp = layers.Input((200, 200, 3))
x = layers.Conv2D(50, (3, 3), activation='relu', strides=4)(inp)
x = layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
x = layers.Conv2D(64, (5, 5), activation='relu')(x)
.
.
.
x = layers.Dense(4, activation='softmax')(x)
model = models.Model(inp, x)
