Keras Error: Please provide data which shares the same first dimension - python

I'm trying to use a custom layer in Keras. It's a simple layer: just a matmul with trainable parameters.
from tensorflow import keras
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.optimizers import RMSprop
from keras.layers import Layer
from tensorflow.keras import backend as K

class MultiLayer(Layer):
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(MultiLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create a trainable weight variable for this layer
        if self.output_dim[0] != input_shape[1]:
            raise Exception("expect input shape with [{},?], but get input with shape {}".format(self.output_dim[0], input_shape), input_shape)
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[2], self.output_dim[0]),
                                      initializer='uniform',
                                      trainable=True)
        super(MultiLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        return K.dot(x, self.kernel)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)

x_fake = np.random.random((10, 28, 28))
y_fake = [np.diag(np.ones(28))] * 10
input_shape = np.shape(x_fake)[1:]
print(input_shape)
ipt = Input(name='inputs', shape=input_shape)
layer = MultiLayer((input_shape[0], input_shape[0]), name="dev")(ipt)
#layer = Flatten()(layer)
model = Model(inputs=ipt, outputs=layer)
model.summary()
rms = RMSprop()
model.compile(loss="rms", optimizer=rms, metrics=['accuracy'])
model.fit(x_fake, y_fake)
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
inputs (InputLayer) [(None, 28, 28)] 0
_________________________________________________________________
dev (MultiLayer) (None, 28, 28) 784
=================================================================
Total params: 784
Trainable params: 784
Non-trainable params: 0
_________________________________________________________________
But when I fit this model, an error occurs.
ValueError: Data cardinality is ambiguous:
x sizes: 10
y sizes: 28, 28, 28, 28, 28, 28, 28, 28, 28, 28
Please provide data which shares the same first dimension.
I don't know what the sizes mean.
How can I fix it?

model.output_shape must match y_fake.shape; your [np.diag(np.ones(28))]*10 is a list of 10 arrays, each of shape (28, 28), so TF thinks you have 10 separate outputs. I presume you intended np.stack(...), which gives a single (10, 28, 28) array.
Next, ensure all imports are consistent (either from tensorflow.keras or from keras). Lastly, "rms" isn't a supported loss; I replaced it with "mse".
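A quick way to see what the error message is counting (a small illustration, not part of the fix itself):
import numpy as np

y_list = [np.diag(np.ones(28))] * 10           # list of 10 arrays, each (28, 28) -> 10 outputs, cardinality 28 each
y_arr = np.stack([np.diag(np.ones(28))] * 10)  # one array of shape (10, 28, 28) -> cardinality 10, matching x

print(len(y_list), y_list[0].shape)  # 10 (28, 28)
print(y_arr.shape)                   # (10, 28, 28)
All fixes below.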
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.layers import Layer
from tensorflow.keras import backend as K

class MultiLayer(Layer):
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(MultiLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create a trainable weight variable for this layer
        if self.output_dim[0] != input_shape[1]:
            raise Exception(("expect input shape with [{},?], but get input with "
                             "shape {}").format(self.output_dim[0], input_shape),
                            input_shape)
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[2], self.output_dim[0]),
                                      initializer='uniform',
                                      trainable=True)
        super(MultiLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        return K.dot(x, self.kernel)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)

x_fake = np.random.random((10, 28, 28))
y_fake = np.stack([np.diag(np.ones(28))] * 10)  # (10, 28, 28)
input_shape = np.shape(x_fake)[1:]
print(input_shape)
ipt = Input(name='inputs', shape=input_shape)
layer = MultiLayer((input_shape[0], input_shape[0]), name="dev")(ipt)
model = Model(inputs=ipt, outputs=layer)
model.summary()
rms = RMSprop()
model.compile(loss="mse", optimizer=rms, metrics=['accuracy'])
model.fit(x_fake, y_fake)
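As a quick sanity check (my addition, not part of the original answer), the trained model's output shape now lines up with y_fake:
preds = model.predict(x_fake)
print(preds.shape)  # (10, 28, 28), same shape as y_fake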

Related

tensorflow.keras.Model inherit

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

class KerasSupervisedModelWrapper(keras.Model):
    def __init__(self, batch_size, **kwargs):
        super().__init__()
        self.batch_size = batch_size

    def summary(self, input_shape):  # temporary fix for a bug
        x = layers.Input(shape=input_shape)
        model = keras.Model(inputs=[x], outputs=self.call(x))
        return model.summary()

class ExampleModel(KerasSupervisedModelWrapper):
    def __init__(self, batch_size):
        super().__init__(batch_size)
        self.conv1 = layers.Conv2D(32, kernel_size=(3, 3), activation='relu')

    def call(self, x):
        x = self.conv1(x)
        return x

model = ExampleModel(15)
model.summary([28, 28, 1])
output:
Model: "model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, 28, 28, 1)] 0
conv2d_2 (Conv2D) (None, 26, 26, 32) 320
=================================================================
Total params: 320
Trainable params: 320
Non-trainable params: 0
_________________________________________________________________
I'm writing a wrapper for a Keras model to pre-define some useful methods and variables, as shown above.
I'd also like to modify the wrapper so that it can take a list of layers and compose a model the way keras.Sequential does.
Therefore, I added a Sequential method that assigns a new call method, as below.
class KerasSupervisedModelWrapper(keras.Model):
    ...(continue)...

    @staticmethod
    def Sequential(layers, **kwargs):
        model = KerasSupervisedModelWrapper(**kwargs)
        pipe = keras.Sequential(layers)

        def call(self, x):
            return pipe(x)

        model.call = call
        return model
However, it does not work as I intended. Instead, it shows the error message below.
model = KerasSupervisedModelWrapper.Sequential([
    layers.Conv2D(32, kernel_size=(3, 3), activation="relu")
], batch_size=15)
model.summary((28, 28, 1))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
/tmp/ipykernel_91471/2826773946.py in <module>
1 # model.build((None, 28, 28, 1))
2 # model.compile('adam', loss=keras.losses.SparseCategoricalCrossentropy(), metrics=['accuracy'])
----> 3 model.summary((28, 28, 1))
/tmp/ipykernel_91471/3696340317.py in summary(self, input_shape)
10 def summary(self, input_shape): # temporary fix for a bug
11 x = layers.Input(shape=input_shape)
---> 12 model = keras.Model(inputs=[x], outputs=self.call(x))
13 return model.summary()
14
TypeError: call() missing 1 required positional argument: 'x'
What can I do so that the wrapper can build a keras.Sequential-style model while still using the other properties?
You could try something like this:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

class KerasSupervisedModelWrapper(keras.Model):
    def __init__(self, batch_size, **kwargs):
        super().__init__()
        self.batch_size = batch_size

    def summary(self, input_shape):  # temporary fix for a bug
        x = layers.Input(shape=input_shape)
        model = keras.Model(inputs=[x], outputs=self.call(x))
        return model.summary()

    @staticmethod
    def Sequential(layers, **kwargs):
        model = KerasSupervisedModelWrapper(**kwargs)
        pipe = keras.Sequential(layers)
        model.call = pipe
        return model

class ExampleModel(KerasSupervisedModelWrapper):
    def __init__(self, batch_size):
        super().__init__(batch_size)
        self.conv1 = layers.Conv2D(32, kernel_size=(3, 3), activation='relu')

    def call(self, x):
        x = self.conv1(x)
        return x

model = ExampleModel(15)
model.summary([28, 28, 1])

model = KerasSupervisedModelWrapper.Sequential([
    layers.Conv2D(32, kernel_size=(3, 3), activation="relu")
], batch_size=15)
model.summary((28, 28, 1))
print(model(tf.random.normal((1, 28, 28, 1))).shape)
Model: "model_9"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_14 (InputLayer) [(None, 28, 28, 1)] 0
conv2d_17 (Conv2D) (None, 26, 26, 32) 320
=================================================================
Total params: 320
Trainable params: 320
Non-trainable params: 0
_________________________________________________________________
Model: "model_10"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_15 (InputLayer) [(None, 28, 28, 1)] 0
sequential_8 (Sequential) (None, 26, 26, 32) 320
=================================================================
Total params: 320
Trainable params: 320
Non-trainable params: 0
_________________________________________________________________
(1, 26, 26, 32)
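For context on the TypeError in the question (a note on Python attribute lookup, not something the original answer spells out): a function assigned to an instance attribute is stored as a plain function, not a bound method, so self.call(x) passes only x and the self parameter is left unfilled. Assigning a callable that takes only x, such as the keras.Sequential pipe above, avoids the problem. A minimal illustration:
class Wrapper:
    pass

def call(self, x):     # expects two arguments
    return x

w = Wrapper()
w.call = call          # stored on the instance, so self is never bound automatically
# w.call(1)            # TypeError: call() missing 1 required positional argument: 'x'

w.call = lambda x: x   # a callable taking only x works as intended
print(w.call(1))       # 1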

Input 0 of layer lstm_24 is incompatible with the layer: expected ndim=3, found ndim=2. Full shape received: [64, 8]

I have a dueling double deep Q-network model that works with two dense layers, and I am trying to convert them into two LSTM layers because my model deals with time series. When I change the dense layers in the code, this error appears and I have not been able to resolve it. I know this problem has been addressed many times here, but those solutions aren't working for me.
The code that works with two dense layers is written as follows:
class DuelingDeepQNetwork(keras.Model):
    def __init__(self, n_actions, fc1_dims, fc2_dims):
        super(DuelingDeepQNetwork, self).__init__()
        self.dense1 = keras.layers.Dense(fc1_dims, activation='relu')
        self.dense2 = keras.layers.Dense(fc2_dims, activation='relu')
        self.V = keras.layers.Dense(1, activation=None)
        self.A = keras.layers.Dense(n_actions, activation=None)

    def call(self, state):
        x = self.dense1(state)
        x = self.dense2(x)
        V = self.V(x)
        A = self.A(x)
        Q = (V + (A - tf.math.reduce_mean(A, axis=1, keepdims=True)))
        return Q

    def advantage(self, state):
        x = self.dense1(state)
        x = self.dense2(x)
        A = self.A(x)
        return A
It works without error, but when I turn the first two dense layers into LSTMs as follows:
class DuelingDeepQNetwork(keras.Model):
    def __init__(self, n_actions, fc1_dims, fc2_dims):
        super(DuelingDeepQNetwork, self).__init__()
        self.dense1 = keras.layers.LSTM(fc1_dims, activation='relu')
        self.dense2 = keras.layers.LSTM(fc2_dims, activation='relu')
        self.V = keras.layers.Dense(1, activation=None)
        self.A = keras.layers.Dense(n_actions, activation=None)
This error appears:
Input 0 of layer lstm_24 is incompatible with the layer: expected ndim=3, found ndim=2. Full shape received: [64, 8]
Following the question "expected ndim=3, found ndim=2", I already tried to set the input shape using state = state.reshape(64, 1, 8) before running the neural network, as follows:
def choose_action(self, observation):
    if np.random.random() < self.epsilon:
        action = np.random.choice(self.action_space)
    else:
        state = np.array([observation])
        state = state.reshape(64, 1, 8)  # <--------
        actions = self.q_eval.advantage(state)
        action = tf.math.argmax(actions, axis=1).numpy()[0, 0]
    return action
But I get the exact same error. I also tried adding the argument return_sequences=True to both layers, but that didn't work either.
I don't know what to do, and I have to hand it in within one week. Can someone enlighten me?
EDIT
I'm using fc1_dims = 64, fc2_dims = 32 and n_actions = 2. The model uses 8 variables and has a batch size of 64.
I uploaded the code to GitHub so you can run it if you want. The project is not finished, so I won't write a proper README for now.
[github with code][2]
The code below works for me without any issues.
class DuelingDeepQNetwork(keras.Model):
    def __init__(self, n_actions, fc1_dims, fc2_dims):
        super(DuelingDeepQNetwork, self).__init__()
        self.dense1 = keras.layers.LSTM(fc1_dims, activation='relu', return_sequences=True)
        self.dense2 = keras.layers.LSTM(fc2_dims, activation='relu')
        self.V = keras.layers.Dense(1, activation=None)
        self.A = keras.layers.Dense(n_actions, activation=None)

    def call(self, state):
        x = self.dense1(state)
        x = self.dense2(x)
        V = self.V(x)
        A = self.A(x)
        Q = (V + (A - tf.math.reduce_mean(A, axis=1, keepdims=True)))
        return Q

    def advantage(self, state):
        x = self.dense1(state)
        x = self.dense2(x)
        A = self.A(x)
        return A
And then calling the model as shown below:
LSTMModel = DuelingDeepQNetwork(2, 64, 32)
LSTMModel.build(input_shape=(None,1,8))
LSTMModel.summary()
The result is as shown below:
Model: "dueling_deep_q_network_7"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
lstm_12 (LSTM) multiple 18688
_________________________________________________________________
lstm_13 (LSTM) multiple 12416
_________________________________________________________________
dense_16 (Dense) multiple 33
_________________________________________________________________
dense_17 (Dense) multiple 66
=================================================================
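One related point, since the question reshapes to (64, 1, 8) inside choose_action: a single observation should be reshaped to (1, 1, 8), i.e. (batch, timesteps, features), not (64, 1, 8). A small sketch under that assumption, reusing the LSTMModel built above:
import numpy as np

observation = np.random.random(8).astype(np.float32)  # one observation with 8 variables
state = observation.reshape(1, 1, 8)                  # (batch=1, timesteps=1, features=8)
actions = LSTMModel.advantage(state)
print(actions.shape)                                  # (1, 2): one advantage value per action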

Saving Keras models with Custom Layers

I am trying to save a Keras model to an H5 file. The Keras model has a custom layer.
When I try to restore the model, I get the following error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-5-0fbff9b56a9d> in <module>()
1 model.save('model.h5')
2 del model
----> 3 model = tf.keras.models.load_model('model.h5')
8 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/utils/generic_utils.py in class_and_config_for_serialized_keras_object(config, module_objects, custom_objects, printable_module_name)
319 cls = get_registered_object(class_name, custom_objects, module_objects)
320 if cls is None:
--> 321 raise ValueError('Unknown ' + printable_module_name + ': ' + class_name)
322
323 cls_config = config['config']
ValueError: Unknown layer: CustomLayer
Could you please tell me how I am supposed to save and load the weights of all the custom Keras layers too? (Also, there was no warning when saving; will it be possible to load models from H5 files which I have already saved but can't load back now?)
Here is the minimal working code sample (MCVE) for this error, as well as the full expanded message: Google Colab Notebook
Just for completeness, this is the code I used to make my custom layer.
get_config and from_config are both working fine.
class CustomLayer(tf.keras.layers.Layer):
    def __init__(self, k, name=None):
        super(CustomLayer, self).__init__(name=name)
        self.k = k

    def get_config(self):
        return {'k': self.k}

    def call(self, input):
        return tf.multiply(input, 2)

model = tf.keras.models.Sequential([
    tf.keras.Input(name='input_layer', shape=(10,)),
    CustomLayer(10, name='custom_layer'),
    tf.keras.layers.Dense(1, activation='sigmoid', name='output_layer')
])
model.save('model.h5')
model = tf.keras.models.load_model('model.h5')
Correction number 1 is to use custom_objects while loading the saved model, i.e., replace the code
new_model = tf.keras.models.load_model('model.h5')
with
new_model = tf.keras.models.load_model('model.h5', custom_objects={'CustomLayer': CustomLayer})
Since we used custom layers to build the model before saving it, we should pass custom_objects while loading it.
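An equivalent alternative, if you prefer a context manager over passing the mapping at every call site (my suggestion, not part of the original answer), is tf.keras.utils.custom_object_scope:
# assumes CustomLayer is defined or imported in this scope
with tf.keras.utils.custom_object_scope({'CustomLayer': CustomLayer}):
    new_model = tf.keras.models.load_model('model.h5')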
Correction number 2 is to add **kwargs to the __init__ function of the custom layer, like
def __init__(self, k, name=None, **kwargs):
    super(CustomLayer, self).__init__(name=name)
    self.k = k
    super(CustomLayer, self).__init__(**kwargs)
Complete working code is shown below:
import tensorflow as tf

class CustomLayer(tf.keras.layers.Layer):
    def __init__(self, k, name=None, **kwargs):
        super(CustomLayer, self).__init__(name=name)
        self.k = k
        super(CustomLayer, self).__init__(**kwargs)

    def get_config(self):
        config = super(CustomLayer, self).get_config()
        config.update({"k": self.k})
        return config

    def call(self, input):
        return tf.multiply(input, 2)

model = tf.keras.models.Sequential([
    tf.keras.Input(name='input_layer', shape=(10,)),
    CustomLayer(10, name='custom_layer'),
    tf.keras.layers.Dense(1, activation='sigmoid', name='output_layer')
])
tf.keras.models.save_model(model, 'model.h5')
new_model = tf.keras.models.load_model('model.h5', custom_objects={'CustomLayer': CustomLayer})
print(new_model.summary())
Output of the above code is shown below:
WARNING:tensorflow:No training configuration found in the save file, so the model was *not* compiled. Compile it manually.
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
custom_layer_1 (CustomLayer) (None, 10) 0
_________________________________________________________________
output_layer (Dense) (None, 1) 11
=================================================================
Total params: 11
Trainable params: 11
Non-trainable params: 0
Hope this helps. Happy Learning!
You can manually provide the custom_objects mapping in the load_model method, as mentioned in the answer https://stackoverflow.com/a/62326857/8056572, but it can be tedious when you have a lot of custom layers (or any custom callables defined, e.g. metrics, losses, optimizers, ...).
TensorFlow provides a utility function to do it automatically: tf.keras.utils.register_keras_serializable.
You have to update your CustomLayer as follows:
import tensorflow as tf

@tf.keras.utils.register_keras_serializable()
class CustomLayer(tf.keras.layers.Layer):
    def __init__(self, k, **kwargs):
        self.k = k
        super(CustomLayer, self).__init__(**kwargs)

    def get_config(self):
        config = super().get_config()
        config["k"] = self.k
        return config

    def call(self, input):
        return tf.multiply(input, 2)
Here is the complete working code:
import tensorflow as tf

@tf.keras.utils.register_keras_serializable()
class CustomLayer(tf.keras.layers.Layer):
    def __init__(self, k, **kwargs):
        self.k = k
        super(CustomLayer, self).__init__(**kwargs)

    def get_config(self):
        config = super().get_config()
        config["k"] = self.k
        return config

    def call(self, input):
        return tf.multiply(input, 2)

def main():
    model = tf.keras.models.Sequential(
        [
            tf.keras.Input(name='input_layer', shape=(10,)),
            CustomLayer(10, name='custom_layer'),
            tf.keras.layers.Dense(1, activation='sigmoid', name='output_layer')
        ]
    )
    print("SUMMARY OF THE MODEL CREATED")
    print("-" * 60)
    print(model.summary())
    model.save('model.h5')
    del model

    print()
    print()

    model = tf.keras.models.load_model('model.h5')
    print("SUMMARY OF THE MODEL LOADED")
    print("-" * 60)
    print(model.summary())

if __name__ == "__main__":
    main()
And the corresponding output:
SUMMARY OF THE MODEL CREATED
------------------------------------------------------------
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
custom_layer (CustomLayer) (None, 10) 0
_________________________________________________________________
output_layer (Dense) (None, 1) 11
=================================================================
Total params: 11
Trainable params: 11
Non-trainable params: 0
_________________________________________________________________
None
WARNING:tensorflow:No training configuration found in the save file, so the model was *not* compiled. Compile it manually.
SUMMARY OF THE MODEL LOADED
------------------------------------------------------------
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
custom_layer (CustomLayer) (None, 10) 0
_________________________________________________________________
output_layer (Dense) (None, 1) 11
=================================================================
Total params: 11
Trainable params: 11
Non-trainable params: 0
_________________________________________________________________
None

How to convert this code from Keras to Tensorflow?

I am trying to convert code from Keras to TensorFlow. I don't know much about the Keras API, as I am a TensorFlow user. Here is the Keras code:
rawmeta = layers.Input(shape=(1,), dtype="string")
emb = elmolayer()(rawmeta)
d1 = layers.Dense(256, activation='relu')(emb)
yhat = layers.Dense(31, activation='softmax', name = "output_node")(d1)
model = Model(inputs=[rawmeta], outputs=yhat)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
where elmolayer is defined as follows:
class elmolayer(Layer):
    def __init__(self, **kwargs):
        self.dimensions = 1024
        self.trainable = True
        super(elmolayer, self).__init__(**kwargs)

    def build(self, input_shape):
        self.elmo = hub.Module('https://tfhub.dev/google/elmo/2', trainable=self.trainable,
                               name="{}_module".format(self.name))
        self.trainable_weights += K.tf.trainable_variables(scope="^{}_module/.*".format(self.name))
        super(elmolayer, self).build(input_shape)

    def call(self, x, mask=None):
        result = self.elmo(K.squeeze(K.cast(x, tf.string), axis=1),
                           as_dict=True,
                           signature='default',
                           )['default']
        return result

    def compute_mask(self, inputs, mask=None):
        return K.not_equal(inputs, '--PAD--')

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.dimensions)
My TensorFlow implementation of this code is:
class Base_model(object):
    def __init__(self, elmo_embedding_matrix):
        tf.reset_default_graph()

        # define placeholders
        sentences = tf.placeholder(tf.int32, [None, None], name='sentences')
        y_true = tf.placeholder(tf.int32, [None, None], name='labels')

        self.elmo = tf.get_variable(name="relation_embedding",
                                    shape=[elmo_embedding_matrix.shape[0], elmo_embedding_matrix.shape[1]],
                                    initializer=tf.constant_initializer(np.array(elmo_embedding_matrix)),
                                    trainable=True, dtype=tf.float32)

        embedding_lookup = tf.nn.embedding_lookup(self.elmo, sentences)
        d1 = tf.layers.dense(embedding_lookup, 256, tf.nn.relu)
        y_pred = tf.layers.dense(d1, 31, tf.nn.softmax)

        matches = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_true, 1))
        acc = tf.reduce_mean(tf.cast(matches, tf.float32))

        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_true, logits=y_pred))
        train = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cross_entropy)
My confusion is that the last dense layer in the Keras model is:
yhat = layers.Dense(31, activation='softmax', name = "output_node")(d1)
while in the TensorFlow code, if I am using tf.nn.softmax_cross_entropy_with_logits_v2, should I pass the second dense layer through softmax, e.g.,
y_pred = tf.layers.dense(d1, 31, tf.nn.softmax)
Because if I use softmax here, then tf.nn.softmax_cross_entropy_with_logits_v2 will apply softmax again to the logits.
How can I convert that Keras code to TensorFlow?
Posting the comment here (answer section), even though it is present in the comments section, for the benefit of the community.
The TensorFlow code equivalent to the Keras output layer,
yhat = layers.Dense(31, activation='softmax', name = "output_node")(d1)
is
y_logits = tf.layers.dense(d1, 31)  # linear layer, no activation: these are the raw logits
y_pred = tf.nn.softmax(y_logits)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_true, logits=y_logits))
Hope this helps. Happy Learning!

Get batch size in Keras custom layer and use tensorflow operations (tf.Variable)

I would like to write a Keras custom layer using TensorFlow operations that require the batch size as input. Apparently I'm struggling in every nook and cranny.
Suppose a very simple layer:
(1) get batch size
(2) create a tf.Variable (let's call it my_var) based on the batch size, then some tf.random ops to alter my_var
(3) finally, return input multiplied with my_var
What I tried so far:
class TestLayer(Layer):
    def __init__(self, **kwargs):
        self.num_batch = None
        self.my_var = None
        super(TestLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        self.batch_size = input_shape[0]
        var_init = tf.ones(self.batch_size, dtype=x.dtype)
        self.my_var = tf.Variable(var_init, trainable=False, validate_shape=False)
        # some tensorflow random operations to alter self.my_var
        super(TestLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        return self.my_var * x

    def compute_output_shape(self, input_shape):
        return input_shape
Now creating a very simple model:
# define model
input_layer = Input(shape = (2, 2, 3), name = 'input_layer')
x = TestLayer()(input_layer)
# connect model
my_mod = Model(inputs = input_layer, outputs = x)
my_mod.summary()
Unfortunately, whatever I try or change in the code, I get multiple errors, most of them with very cryptic tracebacks (ValueError: Cannot convert a partially known TensorShape to a Tensor: or ValueError: None values not supported.).
Any general suggestions? Thanks in advance.
You need to specify the batch size if you want to create a variable of size batch_size. Additionally, if you want to print a summary, the tf.Variable must have a fixed shape (validate_shape=True) and it must be broadcastable so it can be multiplied by the input:
import tensorflow as tf
from tensorflow.keras.layers import Layer, Input
from tensorflow.keras.models import Model

class TestLayer(Layer):
    def __init__(self, **kwargs):
        self.num_batch = None
        self.my_var = None
        super(TestLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        self.batch_size = input_shape[0]
        var_init = tf.ones(self.batch_size, dtype=tf.float32)[..., None, None, None]
        self.my_var = tf.Variable(var_init, trainable=False, validate_shape=True)
        super(TestLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        res = self.my_var * x
        return res

    def compute_output_shape(self, input_shape):
        return input_shape

# define model
input_layer = Input(shape=(2, 2, 3), name='input_layer', batch_size=10)
x = TestLayer()(input_layer)

# connect model
my_mod = Model(inputs=input_layer, outputs=x)
my_mod.summary()
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_layer (InputLayer) (10, 2, 2, 3) 0
_________________________________________________________________
test_layer (TestLayer) (10, 2, 2, 3) 0
=================================================================
Total params: 0
Trainable params: 0
Non-trainable params: 0
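If you would rather not hard-code the batch size in Input, a common alternative (not what the answer above does, and it drops the persistent tf.Variable) is to read the dynamic batch size with tf.shape(x)[0] inside call and build the random tensor on the fly. A rough sketch, assuming TF 1.14+ or 2.x:
import tensorflow as tf
from tensorflow.keras.layers import Layer, Input
from tensorflow.keras.models import Model

class DynamicTestLayer(Layer):
    """Sketch: per-sample random scaling without fixing the batch size."""
    def call(self, x):
        batch_size = tf.shape(x)[0]                       # dynamic batch size at run time
        scale = tf.random.uniform((batch_size, 1, 1, 1))  # one random factor per sample, broadcastable
        return scale * x

    def compute_output_shape(self, input_shape):
        return input_shape

input_layer = Input(shape=(2, 2, 3), name='input_layer')  # batch size stays None
x = DynamicTestLayer()(input_layer)
my_mod = Model(inputs=input_layer, outputs=x)
my_mod.summary()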
