This model doesn't work:
import tensorflow as tf

class LNSimpleRNNCell(tf.keras.layers.Layer):
    def __init__(self, units, activation="tanh", **kwargs):
        super().__init__(**kwargs)
        self.state_size = units
        self.output_size = units
        self.simple_rnn_cell = tf.keras.layers.LSTMCell(units, activation=None)
        self.layer_norm = tf.keras.layers.LayerNormalization()
        self.activation = tf.keras.activations.get(activation)

    def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
        if inputs is not None:
            batch_size = tf.shape(inputs)[0]
            dtype = inputs.dtype
        return [tf.zeros([batch_size, self.state_size], dtype=dtype)]

    # tf.function
    def call(self, inputs, states):
        outputs, new_states = self.simple_rnn_cell(inputs, states)
        norm_outputs = self.activation(self.layer_norm(outputs))
        return norm_outputs, [norm_outputs]
model = tf.keras.models.Sequential([
    tf.keras.layers.RNN(LNSimpleRNNCell(units=73), return_sequences=True,
                        input_shape=[73, 17]),
    tf.keras.layers.RNN(LNSimpleRNNCell(units=73), return_sequences=True),
    tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(units=1, activation=None))
])
model.summary()
I get the following error:
IndexError: Exception encountered when calling layer "lstm_cell" (type LSTMCell).

tuple index out of range

Call arguments received by layer "lstm_cell" (type LSTMCell):
  • inputs=tf.Tensor(shape=(None, 17), dtype=float32)
  • states=('tf.Tensor(shape=(None, 73), dtype=float32)',)
  • training=None

Call arguments received by layer "ln_simple_rnn_cell" (type LNSimpleRNNCell):
  • inputs=tf.Tensor(shape=(None, 17), dtype=float32)
  • states=('tf.Tensor(shape=(None, 73), dtype=float32)',)
The class works if LSTMCell is replaced with SimpleRNNCell, so it seems something more is needed when switching to LSTMCell.
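The missing piece is that LSTMCell keeps two states (the hidden state h and the cell state c), whereas this wrapper declares state_size = units and returns a single state, so the LSTMCell receives a one-element state tuple and the index lookup fails. A minimal sketch of one way to adapt the cell, layer-normalizing the hidden state and passing the cell state through (the LNLSTMCell name is my own):

import tensorflow as tf

class LNLSTMCell(tf.keras.layers.Layer):
    def __init__(self, units, activation="tanh", **kwargs):
        super().__init__(**kwargs)
        self.state_size = [units, units]   # LSTMCell expects [h, c]
        self.output_size = units
        self.lstm_cell = tf.keras.layers.LSTMCell(units, activation=None)
        self.layer_norm = tf.keras.layers.LayerNormalization()
        self.activation = tf.keras.activations.get(activation)

    def call(self, inputs, states):
        outputs, new_states = self.lstm_cell(inputs, states)
        norm_outputs = self.activation(self.layer_norm(outputs))
        # Use the normalized output as the new h; keep the cell state c as-is.
        return norm_outputs, [norm_outputs, new_states[1]]

With state_size as a two-element list, tf.keras.layers.RNN builds both zero initial states itself, so the custom get_initial_state can be dropped.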
Related
I'm writing a custom Keras model. Here is my code:
class Model(tf.keras.Model):
    def __init__(self, first_layer, num_classes):
        super(Model, self).__init__()
        self.layer_1 = tf.keras.layers.Dense(first_layer, activation='relu')
        self.layer_2 = tf.keras.layers.Dense(num_classes, activation='softmax')

    def call(self, inp):
        output = self.layer_1(inp)
        output = self.layer_2(output)
        return output
but I get this error:
ValueError: The last dimension of the inputs to `Dense` should be defined. Found `None`.
I found tf.keras.Input, but all the examples with it use the Functional API, for example:
encoder_input = keras.Input(shape=(28, 28, 1), name="img")
x = layers.Conv2D(16, 3, activation="relu")(encoder_input)
x = layers.Conv2D(32, 3, activation="relu")(x)
But how do I introduce this in a custom Keras model?
Version: TF 2.6
Edited code:
import tensorflow as tf

class Model(tf.keras.Model):
    def __init__(self, first_layer, num_classes):
        super(Model, self).__init__()
        self.layer_1 = tf.keras.layers.Dense(first_layer, activation='relu')
        self.layer_2 = tf.keras.layers.Dense(num_classes, activation='softmax')

    def call(self, inp):
        output = self.layer_1(inp)
        output = self.layer_2(output)
        return output

encoder_input = tf.keras.Input(shape=(28, 28, 1))
x = tf.keras.layers.Conv2D(16, 3, activation="relu")  # created but never applied below
# x = layers.Conv2D(32, 3, activation="relu")(x)
model = Model(5, 3)       # instantiated the model
y = model(encoder_input)  # took input
print(y)
Output:
KerasTensor(type_spec=TensorSpec(shape=(None, 28, 28, 3),
dtype=tf.float32, name=None), name='model_4/dense_6/Softmax:0',
description="created by layer 'model_4'")
Reference:
https://www.tensorflow.org/guide/keras/custom_layers_and_models
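If you also want the result as a model object with a defined input shape (so model.summary() shows per-layer shapes), one extra step — an added sketch, not part of the answer above — is to build a functional Model from the symbolic input and output:

wrapped = tf.keras.Model(inputs=encoder_input, outputs=y)
wrapped.summary()  # input shape is now fixed at (None, 28, 28, 1)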
I'm trying to create a Keras model through subclassing:
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense

class MyModel(Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.dense1 = Dense(64, activation='relu')
        self.dense2 = Dense(10)

    def call(self, inputs):
        x = self.dense1(inputs)
        return self.dense2(x)

model = MyModel(tf.random.uniform([1, 10]))
model.summary()
I want it to be equivalent to this Functional API model:
inputs = tf.keras.Input(shape=(3,))
x = tf.keras.layers.Dense(64, activation=tf.nn.relu)(inputs)
outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
but after running those lines I get:
TypeError: __init__() takes 1 positional argument but 2 were given
Can you assist?
You passed the input data as an argument to the model constructor. You need to instantiate the model first:
model = MyModel()
Then you can pass input tensors
model(tf.random.uniform([1, 10]))
And this will work:
<tf.Tensor: shape=(1, 10), dtype=float32, numpy=
array([[-0.10745259, -0.21291552, -0.04618738, -0.0118152 , -0.1662825 ,
0.8145975 , 0.44216082, -0.07359659, 0.68233466, -0.15205911]],
dtype=float32)>
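A related note (my addition): model.summary() on a subclassed model only works after the model has been built, so call it on some data first:

model = MyModel()
model(tf.random.uniform([1, 10]))  # first call builds the weights
model.summary()                    # now the layers and parameter counts are defined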
I'm currently testing some modified versions of dropout in Keras, and one of them involves adjusting the weights during the training of a customized dense layer. However, I have not been able to run it without an error yet. I suspect it has something to do with eager execution, but I'm not sure.
import numpy as np
import tensorflow as tf
from tensorflow import keras

class Linear(keras.layers.Layer):
    def __init__(self, units, **kwargs):
        super(Linear, self).__init__(**kwargs)
        self.units = units

    def build(self, input_shape):
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="random_normal",
            trainable=True,
        )
        self.b = self.add_weight(
            shape=(self.units,), initializer="random_normal", trainable=True
        )

    def call(self, inputs, training=False):
        prob = 0.0 / 10
        if training:
            w = np.matrix(self.w)
            # w = self.w
            shape = w.shape
            size = shape[0] * shape[1]
            arr = np.random.choice([0, 1], size=size, p=[prob, 1 - prob])  # random array of 1's and 0's
            arr = arr.reshape(shape)  # reshape it to the same dimensions as the weights
            new_weights = np.multiply(arr, w)  # element-wise multiplication
            self.w = new_weights
        return tf.matmul(inputs, self.w) + self.b
from tensorflow.keras import layers, models

model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPooling2D())
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPooling2D())
model.add(layers.Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPooling2D())
model.add(layers.Conv2D(4, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPooling2D())
model.add(layers.Flatten())
model.add(Linear(3))  # Custom layer
model.add(layers.Dense(10, activation='softmax'))

model.compile(loss='CategoricalCrossentropy',
              optimizer='adam',
              metrics=['accuracy'])

epochs = 1
history = model.fit(train_dataset, validation_data=validation_dataset, epochs=epochs)
Error: TypeError: Expected binary or unicode string, got <tf.Tensor 'sequential_3/linear_3/mul:0' shape=(4, 3) dtype=float32>
self.w has to remain a tf.Variable, but after the multiplication in call() it becomes a tf.Tensor. You just need another way to do the same thing in call().
Try this code:
def call(self, inputs, training=False):
    prob = 0.0 / 10
    if training:
        shape = tuple(self.w.shape)  # static weight shape, no numpy conversion needed
        size = shape[0] * shape[1]
        arr = np.random.choice([0, 1], size=size, p=[prob, 1 - prob])  # random array of 1's and 0's
        arr = arr.reshape(shape)  # reshape it to the same dimensions as the weights
        # CHANGED 3 LINES BELOW:
        arr = tf.convert_to_tensor(arr, dtype=tf.float32)
        new_weights = tf.multiply(arr, self.w)
        self.w.assign(new_weights)  # assign preserves the tf.Variable
    return tf.matmul(inputs, self.w) + self.b
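An alternative sketch (my suggestion, not part of the answer above): instead of mutating the variable, apply the mask only in the forward pass. The weights themselves stay untouched, and because the mask is sampled with TensorFlow ops it is re-drawn every step even under tf.function:

def call(self, inputs, training=False):
    prob = 0.1  # example drop probability (placeholder value)
    w = self.w
    if training:
        # Bernoulli keep-mask: each weight survives with probability 1 - prob.
        mask = tf.cast(tf.random.uniform(tf.shape(w)) >= prob, w.dtype)
        w = w * mask  # element-wise masking; self.w is never reassigned
    return tf.matmul(inputs, w) + self.b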
I'm trying to use a custom layer in Keras. It's a simple layer, just a matmul with trainable params.
from tensorflow import keras
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.optimizers import RMSprop
from keras.layers import Layer
from tensorflow.keras import backend as K

class MultiLayer(Layer):
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(MultiLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create a trainable weight variable for this layer
        if self.output_dim[0] != input_shape[1]:
            raise Exception("expect input shape with [{},?], but get input with shape {}".format(
                self.output_dim[0], input_shape), input_shape)
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[2], self.output_dim[0]),
                                      initializer='uniform',
                                      trainable=True)
        super(MultiLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        return K.dot(x, self.kernel)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)

x_fake = np.random.random((10, 28, 28))
y_fake = [np.diag(np.ones(28))] * 10
input_shape = np.shape(x_fake)[1:]
print(input_shape)

ipt = Input(name='inputs', shape=input_shape)
layer = MultiLayer((input_shape[0], input_shape[0]), name="dev")(ipt)
# layer = Flatten()(layer)
model = Model(inputs=ipt, outputs=layer)
model.summary()

rms = RMSprop()
model.compile(loss="rms", optimizer=rms, metrics=['accuracy'])
model.fit(x_fake, y_fake)
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
inputs (InputLayer)          [(None, 28, 28)]          0
_________________________________________________________________
dev (MultiLayer)             (None, 28, 28)            784
=================================================================
Total params: 784
Trainable params: 784
Non-trainable params: 0
_________________________________________________________________
but when I fit this model, an error occurs:
ValueError: Data cardinality is ambiguous:
x sizes: 10
y sizes: 28, 28, 28, 28, 28, 28, 28, 28, 28, 28
Please provide data which shares the same first dimension.
I don't know what these sizes mean. How can I fix this?
model.output_shape must match y_fake.shape; your [np.diag(np.ones(28))]*10 is a list of 10 arrays, each of shape (28, 28), so TF thinks you have 10 separate outputs. I presume you intended np.stack([np.diag(np.ones(28))] * 10).
Next, make all imports consistent (either all from tensorflow.keras or all from keras). Lastly, "rms" isn't a supported loss, so I replaced it with "mse". All fixes below.
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.layers import Layer
from tensorflow.keras import backend as K

class MultiLayer(Layer):
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(MultiLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create a trainable weight variable for this layer
        if self.output_dim[0] != input_shape[1]:
            raise Exception(("expect input shape with [{},?], but get input with "
                             "shape {}").format(self.output_dim[0], input_shape),
                            input_shape)
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[2], self.output_dim[0]),
                                      initializer='uniform',
                                      trainable=True)
        super(MultiLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        return K.dot(x, self.kernel)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)

x_fake = np.random.random((10, 28, 28))
y_fake = np.stack([np.diag(np.ones(28))] * 10)  # (10, 28, 28)
input_shape = np.shape(x_fake)[1:]
print(input_shape)

ipt = Input(name='inputs', shape=input_shape)
layer = MultiLayer((input_shape[0], input_shape[0]), name="dev")(ipt)
model = Model(inputs=ipt, outputs=layer)
model.summary()

rms = RMSprop()
model.compile(loss="mse", optimizer=rms, metrics=['accuracy'])
model.fit(x_fake, y_fake)
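As a quick sanity check (my addition), np.stack turns the list into one array whose first axis matches x_fake, which is exactly what the cardinality check wants:

y_fake = np.stack([np.diag(np.ones(28))] * 10)
print(y_fake.shape)  # (10, 28, 28) -- first dimension matches x_fake's 10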
I am trying to implement a Keras custom layer whose superclass is another custom layer. The superclass takes one input and works fine, but the subclass, which is supposed to take two inputs, isn't accepting multiple inputs.
I have tried pairing the superclass with Layer, but the result was the same. I think there's some problem in the build step.
class SpadeLayer(ConvSN2D, Layer):
    def __init__(self, filters, kernel_size, **kwargs):
        super(SpadeLayer, self).__init__(filters, kernel_size, **kwargs)

    def build(self, input_shape):
        self.bn = BatchNormalization(center=False, scale=False)
        self.conv0 = ConvSN2D(128, kernel_size, strides=1, padding='same',
                              kernel_initializer='glorot_uniform')
        self.conv1 = ConvSN2D(filters, kernel_size, strides=1, padding='same',
                              kernel_initializer='glorot_uniform')
        self.conv2 = ConvSN2D(filters, kernel_size, strides=1, padding='same',
                              kernel_initializer='glorot_uniform')
        super(SpadeLayer, self).build(input_shape)

    def call(self, inputs):
        _, f_h, f_w, _ = inputs[0].get_shape().as_list()
        segmap_down = K.tf.image.resize_images(inputs[1], (f_h, f_w))
        init_conv = self.conv0()(segmap_down)
        gamma = self.conv1()(init_conv)
        beta = self.conv1()(init_conv)
        return (self.bn(features) * gamma) + beta

layer = SpadeLayer(3, 3)
input1 = Input(shape=(16, 16, 32))
input2 = Input(shape=(16, 16, 1))
output = layer([input1, input2])
model = tf.keras.Model(inputs=[input], outputs=[output])
model.summary()
I am getting the following error:
ValueError Traceback (most recent call last)
<ipython-input-39-45f88bca3df3> in <module>
2 input1 = Input(shape=(16,16,32))
3 input2 = Input(shape=(16,16,1))
----> 4 output = layer([input1, input2])
5 model = tf.keras.Model(inputs=[input], outputs=[output])
6 model.summary()
C:\Anaconda3\envs\env_tf\lib\site-packages\keras\engine\base_layer.py in __call__(self, inputs, **kwargs)
412 # Raise exceptions in case the input is not compatible
413 # with the input_spec specified in the layer constructor.
--> 414 self.assert_input_compatibility(inputs)
415
416 # Collect input shapes to build layer.
C:\Anaconda3\envs\env_tf\lib\site-packages\keras\engine\base_layer.py in assert_input_compatibility(self, inputs)
297 'but it received ' + str(len(inputs)) +
298 ' input tensors. Input received: ' +
--> 299 str(inputs))
300 for input_index, (x, spec) in enumerate(zip(inputs, input_spec)):
301 if spec is None:
ValueError: Layer spade_layer_11 expects 1 inputs, but it received 2 input tensors. Input received: [<tf.Tensor 'input_21:0' shape=(?, 16, 16, 32) dtype=float32>, <tf.Tensor 'input_22:0' shape=(?, 16, 16, 1) dtype=float32>]
I just don't know what to do to rectify it. I have checked many resources, but haven't found anything similar.
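For what it's worth, a sketch of how this is usually structured (an assumption on my side, since ConvSN2D comes from an external spectral-normalization implementation; plain Conv2D stands in for it here): compose the sublayers instead of inheriting from ConvSN2D, and let call unpack the two inputs. Layers are also called directly on tensors, not instantiated again with ():

import tensorflow as tf
from tensorflow.keras.layers import Layer, BatchNormalization, Conv2D, Input

class SpadeLayer(Layer):
    def __init__(self, filters, kernel_size, **kwargs):
        super().__init__(**kwargs)
        self.bn = BatchNormalization(center=False, scale=False)
        # Conv2D stands in for ConvSN2D in this sketch.
        self.conv0 = Conv2D(128, kernel_size, strides=1, padding='same',
                            kernel_initializer='glorot_uniform')
        self.conv_gamma = Conv2D(filters, kernel_size, strides=1, padding='same',
                                 kernel_initializer='glorot_uniform')
        self.conv_beta = Conv2D(filters, kernel_size, strides=1, padding='same',
                                kernel_initializer='glorot_uniform')

    def call(self, inputs):
        features, segmap = inputs            # unpack the two inputs
        f_h, f_w = features.shape[1], features.shape[2]
        segmap_down = tf.image.resize(segmap, (f_h, f_w))
        init_conv = self.conv0(segmap_down)  # call layer instances directly
        gamma = self.conv_gamma(init_conv)
        beta = self.conv_beta(init_conv)
        return self.bn(features) * gamma + beta

# filters must match the feature map's channel count (32 here) so that
# gamma and beta broadcast against the normalized features.
input1 = Input(shape=(16, 16, 32))
input2 = Input(shape=(16, 16, 1))
output = SpadeLayer(32, 3)([input1, input2])
model = tf.keras.Model(inputs=[input1, input2], outputs=output)
model.summary()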