I am trying to build a simple LSTM network with 2 stacked layers. For that purpose I use MultiRNNCell. I followed tutorials and other Stack Overflow topics, but I still cannot get my network to run. Below you can find the declaration of the initial state that I found on Stack Overflow.
cell_count = 10 # timesteps
num_hidden = 4 # hidden layer num of features
num_classes = 1
num_layers = 2
state_size = 4
init_c = tf.Variable(tf.zeros([batch_size, cell_count]), trainable=False)
init_h = tf.Variable(tf.zeros([batch_size, cell_count]), trainable=False)
initial_state = rnn.LSTMStateTuple(init_c, init_h) #[num_layers, 2, batch_size, state_size])
Below you can find what my model looks like:
def generate_model_graph(self, data):
    L1 = self.generate_layer(self.cell_count)
    L2 = self.generate_layer(self.cell_count)
    # outputs from L1
    L1_outs, _ = L1(data, self.initial_state)
    # reverse output array
    L2_inputs = L1_outs[::-1]
    L2_outs, _ = L2(L2_inputs, self.initial_state)
    predicted_vals = tf.add(tf.matmul(self.weights["out"], L2_outs), self.biases["out"])
    L2_out = tf.nn.sigmoid(predicted_vals)
    return L2_out

def generate_layer(self, size):
    cells = [rnn.BasicLSTMCell(self.num_hidden) for _ in range(size)]
    return rnn.MultiRNNCell(cells)
And here is how I run the session:
def train_model(self, generator):
    tr, cost = self.define_model()
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for _ in range(self.n_epochs):
            batch_x, batch_y = self._prepare_data(generator)
            init_state = tf.zeros((self.cell_count, self.num_hidden))
            t, c = sess.run([tr, cost], feed_dict={self.X: batch_x, self.Y: batch_y, self.initial_state: init_state})
            print(c)
Unfortunately, I still get an error saying 'Variable' object is not iterable.
File "detector_lstm_v2.py", line 104, in <module>
c.train_model(data_gen)
File "detector_lstm_v2.py", line 38, in train_model
tr, cost = self.define_model()
File "detector_lstm_v2.py", line 51, in define_model
predicted_vals = self.generate_model_graph(self.X)
File "detector_lstm_v2.py", line 65, in generate_model_graph
L1_outs, _ = L1(data, self.initial_state)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/rnn_cell_impl.py", line 232, in __call__
return super(RNNCell, self).__call__(inputs, state)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/layers/base.py", line 329, in __call__
outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py", line 703, in __call__
outputs = self.call(inputs, *args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/rnn_cell_impl.py", line 1325, in call
cur_inp, new_state = cell(cur_inp, cur_state)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/rnn_cell_impl.py", line 339, in __call__
*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/layers/base.py", line 329, in __call__
outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py", line 703, in __call__
outputs = self.call(inputs, *args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/rnn_cell_impl.py", line 633, in call
c, h = state
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variables.py", line 491, in __iter__
raise TypeError("'Variable' object is not iterable.")
TypeError: 'Variable' object is not iterable.
Does anyone know how to solve this problem?
You are creating a multi-layer RNN cell but you are passing a single state.
Use this to create your state:
initial_state = L1.zero_state(batch_size, tf.float32)
or use it to initialize the variables if you need a variable-backed state.
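For a MultiRNNCell the state is a tuple with one LSTMStateTuple per layer, so variable-backed state would look roughly like this (just a sketch, assuming batch_size, num_layers and num_hidden are defined as in your snippet):
# One (c, h) pair of non-trainable variables per stacked layer,
# each shaped [batch_size, num_hidden] (not cell_count).
init_states = []
for _ in range(num_layers):
    init_c = tf.Variable(tf.zeros([batch_size, num_hidden]), trainable=False)
    init_h = tf.Variable(tf.zeros([batch_size, num_hidden]), trainable=False)
    init_states.append(rnn.LSTMStateTuple(init_c, init_h))
initial_state = tuple(init_states)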
There are some "naming" problems in your code that make me think you are misunderstanding something here.
There are different parameters:
The hidden size of your layers: it is the num_units attribute of the RNNCell constructor. All the states of your cell need to have shape [batch_size, hidden_size] (and not cell_count).
The cell_count in your code does not determine the length of the sequence but rather how deep your network is.
The length of the sequence is determined automatically from the input sequence you pass to your model (which needs to be a list of tensors).
I recommend having a look at the TF tutorial on Recurrent Neural Networks here, and maybe this answer here, to understand what an RNNCell is w.r.t. the RNN literature (it is a layer and not a single cell).
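To make the difference concrete, here is a minimal sketch of a stack whose depth is num_layers and whose per-layer size is num_hidden. It uses tf.nn.dynamic_rnn (which takes one 3-D tensor instead of a list of tensors) purely for illustration, and assumes batch_size and an inputs tensor of shape [batch_size, seq_len, feature_dim] are already defined:
# Depth comes from num_layers; sequence length comes from the input tensor.
cells = [rnn.BasicLSTMCell(num_hidden) for _ in range(num_layers)]
stack = rnn.MultiRNNCell(cells)
initial_state = stack.zero_state(batch_size, tf.float32)
# inputs: [batch_size, seq_len, feature_dim]; seq_len is inferred from this tensor.
outputs, final_state = tf.nn.dynamic_rnn(stack, inputs, initial_state=initial_state)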
Related
I am trying to implement ELMo embeddings via TensorFlow in a neural network. Here is a code snippet of my network:
def get_elmo_embeds_model():
    input_text = tf.keras.layers.Input(shape=(1,), dtype=tf.string)
    embedding = tf.keras.layers.Lambda(ELMoEmbedding, output_shape=(1024,))(input_text)
    print(embedding.shape)
    conv_1d_layer = tf.keras.layers.Conv1D(256, 5, activation='relu')(embedding)
    max_pool_1 = tf.keras.layers.MaxPooling1D(5)(conv_1d_layer)
    x = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(256, name="BiLSTM"))(max_pool_1)
    dropout_2 = tf.keras.layers.Dropout(0.2)(x)
    flatten_1 = tf.keras.layers.Flatten()(dropout_2)
    pred = tf.keras.layers.Dense(1, activation='sigmoid')(flatten_1)
    model = tf.keras.models.Model(inputs=[input_text], outputs=pred)
    return model

text_only_model = get_elmo_embeds_model()
text_only_model.compile(loss='binary_crossentropy', optimizer='adam',
                        metrics=['accuracy', precision_mat, recall_mat, f1_mat])
text_only_model.summary()

with tf.Session() as session:
    K.set_session(session)
    session.run(tf.global_variables_initializer())
    session.run(tf.tables_initializer())
    history = text_only_model.fit_generator(text_elmo_train, epochs=EPOCHS,
                                            validation_steps=VALIDATION_STEPS,
                                            steps_per_epoch=STEPS_PER_EPOCH,
                                            validation_data=text_elmo_valid)
When running this model, I am getting the following error:
steps_per_epoch=STEPS_PER_EPOCH,validation_data = text_elmo_valid)
File "/home/.local/lib/python3.7/site-packages/keras/engine/training_v1.py", line 796, in fit
use_multiprocessing=use_multiprocessing)
File "/home/.local/lib/python3.7/site-packages/keras/engine/training_generator_v1.py", line 586, in fit
steps_name='steps_per_epoch')
File "/home/.local/lib/python3.7/site-packages/keras/engine/training_generator_v1.py", line 306, in model_iteration
steps_name='validation_steps')
File "/home/.local/lib/python3.7/site-packages/keras/engine/training_generator_v1.py", line 252, in model_iteration
batch_outs = batch_function(*batch_data)
File "/home/.local/lib/python3.7/site-packages/keras/engine/training_v1.py", line 1152, in test_on_batch
outputs = self.test_function(inputs) # pylint: disable=not-callable
File "/home/.local/lib/python3.7/site-packages/keras/backend.py", line 4187, in __call__
run_metadata=self.run_metadata)
File "/home/.conda/envs/test_multimod/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 1485, in __call__
run_metadata_ptr)
tensorflow.python.framework.errors_impl.UnimplementedError: TensorArray has size zero, but element shape [?,256] is not fully defined. Currently only static shapes are supported when packing zero-size TensorArrays.
I have checked the data being passed in and it has no null values, but this error still occurs when running this function.
I'm trying to use DataParallel on a GRU network as explained in the docs, and I keep getting the same error.
"""Defines the neural network, losss function and metrics"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self, params, anchor_is_phrase):
"""
Simple LSTM, used to generate the LSTM for both the word and video
embeddings.
Args:
params: (Params) contains vocab_size, embedding_dim, lstm_hidden_dim
is_phrase: is word lstm or the vid lstm
"""
super(Net, self).__init__()
if anchor_is_phrase:
self.lstm = nn.DataParallel(nn.GRU(params.word_embedding_dim, params.hidden_dim, 1)).cuda()#, batch_first=True)
else:
self.lstm = nn.DataParallel(nn.GRU(params.vid_embedding_dim, params.hidden_dim, 1)).cuda() #, batch_first=True)
def forward(self, s, anchor_is_phrase = False):
"""
Forward prop.
"""
s, _ = self.lstm(s)
s.data.contiguous()
return s
The error happens at the line s, _ = self.lstm(s) in the previous code:

s, _ = self.lstm(s)
s.data.contiguous()
return s

I get the following error message:
s, _ = self.lstm(s)
File "/home/pavelameen/miniconda3/envs/TD2/lib/python3.6/site-packages/torch/nn/modules/module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "/home/pavelameen/miniconda3/envs/TD2/lib/python3.6/site-packages/torch/nn/parallel/data_parallel.py", line 152, in forward
outputs = self.parallel_apply(replicas, inputs, kwargs)
File "/home/pavelameen/miniconda3/envs/TD2/lib/python3.6/site-packages/torch/nn/parallel/data_parallel.py", line 162, in parallel_apply
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
File "/home/pavelameen/miniconda3/envs/TD2/lib/python3.6/site-packages/torch/nn/parallel/parallel_apply.py", line 83, in parallel_apply
raise output
File "/home/pavelameen/miniconda3/envs/TD2/lib/python3.6/site-packages/torch/nn/parallel/parallel_apply.py", line 59, in _worker
output = module(*input, **kwargs)
File "/home/pavelameen/miniconda3/envs/TD2/lib/python3.6/site-packages/torch/nn/modules/module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "/home/pavelameen/miniconda3/envs/TD2/lib/python3.6/site-packages/torch/nn/modules/rnn.py", line 193, in forward
max_batch_size = input.size(0) if self.batch_first else input.size(1)
AttributeError: 'tuple' object has no attribute 'size'
The interesting part is that if I print the type of s at line 27 I get PackedSequence; why does it become a tuple in the LSTM forward method?
nn.GRU expects (line 181) either a PackedSequence or a tensor as input. As mentioned in the error, you are passing a tuple object s instead.
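As a minimal, made-up illustration of that contract (small hypothetical shapes, not the original training code): a plain tensor or a PackedSequence goes through nn.GRU fine, while a bare tuple, which is what the wrapped module ends up receiving here after DataParallel scatters the input, fails exactly as in the traceback.
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_sequence

gru = nn.GRU(input_size=8, hidden_size=16, num_layers=1)

# A plain (seq_len, batch, input_size) tensor works:
tensor_in = torch.randn(5, 3, 8)
out, _ = gru(tensor_in)

# A PackedSequence works too (sequences sorted by decreasing length):
packed_in = pack_sequence([torch.randn(5, 8), torch.randn(3, 8)])
out, _ = gru(packed_in)

# A bare tuple, however, fails just like the traceback above:
# AttributeError: 'tuple' object has no attribute 'size'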
I'm trying to create a recurrent neural network with the Keras functional API in TensorFlow. The RNN takes in tweets and classifies them as positive or negative.
attention_input = keras.Input(shape=(512,), name='attention')
a = keras.layers.Dense(1, activation='sigmoid')(attention_input)
attention_output = keras.layers.Multiply()([attention_input, a])
attention = keras.Model(inputs=attention_input, outputs=attention_output, name='attention_model')
inputs1 = keras.Input(shape=(100,), name='lstm')
x = keras.layers.Embedding(len(tokenizer.word_counts)+1,
                           100,
                           weights=[embedding_matrix],
                           input_length=100,
                           trainable=True)(inputs1)
x = keras.layers.Bidirectional(tf.keras.layers.LSTM(256, return_sequences=True))(x)
x = keras.layers.TimeDistributed(attention)(x)
x = tf.unstack(x, num=256)
t_sum = x[0]
for i in range(256 - 1):
    t_sum = keras.layers.Add()([t_sum, x[i+1]])
lstm = keras.Model(inputs=inputs1, outputs=t_sum, name='lstm_model')
inputs2 = keras.Input(shape=(100,), name='dense')
x = keras.layers.Dense(256, activation='relu')(inputs2)
x = keras.layers.Dropout(0.2)(x)
x = keras.layers.Dense(128, activation='relu')(x)
x = keras.layers.Dropout(0.2)(x)
outputs2 = keras.layers.Dense(1, activation='sigmoid')(x)
dense = keras.Model(inputs=inputs2, outputs=outputs2, name='txt_model')
inputs = keras.Input(shape=(100,), name='text')
x = lstm(inputs)
outputs = dense(x)
model = keras.Model(inputs=inputs, outputs=outputs, name='text_model')
model.compile(
    loss='binary_crossentropy',
    optimizer='adam',
    metrics=['acc',
             tf.keras.metrics.Precision(),
             tf.keras.metrics.Recall()])
I get the following runtime error:
2019-04-13 10:29:34.855192: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
Traceback (most recent call last):
File ".\main.py", line 25, in <module>
' -> '.join(permutation).lower() : { ** results.get(' -> '.join(permutation).lower(), {}), ** framework.runtime.evaluate(path, permutation, classifiers, cached) }
File "C:\Users\steff\Desktop\Skole\MsT\framework\framework\runtime.py", line 30, in evaluate
classifier.lower() : framework.classifiers.list[classifier.lower()](data)
File "C:\Users\steff\Desktop\Skole\MsT\framework\framework\classifiers\rnn.py", line 93, in evaluate
x = lstm(inputs)
File "C:\Users\steff\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\keras\engine\base_layer.py", line 612, in __call__
outputs = self.call(inputs, *args, **kwargs)
File "C:\Users\steff\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\keras\engine\network.py", line 870, in call
return self._run_internal_graph(inputs, training=training, mask=mask)
File "C:\Users\steff\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\keras\engine\network.py", line 1011, in _run_internal_graph
output_tensors = layer(computed_tensors, **kwargs)
File "C:\Users\steff\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\keras\engine\base_layer.py", line 669, in __call__
self.set_weights(self._initial_weights)
File "C:\Users\steff\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\keras\engine\base_layer.py", line 938, in set_weights
param_values = backend.batch_get_value(params)
File "C:\Users\steff\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\keras\backend.py", line 2837, in batch_get_value
raise RuntimeError('Cannot get value inside Tensorflow graph function.')
RuntimeError: Cannot get value inside Tensorflow graph function.
I can see from the errors that it has something to do with my LSTM model, but I can't see what the cause of the problem is.
I think that you are using TensorFlow 2.0. If this is the case, then using the parameter embeddings_initializer= instead of weights= worked for me.
x = tf.keras.layers.Embedding(vocabulary_size, embedding_dim, embeddings_initializer=tf.keras.initializers.Constant(embedding_matrix), trainable=False)
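Plugged into the embedding layer from the question, that would look roughly like this (just a sketch, assuming tokenizer, embedding_matrix and inputs1 are defined as in the question):
x = keras.layers.Embedding(
    len(tokenizer.word_counts) + 1,
    100,
    embeddings_initializer=tf.keras.initializers.Constant(embedding_matrix),
    input_length=100,
    trainable=True)(inputs1)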
I am trying to build an LSTM model for time series data. The details of the dataset:
The input data is a time series with 800 subjects, each having a 2D data array of 60 rows and 200 columns. I loaded the entire data as a tensor of shape [800, 60, 200], and the labels for the classification problem are of shape [800, 1].
I made a Dataset that returns a dictionary per sample, using the following code:
class DataCurate(Dataset):
    def __init__(self, l1, l2, transform=None):
        self.l1 = l1
        self.l2 = l2
        self.transform = transform

    def __len__(self):
        return len(self.l1)

    def __getitem__(self, index):
        array = self.l1[index]
        label = self.l2[index]
        sample = {'time_data': array, 'labels': label}
        return sample
The data and labels are in variables x and y; I call data = DataCurate(x, y).
Later on I build an LSTM model for the classification problem using the code:
class RNNModel(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(RNNModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.linear = nn.Linear(hidden_size, num_layers, bias=True)
        self.softmax = nn.LogSoftmax()

    def forward(self, x):
        self.lstm.flatten_parameters()
        out_packed, state = self.lstm(x)  # RNN
        print("lstm output size: " + str(out_packed.size()))
        out = self.linear(out_packed[-1])  # linear transform
        print("linear output size: " + str(out.size()))
        log_probs = F.log_softmax(out, dim=1)
        print("softmax output size: " + str(log_probs.size()))
        return log_probs
This gives me an error when I run the training script:
UserWarning: RNN module weights are not part of single contiguous chunk of memory. This means they need to be compacted at every call, possibly greatly increasing memory usage. To compact weights again call flatten_parameters().
out_packed, state = self.lstm(x) # RNN
Traceback (most recent call last):
File "main_2.py", line 100, in <module>
output = model(train_inputs.transpose(0,1))
File "/usr/local/lib/python3.5/dist-packages/torch/nn/modules/module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "/media/iab/disk_a/meghal/test/quickdraw_tutorial_dataset_v1/pytorch_RNN_examples/model.py", line 26, in forward
out_packed, state = self.lstm(x) # RNN
File "/usr/local/lib/python3.5/dist-packages/torch/nn/modules/module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/torch/nn/modules/rnn.py", line 192, in forward
output, hidden = func(input, self.all_weights, hx, batch_sizes)
File "/usr/local/lib/python3.5/dist-packages/torch/nn/_functions/rnn.py", line 324, in forward
return func(input, *fargs, **fkwargs)
File "/usr/local/lib/python3.5/dist-packages/torch/nn/_functions/rnn.py", line 288, in forward
dropout_ts)
RuntimeError: param_from.type() == param_to.type() ASSERT FAILED at /pytorch/aten/src/ATen/native/cudnn/RNN.cpp:491, please report a bug to PyTorch. parameter types mismatch
I don't know what this means or how to resolve it. I am completely new to LSTMs.
I'm trying to develop a recurrent neural network in TensorFlow 1.1.0, and I wrote a function that should return an LSTM.
def LSTM(x, num_units, num, num_layers=3):
    cells = []
    for i in range(num_layers):
        cell = LSTMCell(num_units=num_units, state_is_tuple=True)
        cell = DropoutWrapper(cell=cell, output_keep_prob=0.5)
        cells.append(cell)
    lstm = MultiRNNCell(cells=cells, state_is_tuple=True)
    val, state = tf.nn.dynamic_rnn(lstm, x, dtype=tf.float32)
    val = tf.transpose(val, [1, 0, 2])  # turn the result into a sequence
    last = tf.gather(val, int(val.get_shape()[0]) - 1)  # take the output of the last element
    return last
This function actually works, but if I try to reuse it more than once I get the following error:
C:\ProgramData\Anaconda3\envs\obra\python.exe C:/Users/Simone/Desktop/Cobra/LSTM_Function_Filtro.py
Traceback (most recent call last):
File "C:/Users/Simone/Desktop/Cobra/LSTM_Function_Filtro.py", line 81, in <module>
Lstm2 = tf.nn.relu(tf.matmul(lyrs.LSTM(concat1, num_hidden, 1), W2) + B2)
File "C:\Users\Simone\Desktop\Cobra\Layers_OK.py", line 62, in LSTM
val, state = tf.nn.dynamic_rnn(lstm, x, dtype=tf.float32)
File "C:\ProgramData\Anaconda3\envs\obra\lib\site-packages\tensorflow\python\ops\rnn.py", line 553, in dynamic_rnn
dtype=dtype)
File "C:\ProgramData\Anaconda3\envs\obra\lib\site-packages\tensorflow\python\ops\rnn.py", line 720, in _dynamic_rnn_loop
swap_memory=swap_memory)
File "C:\ProgramData\Anaconda3\envs\obra\lib\site-packages\tensorflow\python\ops\control_flow_ops.py", line 2623, in while_loop
result = context.BuildLoop(cond, body, loop_vars, shape_invariants)
File "C:\ProgramData\Anaconda3\envs\obra\lib\site-packages\tensorflow\python\ops\control_flow_ops.py", line 2456, in BuildLoop
pred, body, original_loop_vars, loop_vars, shape_invariants)
File "C:\ProgramData\Anaconda3\envs\obra\lib\site-packages\tensorflow\python\ops\control_flow_ops.py", line 2406, in _BuildLoop
body_result = body(*packed_vars_for_body)
File "C:\ProgramData\Anaconda3\envs\obra\lib\site-packages\tensorflow\python\ops\rnn.py", line 705, in _time_step
(output, new_state) = call_cell()
File "C:\ProgramData\Anaconda3\envs\obra\lib\site-packages\tensorflow\python\ops\rnn.py", line 691, in <lambda>
call_cell = lambda: cell(input_t, state)
File "C:\ProgramData\Anaconda3\envs\obra\lib\site-packages\tensorflow\contrib\rnn\python\ops\core_rnn_cell_impl.py", line 953, in __call__
cur_inp, new_state = cell(cur_inp, cur_state)
File "C:\ProgramData\Anaconda3\envs\obra\lib\site-packages\tensorflow\contrib\rnn\python\ops\core_rnn_cell_impl.py", line 713, in __call__
output, new_state = self._cell(inputs, state, scope)
File "C:\ProgramData\Anaconda3\envs\obra\lib\site-packages\tensorflow\contrib\rnn\python\ops\core_rnn_cell_impl.py", line 398, in __call__
reuse=self._reuse) as unit_scope:
File "C:\ProgramData\Anaconda3\envs\obra\lib\contextlib.py", line 59, in __enter__
return next(self.gen)
File "C:\ProgramData\Anaconda3\envs\obra\lib\site-packages\tensorflow\contrib\rnn\python\ops\core_rnn_cell_impl.py", line 93, in _checked_scope
"the argument reuse=True." % (scope_name, type(cell).__name__))
ValueError: Attempt to have a second RNNCell use the weights of a variable scope that already has weights: 'rnn/multi_rnn_cell/cell_0/lstm_cell'; and the cell was not constructed as LSTMCell(..., reuse=True). To share the weights of an RNNCell, simply reuse it in your second calculation, or create a new one with the argument reuse=True.
I also tried to add tf.get_variable_scope().reuse_variables() at the end of the for loop, but then I get the error
Variable rnn/multi_rnn_cell/cell_0/lstm_cell/weights does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=None in VarScope?
If I am not mistaken, you want to share the parameters or create another RNN using the same code. If so, you can use tf.variable_scope as follows:
def LSTM(x, num_units, num_layers=3, reuse=False, scope="MultiRNNCell"):
    with tf.variable_scope(name_or_scope=scope, reuse=reuse):
        cells = []
        for i in range(num_layers):
            cell = tf.nn.rnn_cell.LSTMCell(num_units=num_units, state_is_tuple=True)
            cell = tf.nn.rnn_cell.DropoutWrapper(cell=cell, output_keep_prob=0.5)
            cells.append(cell)
        lstm = tf.nn.rnn_cell.MultiRNNCell(cells=cells, state_is_tuple=True)
        val, state = tf.nn.dynamic_rnn(lstm, x, dtype=tf.float32)
        val = tf.transpose(val, [1, 0, 2])  # turn the result into a sequence
        last = tf.gather(val, int(val.get_shape()[0]) - 1)  # take the output of the last element
        return last
On the first use you should pass reuse=False so that TensorFlow creates the variables. To share the parameters with another RNN, passing True is enough. If you want to create a new model, I suggest passing a new scope name together with reuse=False. The following example runs should make it easier to follow. I created a dummy placeholder.
def list_parameters():
    num_param = 0
    for v in tf.global_variables():
        print(v.name)
        num_param += np.prod(v.get_shape().as_list())
    print("# of parameters: " + str(num_param))

x = tf.placeholder(dtype=tf.float32,
                   shape=[32, 50, 100],
                   name='input_data')

lstm1 = LSTM(x, 64, 3, reuse=False, scope="MultiRNNCell")
list_parameters()
MultiRNNCell/rnn/multi_rnn_cell/cell_0/lstm_cell/kernel:0
MultiRNNCell/rnn/multi_rnn_cell/cell_0/lstm_cell/bias:0
MultiRNNCell/rnn/multi_rnn_cell/cell_1/lstm_cell/kernel:0
MultiRNNCell/rnn/multi_rnn_cell/cell_1/lstm_cell/bias:0
MultiRNNCell/rnn/multi_rnn_cell/cell_2/lstm_cell/kernel:0
MultiRNNCell/rnn/multi_rnn_cell/cell_2/lstm_cell/bias:0
# of parameters: 108288
lstm2 = LSTM(x, 64, 3, reuse=True, scope="MultiRNNCell")
list_parameters()
MultiRNNCell/rnn/multi_rnn_cell/cell_0/lstm_cell/kernel:0
MultiRNNCell/rnn/multi_rnn_cell/cell_0/lstm_cell/bias:0
MultiRNNCell/rnn/multi_rnn_cell/cell_1/lstm_cell/kernel:0
MultiRNNCell/rnn/multi_rnn_cell/cell_1/lstm_cell/bias:0
MultiRNNCell/rnn/multi_rnn_cell/cell_2/lstm_cell/kernel:0
MultiRNNCell/rnn/multi_rnn_cell/cell_2/lstm_cell/bias:0
# of parameters: 108288
Note that lstm1 and lstm2 are sharing parameters.
lstm3 = LSTM(x, 64, 3, reuse=False, scope="NewMultiRNNCell")
list_parameters()
MultiRNNCell/rnn/multi_rnn_cell/cell_0/lstm_cell/kernel:0
MultiRNNCell/rnn/multi_rnn_cell/cell_0/lstm_cell/bias:0
MultiRNNCell/rnn/multi_rnn_cell/cell_1/lstm_cell/kernel:0
MultiRNNCell/rnn/multi_rnn_cell/cell_1/lstm_cell/bias:0
MultiRNNCell/rnn/multi_rnn_cell/cell_2/lstm_cell/kernel:0
MultiRNNCell/rnn/multi_rnn_cell/cell_2/lstm_cell/bias:0
NewMultiRNNCell/rnn/multi_rnn_cell/cell_0/lstm_cell/kernel:0
NewMultiRNNCell/rnn/multi_rnn_cell/cell_0/lstm_cell/bias:0
NewMultiRNNCell/rnn/multi_rnn_cell/cell_1/lstm_cell/kernel:0
NewMultiRNNCell/rnn/multi_rnn_cell/cell_1/lstm_cell/bias:0
NewMultiRNNCell/rnn/multi_rnn_cell/cell_2/lstm_cell/kernel:0
NewMultiRNNCell/rnn/multi_rnn_cell/cell_2/lstm_cell/bias:0
# of parameters: 216576
A new set of parameters is created for lstm3 because the scope, and hence the variable names, are different. Finally, this post clearly explains variable naming.