Can we feed a value without defining tf.placeholder? - python

Here is the code: https://github.com/tensorflow/tensorflow/blob/r0.10/tensorflow/models/rnn/ptb/ptb_word_lm.py. I wonder why we can feed the model like this:
cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                             {m.input_data: x,
                              m.targets: y,
                              m.initial_state: state})
initial_state isn't defined with tf.placeholder, so how can we feed it?
In the code, a class is defined. It sets self._initial_state = cell.zero_state(batch_size, data_type()), then state = self._initial_state, then (cell_output, state) = cell(inputs[:, time_step, :], state), and after that self._final_state = state. In addition, it defines a property in the class:
@property
def final_state(self):
    return self._final_state
And here comes
state = m.initial_state.eval()
cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                             {m.input_data: x,
                              m.targets: y,
                              m.initial_state: state})
I have run the code locally, and the results are quite different without state in the feed_dict.
Can anyone help?

initial_state isn't defined with tf.placeholder, so how can we feed it?
A placeholder is just a TensorFlow tensor; we can feed any tensor in the graph using the feed_dict mechanism, not only placeholders.
I have run the code locally, and the results are quite different without state in the feed_dict.
Here, if you don't feed the state carried over from the previous batch, the state is re-initialized to zero for the next batch, hence the results degrade.
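As an aside, here is a minimal TF1-style sketch (not from the PTB code) showing that feed_dict can override an ordinary tensor, not just a placeholder:

import numpy as np
import tensorflow as tf

# `a` is an ordinary tensor, not a placeholder.
a = tf.zeros([2, 3])
b = a * 2.0

with tf.Session() as sess:
    print(sess.run(b))                                  # uses the graph value of `a` (all zeros)
    print(sess.run(b, feed_dict={a: np.ones((2, 3))}))  # overrides `a` for this run() call only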

Related

Jax / Neural Tangents `linearize` induces CUDA_ERROR_OUT_OF_MEMORY

Cross-posting from GitHub: https://github.com/google/neural-tangents/issues/144
We're trying to fine-tune a linearized Vision Transformer by adapting code from https://github.com/google-research/vision_transformer/blob/main/vit_jax.ipynb.
We're running into a really puzzling problem: when we load a model, we can train it, and after we linearize it, we can still train the pre-linearized model. However, when we try using the linearized model, we get:
RuntimeError: Internal: Failed to load in-memory CUBIN: CUDA_ERROR_OUT_OF_MEMORY: out of memory
This error emerges regardless of whether we are using 1 GPU or multiple. It also emerges whether we use a large batch (512) or a small one (1).
We manually tested that a forward pass raises no error, and that a backward pass raises no error. We suspect that the error might arise from the following code (although we could be wrong!):
Their code:
def make_update_fn(*, apply_fn, accum_steps, lr_fn):
    """Returns update step for data parallel training."""
    def update_fn(opt, step, batch, rng):
        _, new_rng = jax.random.split(rng)
        # Bind the rng key to the device id (which is unique across hosts)
        # Note: This is only used for multi-host training (i.e. multiple computers
        # each with multiple accelerators).
        dropout_rng = jax.random.fold_in(rng, jax.lax.axis_index('batch'))

        def cross_entropy_loss(*, logits, labels):
            logp = jax.nn.log_softmax(logits)
            return -jnp.mean(jnp.sum(logp * labels, axis=1))

        def loss_fn(params, images, labels):
            logits = apply_fn(
                dict(params=params),
                rngs=dict(dropout=dropout_rng),
                inputs=images,
                train=True)
            return cross_entropy_loss(logits=logits, labels=labels)

        l, g = utils.accumulate_gradient(
            jax.value_and_grad(loss_fn), opt.target, batch['image'], batch['label'],
            accum_steps)
        g = jax.tree_map(lambda x: jax.lax.pmean(x, axis_name='batch'), g)
        l = jax.lax.pmean(l, axis_name='batch')
        opt = opt.apply_gradient(g, learning_rate=lr_fn(step))
        return opt, l, new_rng

    return jax.pmap(update_fn, axis_name='batch', donate_argnums=(0,))
That function is then called via:
# Check out train.make_update_fn in the editor on the right side for details.
lr_fn = utils.create_learning_rate_schedule(total_steps, base_lr, decay_type, warmup_steps)

update_fn_repl = train.make_update_fn(
    apply_fn=vit_apply, accum_steps=accum_steps, lr_fn=lr_fn)

# We use a momentum optimizer that uses half precision for state to save
# memory. It also implements the gradient clipping.
opt = momentum_clip.Optimizer(grad_norm_clip=grad_norm_clip).create(params)
opt_repl = flax.jax_utils.replicate(opt)
The training loop where the memory error arises:
losses = []
lrs = []
# Completes in ~20 min on the TPU runtime.
for step, batch in zip(
        tqdm.trange(1, total_steps + 1),
        ds_train.as_numpy_iterator(),
):
    opt_repl, loss_repl, update_rng_repl = update_fn_repl(
        opt_repl, flax.jax_utils.replicate(step), batch, update_rng_repl)  # ERROR IS HERE
    losses.append(loss_repl[0])
    lrs.append(lr_fn(step))
In order to linearize the ViT, we do the following:
def vit_apply(params, input):
    return model.apply(dict(params=params), input, train=True)

f_lin = nt.linearize(vit_apply, params)
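For reference, nt.linearize(vit_apply, params) returns a function with the same call signature as vit_apply, so the linearized model gets swapped in wherever vit_apply was used; roughly (our sketch, not the exact notebook code):

# Sketch only: substituting the linearized apply function into the loss.
def loss_fn_lin(lin_params, images, labels):
    # f_lin evaluates the first-order Taylor expansion of vit_apply
    # around the original `params`, at the point `lin_params`.
    logits = f_lin(lin_params, images)
    return cross_entropy_loss(logits=logits, labels=labels)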

PyTorch: "one of the variables needed for gradient computation has been modified by an inplace operation"

I'm training a PyTorch RNN on a text file of song lyrics to predict the next character given a character.
Here's how my RNN is defined:
import torch
import torch.nn as nn
import torch.optim

class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        # from input, previous hidden state to new hidden state
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        # from input, previous hidden state to output
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        # softmax on output
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        combined = torch.cat((input, hidden), 1)
        # get new hidden state
        hidden = self.i2h(combined)
        # get output
        output = self.i2o(combined)
        # apply softmax
        output = self.softmax(output)
        return output, hidden

    def initHidden(self):
        return torch.zeros(1, self.hidden_size)

rnn = RNN(input_size=num_chars, hidden_size=200, output_size=num_chars)
criterion = nn.NLLLoss()
lr = 0.01
optimizer = torch.optim.AdamW(rnn.parameters(), lr=lr)
Here's my training function:
def train(train, target):
    hidden = rnn.initHidden()
    loss = 0
    for i in range(len(train)):
        optimizer.zero_grad()
        # get output, hidden state from rnn given input char, hidden state
        output, hidden = rnn(train[i].unsqueeze(0), hidden)
        # returns the index with '1' - identifying the index of the right character
        target_class = (target[i] == 1).nonzero(as_tuple=True)[0]
        loss += criterion(output, target_class)
        loss.backward(retain_graph=True)
        optimizer.step()
        print("done " + str(i) + " loop")
    return output, loss.item() / train.size(0)
When I run my training function, I get this error:
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.FloatTensor [274, 74]], which is output 0 of TBackward, is at version 5; expected version 3 instead. Hint: the backtrace further above shows the operation that failed to compute its gradient. The variable in question was changed in there or anywhere later. Good luck!
Interestingly, it makes it through two complete loops of the training function before giving me that error.
Now, when I remove the retain_graph = True from loss.backward(), I get this error:
RuntimeError: Trying to backward through the graph a second time (or directly access saved variables after they have already been freed). Saved intermediate values of the graph are freed when you call .backward() or autograd.grad(). Specify retain_graph=True if you need to backward through the graph a second time or if you need to access saved variables after calling backward.
It shouldn't be trying to go backward through the graph multiple times here. Perhaps the graph is not getting cleared between training loops?
The issue is that you are accumulating your loss values (and, with them, the computation graphs attached to them) in the variable loss, here:
loss += criterion(output, target_class)
In turn, this means that at every iteration you are trying to backpropagate through the current loss value as well as all the loss values computed in previous iterations. In this particular instance, where you are looping through your dataset, that isn't the right thing to do.
A simple fix is to accumulate loss's underlying scalar value, not the tensor itself, using item(), and to backpropagate on the current loss tensor only:
total_loss = 0
for i in range(len(train)):
    optimizer.zero_grad()
    output, hidden = rnn(train[i].unsqueeze(0), hidden)
    target_class = (target[i] == 1).nonzero(as_tuple=True)[0]
    loss = criterion(output, target_class)
    loss.backward()
    total_loss += loss.item()
Since you are updating the model's parameters straight after having done the backpropagation, you don't need to retain the graph in memory.
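For completeness, here is a sketch of the whole training function with that fix applied. It is our sketch, not the asker's code; note the hidden.detach(), which we add on the assumption that gradients should not flow across step boundaries (each step's graph is freed by its own backward() call):

def train(train, target):
    hidden = rnn.initHidden()
    total_loss = 0
    for i in range(len(train)):
        optimizer.zero_grad()
        output, hidden = rnn(train[i].unsqueeze(0), hidden)
        target_class = (target[i] == 1).nonzero(as_tuple=True)[0]
        loss = criterion(output, target_class)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        # Assumption of this sketch: cut the graph between steps so the
        # already-freed graph of the previous step is never revisited.
        hidden = hidden.detach()
    return output, total_loss / train.size(0)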

TensorFlow: Remember LSTM state for next batch (stateful LSTM)

Given a trained LSTM model I want to perform inference for single timesteps, i.e. seq_length = 1 in the example below. After each timestep the internal LSTM (memory and hidden) states need to be remembered for the next 'batch'. For the very beginning of the inference the internal LSTM states init_c, init_h are computed given the input. These are then stored in a LSTMStateTuple object which is passed to the LSTM. During training this state is updated every timestep. However for inference I want the state to be saved in between batches, i.e. the initial states only need to be computed at the very beginning and after that the LSTM states should be saved after each 'batch' (n=1).
I found this related StackOverflow question: Tensorflow, best way to save state in RNNs?. However this only works if state_is_tuple=False, but this behavior is soon to be deprecated by TensorFlow (see rnn_cell.py). Keras seems to have a nice wrapper to make stateful LSTMs possible but I don't know the best way to achieve this in TensorFlow. This issue on the TensorFlow GitHub is also related to my question: https://github.com/tensorflow/tensorflow/issues/2838
Does anyone have good suggestions for building a stateful LSTM model?
inputs = tf.placeholder(tf.float32, shape=[None, seq_length, 84, 84], name="inputs")
targets = tf.placeholder(tf.float32, shape=[None, seq_length], name="targets")
num_lstm_layers = 2

with tf.variable_scope("LSTM") as scope:
    lstm_cell = tf.nn.rnn_cell.LSTMCell(512, initializer=initializer, state_is_tuple=True)
    self.lstm = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * num_lstm_layers, state_is_tuple=True)

    init_c = # compute initial LSTM memory state using contents in placeholder 'inputs'
    init_h = # compute initial LSTM hidden state using contents in placeholder 'inputs'
    self.state = [tf.nn.rnn_cell.LSTMStateTuple(init_c, init_h)] * num_lstm_layers

    outputs = []
    for step in range(seq_length):
        if step != 0:
            scope.reuse_variables()
        # CNN features, as input for LSTM
        x_t = # ...
        # LSTM step through time
        output, self.state = self.lstm(x_t, self.state)
        outputs.append(output)
I found out it was easiest to save the whole state for all layers in a placeholder.
init_state = np.zeros((num_layers, 2, batch_size, state_size))
...
state_placeholder = tf.placeholder(tf.float32, [num_layers, 2, batch_size, state_size])
Then unpack it and create a tuple of LSTMStateTuples before using the native TensorFlow RNN API.
l = tf.unpack(state_placeholder, axis=0)
rnn_tuple_state = tuple(
    [tf.nn.rnn_cell.LSTMStateTuple(l[idx][0], l[idx][1])
     for idx in range(num_layers)]
)
The tuple state is then passed to the RNN via the API:
cell = tf.nn.rnn_cell.LSTMCell(state_size, state_is_tuple=True)
cell = tf.nn.rnn_cell.MultiRNNCell([cell]*num_layers, state_is_tuple=True)
outputs, state = tf.nn.dynamic_rnn(cell, x_input_batch, initial_state=rnn_tuple_state)
The state returned by dynamic_rnn is then fed back in for the next batch through the placeholder.
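A minimal sketch of that feeding loop (our illustration; the batch iteration and fetch names are hypothetical):

current_state = np.zeros((num_layers, 2, batch_size, state_size))
for x_batch in batches:
    # Fetch the outputs and the final state, then feed that state back in
    # for the next batch; the nested tuple of arrays returned for `state`
    # is converted back to the placeholder's shape when fed.
    batch_outputs, current_state = sess.run(
        [outputs, state],
        feed_dict={x_input_batch: x_batch,
                   state_placeholder: current_state})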
Tensorflow, best way to save state in RNNs? was actually my original question. The code below is how I use the state tuples.
with tf.variable_scope('decoder') as scope:
    rnn_cell = tf.nn.rnn_cell.MultiRNNCell([
        tf.nn.rnn_cell.LSTMCell(512, num_proj=256, state_is_tuple=True),
        tf.nn.rnn_cell.LSTMCell(512, num_proj=WORD_VEC_SIZE, state_is_tuple=True)
    ], state_is_tuple=True)

    state = [[tf.zeros((BATCH_SIZE, sz)) for sz in sz_outer] for sz_outer in rnn_cell.state_size]

    for t in range(TIME_STEPS):
        if t:
            last = y_[t - 1] if TRAINING else y[t - 1]
        else:
            last = tf.zeros((BATCH_SIZE, WORD_VEC_SIZE))
        y[t] = tf.concat(1, (y[t], last))
        y[t], state = rnn_cell(y[t], state)
        scope.reuse_variables()
Rather than using tf.nn.rnn_cell.LSTMStateTuple I just create lists of lists, which works fine. In this example I am not saving the state; however, you could easily have made the state out of variables and just used assign to save the values.

Tensorflow, best way to save state in RNNs?

I currently have the following code for a series of chained-together RNNs in TensorFlow. I am not using MultiRNN since I want to do something later on with the output of each layer.
for r in range(RNNS):
    with tf.variable_scope('recurent_%d' % r) as scope:
        state = [tf.zeros((BATCH_SIZE, sz)) for sz in rnn_func.state_size]
        time_outputs = [None] * TIME_STEPS
        for t in range(TIME_STEPS):
            rnn_input = getTimeStep(rnn_outputs[r - 1], t)
            time_outputs[t], state = rnn_func(rnn_input, state)
            time_outputs[t] = tf.reshape(time_outputs[t], (-1, 1, RNN_SIZE))
            scope.reuse_variables()
        rnn_outputs[r] = tf.concat(1, time_outputs)
Currently I have a fixed number of time steps. However I would like to change it to have only one timestep but remember the state between batches. I would therefore need to create a state variable for each layer and assign it the final state of each of the layers. Something like this.
for r in range(RNNS):
    with tf.variable_scope('recurent_%d' % r) as scope:
        saved_state = tf.get_variable('saved_state', ...)
        rnn_outputs[r], state = rnn_func(rnn_outputs[r - 1], saved_state)
        saved_state = tf.assign(saved_state, state)
Then for each of the layers I would need to evaluate the saved state in my sess.run call as well as calling my training function; I would need to do this for every RNN layer. This seems like kind of a hassle: I would need to track every saved state and evaluate it in run. Also, run would then need to copy the state from my GPU to host memory, which would be inefficient and unnecessary. Is there a better way of doing this?
Here is code to update the LSTM's initial state when state_is_tuple=True, by defining state variables. It also supports multiple layers.
We define two functions: one that gets the state variables initialized to zero, and one that returns an operation, which we can pass to session.run, to update the state variables with the LSTM's last hidden state.
def get_state_variables(batch_size, cell):
    # For each layer, get the initial state and make a variable out of it
    # to enable updating its value.
    state_variables = []
    for state_c, state_h in cell.zero_state(batch_size, tf.float32):
        state_variables.append(tf.contrib.rnn.LSTMStateTuple(
            tf.Variable(state_c, trainable=False),
            tf.Variable(state_h, trainable=False)))
    # Return as a tuple, so that it can be fed to dynamic_rnn as an initial state
    return tuple(state_variables)

def get_state_update_op(state_variables, new_states):
    # Add an operation to update the train states with the last state tensors
    update_ops = []
    for state_variable, new_state in zip(state_variables, new_states):
        # Assign the new state to the state variables on this layer
        update_ops.extend([state_variable[0].assign(new_state[0]),
                           state_variable[1].assign(new_state[1])])
    # Return a tuple in order to combine all update_ops into a single operation.
    # The tuple's actual value should not be used.
    return tf.tuple(update_ops)
We can use that to update the LSTM's state after each batch. Note that I use tf.nn.dynamic_rnn for unrolling:
data = tf.placeholder(tf.float32, (batch_size, max_length, frame_size))

cell_layer = tf.contrib.rnn.GRUCell(256)
cell = tf.contrib.rnn.MultiRNNCell([cell_layer] * num_layers)

# For each layer, get the initial state. states will be a tuple of LSTMStateTuples.
states = get_state_variables(batch_size, cell)

# Unroll the LSTM
outputs, new_states = tf.nn.dynamic_rnn(cell, data, initial_state=states)

# Add an operation to update the train states with the last state tensors.
update_op = get_state_update_op(states, new_states)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run([outputs, update_op], {data: ...})
The main difference to this answer is that state_is_tuple=True makes the LSTM's state a LSTMStateTuple containing two variables (cell state and hidden state) instead of just a single variable. Using multiple layers then makes the LSTM's state a tuple of LSTMStateTuples - one per layer.
Resetting to zero
When using a trained model for prediction / decoding, you might want to reset the state to zero. Then, you can make use of this function:
def get_state_reset_op(state_variables, cell, batch_size):
    # Return an operation to set each variable in a list of LSTMStateTuples to zero
    zero_states = cell.zero_state(batch_size, tf.float32)
    return get_state_update_op(state_variables, zero_states)
For example, as above:
reset_state_op = get_state_reset_op(states, cell, max_batch_size)
# Reset the state to zero before feeding input
sess.run([reset_state_op])
sess.run([outputs, update_op], {data: ...})
I am now saving the RNN states using tf.control_dependencies. Here is an example.
saved_states = [tf.get_variable('saved_state_%d' % i, shape=(BATCH_SIZE, sz), trainable=False,
                                initializer=tf.constant_initializer())
                for i, sz in enumerate(rnn.state_size)]
W = tf.get_variable('W', shape=(2 * RNN_SIZE, RNN_SIZE),
                    initializer=tf.truncated_normal_initializer(0.0, 1 / np.sqrt(2 * RNN_SIZE)))
b = tf.get_variable('b', shape=(RNN_SIZE,), initializer=tf.constant_initializer())

rnn_output, states = rnn(last_output, saved_states)
with tf.control_dependencies([tf.assign(a, b) for a, b in zip(saved_states, states)]):
    dense_input = tf.concat(1, (last_output, rnn_output))

dense_output = tf.tanh(tf.matmul(dense_input, W) + b)
last_output = dense_output + last_output
I just make sure that part of my graph is dependent on saving the state.
These two links are also related and useful for this question:
https://github.com/tensorflow/tensorflow/issues/2695
https://github.com/tensorflow/tensorflow/issues/2838

How to get Tensorflow seq2seq embedding output

I am attempting to train a sequence to sequence model using tensorflow and have been looking at their example code.
I want to be able to access the vector embeddings created by the encoder as they seem to have some interesting properties.
However, it really isn't clear to me how this can be done.
In the vector representations of words example they talk a lot about what these embeddings can be used for and then don't appear to provide a simple way of accessing them, unless I am mistaken.
Any help figuring out how to access them would be greatly appreciated.
As with all TensorFlow operations, most variables are created dynamically. There are different ways to access these variables (and their values). Here, the variable you are interested in is part of the set of trainable variables, so we can use the tf.trainable_variables() function:
for var in tf.trainable_variables():
    print(var.name)
which, for a GRU seq2seq model, gives us the following list:
embedding_rnn_seq2seq/RNN/EmbeddingWrapper/embedding:0
embedding_rnn_seq2seq/RNN/GRUCell/Gates/Linear/Matrix:0
embedding_rnn_seq2seq/RNN/GRUCell/Gates/Linear/Bias:0
embedding_rnn_seq2seq/RNN/GRUCell/Candidate/Linear/Matrix:0
embedding_rnn_seq2seq/RNN/GRUCell/Candidate/Linear/Bias:0
embedding_rnn_seq2seq/embedding_rnn_decoder/embedding:0
embedding_rnn_seq2seq/embedding_rnn_decoder/rnn_decoder/GRUCell/Gates/Linear/Matrix:0
embedding_rnn_seq2seq/embedding_rnn_decoder/rnn_decoder/GRUCell/Gates/Linear/Bias:0
embedding_rnn_seq2seq/embedding_rnn_decoder/rnn_decoder/GRUCell/Candidate/Linear/Matrix:0
embedding_rnn_seq2seq/embedding_rnn_decoder/rnn_decoder/GRUCell/Candidate/Linear/Bias:0
embedding_rnn_seq2seq/embedding_rnn_decoder/rnn_decoder/OutputProjectionWrapper/Linear/Matrix:0
embedding_rnn_seq2seq/embedding_rnn_decoder/rnn_decoder/OutputProjectionWrapper/Linear/Bias:0
This tells us that the embedding is called embedding_rnn_seq2seq/RNN/EmbeddingWrapper/embedding:0, which we can then use to retrieve a pointer to that variable in our earlier iterator:
for var in tf.trainable_variables():
    print(var.name)
    if var.name == 'embedding_rnn_seq2seq/RNN/EmbeddingWrapper/embedding:0':
        embedding_op = var
We can then pass this along with other ops to our session.run:
_, loss_t, summary, embedding = sess.run([train_op, loss, summary_op, embedding_op], feed_dict)
and we have ourselves the (batch-list of) embeddings ...
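Since the embedding is an ordinary variable rather than something computed from the placeholders, it can also be fetched on its own, without a feed_dict (a small aside, not part of the original answer):

# Fetch the full embedding matrix directly; a variable needs no feed_dict.
embedding_matrix = sess.run(embedding_op)
print(embedding_matrix.shape)  # (vocab_size, embedding_size), assuming the usual layout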
There is a related post, but it is based on tensorflow-0.6, which is quite out of date, so I update that answer for tensorflow-0.8, which is also similar to the newest version.
(* marks the lines to modify)
losses = []
outputs = []
*states = []
with ops.op_scope(all_inputs, name, "model_with_buckets"):
    for j, bucket in enumerate(buckets):
        with variable_scope.variable_scope(variable_scope.get_variable_scope(),
                                           reuse=True if j > 0 else None):
            *bucket_outputs, _, bucket_states = seq2seq(encoder_inputs[:bucket[0]],
                                                        decoder_inputs[:bucket[1]])
            outputs.append(bucket_outputs)
            if per_example_loss:
                losses.append(sequence_loss_by_example(
                    outputs[-1], targets[:bucket[1]], weights[:bucket[1]],
                    softmax_loss_function=softmax_loss_function))
            else:
                losses.append(sequence_loss(
                    outputs[-1], targets[:bucket[1]], weights[:bucket[1]],
                    softmax_loss_function=softmax_loss_function))
return outputs, losses, *states
at python/ops/seq2seq, modify embedding_attention_seq2seq()
if isinstance(feed_previous, bool):
    *outputs, states = embedding_attention_decoder(
        decoder_inputs, encoder_state, attention_states, cell,
        num_decoder_symbols, embedding_size, num_heads=num_heads,
        output_size=output_size, output_projection=output_projection,
        feed_previous=feed_previous,
        initial_state_attention=initial_state_attention)
    *return outputs, states, encoder_state

# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def decoder(feed_previous_bool):
    reuse = None if feed_previous_bool else True
    with variable_scope.variable_scope(variable_scope.get_variable_scope(), reuse=reuse):
        outputs, state = embedding_attention_decoder(
            decoder_inputs, encoder_state, attention_states, cell,
            num_decoder_symbols, embedding_size, num_heads=num_heads,
            output_size=output_size, output_projection=output_projection,
            feed_previous=feed_previous_bool,
            update_embedding_for_previous=False,
            initial_state_attention=initial_state_attention)
        return outputs + [state]

outputs_and_state = control_flow_ops.cond(feed_previous,
                                          lambda: decoder(True),
                                          lambda: decoder(False))
*return outputs_and_state[:-1], outputs_and_state[-1], encoder_state
at model/rnn/translate/seq2seq_model.py, modify __init__()
if forward_only:
    *self.outputs, self.losses, self.states = tf.nn.seq2seq.model_with_buckets(
        self.encoder_inputs, self.decoder_inputs, targets,
        self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, True),
        softmax_loss_function=softmax_loss_function)
    # If we use output projection, we need to project outputs for decoding.
    if output_projection is not None:
        for b in xrange(len(buckets)):
            self.outputs[b] = [
                tf.matmul(output, output_projection[0]) + output_projection[1]
                for output in self.outputs[b]
            ]
else:
    *self.outputs, self.losses, _ = tf.nn.seq2seq.model_with_buckets(
        self.encoder_inputs, self.decoder_inputs, targets,
        self.target_weights, buckets,
        lambda x, y: seq2seq_f(x, y, False),
        softmax_loss_function=softmax_loss_function)
at model/rnn/translate/seq2seq_model.py modify step()
if not forward_only:
    return outputs[1], outputs[2], None  # Gradient norm, loss, no outputs.
else:
    *return None, outputs[0], outputs[1:], outputs[-1]  # No gradient norm, loss, outputs.
With all of this done, we can get the encoded states by calling:
_, _, output_logits, states = model.step(sess, encoder_inputs, decoder_inputs,
                                         target_weights, bucket_id, True)
print(states)
in translate.py.
