How to manipulate Variables in TensorFlow (Python)

I want to do something as simple as a = a + b; example code follows:
import tensorflow as tf

sess = tf.InteractiveSession()
embed = tf.Variable(tf.random_uniform([10, 2], -1, 1))
saver = tf.train.Saver([embed])
saver.restore(sess, 'save/model.ckpt')
new_embed = tf.Variable(tf.random_uniform([5, 2], -1, 1))
init = tf.initialize_variables([new_embed])
sess.run(init)
embed = tf.Variable(tf.concat(0, [embed, new_embed]))
However, the last line won't work because embed ends up referring to a brand-new, uninitialized variable.
What I wish to accomplish here is to restore a variable from a file and concatenate it with a new variable, i.e. turn the [10, 2] variable into a [15, 2] variable whose first 10 rows come from the stored variable.
I was thinking of restoring the [10, 2] variable into a new variable, say old_embed, but I couldn't find a way to do so.
Any help would be appreciated.

I found a way to restore the variable to a variable with a different name:
import tensorflow as tf

sess = tf.InteractiveSession()
# Placeholder variable that will receive the stored values.
old_embed = tf.Variable(tf.constant(0.0, shape=[10, 2]))
# Map the name 'embed' in the checkpoint onto the variable old_embed.
restorer = tf.train.Saver({'embed': old_embed})
restorer.restore(sess, 'test/d.ckpt')
new_embed = tf.Variable(tf.random_uniform([5, 2], -1, 1))
init_new = tf.initialize_variables([new_embed])
sess.run(init_new)
# Concatenate along axis 0: [10, 2] + [5, 2] -> [15, 2].
embed = tf.Variable(tf.concat(0, [old_embed, new_embed]))
init_embed = tf.initialize_variables([embed])
sess.run(init_embed)
# Save the combined variable back under the name 'embed'.
saver = tf.train.Saver({'embed': embed})
saver.save(sess, 'test/d.ckpt')
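To sanity-check the result, one can list what ended up in the new checkpoint (a minimal sketch, assuming the test/d.ckpt path above):
import tensorflow as tf

# Print the name and shape of every variable stored in the checkpoint.
for name, shape in tf.contrib.framework.list_variables('test/d.ckpt'):
    print(name, shape)  # expected: embed [15, 2]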

Related

Restore tf variables in a different graph

I want to use my pretrained separable convolution (which is a part of a bigger module) in another separable convolution in another model.
In the trained module I tried
with tf.variable_scope('sep_conv_ker' + str(input_shape[-1])):
    sep_conv2d = tf.reshape(
        tf.layers.separable_conv2d(inputs_flatten, input_shape[-1],
                                   [1, input_shape[-2]],
                                   trainable=trainable),
        [inputs_flatten.shape[0], 1, input_shape[-1], INNER_LAYER_WIDTH])
and
all_variables = tf.trainable_variables()
scope1_variables = tf.contrib.framework.filter_variables(all_variables, include_patterns=['sep_conv_ker'])
sep_conv_weights_saver = tf.train.Saver(scope1_variables, sharded=True, max_to_keep=20)
Inside the session I save with
sep_conv_weights_saver.save(sess, os.path.join(LOG_DIR + MODEL_SPEC_LOG_DIR,
                            "init_weights", MODEL_SPEC_SUFFIX + 'epoch_' + str(epoch) + '.ckpt'))
But I cannot understand when and how I should load the weights into the separable convolution in the other model; it has a different name and a different scope.
Furthermore, since I'm using a predefined tf.layers layer, does that mean I need to access each individual weight in the new graph and assign it?
My current solution doesn't work; I think the weights are somehow being initialized after the assignment. Furthermore, loading a whole new graph just for a few weights seems odd, doesn't it?
### In the old graph ###
all_variables = tf.trainable_variables()
scope1_variables = tf.contrib.framework.filter_variables(all_variables, include_patterns=['sep_conv_ker'])
vars = dict((var.op.name.split("/")[-1] + str(idx), var) for idx,var in enumerate(scope1_variables))
sep_conv_weights_saver = tf.train.Saver(vars, sharded=True, max_to_keep=20)
In the new graph there is a function that basically takes the variables from the old graph and assigns them; loading the meta_graph is redundant:
def load_pretrained(sess):
    sep_conv2d_vars = [var for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
                       if "sep_conv_ker" in var.op.name]
    var_dict = dict((var.op.name.split("/")[-1] + str(idx), var)
                    for idx, var in enumerate(sep_conv2d_vars))
    new_saver = tf.train.import_meta_graph(
        tf.train.latest_checkpoint('log/train/sep_conv_ker/global_neighbors40/init_weights') + '.meta')
    # saver = tf.train.Saver(var_list=var_dict)
    new_saver.restore(sess,
                      tf.train.latest_checkpoint('log/train/sep_conv_ker/global_neighbors40/init_weights'))
    graph = tf.get_default_graph()
    sep_conv2d_trained = dict(("".join(var.op.name.split("/")[-2:]), var)
                              for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
                              if "sep_conv_ker_init" in var.op.name)
    for var in sep_conv2d_vars:
        # Note: tf.assign only builds an assign op; nothing runs it in a session,
        # which is likely why the weights appear to be overwritten later.
        tf.assign(var, sep_conv2d_trained["".join(var.op.name.split("/")[-2:])])
You need to make sure that the variables have the same names in the checkpoint file and in the graph where you load them. You can write a script that converts the variable names.
With tf.contrib.framework.list_variables(ckpt) you can find out which variables of which shapes are in the checkpoint, and create corresponding variables with the new names (a regex can fix the names; see the sketch after these steps) and the correct shapes.
Then you load the original values with tf.contrib.framework.load_checkpoint(ckpt) and create assign ops tf.assign(var, loaded) that write the saved values into the variables with the new names.
Run the assign ops in a session.
Save the new variables.
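For renames more involved than a single prefix swap, a regular expression works too (a sketch; the pattern is a hypothetical example, and name is a checkpoint variable name obtained from list_variables):
import re

# Rewrite any name under "regression/" to live under "foo/bar/" instead.
new_name = re.sub(r"^regression/", "foo/bar/", name)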
Minimal example:
Original model (variables in scope "regression"):
import tensorflow as tf
x = tf.placeholder(tf.float32, [None, 3])
regression = tf.layers.dense(x, 1, name="regression")
session = tf.Session()
session.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.trainable_variables())
saver.save(session, './model')
Renaming script:
import tensorflow as tf

assign_ops = []
reader = tf.contrib.framework.load_checkpoint("./model")
for name, shape in tf.contrib.framework.list_variables("./model"):
    new_name = name.replace("regression/", "foo/bar/")
    new_var = tf.get_variable(new_name, shape)
    assign_ops.append(tf.assign(new_var, reader.get_tensor(name)))
session = tf.Session()
saver = tf.train.Saver(tf.trainable_variables())
session.run(assign_ops)
saver.save(session, './model-renamed')
Model where you load the renamed variables (the same variables, now in scope "foo/bar"):
import tensorflow as tf

with tf.variable_scope("foo"):
    x = tf.placeholder(tf.float32, [None, 3])
    regression = tf.layers.dense(x, 1, name="bar")
session = tf.Session()
session.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.trainable_variables())
saver.restore(session, './model-renamed')

Why does this TensorFlow snippet throw an error in feeding?

Code
#!/usr/bin/env python3
import tensorflow as tf
import numpy as np

def customOps(n):
    x = tf.placeholder(tf.float32)
    v1 = tf.reduce_sum(x, 1)
    v2 = tf.reduce_sum(x, 0)
    v = tf.nn.softmax(tf.concat([v1, v2], 0))
    index = np.argmax(v)
    if index > n / 3:
        finalval = tf.norm(v1 - v2, ord='euclidean')
    else:
        finalval = tf.norm(v1 + v2, ord='euclidean')
    return finalval

if __name__ == '__main__':
    mat = np.asarray([[0, 1], [1, 0]], dtype=np.float32)
    n = mat.shape[0]
    finalVal = customOps(n)
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    outVal = sess.run(finalVal, feed_dict={x: mat})
    print(outVal)
    sess.close()
Error Thrown
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_5' with dtype float [[{{node Placeholder_5}} = Placeholder[dtype=DT_FLOAT, shape=<unknown>, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
The error is thrown at the sess.run(init) line in the above snippet. I am feeding a float-type array through feed_dict, and I am not sure why the error is being thrown.
Where is the error and why?
Why the error:
Because you ran the same snippet multiple times in an unclean graph (i.e., your graph contains multiple copies of the network).
The reason I can say this is the _5 at the end of the node name in the error message. TF assigns a default name to every tensor in the graph, using incremental indices when a name is already taken. Placeholder_5 means the same graph contains at least 5 Placeholder instances without a custom name, which, given your code, should be impossible unless you called the function multiple times without cleaning up the graph.
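A quick way to see this auto-naming in action (a minimal TF 1.x sketch):
import tensorflow as tf

# Unnamed placeholders get auto-incremented default names.
p0 = tf.placeholder(tf.float32)
p1 = tf.placeholder(tf.float32)
print(p0.name, p1.name)  # Placeholder:0 Placeholder_1:0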
How to fix it:
Run in a clean graph: Put tf.reset_default_graph() before finalVal = customOps(n).
Note: Your code has more issues than that (for example, you reference x in the main block, but x is a local variable of customOps), but the cause of the error you see is the one stated above.
Below you find a tested and working version of your code that addresses both issues.
import tensorflow as tf
import numpy as np

def customOps(n):
    x = tf.placeholder(tf.float32)
    v1 = tf.reduce_sum(x, 1)
    v2 = tf.reduce_sum(x, 0)
    v = tf.nn.softmax(tf.concat([v1, v2], 0))
    index = np.argmax(v)
    if index > n / 3:
        finalval = tf.norm(v1 - v2, ord='euclidean')
    else:
        finalval = tf.norm(v1 + v2, ord='euclidean')
    return x, finalval

if __name__ == '__main__':
    mat = np.asarray([[0, 1], [1, 0]], dtype=np.float32)
    n = mat.shape[0]
    tf.reset_default_graph()
    x, finalVal = customOps(n)
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    outVal = sess.run(finalVal, feed_dict={x: mat})
    print(outVal)
    sess.close()

Tensorflow Saver restores all variables no matter which ones I specified

I'm trying to save and restore a subset of the variables in a TensorFlow graph, so that everything I don't need is discarded and its weights don't take up memory. The common advice to pass a list or dict of the desired variables to tf.train.Saver doesn't work: the saver restores all the variables no matter what.
A minimal working example:
import os
import tensorflow as tf

sess = tf.Session()
with sess.as_default():
    v1 = tf.get_variable("v1", [5, 5, 3])
    v2 = tf.get_variable("v2", [5, 5, 3])
    saver = tf.train.Saver([v2])
    initializer2 = tf.variables_initializer([v1, v2])
    sess.run(initializer2)
    saver.save(sess, '/path/to/tf_model')

sess2 = tf.Session()
checkpoint = '/path/to/tf_model.meta'
saver.restore(sess2, tf.train.latest_checkpoint(os.path.dirname(checkpoint)))
with sess2.as_default(), sess2.graph.as_default():
    loaded_vars = tf.trainable_variables()
    print(loaded_vars)
outputs
[<tf.Variable 'v1:0' shape=(5, 5, 3) dtype=float32_ref>,
<tf.Variable 'v2:0' shape=(5, 5, 3) dtype=float32_ref>]
Nevertheless, print(saver._var_list) outputs
[<tf.Variable 'v2:0' shape=(5, 5, 3) dtype=float32_ref>]
What's wrong here?
This is what you want to do. Please examine the code carefully.
To save the selected variables
import tensorflow as tf
tf.reset_default_graph()
# =============================================================================
# to save
# =============================================================================
# create variables
v1 = tf.get_variable(name="v1", initializer=[5, 5, 3])
v2 = tf.get_variable(name="v2", initializer=[5, 5, 3])
# initialize variables
init_op = tf.global_variables_initializer()
# ops to save variable v2
saver = tf.train.Saver({"my_v2": v2})
with tf.Session() as sess:
    sess.run(init_op)
    save_path = saver.save(sess, './tf_vars/model.ckpt')
    print("Model saved in file: %s" % save_path)
'Output':
Model saved in file: ./tf_vars/model.ckpt
To restore the saved variables
# =============================================================================
# to restore
# =============================================================================
# Create some variables.
v1 = tf.Variable(initial_value=[0, 0, 0], name="v1")
v2 = tf.Variable(initial_value=[0, 0, 0], name="v2")
# initialize variables
init_op = tf.global_variables_initializer()
# ops to restore variable v2.
saver = tf.train.Saver({"my_v2": v2})
with tf.Session() as sess:
    sess.run(init_op)
    # Restore variables from disk.
    saver.restore(sess, './tf_vars/model.ckpt')
    print("v1: %s" % v1.eval())
    print("v2: %s" % v2.eval())
    print("V2 variable restored.")
'Output':
v1: [0 0 0]
v2: [5 5 3]
V2 variable restored.
tf.trainable_variables() returns the list of variable objects stored in the graph. By default, both of your variables v1 and v2 are stored in the graph. When you use saver = tf.train.Saver([v2]), you only save the variable v2; no value is saved for v1. But the variable v1 still exists in your graph. That is why you see all the variables when you print(loaded_vars). You can check whether a variable actually has a value (i.e. is initialized) using this piece of code:
uninitialized_vars = []
for var in tf.all_variables():
    try:
        sess.run(var)
    except tf.errors.FailedPreconditionError:
        uninitialized_vars.append(var)
print(uninitialized_vars)
Hope this helps!
Also, you do not need to initialize all the variables (via tf.global_variables_initializer) if you know which variables to initialize:
tf.reset_default_graph()
# Create some variables.
v1 = tf.get_variable("v1", [3], initializer = tf.zeros_initializer)
v2 = tf.get_variable("v2", [5], initializer = tf.zeros_initializer)
# Add ops to save and restore only `v2` using the name "v2"
saver = tf.train.Saver({"v2": v2})
# Use the saver object normally after that.
with tf.Session() as sess:
    # Initialize v1 since the saver will not.
    v1.initializer.run()
    saver.restore(sess, "/tmp/model.ckpt")
    print("v1 : %s" % v1.eval())
    print("v2 : %s" % v2.eval())

Why is this TensorFlow tutorial code not working?

I'm trying an LSTM tutorial from someone's book, but it doesn't work. What's the problem?
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
import pprint

pp = pprint.PrettyPrinter(indent=4)
sess = tf.InteractiveSession()
a = [1, 0, 0, 0]
b = [0, 1, 0, 0]
c = [0, 0, 1, 0]
d = [0, 0, 0, 1]
init = tf.global_variables_initializer()
with tf.variable_scope('one_cell') as scope:
    hidden_size = 2
    cell = tf.contrib.rnn.BasicRNNCell(num_units=hidden_size)
    print(cell.output_size, cell.state_size)
    x_data = np.array([[a]], dtype=np.float32)
    pp.pprint(x_data)
    outputs, _states = tf.nn.dynamic_rnn(cell, x_data, dtype=tf.float32)
    sess.run(init)
    pp.pprint(outputs.eval())
The error message is below. How can I fix this?
Attempting to use uninitialized value one_cell/rnn/basic_rnn_cell/weights
[[Node: one_cell/rnn/basic_rnn_cell/weights/read = Identity[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"](one_cell/rnn/basic_rnn_cell/weights)]]
You haven't initialized some graph variables, as the error says. Reorder your code like this and it will work:
outputs, _states = tf.nn.dynamic_rnn(cell, x_data, dtype=tf.float32)
init=tf.global_variables_initializer()
sess.run(init)
Best practice is to define init at the very end of your graph construction, just before sess.run.
EDIT: Refer to What does tf.global_variables_initializer() do under the hood? for more insights.
You define the operation init before creating your variables. Thus this operation will be performed only on the variables defined at that time, even if you run it after creating your variables.
So just move the definition of init and you will be fine.
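A minimal sketch that demonstrates this point (TF 1.x; the initializer only covers variables that exist when it is created):
import tensorflow as tf

v1 = tf.Variable(1.0)
init = tf.global_variables_initializer()  # only knows about v1
v2 = tf.Variable(2.0)                     # created after init, so not covered

sess = tf.Session()
sess.run(init)
print(sess.run(v1))    # 1.0
# sess.run(v2) would raise FailedPreconditionError: v2 is uninitialized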

How to restore a Tensorflow model

I'm learning to use TensorFlow, and I wrote this Python script that learns from the MNIST database, saves the model, and makes a prediction on an image:
X = tf.placeholder(tf.float32, [None, 28, 28, 1])
W = tf.Variable(tf.zeros([784, 10], name="W"))
b = tf.Variable(tf.zeros([10]), name="b")
Y = tf.nn.softmax(tf.matmul(tf.reshape(X, [-1, 784]), W) + b)
# ...
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init)
    # ... learning loop
    saver.save(sess, "/tmp/my-model")
    # Make a prediction with an image
    im = numpy.asarray(Image.open("digit.png")) / 255
    im = im[numpy.newaxis, :, :, numpy.newaxis]
    dict = {X: im}
    print("Prediction: ", numpy.array(sess.run(Y, dict)).argmax())
The prediction is correct, but I can't restore the saved model for reusing.
I wrote this other script that tries to restore the model and make the same prediction:
X = tf.placeholder(tf.float32, [None, 28, 28, 1])
W = tf.Variable(tf.zeros([784, 10]), name="W")
b = tf.Variable(tf.ones([10]) / 10, name="b")
Y = tf.nn.softmax(tf.matmul(tf.reshape(X, [-1, 784]), W) + b)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    saver = tf.train.import_meta_graph('/tmp/my-model.meta')
    saver.restore(sess, tf.train.latest_checkpoint('/tmp/'))
    # Make a prediction with an image
    im = numpy.asarray(Image.open("digit.png")) / 255
    im = im[numpy.newaxis, :, :, numpy.newaxis]
    dict = {X: im}
    print("Prediction: ", numpy.array(sess.run(Y, dict)).argmax())
but the prediction is wrong.
How can I restore my variables and make a prediction?
Thanks
When testing, commenting out this line
# saver = tf.train.import_meta_graph('/tmp/my-model.meta')
and building the saver with tf.train.Saver() instead will solve your problem.
import_meta_graph creates a new graph from the model saved in the '.meta' file, and that new model co-exists with the model you created manually. The returned saver is tied to the new model, so saver.restore restores the trained weights into the new model, while sess.run executes the model you created manually.
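Putting it together, a minimal sketch of a corrected restore script (same assumptions as the question: a checkpoint under /tmp and a digit.png file; no initializer is needed, since restore supplies the values):
import tensorflow as tf
import numpy
from PIL import Image

# Rebuild the graph exactly as in the training script. Note that in the
# training script name="W" was passed to tf.zeros, not tf.Variable, so the
# variable's checkpoint name is the default "Variable"; the definitions
# here must reproduce that to match the checkpoint.
X = tf.placeholder(tf.float32, [None, 28, 28, 1])
W = tf.Variable(tf.zeros([784, 10], name="W"))
b = tf.Variable(tf.zeros([10]), name="b")
Y = tf.nn.softmax(tf.matmul(tf.reshape(X, [-1, 784]), W) + b)

with tf.Session() as sess:
    # This saver is tied to the graph built above, not to an imported meta graph.
    saver = tf.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint('/tmp/'))
    im = numpy.asarray(Image.open("digit.png")) / 255
    im = im[numpy.newaxis, :, :, numpy.newaxis]
    print("Prediction: ", sess.run(Y, {X: im}).argmax())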
