tensorflow reload model to session from graph def - python

I have an exported TensorFlow SavedModel which is used for serving.
I want to "reload" it from a GraphDef object, which I can broadcast for use with Spark.
I load it using:

sess = tf.Session()
tf.saved_model.loader.load(sess, ['serve'], folder)
sess.run('dense_1/Softmax:0', {'input_1:0': input_image})  # works
Then, to load it again into a different session, I tried:

graph_def = sess.graph.as_graph_def()

# then, to load
with tf.Session(graph=tf.Graph()) as sess:
    tf.import_graph_def(graph_def, name="")
    sess.run('dense_1/Softmax:0', {'input_1:0': input_image})
I get the error:
FailedPreconditionError: Attempting to use uninitialized value dense_1/kernel
I've tried adding

sess.run(tf.global_variables_initializer())

but I still get the same error.
What am I missing?

You cannot copy variable values from one session to another through the GraphDef. Variable values are stored within the session; the graph definition only contains the structure of the graph. You need to "export" the variable values from one session and then restore them in the other. If you want to avoid checkpoints or similar tooling, the following pair of functions should work in most cases:
import tensorflow as tf

# Gets variable values as a list of (name, value) pairs
def get_variable_values(sess):
    # Find variable operations
    var_ops = [op for op in sess.graph.get_operations() if op.type == 'VariableV2']
    # Read each variable's value; uninitialized variables are skipped.
    # Collecting (name, value) together keeps names and values aligned
    # even when some variables are skipped.
    var_values = []
    for v in var_ops:
        try:
            var_values.append((v.name, sess.run(v.outputs[0])))
        except tf.errors.FailedPreconditionError:
            pass
    return var_values
# Restore the variable values
def restore_var_values(sess, var_values):
    # Find the assign operation for each variable
    assign_ops = [sess.graph.get_operation_by_name(v + '/Assign') for v, _ in var_values]
    # Run the assign operations, feeding the saved values
    sess.run(assign_ops, feed_dict={op.inputs[1]: val
                                    for op, (_, val) in zip(assign_ops, var_values)})
# Test
with tf.Graph().as_default(), tf.Session() as sess:
    v = tf.Variable(0., tf.float32, name='a')
    v.load(3., sess)
    var_values = get_variable_values(sess)
    graph_def = tf.get_default_graph().as_graph_def()

with tf.Graph().as_default(), tf.Session() as sess:
    tf.import_graph_def(graph_def, name="")
    restore_var_values(sess, var_values)
    print(sess.run('a:0'))
    # 3.0
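
Back to the Spark part of the question: a GraphDef is a protobuf and the extracted values are plain NumPy arrays, so both can be broadcast to executors and the session rebuilt there. Here is a rough sketch, assuming a SparkContext named sc and an RDD of input images named images_rdd (both hypothetical names), and that the two helper functions above are available on the workers:

# Hypothetical Spark usage: sc and images_rdd are assumed to exist.
graph_def_bytes = sess.graph.as_graph_def().SerializeToString()
bc = sc.broadcast((graph_def_bytes, get_variable_values(sess)))

def predict_partition(images):
    import tensorflow as tf
    gd = tf.GraphDef()
    gd.ParseFromString(bc.value[0])
    # Rebuild the graph and restore the variable values on the executor
    with tf.Graph().as_default(), tf.Session() as sess:
        tf.import_graph_def(gd, name="")
        restore_var_values(sess, bc.value[1])
        for image in images:
            yield sess.run('dense_1/Softmax:0', {'input_1:0': image})

predictions = images_rdd.mapPartitions(predict_partition)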

Related

How to assign new values to a tensorflow constant?

I am loading a TensorFlow model from a .pb file. I want to change the weights of all the layers. I am able to extract the weights, but I am not able to change them.
I converted the graph_def into a TensorFlow graph, but even then I cannot assign new values to the weights, because the weights are stored in tensors of type "Const".
b = graph_tf.get_tensor_by_name("Variable_1:0")
tf.assign(b, np.ones((1,1,64,64)))
I am getting the following error:
AttributeError: 'Tensor' object has no attribute 'assign'
Please provide a way to resolve this issue. Thanks in advance.
Here is one way you can achieve something like that. You want to replace some constant operations with variables initialized to the value of those operations, so you can first extract those constant values and then create the graph with the variables initialized to those. See the example below.
import tensorflow as tf

# Example graph
with tf.Graph().as_default():
    inp = tf.placeholder(tf.float32, [None, 3], name='Input')
    w = tf.constant([[1.], [2.], [3.]], tf.float32, name='W')
    out = tf.squeeze(inp @ w, 1, name='Output')
    gd = tf.get_default_graph().as_graph_def()

# Extract weight values
with tf.Graph().as_default():
    w, = tf.graph_util.import_graph_def(gd, return_elements=['W:0'])
    # Get the constant weight values
    with tf.Session() as sess:
        w_val = sess.run(w)
    # Alternatively, since it is a constant,
    # you can get the values from the operation attribute directly
    w_val = tf.make_ndarray(w.op.get_attr('value'))

# Make new graph
with tf.Graph().as_default():
    # Make variables initialized with stored values
    w = tf.Variable(w_val, name='W')
    init_op = tf.global_variables_initializer()
    # Import graph
    inp, out = tf.graph_util.import_graph_def(
        gd, input_map={'W:0': w},
        return_elements=['Input:0', 'Output:0'])
    # Change value operation
    w_upd = w[2].assign([5.])
    # Test
    with tf.Session() as sess:
        sess.run(init_op)
        print(sess.run(w))
        # [[1.]
        #  [2.]
        #  [3.]]
        sess.run(w_upd)
        print(sess.run(w))
        # [[1.]
        #  [2.]
        #  [5.]]
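
If the goal is to write the modified weights back out as a .pb, one option (not part of the original answer, just a common follow-up) is to freeze the updated variable back into a constant with tf.graph_util.convert_variables_to_constants. A sketch, to be run inside the "new graph" block above:

# Sketch: freeze the updated variable back into a constant graph
# and serialize it; 'updated_model.pb' is a placeholder path.
with tf.Session() as sess:
    sess.run(init_op)
    sess.run(w_upd)
    frozen_gd = tf.graph_util.convert_variables_to_constants(
        sess, tf.get_default_graph().as_graph_def(), [out.op.name])
with open('updated_model.pb', 'wb') as f:
    f.write(frozen_gd.SerializeToString())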

Restore tf variables in a different graph

I want to use my pretrained separable convolution (which is part of a bigger module) in a separable convolution in another model.
In the trained module I tried

with tf.variable_scope('sep_conv_ker' + str(input_shape[-1])):
    sep_conv2d = tf.reshape(
        tf.layers.separable_conv2d(inputs_flatten, input_shape[-1],
                                   [1, input_shape[-2]],
                                   trainable=trainable),
        [inputs_flatten.shape[0], 1, input_shape[-1], INNER_LAYER_WIDTH])
and

all_variables = tf.trainable_variables()
scope1_variables = tf.contrib.framework.filter_variables(all_variables, include_patterns=['sep_conv_ker'])
sep_conv_weights_saver = tf.train.Saver(scope1_variables, sharded=True, max_to_keep=20)

Inside the session I run:

sep_conv_weights_saver.save(sess, os.path.join(LOG_DIR + MODEL_SPEC_LOG_DIR,
                                               "init_weights", MODEL_SPEC_SUFFIX + 'epoch_' + str(epoch) + '.ckpt'))
But I cannot understand when and how I should load the weights into the separable convolution in the other module; it has a different name and a different scope.
Furthermore, since I'm using a predefined tf.layers layer, does that mean I need to access each individual weight in the new graph and assign it?
My current solution doesn't work, and I think the weights are somehow being initialized after the assignment. Also, loading a whole new graph just for a few weights seems weird, doesn't it?
###IN THE OLD GRAPH###
all_variables = tf.trainable_variables()
scope1_variables = tf.contrib.framework.filter_variables(all_variables, include_patterns=['sep_conv_ker'])
vars = dict((var.op.name.split("/")[-1] + str(idx), var) for idx,var in enumerate(scope1_variables))
sep_conv_weights_saver = tf.train.Saver(vars, sharded=True, max_to_keep=20)
In the new graph there is a function that basically takes the variables from the old graph and assigns them; loading the meta_graph is redundant:
def load_pretrained(sess):
    sep_conv2d_vars = [var for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if ("sep_conv_ker" in var.op.name)]
    var_dict = dict((var.op.name.split("/")[-1] + str(idx), var) for idx, var in enumerate(sep_conv2d_vars))
    new_saver = tf.train.import_meta_graph(
        tf.train.latest_checkpoint('log/train/sep_conv_ker/global_neighbors40/init_weights') + '.meta')
    # saver = tf.train.Saver(var_list=var_dict)
    new_saver.restore(sess,
                      tf.train.latest_checkpoint('log/train/sep_conv_ker/global_neighbors40/init_weights'))
    graph = tf.get_default_graph()
    sep_conv2d_trained = dict(("".join(var.op.name.split("/")[-2:]), var) for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if ("sep_conv_ker_init" in var.op.name))
    for var in sep_conv2d_vars:
        tf.assign(var, sep_conv2d_trained["".join(var.op.name.split("/")[-2:])])
You need to make sure that the variables have the same names in the checkpoint file and in the graph where you load them. You can write a script that converts the variable names:
With tf.contrib.framework.list_variables(ckpt), you can find out what variables of what shapes you have in the checkpoint, and create corresponding variables with the new names (I believe you can write a regex that fixes the names) and the correct shapes.
Then you load the original values with tf.contrib.framework.load_checkpoint(ckpt) and create assign ops tf.assign(var, loaded) that assign the saved values to the variables with the new names.
Run the assign ops in a session.
Save the new variables.
Minimum example:
Original model (variables in scope "regression"):
import tensorflow as tf
x = tf.placeholder(tf.float32, [None, 3])
regression = tf.layers.dense(x, 1, name="regression")
session = tf.Session()
session.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.trainable_variables())
saver.save(session, './model')
Renaming script:
import tensorflow as tf

assign_ops = []
reader = tf.contrib.framework.load_checkpoint("./model")
for name, shape in tf.contrib.framework.list_variables("./model"):
    new_name = name.replace("regression/", "foo/bar/")
    new_var = tf.get_variable(new_name, shape)
    assign_ops.append(tf.assign(new_var, reader.get_tensor(name)))
session = tf.Session()
saver = tf.train.Saver(tf.trainable_variables())
session.run(assign_ops)
saver.save(session, './model-renamed')
Model where you load the renamed variables (the same variables in scope "foo/bar"):
import tensorflow as tf

with tf.variable_scope("foo"):
    x = tf.placeholder(tf.float32, [None, 3])
    regression = tf.layers.dense(x, 1, name="bar")
session = tf.Session()
session.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.trainable_variables())
saver.restore(session, './model-renamed')
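
To sanity-check the rename before restoring (an optional step, not part of the original answer), you can list what ended up in the new checkpoint with the same list_variables helper used above:

import tensorflow as tf

# Should now print names under "foo/bar/..." instead of "regression/...".
for name, shape in tf.contrib.framework.list_variables('./model-renamed'):
    print(name, shape)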

Attempting to use uninitialized variable - tensorboard

I am just starting to play around with TensorBoard and want to create a simple example where a loop calls a function. Inside that function I have a tensor variable that gets incremented by one and then added to a summary.
I am getting a FailedPreconditionError: Attempting to use uninitialized value x_scalar
But I thought I was initializing x_scalar with the tf.global_variables_initializer() and sess.run(init) lines below. What is the proper way to initialize it?
import tensorflow as tf

tf.reset_default_graph()   # To clear the defined variables and operations of the previous cell

# create the scalar variable
x_scalar = tf.get_variable('x_scalar', shape=[], initializer=tf.truncated_normal_initializer(mean=0, stddev=1))

# ____step 1:____ create the scalar summary
first_summary = tf.summary.scalar(name='My_first_scalar_summary', tensor=x_scalar)

step = 1
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    writer = tf.summary.FileWriter('./graphs', sess.graph)
    sess.run(x_scalar.assign(1))
    print(sess.run(x_scalar))
    print("---------------------------")

def main():
    global init
    global first_summary
    global step
    # launch the graph in a session
    # with tf.Session() as sess:
    #     # ____step 2:____ creating the writer inside the session
    #     writer = tf.summary.FileWriter('./graphs', sess.graph)
    for s in range(100):
        func()

def func():
    global init
    global first_summary
    global step
    global x_scalar
    with tf.Session() as sess:
        # ____step 2:____ creating the writer inside the session
        # loop over several initializations of the variable
        sess.run(x_scalar.assign(x_scalar + 1))
        # ____step 3:____ evaluate the scalar summary
        summary = sess.run(first_summary)
        # ____step 4:____ add the summary to the writer (i.e. to the event file)
        writer.add_summary(summary, step)
        step = step + 1
    print('Done with writing the scalar summary')

if __name__ == '__main__':
    main()
You initialized your variable in a different tf.Session(). When you use tf.Session() as a context manager, the session automatically closes after the block of code completes.
You could use a checkpoint and MetaGraph to save your graph and weights and then load them into your newly created session.
Or you can try passing a single session around:
sess = tf.Session()
sess.run([CODE])
sess.run([CODE])
sess.run([CODE])
sess.run([CODE])
sess.close()
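
Applied to the code in the question, that might look like the following sketch (reusing x_scalar, first_summary, and step from above):

# One session shared by the whole program, so the variable
# initialized by init is still alive when func() runs.
sess = tf.Session()
sess.run(init)
writer = tf.summary.FileWriter('./graphs', sess.graph)

def func():
    global step
    sess.run(x_scalar.assign(x_scalar + 1))
    summary = sess.run(first_summary)
    writer.add_summary(summary, step)
    step = step + 1

for s in range(100):
    func()
writer.close()
sess.close()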

Can tensorflow Saver be used in different graphs with the same structure

The network structure has already been loaded into the default global graph. I want to create another graph with the same structure and load checkpoints into this graph.
With the code below, the last line throws ValueError: No variables to save. However, the second line works fine. Why? Does the GraphDef returned by as_graph_def() contain the variable definitions/names?
inference_graph_def = tf.get_default_graph().as_graph_def()
saver = tf.train.Saver()
with tf.Graph().as_default():
    tf.import_graph_def(inference_graph_def)
    saver1 = tf.train.Saver()
With the code below, the last line throws Cannot interpret feed_dict key as Tensor: The name 'save/Const:0' refers to a Tensor which does not exist. However, it works fine with the third line removed.
inference_graph_def = tf.get_default_graph().as_graph_def()
saver = tf.train.Saver()
with tf.Graph().as_default():
    tf.import_graph_def(inference_graph_def)
    with session.Session() as sess:
        saver.restore(sess, checkpoint_path)
So, does this mean Saver cannot work in different graphs even though they have the same structure?
Any help would be appreciated~
Here's an example of using a MetaGraphDef, which unlike GraphDef saves variable collections, to initialize a new graph using a previously saved graph.
import tensorflow as tf

CHECKPOINT_PATH = "/tmp/first_graph_checkpoint"

with tf.Graph().as_default():
    some_variable = tf.get_variable(
        name="some_variable",
        shape=[2],
        dtype=tf.float32)
    init_op = tf.global_variables_initializer()
    first_meta_graph = tf.train.export_meta_graph()
    first_graph_saver = tf.train.Saver()
    with tf.Session() as session:
        init_op.run()
        print("Initialized value in first graph", some_variable.eval())
        first_graph_saver.save(
            sess=session,
            save_path=CHECKPOINT_PATH)

with tf.Graph().as_default():
    tf.train.import_meta_graph(first_meta_graph)
    second_graph_saver = tf.train.Saver()
    with tf.Session() as session:
        second_graph_saver.restore(
            sess=session,
            save_path=CHECKPOINT_PATH)
        print("Variable value after restore", tf.global_variables()[0].eval())
Prints something like:
Initialized value in first graph [-0.98926258 -0.09709156]
Variable value after restore [-0.98926258 -0.09709156]
Note that the checkpoint is still important! Loading the MetaGraph does not restore the values of Variables (it doesn't contain those values), just the bookkeeping which tracks their existence (collections). The SavedModel format addresses this, bundling MetaGraphs with checkpoints and other metadata for running them.
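
For reference, a minimal TF1 SavedModel round trip might look like this sketch (the export directory and tensor names are placeholders):

import tensorflow as tf

EXPORT_DIR = "/tmp/saved_model_example"

with tf.Graph().as_default(), tf.Session() as session:
    x = tf.placeholder(tf.float32, [None, 2], name="x")
    w = tf.get_variable("w", shape=[2], dtype=tf.float32)
    y = tf.identity(x * w, name="y")
    session.run(tf.global_variables_initializer())
    # Writes the MetaGraph, the variable values, and a signature together.
    tf.saved_model.simple_save(session, EXPORT_DIR,
                               inputs={"x": x}, outputs={"y": y})

with tf.Graph().as_default(), tf.Session() as session:
    # Restores both the graph structure and the variable values.
    tf.saved_model.loader.load(session, ["serve"], EXPORT_DIR)
    print(session.run("y:0", {"x:0": [[1., 2.]]}))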
Edit: By popular demand, here's an example of doing the same thing with a GraphDef. I don't recommend it. Since none of the collections are restored when the GraphDef is loaded, we have to manually specify the Variables we want the Saver to restore; the "import/" default naming scheme is easy enough to fix with a name='' argument to import_graph_def, but removing it isn't super helpful since you'd need to manually fill in the variables collection if you wanted the Saver to work "automatically". Instead I've chosen to specify a mapping manually when creating the Saver.
import tensorflow as tf

CHECKPOINT_PATH = "/tmp/first_graph_checkpoint"

with tf.Graph().as_default():
    some_variable = tf.get_variable(
        name="some_variable",
        shape=[2],
        dtype=tf.float32)
    init_op = tf.global_variables_initializer()
    first_graph_def = tf.get_default_graph().as_graph_def()
    first_graph_saver = tf.train.Saver()
    with tf.Session() as session:
        init_op.run()
        print("Initialized value in first graph", some_variable.eval())
        first_graph_saver.save(
            sess=session,
            save_path=CHECKPOINT_PATH)

with tf.Graph().as_default():
    tf.import_graph_def(first_graph_def)
    variable_to_restore = tf.get_default_graph().get_tensor_by_name(
        "import/some_variable:0")
    second_graph_saver = tf.train.Saver(var_list={
        "some_variable": variable_to_restore
    })
    with tf.Session() as session:
        second_graph_saver.restore(
            sess=session,
            save_path=CHECKPOINT_PATH)
        print("Variable value after restore", variable_to_restore.eval())

Tensorflow ValueError: No variables to save from

I have written a TensorFlow CNN and it is already trained. I wish to restore it to run on a few samples, but unfortunately it's spitting out:
ValueError: No variables to save
My eval code can be found here:
import tensorflow as tf
import main
import Process
import Input

eval_dir = "/Users/Zanhuang/Desktop/NNP/model.ckpt-30"
checkpoint_dir = "/Users/Zanhuang/Desktop/NNP/checkpoint"

init_op = tf.initialize_all_variables()
saver = tf.train.Saver()

def evaluate():
    with tf.Graph().as_default() as g:
        sess.run(init_op)
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        saver.restore(sess, eval_dir)
        images, labels = Process.eval_inputs(eval_data = eval_data)
        forward_propgation_results = Process.forward_propagation(images)
        top_k_op = tf.nn.in_top_k(forward_propgation_results, labels, 1)
        print(top_k_op)

def main(argv=None):
    evaluate()

if __name__ == '__main__':
    tf.app.run()
The tf.train.Saver must be created after the variables that you want to restore (or save). Additionally it must be created in the same graph as those variables.
Assuming that Process.forward_propagation(…) also creates the variables in your model, adding the saver creation after this line should work:
forward_propgation_results = Process.forward_propagation(images)
In addition, you must pass the new tf.Graph that you created to the tf.Session constructor so you'll need to move the creation of sess inside that with block as well.
The resulting function will be something like:
def evaluate():
    with tf.Graph().as_default() as g:
        images, labels = Process.eval_inputs(eval_data = eval_data)
        forward_propgation_results = Process.forward_propagation(images)
        init_op = tf.initialize_all_variables()
        saver = tf.train.Saver()
        top_k_op = tf.nn.in_top_k(forward_propgation_results, labels, 1)
        with tf.Session(graph=g) as sess:
            sess.run(init_op)
            saver.restore(sess, eval_dir)
            print(sess.run(top_k_op))
Simply put, there should be at least one tf.Variable defined before you create your saver object.
You can get the above code running by adding the following line of code before the saver object definition.
The code you need to add comes between the two ###.
import tensorflow as tf
import main
import Process
import Input
eval_dir = "/Users/Zanhuang/Desktop/NNP/model.ckpt-30"
checkpoint_dir = "/Users/Zanhuang/Desktop/NNP/checkpoint"
init_op = tf.initialize_all_variables()
### Here Comes the fake variable that makes defining a saver object possible.
_ = tf.Variable(initial_value='fake_variable')
###
saver = tf.train.Saver()
...
Note that since TF 0.11 (a long time ago, yet after the currently accepted answer was written), tf.train.Saver has accepted a defer_build argument in its constructor that allows you to define variables after it has been constructed. However, you then need to call its build member function once all variables have been added, typically just before finalizing your graph.
saver = tf.train.Saver(defer_build=True)
# ... build your graph here ...
saver.build()
tf.get_default_graph().finalize()
# now entering the training loop
