TensorFlow: reuse at inference time? - python

Do I need to set reuse=True in TensorFlow when training is finished and I run inference?
I have a network like this:
def __build_net(self, placeholder, reuse=False):
    with tf.variable_scope('siamse', reuse=reuse):
        layer = tf.layers.dense(placeholder, 3000, activation=tf.nn.leaky_relu)
        layer = tf.layers.batch_normalization(layer)
        embedding = tf.layers.dense(layer, 300, activation=tf.nn.leaky_relu)
        print('Siamse Net has built', flush=True)
    return embedding
And I create two networks that share the same parameters:
self.embedding1 = self.__build_net(self.centers_placeholder)
self.embedding2 = self.__build_net(self.neighbors_placeholder, reuse=True)
I use this network to generate embeddings for some kind of data.
My question is: do I need to set reuse to True when doing inference (generating embeddings), like this:
with tf.Session() as sess:
    self.saver.restore(sess, self.store_path + self.model_type + '_model_' + str(self.model_num) + '_' + str(self.center_size) + '_' + str(self.neighbor_size) + '.ckpt')
    embedding = self.__build_net(self.centers_placeholder, reuse=True)
    embeddings = sess.run(embedding, feed_dict={self.centers_placeholder: data})
Or like this:
with tf.Session() as sess:
    self.saver.restore(sess, self.store_path + self.model_type + '_model_' + str(self.model_num) + '_' + str(self.center_size) + '_' + str(self.neighbor_size) + '.ckpt')
    embedding = self.__build_net(self.centers_placeholder, reuse=False)
    embeddings = sess.run(embedding, feed_dict={self.centers_placeholder: data})
Also, when setting the variable scope, do I need to give a name to each layer?
Thanks!

No. reuse indicates whether you want to use a previously defined variable rather than create a new one.
Say, you've created a variable called 'foo/v':
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
print(v.name) ---> foo/v:0
Running the following will give:
with tf.variable_scope("foo"):
v1 = tf.get_variable("v", [1]) ---> gives error as name 'foo/v' exists
print(v1.name)
with tf.variable_scope("foo", reuse=False):
v1 = tf.get_variable("v", [1]) ---> gives error as name 'foo/v' exists
print(v1.name)
with tf.variable_scope("foo", reuse=True):
v1 = tf.get_variable("v", [1])
print(v1.name) ---> foo/v:0
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v1 = tf.get_variable("v", [1])
print(v1.name) ---> foo/v:0
But if you run the following from the very beginning:
with tf.variable_scope("foo", reuse=True):
v1 = tf.get_variable("v", [1])
print(v1.name) ---> gives error as 'foo/v' does not exist (thus cannot be reused).
Thus I prefer setting reuse=tf.AUTO_REUSE all the time.
For a detailed explanation, please read How Does Variable Scope Work? from the TensorFlow official guide.
By the way, tf.layers.batch_normalization has a training option that needs to be set False during inference. See the explanations here.
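Putting these two points together for the siamese net above, a minimal sketch might look like this (the is_training placeholder is an assumption, not part of the original code; the explicit layer names are optional, since tf.layers generates names automatically, but they make checkpoints easier to inspect):

def __build_net(self, placeholder, is_training):
    # reuse=tf.AUTO_REUSE creates the variables on the first call and reuses
    # them on every later call, so the same function serves both training
    # and inference without passing a reuse flag around.
    with tf.variable_scope('siamse', reuse=tf.AUTO_REUSE):
        layer = tf.layers.dense(placeholder, 3000, activation=tf.nn.leaky_relu, name='fc1')
        # Batch norm behaves differently in training and inference; is_training
        # would be e.g. tf.placeholder_with_default(False, []) (an assumption here).
        layer = tf.layers.batch_normalization(layer, training=is_training)
        embedding = tf.layers.dense(layer, 300, activation=tf.nn.leaky_relu, name='fc2')
    return embedding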

Related

Restore tf variables in a different graph

I want to use my pretrained separable convolution (which is part of a bigger module) in another separable convolution in another model.
In the trained module I tried
with tf.variable_scope('sep_conv_ker' + str(input_shape[-1])):
    sep_conv2d = tf.reshape(
        tf.layers.separable_conv2d(inputs_flatten, input_shape[-1],
                                   [1, input_shape[-2]],
                                   trainable=trainable),
        [inputs_flatten.shape[0], 1, input_shape[-1], INNER_LAYER_WIDTH])
and
all_variables = tf.trainable_variables()
scope1_variables = tf.contrib.framework.filter_variables(all_variables, include_patterns=['sep_conv_ker'])
sep_conv_weights_saver = tf.train.Saver(scope1_variables, sharded=True, max_to_keep=20)
Inside the session I save with:
sep_conv_weights_saver.save(sess, os.path.join(LOG_DIR + MODEL_SPEC_LOG_DIR,
                                               "init_weights", MODEL_SPEC_SUFFIX + 'epoch_' + str(epoch) + '.ckpt'))
But I cannot understand when and how I should load the weights into the separable convolution in the other module; it has a different name and a different scope.
Furthermore, since I am using a predefined tf.layers layer, does that mean I need to access each individual weight in the new graph and assign it?
My current solution doesn't work, and I think the weights are somehow being initialized after the assignment. Besides, loading a whole new graph just for a few weights seems weird, doesn't it?
###IN THE OLD GRAPH###
all_variables = tf.trainable_variables()
scope1_variables = tf.contrib.framework.filter_variables(all_variables, include_patterns=['sep_conv_ker'])
vars = dict((var.op.name.split("/")[-1] + str(idx), var) for idx,var in enumerate(scope1_variables))
sep_conv_weights_saver = tf.train.Saver(vars, sharded=True, max_to_keep=20)
In the new graph there is a function that basically takes the variables from the old graph and assigns them; loading the meta graph is redundant:
def load_pretrained(sess):
    sep_conv2d_vars = [var for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if ("sep_conv_ker" in var.op.name)]
    var_dict = dict((var.op.name.split("/")[-1] + str(idx), var) for idx, var in enumerate(sep_conv2d_vars))
    new_saver = tf.train.import_meta_graph(
        tf.train.latest_checkpoint('log/train/sep_conv_ker/global_neighbors40/init_weights') + '.meta')
    # saver = tf.train.Saver(var_list=var_dict)
    new_saver.restore(sess,
                      tf.train.latest_checkpoint('log/train/sep_conv_ker/global_neighbors40/init_weights'))
    graph = tf.get_default_graph()
    sep_conv2d_trained = dict(("".join(var.op.name.split("/")[-2:]), var) for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) if ("sep_conv_ker_init" in var.op.name))
    for var in sep_conv2d_vars:
        tf.assign(var, sep_conv2d_trained["".join(var.op.name.split("/")[-2:])])
You need to make sure that the variables have the same names in the checkpoint file and in the graph where you load them. You can write a script that converts the variable names.
With tf.contrib.framework.list_variables(ckpt), you can find out which variables of which shapes are in the checkpoint, and create variables with the new names (you can probably write a regex that fixes the names) and the correct shapes.
Then load the original values with tf.contrib.framework.load_checkpoint(ckpt), create assign ops tf.assign(var, loaded) that write the saved values into the variables with the new names, run the assign ops in a session, and save the new variables.
Minimum example:
Original model (variables in scope "regression"):
import tensorflow as tf
x = tf.placeholder(tf.float32, [None, 3])
regression = tf.layers.dense(x, 1, name="regression")
session = tf.Session()
session.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.trainable_variables())
saver.save(session, './model')
Renaming script:
import tensorflow as tf
assign_ops = []
reader = tf.contrib.framework.load_checkpoint("./model")
for name, shape in tf.contrib.framework.list_variables("./model"):
    new_name = name.replace("regression/", "foo/bar/")
    new_var = tf.get_variable(new_name, shape)
    assign_ops.append(tf.assign(new_var, reader.get_tensor(name)))
session = tf.Session()
saver = tf.train.Saver(tf.trainable_variables())
session.run(assign_ops)
saver.save(session, './model-renamed')
Model where you load the renamed variables (the same variables, now in scope "foo/bar"):
import tensorflow as tf
with tf.variable_scope("foo"):
x = tf.placeholder(tf.float32, [None, 3])
regression = tf.layers.dense(x, 1, name="bar")
session = tf.Session()
session.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.trainable_variables())
saver.restore(session, './model-renamed')
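As a sanity check, a short sketch using the tf.contrib.framework.list_variables call mentioned above can confirm that the renamed checkpoint contains what you expect:

import tensorflow as tf

# Print every variable name and shape stored in the renamed checkpoint.
for name, shape in tf.contrib.framework.list_variables("./model-renamed"):
    print(name, shape)  # e.g. foo/bar/kernel [3, 1]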

Tensorflow Initialise Variables

I just want to see my output so far, but I can't get my variables to initialize. The same function worked in another notebook, but it is not working in this one. I tried two ways and keep getting:
FailedPreconditionError: Attempting to use uninitialized value Variable.
I am using TensorFlow 1.2.1.
mnist = input_data.read_data_sets('./', one_hot=True)
n1=500
n2=300
nclasses=10
batchsize=100
def layers(data):
    layer1 = {'weights': tf.Variable(tf.random_normal([784, n1])),
              'bias': tf.Variable(tf.random_normal([n1]))}
    layer2 = {'weights': tf.Variable(tf.random_normal([n1, n2])),
              'bias': tf.Variable(tf.random_normal([n2]))}
    output = {'weights': tf.Variable(tf.random_normal([n2, nclasses])),
              'bias': tf.Variable(tf.random_normal([nclasses]))}
    l1 = tf.add(tf.matmul(data, layer1['weights']), layer1['bias'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, layer2['weights']), layer2['bias'])
    l2 = tf.nn.relu(l2)
    output = tf.add(tf.matmul(l2, output['weights']), output['bias'])
    return output
session = tf.Session()
session.run(tf.global_variables_initializer())
result = session.run(layers(mnist.test.images))
print(type(result))
I tried this as well:
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    result = sess.run(layers(mnist.test.images))
    print(type(result))
Your issue is that the graph is constructed inside the function call layers(...), but you initialize all the variables before constructing the graph.
Hence, you need to write:
output_op = layers(mnist.test.images)
session.run(tf.global_variables_initializer())
result = session.run(output_op)
Then the graph is constructed and TensorFlow can initialize all variables. Full working example:
import tensorflow as tf
import numpy as np

def fake_mnist():
    return np.random.randn(1, 28 * 28)

n1 = 500
n2 = 300
nclasses = 10
batchsize = 100

def layers(data):
    layer1 = {'weights': tf.Variable(tf.random_normal([784, n1])),
              'bias': tf.Variable(tf.random_normal([n1]))}
    layer2 = {'weights': tf.Variable(tf.random_normal([n1, n2])),
              'bias': tf.Variable(tf.random_normal([n2]))}
    output = {'weights': tf.Variable(tf.random_normal([n2, nclasses])),
              'bias': tf.Variable(tf.random_normal([nclasses]))}
    l1 = tf.add(tf.matmul(data, layer1['weights']), layer1['bias'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, layer2['weights']), layer2['bias'])
    l2 = tf.nn.relu(l2)
    output = tf.add(tf.matmul(l2, output['weights']), output['bias'])
    return output

with tf.Session() as sess:
    data_inpt = tf.placeholder(tf.float32)
    output_op = layers(data_inpt)
    sess.run(tf.global_variables_initializer())
    result = sess.run(output_op, {data_inpt: fake_mnist()})
    print(type(result))
    print(result)
I highly doubt that your code runs in any other notebook. I guess that in the other notebook you executed the cell containing layers multiple times, so that by the second call to tf.global_variables_initializer the variables already existed in the graph. But the code you posted is definitely not correct.
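If that is what happened, one possible fix (a sketch, not part of the original answer) is to reset the default graph at the top of the cell, so variables from earlier runs cannot linger:

import tensorflow as tf

tf.reset_default_graph()                # drop anything left over from earlier cell runs
output_op = layers(mnist.test.images)   # build the graph first
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # then initialize
    result = sess.run(output_op)
    print(type(result))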
Since you mentioned that the code works in another notebook, it could be a TF version issue. Instead of session.run(tf.global_variables_initializer()), try session.run(tf.initialize_all_variables()); note, though, that tf.initialize_all_variables() is deprecated.

TensorFlow program gives different answers after being deployed on AWS Lambda

I have written a program with TensorFlow that identifies a number of figures in an image. The model is trained with one function and then used by another function to label the figures. Training was done on my computer, and the resulting model was uploaded to AWS along with the solve function.
On my computer it works well, but after creating a Lambda in AWS it behaves strangely and starts giving different answers for the same test data.
The model in the solve function is this:
# Recreate neural network from model file generated during training
# input
x = tf.placeholder(tf.float32, [None, size_of_image])
# weights
W = tf.Variable(tf.zeros([size_of_image, num_chars]))
# biases
b = tf.Variable(tf.zeros([num_chars]))
The solve function code to label the figures is this:
for testi in range(captcha_letters_num):
    # load model from file
    saver = tf.train.import_meta_graph(model_path + '.meta',
                                       clear_devices=True)
    saver.restore(sess, model_path)
    # Data to label
    test_x = np.asarray(char_imgs[testi], dtype=np.float32)
    predict_op = model(test_x, W, b)
    op = sess.run(predict_op, feed_dict={x: test_x})
    # find max probability from the probability distribution returned by softmax
    max_probability = op[0][0]
    max_probability_index = -1
    for i in range(num_chars):
        if op[0][i] > max_probability:
            max_probability = op[0][i]
            max_probability_index = i
    # append it to final output
    final_text += char_map_list[max_probability_index]
    # Reset the model so it can be used again
    tf.reset_default_graph()
With the same test data it gives different answers; I don't know why.
Solved!
What I finally did was keep the Session outside the loop and initialize the variables; after the loop ends, I reset the graph.
saver = tf.train.Saver()
sess = tf.Session()
# Initialize variables
sess.run(tf.global_variables_initializer())

...

# passing each of the 5 characters through the NNet
for testi in range(captcha_letters_num):
    # Data to label
    test_x = np.asarray(char_imgs[testi], dtype=np.float32)
    predict_op = model(test_x, W, b)
    op = sess.run(predict_op, feed_dict={x: test_x})
    # find max probability from the probability distribution returned by softmax
    max_probability = op[0][0]
    max_probability_index = -1
    for i in range(num_chars):
        if op[0][i] > max_probability:
            max_probability = op[0][i]
            max_probability_index = i
    # append it to final output
    final_text += char_map_list[max_probability_index]

# Reset the model so it can be used again
tf.reset_default_graph()
sess.close()

Convert MATLAB method to Python

There is a very helpful method in MATLAB called getwb().
For developers coding neural networks, this method returns the weights and biases at the final iteration.
I have a neural network (built with TensorFlow tools).
Is it possible to port this method somehow?
I tried a lot with tensorFlow.train.Saver() and restore(), but I don't really understand this issue.
Thanks!
EDIT:
my model is:
def neuralNetworkModel(x):
    # first step: (input * weights) + bias, a linear operation like y = ax + b
    # each connection between layer i and layer i+1 is represented by nodes(i) * nodes(i+1) weights
    for i in range(0, numberOfLayers):
        if i == 0:
            hiddenLayers.append({"weights": tensorFlow.Variable(tensorFlow.random_normal([sizeOfRow, nodesLayer[i]])),
                                 "biases": tensorFlow.Variable(tensorFlow.random_normal([nodesLayer[i]]))})
        elif i > 0 and i < numberOfLayers - 1:
            hiddenLayers.append({"weights": tensorFlow.Variable(tensorFlow.random_normal([nodesLayer[i], nodesLayer[i + 1]])),
                                 "biases": tensorFlow.Variable(tensorFlow.random_normal([nodesLayer[i + 1]]))})
        else:
            outputLayer = {"weights": tensorFlow.Variable(tensorFlow.random_normal([nodesLayer[i], classes])),
                           "biases": tensorFlow.Variable(tensorFlow.random_normal([classes]))}

    # create the layers
    for i in range(numberOfLayers):
        if i == 0:
            layers.append(tensorFlow.add(tensorFlow.matmul(x, hiddenLayers[i]["weights"]), hiddenLayers[i]["biases"]))
            layers.append(tensorFlow.nn.relu(layers[i]))  # pass values through the activation function (e.g. sigmoid, softmax) and add to the layer
        elif i > 0 and i < numberOfLayers - 1:
            layers.append(tensorFlow.add(tensorFlow.matmul(layers[i - 1], hiddenLayers[i]["weights"]), hiddenLayers[i]["biases"]))
            layers.append(tensorFlow.nn.relu(layers[i]))

    output = tensorFlow.matmul(layers[numberOfLayers - 1], outputLayer["weights"]) + outputLayer["biases"]
    finalOutput = output
    return output
In your code you create a bunch of variables for the weights and biases of the hidden and output layers. You should be able to retrieve them at any moment (while a session is active) using tf.Session.run(), as follows:
import tensorflow as tf

tf.reset_default_graph()
v = tf.Variable(tf.random_normal((5, 5)))
init = tf.initialize_all_variables()

with tf.Session() as sess:
    sess.run(init)
    v_val = sess.run(v)
    print(v_val)
I also advise using the tf.learn library, which contains useful abstractions such as the fully_connected layer.
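If you want a literal getwb() analogue, here is a minimal sketch (the helper name and its dict-of-arrays return format are my own invention, not a TensorFlow API):

import tensorflow as tf

def getwb(sess):
    # Return the current values of all trainable variables
    # (weights and biases) as a dict keyed by variable name.
    variables = tf.trainable_variables()
    values = sess.run(variables)  # fetch all values in a single run call
    return {var.name: val for var, val in zip(variables, values)}

Call it inside an active session after training, e.g. wb = getwb(sess), and inspect wb.keys() to see which weight and bias arrays were retrieved.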

tensorflow.train.import_meta_graph does not work?

I am trying to simply save and restore a graph, but even the simplest example does not work as expected (this is with version 0.9.0 or 0.10.0, on 64-bit Linux without CUDA, using Python 2.7 or 3.5.2).
First I save the graph like this:
import tensorflow as tf
v1 = tf.placeholder('float32')
v2 = tf.placeholder('float32')
v3 = tf.mul(v1,v2)
c1 = tf.constant(22.0)
v4 = tf.add(v3,c1)
sess = tf.Session()
result = sess.run(v4,feed_dict={v1:12.0, v2:3.3})
g1 = tf.train.export_meta_graph("file")
## alternatively, I also tried:
## g1 = tf.train.export_meta_graph("file",collection_list=["v4"])
This creates a file "file" that is non-empty and also sets g1 to something that looks like a proper graph definition.
Then I try to restore this graph:
import tensorflow as tf
g=tf.train.import_meta_graph("file")
This works without an error, but does not return anything at all.
Can anyone provide the necessary code to simply just save the graph for "v4" and completely restore it so that running this in a new session will produce the same result?
To reuse a MetaGraphDef, you will need to record the names of interesting tensors in your original graph. For example, in the first program, set an explicit name argument in the definition of v1, v2 and v4:
v1 = tf.placeholder(tf.float32, name="v1")
v2 = tf.placeholder(tf.float32, name="v2")
# ...
v4 = tf.add(v3, c1, name="v4")
Then, you can use the string names of the tensors in the original graph in your call to sess.run(). For example, the following snippet should work:
import tensorflow as tf
_ = tf.train.import_meta_graph("./file")
sess = tf.Session()
result = sess.run("v4:0", feed_dict={"v1:0": 12.0, "v2:0": 3.3})
Alternatively, you can use tf.get_default_graph().get_tensor_by_name() to get tf.Tensor objects for the tensors of interest, which you can then pass to sess.run():
import tensorflow as tf
_ = tf.train.import_meta_graph("./file")
g = tf.get_default_graph()
v1 = g.get_tensor_by_name("v1:0")
v2 = g.get_tensor_by_name("v2:0")
v4 = g.get_tensor_by_name("v4:0")
sess = tf.Session()
result = sess.run(v4, feed_dict={v1: 12.0, v2: 3.3})
UPDATE: Based on the discussion in the comments, here is a complete example of saving and loading, including the variable contents. To demonstrate that variable values are really saved, the variable vx is doubled in a separate operation before saving.
Saving:
import tensorflow as tf
v1 = tf.placeholder(tf.float32, name="v1")
v2 = tf.placeholder(tf.float32, name="v2")
v3 = tf.mul(v1, v2)
vx = tf.Variable(10.0, name="vx")
v4 = tf.add(v3, vx, name="v4")
saver = tf.train.Saver([vx])
sess = tf.Session()
sess.run(tf.initialize_all_variables())
sess.run(vx.assign(tf.add(vx, vx)))
result = sess.run(v4, feed_dict={v1:12.0, v2:3.3})
print(result)
saver.save(sess, "./model_ex1")
Restoring:
import tensorflow as tf
saver = tf.train.import_meta_graph("./model_ex1.meta")
sess = tf.Session()
saver.restore(sess, "./model_ex1")
result = sess.run("v4:0", feed_dict={"v1:0": 12.0, "v2:0": 3.3})
print(result)
The bottom line is that, in order to make use of a saved model, you must remember the names of at least some of the nodes (e.g. a training op, an input placeholder, an evaluation tensor, etc.). The MetaGraphDef stores the list of variables that are contained in the model, and helps to restore these from a checkpoint, but you are required to reconstruct the tensors/operations used in training/evaluating the model yourself.
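One way to avoid having to remember names by hand (a sketch using TensorFlow's collection mechanism, which the answer above does not cover) is to register the interesting tensors in named collections before exporting; collections are serialized into the MetaGraphDef, so they can be fetched back after importing:

# At save time, before saver.save(sess, "./model_ex1"):
tf.add_to_collection("inputs", v1)
tf.add_to_collection("inputs", v2)
tf.add_to_collection("outputs", v4)

# At restore time, after tf.train.import_meta_graph("./model_ex1.meta"):
v1, v2 = tf.get_collection("inputs")
v4, = tf.get_collection("outputs")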
Note that tf.train.import_meta_graph is deprecated in recent TensorFlow versions. Replace tf.train.import_meta_graph in your code with tf.compat.v1.train.import_meta_graph and the error will go away.
