Assign value to tf variable in function tensorflow - python

How can I assign a value to a tf Variable inside a function?
Based on the link here, it says that you have to run a session on the tf tensor. I want to update the tf variable inside the function after a few calculations.
Example:
def update(weights):
    value_1 = 0
    value_2 = 2
    # ... some code here ...
    weights['layer_1'] = tf.multiply(weights['layer_1'], value_1)
    weights['layer_2'] = tf.multiply(weights['layer_2'], value_2)
    # ... some code here ...
I can't write the code above as-is. How do I use assign to make it work?

You have to use assign, which takes a Tensor that must have exactly the same shape as the original Variable. If you want a different shape, pass validate_shape=False. Keep in mind that the assignment only takes effect at run time: in the graph you are describing the behavior of your variable, not eagerly assigning values.
Here an example that shows variable assignment with variable shapes:
import tensorflow as tf

var = tf.Variable(tf.zeros((1, 3)))
new_v = tf.assign(var, tf.ones((5, 7)), validate_shape=False)
init_op = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init_op)
    print(sess.run([var]))
    print(sess.run([new_v]))
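For reference, the output looks roughly as below: the first print still shows the original (1, 3) zeros, and var only takes the new (5, 7) value once the assign op new_v is actually run.

# roughly:
# [array([[0., 0., 0.]], dtype=float32)]
# [array([[1., 1., 1., 1., 1., 1., 1.], ...], dtype=float32)]  <- the new (5, 7) ones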
For your particular example you could try:
def update(weights):
    value_1 = tf.constant(0.)  # use float constants to match the variables' dtype
    value_2 = tf.constant(2.)
    # ... some code here ...
    weights['layer_1'] = tf.assign(weights['layer_1'], tf.multiply(weights['layer_1'], value_1))
    weights['layer_2'] = tf.assign(weights['layer_2'], tf.multiply(weights['layer_2'], value_2))
    # ... some code here ...
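As a minimal usage sketch (the shapes and session code here are illustrative, not from the question): once update() has replaced the dict entries with assign ops, running those ops in a session performs the assignments.

weights = {
    'layer_1': tf.Variable(tf.ones((3, 3))),
    'layer_2': tf.Variable(tf.ones((3, 3))),
}
update(weights)  # the dict entries now hold assign ops
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run([weights['layer_1'], weights['layer_2']])  # applies both updates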

This works for me -
import tensorflow as tf
import numpy as np

# function to randomly initialize weights for a specific layer
def assign_var(layer_number):
    weight_value = np.random.rand(5, 3)  # or any calculations you need
    # get_variable needs a shape and dtype here (np.random.rand gives float64)
    weight_var = tf.get_variable('weights_layer_' + str(layer_number),
                                 shape=(5, 3), dtype=tf.float64)
    return tf.assign(weight_var, weight_value)

with tf.Session() as sess:
    sess.run(assign_var(1))
    sess.run(assign_var(2))
EDIT: The problem with the above code is that it keeps adding new ops to the graph every time you call the function.
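A quick sanity check makes this growth visible: count the ops in the default graph around a call to the assign_var defined above (this check is just an illustration):

g = tf.get_default_graph()
num_ops_before = len(g.get_operations())
assign_var(3)  # each call creates a fresh variable plus an assign op
print(len(g.get_operations()) - num_ops_before)  # prints a positive number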
Alternatively, I think this should be better.
import tensorflow as tf
import numpy as np

# Note: a variable's name must be a plain Python string at graph-construction
# time, so it cannot be fed through a placeholder. Instead, build the variables
# and their assign ops once, and feed only the values at run time.
weight_value = tf.placeholder(tf.float64, shape=(5, 3))
assign_ops = {}
for layer_number in (1, 2):
    weight_var = tf.get_variable('weights_layer_' + str(layer_number),
                                 shape=(5, 3), dtype=tf.float64)
    assign_ops[layer_number] = tf.assign(weight_var, weight_value)

sess = tf.Session()

# function to randomly initialize weights for a specific layer
def assign_var(layer_number):
    rand_weight_value = np.random.rand(5, 3)  # or any calculations you need
    sess.run(assign_ops[layer_number], {weight_value: rand_weight_value})

assign_var(1)  # assigns random weight values to layer 1

Related

Restoring GPflow Model with Mean Function doesn't work

I have been following the methodology of saving/restoring GPflow models with success. But now I've run into a snag.
When I try to restore a model with a Linear mean function, the restore crashes with an error (shown at the bottom).
I think the issue lies in the naming convention of the TensorFlow objects behind the Linear mean function. The suffix in a name like "Linear-44dbadbb-0" is random and changes every time the model is rebuilt, so if I check the tensor names of the saved model with
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
print_tensors_in_checkpoint_file(file_name='./model.ckpt', tensor_name='', all_tensors=False)
I get:
Linear-eeb5f9f3-0/A/unconstrained (DT_DOUBLE) [1,1]
Linear-eeb5f9f3-0/b/unconstrained (DT_DOUBLE) [1]
model/X/dataholder (DT_DOUBLE) [15,1]
model/Y/dataholder (DT_DOUBLE) [15,1]
model/kern/kernels/0/lengthscales/unconstrained (DT_DOUBLE) []
model/kern/kernels/0/variance/unconstrained (DT_DOUBLE) []
model/kern/kernels/1/lengthscales/unconstrained (DT_DOUBLE) []
model/kern/kernels/1/variance/unconstrained (DT_DOUBLE) []
model/likelihood/variance/unconstrained (DT_DOUBLE) []
The Linear mean function clearly has a different name from the one in the model being restored.
I have tried renaming the variables before the restore, but this doesn't work in TensorFlow. I also tried different saving/restoring methods, but then I had problems sampling from the model.
Saving the Model
import gpflow
import numpy as np
import random
import tensorflow as tf

# define data
rng = np.random.RandomState(4)
X = rng.uniform(0, 5.0, 15)[:, np.newaxis]
Y = np.sin((X[:, 0] - 2.5) ** 2).reshape(len(X), 1)

# define the mean function
mf = gpflow.mean_functions.Linear(np.ones((1, 1)), np.zeros((1,)))

# create the GP model
with gpflow.defer_build():
    k = gpflow.kernels.Matern32(1) + gpflow.kernels.RBF(1)
    m = gpflow.models.GPR(X, Y, kern=k, name='model', mean_function=mf)
    m.likelihood.variance = 1e-03
    m.likelihood.trainable = False

tf.global_variables_initializer()
tf_session = m.enquire_session()
m.compile(tf_session)

gpflow.train.ScipyOptimizer().minimize(m)

saver = tf.train.Saver()
save_path = saver.save(tf_session, "./model.ckpt")
print("Model saved in path: %s" % save_path)
Restoring the Model
import gpflow
import numpy as np
import random
import tensorflow as tf

# define data
rng = np.random.RandomState(4)
X = rng.uniform(0, 5.0, 15)[:, np.newaxis]
Y = np.sin((X[:, 0] - 2.5) ** 2).reshape(len(X), 1)

# define the mean function
mf = gpflow.mean_functions.Linear(np.ones((1, 1)), np.zeros((1,)))

with gpflow.defer_build():
    k = gpflow.kernels.Matern32(1) + gpflow.kernels.RBF(1)
    m = gpflow.models.GPR(X, Y, kern=k, name='model', mean_function=mf)
    m.likelihood.variance = 1e-03
    m.likelihood.trainable = False

# construct and compile the tensorflow session
tf.global_variables_initializer()
tf_session = m.enquire_session()
m.compile(tf_session)

saver = tf.train.Saver()
save_path = saver.restore(tf_session, "./model.ckpt")
print("Model loaded from path: %s" % save_path)

m.anchor(tf_session)
The code crashes at save_path = saver.restore(tf_session, "./model.ckpt") with the error:
NotFoundError (see above for traceback): Key Linear-44dbadbb-0/A/unconstrained not found in checkpoint...
defer_build() does a bunch of things, but one effect of constructing the entire model (i.e. the TensorFlow graph) in one go is that all the TensorFlow variables and placeholders get consistent names, with all their names relating to the name of the model itself (which you set by passing the name='model' keyword argument to the model constructor).
In your code, however, the Linear mean function is constructed outside of the defer_build() scope. This means gpflow has to construct a graph for it right away, including setting up variables for its parameters (the slope and offset in this case). All TensorFlow variables live in a global name space, so the only way of allowing more than one such object to be created is to assign them randomized names. (E.g., imagine wanting to construct a sum of two kernels of the same type!)
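To make the randomized naming concrete, here is a small sketch (the exact suffixes will differ on every run) that builds two Linear mean functions outside of defer_build() and lists the TensorFlow variables they create:

import gpflow
import numpy as np
import tensorflow as tf

# Outside defer_build(), each object is compiled immediately and gets a
# randomized suffix in the global TensorFlow name space:
mf1 = gpflow.mean_functions.Linear(np.ones((1, 1)), np.zeros((1,)))
mf2 = gpflow.mean_functions.Linear(np.ones((1, 1)), np.zeros((1,)))
print([v.name for v in tf.global_variables()])
# e.g. ['Linear-eeb5f9f3-0/A/unconstrained:0', 'Linear-eeb5f9f3-0/b/unconstrained:0',
#       'Linear-44dbadbb-0/A/unconstrained:0', 'Linear-44dbadbb-0/b/unconstrained:0']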
Fortunately, the fix is easy: simply move the construction of the mean function into the defer_build block:
with gpflow.defer_build():
    # define the mean function
    mf = gpflow.mean_functions.Linear(np.ones((1, 1)), np.zeros((1,)))
    k = gpflow.kernels.Matern32(1) + gpflow.kernels.RBF(1)
    m = gpflow.models.GPR(X, Y, kern=k, mean_function=mf, name='model')
    m.likelihood.variance = 1e-03
    m.likelihood.trainable = False

# construct and compile the tensorflow session
tf.global_variables_initializer()
tf_session = m.enquire_session()
m.compile(tf_session)
If you do this in both the "save" and "load" scripts, everything runs, hopefully as you expect. Hope this helps!
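To double-check, the checkpoint can be inspected again with the same tool as above; with the mean function built inside defer_build(), its parameters should appear under the model's own scope rather than under a randomized Linear-xxxx name (the exact path in the comment is a guess and may differ):

from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file

# Expect something like model/mean_function/A/unconstrained instead of
# Linear-44dbadbb-0/A/unconstrained.
print_tensors_in_checkpoint_file(file_name='./model.ckpt', tensor_name='', all_tensors=False)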

Training while loop in Tensorflow

I've attempted to convert a Python-side training loop to TensorFlow to (hypothetically) make the code run faster by not constantly passing control back and forth between Python and the TF runtime. However, I can't manage to use tf.while_loop.
Here's the code that works:
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from sklearn.datasets import load_iris
from sklearn.preprocessing import RobustScaler

x, y = load_iris(True)
x = RobustScaler().fit_transform(x)

shape = (10, 10)
max_epochs = 1000

graph = tf.Graph()
sess = tf.Session(graph=graph)

x = x.astype(np.float64)

# Construct graph
with graph.as_default():
    weights = tf.get_variable(
        'weights', shape, initializer=tf.constant_initializer, dtype=tf.float64
    )
    curr_epoch = tf.placeholder(dtype=tf.int64, shape=())

    with tf.name_scope('data'):
        data = tf.data.Dataset.from_tensor_slices(x)
        data = data.shuffle(buffer_size=10000)
        data = data.repeat(max_epochs)
        data = data.batch(1)
        data = data.make_one_shot_iterator().get_next()

    with tf.name_scope('update'):
        update_op = make_update_op(weights)

    init = tf.global_variables_initializer()

sess.run(init)

for i in tqdm(range(max_epochs)):
    for _ in range(x.shape[0]):
        sess.run(update_op, feed_dict={curr_epoch: i})

np_weights = sess.run(weights)
print(np_weights)  # Correctly prints an array of 150's.
Now, if I create an update function to pass to tf.while_loop, an error is thrown.
def make_update_op(w):
    return w.assign(w + 0.001)

# In the code above:
update_op = tf.while_loop(lambda _: True, make_update_op, (weights,), maximum_iterations=x.shape[0])

# No inner loop:
for i in tqdm(range(max_epochs)):
    sess.run(update_op, feed_dict={curr_epoch: i})
Line 22, in make_update_op
return w.assign(
AttributeError: 'Tensor' object has no attribute 'assign'
I don't quite understand what is happening even after reading the documentation. weights is a Variable, after all. What can be done to make the training loop work correctly?
The tensor to which you're trying to assign a new value inside the while loop is the result of a sequence of operations and tensors (an operation is a node in the graph, while a tensor is a directed edge). In particular, the while loop will produce:
Variable/Read-->while/Enter-->while/Merge-->while/Switch-->while/Identity
What you're trying to assign to here is the tensor while/Identity.
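A small sketch makes this visible: inside the loop body, the value handed to the body function is a plain Tensor produced by this loop plumbing (typically named while/Identity), so it has no .assign method:

import tensorflow as tf

weights = tf.Variable(tf.zeros((2, 2)))

def body(w):
    # `w` is a Tensor such as 'while/Identity:0', not the Variable itself
    print(type(w).__name__, w.name)
    return w + 0.001

_ = tf.while_loop(lambda _: True, body, (weights,), maximum_iterations=3)
# prints: Tensor while/Identity:0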
tf.while_loop is usually used to iterate over the dimensions of a tensor (including an unknown, None, dimension). Here you are trying to iterate over variables that are fully defined, so you don't need a tf.while_loop at all. Just create the operations that update each variable and group them together:
update_ops = [w.assign(w + 0.001) for w in weights]
update_op = tf.group(update_ops)
Now, when you execute update_op through the tf.Session() interface, it will update all of the variables.
Example:
import tensorflow as tf

v1 = tf.Variable(tf.ones((1, 2), dtype=tf.float32))
v2 = tf.Variable(2 * tf.ones((1, 3), dtype=tf.float32))

update_ops = [w.assign(w + 0.001) for w in [v1, v2]]
update_op = tf.group(update_ops)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('before update:')
    print(v1.eval(), v2.eval())
    print('after update:')
    sess.run(update_op)  # <-- update your variables
    print(v1.eval(), v2.eval())

# before update:
# [[1. 1.]] [[2. 2. 2.]]
# after update:
# [[1.001 1.001]] [[2.001 2.001 2.001]]
Turns out, the missing piece was exactly what Vlad pointed out: one cannot assign to a variable inside the loop. Instead, one can return the new value of the variable:
def make_update_op(w):
    return w + 0.001

new_w = tf.while_loop(lambda _: True, make_update_op, (weights,), maximum_iterations=x.shape[0])
update_op = weights.assign(new_w)
To update more variables, the body function needs to return the same number of values, which you then unpack in Python, but the principle is the same:
def make_update_op(w, d):
    return w + 0.001, d

new_w, _ = tf.while_loop(lambda *_: True, make_update_op, (weights, data), maximum_iterations=x.shape[0])
update_op = weights.assign(new_w)
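With this, the inner Python loop from the original code can be dropped entirely; a single run of update_op performs x.shape[0] updates inside the graph:

for i in tqdm(range(max_epochs)):
    sess.run(update_op, feed_dict={curr_epoch: i})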

Tensorflow: Error initializing variables created within tf.data.Dataset.map()

I have created a function with TF operations that I invoke via tf.data.Dataset.map() to transform the input data for my model. Inside that function I create a tf.Variable and assign to it. When initializing the variables, TF complains that the variable's init operation is not an element of the graph, or that the variable does not belong to the same graph as the other variables. I would appreciate any help with this issue.
Here is some toy code to reproduce the issue (TF 1.12):
import tensorflow as tf

def fun(x):
    f = tf.Variable(tf.ones((1,), dtype=tf.int64), name='test')
    op = f.assign(x, name='test_assign')
    with tf.control_dependencies([op]):
        f = tf.identity(f)
    return f

def generator():
    while True:
        yield [2]

ds = tf.data.Dataset.from_generator(generator,
                                    output_shapes=tf.TensorShape([1, ]),
                                    output_types=tf.int64)
ds = ds.map(fun)
iterator = ds.make_one_shot_iterator()
y = iterator.get_next()

sess = tf.Session()
sess.run(tf.global_variables_initializer())
for _ in range(5):
    print(sess.run(y))

Tensorflow: Can not convert a function into a Tensor or Operation

I have read through previous threads. My data are in the form of an array fed to a placeholder. Trying to convert the data to a tensor before feeding produces a different (inverse) error message. Other solutions similarly do not seem to work in this situation. Here is minimal code.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.factorization import KMeans

X = tf.placeholder(tf.float32, shape=[None, 10], name="X")
data = np.random.randn(2, 10)
k = 2  # number of clusters; not defined in the original snippet

def lump(X):
    # Build KMeans graph
    kmeans = KMeans(inputs=X, num_clusters=k, distance_metric='cosine',
                    use_mini_batch=True)
    (all_scores, cluster_idx, scores, cluster_centers_initialized,
     cluster_centers_var, init_op, train_op) = kmeans.training_graph()
    cluster_idx = cluster_idx[0]  # fix for cluster_idx being a tuple
    avg_distance = tf.reduce_mean(scores)
    return cluster_idx, scores

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    idx, d = sess.run(lump, feed_dict={X: data})
Correct, you can't evaluate just lump, because it's a function (returning tensors), not a tensor or an op. You probably meant to do something like this:
cluster_idx, scores = lump(X)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    idx, d = sess.run([cluster_idx, scores], feed_dict={X: data})
Note that lump() is invoked before tf.global_variables_initializer(), because it defines new variables in the graph, so they must be initialized.
The code still fails, because lump is clearly not finished and has issues with dimensions, but this is the right way to evaluate something in a session.

Get value in placeholder array in TensorFlow

I get an error with while_loop when I try to read a value from a placeholder.
The needed packages:
import tensorflow as tf
import numpy as np

# Start TensorFlow InteractiveSession
sess = tf.InteractiveSession()

# We define the input variables of the problem
input1 = tf.placeholder(tf.float32, shape=[None, 100], name='input1')

# Define the variable to train
L = tf.Variable(np.identity(100, dtype='float32'), trainable=False)
i0 = tf.placeholder(tf.int32)
We use a function to define the body and a lambda for the condition; that part is straightforward.
But I have a problem, because I want to get the values of input1 at some position. I tried using eval, i0.eval(), but this doesn't work.
def body(i0, L, input1):
    N_valu = input1[:, i0]  # Here is the error...
    L = L + N_valu * tf.matmul(tf.transpose(L), L)
    i0 = tf.add(i0, 1)
    return i0, L, input1

condition = lambda i0, L, input1: i0 < 100

sess.run(tf.initialize_all_variables())
i00, L0, input10 = tf.while_loop(condition, body, [i0, L, input1])

N1 = np.array(range(0, 100)).reshape([1, 100])
feed_dict = {input1: N1, i0: 0}
Lprueba = L0.eval(feed_dict=feed_dict)
print(i00.eval(feed_dict=feed_dict))
Any help?
