Why does this TensorFlow snippet throw an error in feeding?

Code
#!/usr/bin/env python3
import tensorflow as tf
import numpy as np

def customOps(n):
    x = tf.placeholder(tf.float32)
    v1 = tf.reduce_sum(x, 1)
    v2 = tf.reduce_sum(x, 0)
    v = tf.nn.softmax(tf.concat([v1, v2], 0))
    index = np.argmax(v)
    if index > n/3:
        finalval = tf.norm(v1 - v2, ord='euclidean')
    else:
        finalval = tf.norm(v1 + v2, ord='euclidean')
    return finalval

if __name__ == '__main__':
    mat = np.asarray([[0, 1], [1, 0]], dtype=np.float32)
    n = mat.shape[0]
    finalVal = customOps(n)
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    outVal = sess.run(finalVal, feed_dict={x: mat})
    print(outVal)
    sess.close()
Error Thrown
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_5' with dtype float [[{{node Placeholder_5}} = Placeholder[dtype=DT_FLOAT, shape=<unknown>, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
The error is thrown at the sess.run(init) line in the snippet above. I am feeding a float-type array through feed_dict, and I am not sure why the error is being thrown.
Where is the error and why?

Why the error:
Because you ran the same snippet multiple times in an unclean graph (i.e., your graph contains multiple copies of the network).
The giveaway is the _5 at the end of the node name in the error message. TF assigns a default name to every tensor in the graph, appending an incremental index when the name is already taken. Placeholder_5 means that at least five Placeholder nodes with default names already existed in the graph when this one was created, which, given your code, should be impossible unless you called the function multiple times without cleaning up the graph.
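As a quick illustration of this naming scheme, a minimal TF 1.x sketch:

import tensorflow as tf

tf.reset_default_graph()
for _ in range(3):
    p = tf.placeholder(tf.float32)
    print(p.op.name)  # prints: Placeholder, Placeholder_1, Placeholder_2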
How to fix it:
Run in a clean graph: Put tf.reset_default_graph() before finalVal = customOps(n).
Note: Your code has more issues than that (for example, you use x in the main branch, but x is a local variable of customOps), but the cause of the error you see is the one stated above.
Below is a tested and working version of your code that addresses both issues.
import tensorflow as tf
import numpy as np

def customOps(n):
    x = tf.placeholder(tf.float32)
    v1 = tf.reduce_sum(x, 1)
    v2 = tf.reduce_sum(x, 0)
    v = tf.nn.softmax(tf.concat([v1, v2], 0))
    index = np.argmax(v)
    if index > n/3:
        finalval = tf.norm(v1 - v2, ord='euclidean')
    else:
        finalval = tf.norm(v1 + v2, ord='euclidean')
    return x, finalval

if __name__ == '__main__':
    mat = np.asarray([[0, 1], [1, 0]], dtype=np.float32)
    n = mat.shape[0]
    tf.reset_default_graph()
    x, finalVal = customOps(n)
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    outVal = sess.run(finalVal, feed_dict={x: mat})
    print(outVal)
    sess.close()

Related

Training while loop in Tensorflow

I've attempted converting a Python-side training loop to TensorFlow to (hypothetically) make the code run faster by not having to pass control back to the CPU constantly. However, I can't manage to use tf.while_loop.
Here's the code that works:
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from sklearn.datasets import load_iris
from sklearn.preprocessing import RobustScaler

x, y = load_iris(True)
x = RobustScaler().fit_transform(x)

shape = (10, 10)
max_epochs = 1000
graph = tf.Graph()
sess = tf.Session(graph=graph)
x = x.astype(np.float64)

# Construct graph
with graph.as_default():
    weights = tf.get_variable(
        'weights', shape, initializer=tf.constant_initializer, dtype=tf.float64
    )
    curr_epoch = tf.placeholder(dtype=tf.int64, shape=())

    with tf.name_scope('data'):
        data = tf.data.Dataset.from_tensor_slices(x)
        data = data.shuffle(buffer_size=10000)
        data = data.repeat(max_epochs)
        data = data.batch(1)
        data = data.make_one_shot_iterator().get_next()

    with tf.name_scope('update'):
        update_op = make_update_op(weights)

    init = tf.global_variables_initializer()

sess.run(init)

for i in tqdm(range(max_epochs)):
    for _ in range(x.shape[0]):
        sess.run(update_op, feed_dict={curr_epoch: i})

np_weights = sess.run(weights)
print(np_weights)  # Correctly prints an array of 150's.
Now, if I create an update function and pass it to tf.while_loop, an error is thrown.
def make_update_op(w):
    return w.assign(w + 0.001)

# In the code above:
update_op = tf.while_loop(lambda _: True, make_update_op, (weights,), maximum_iterations=x.shape[0])

# No inner loop:
for i in tqdm(range(max_epochs)):
    sess.run(update_op, feed_dict={curr_epoch: i})

Line 22, in make_update_op
    return w.assign(
AttributeError: 'Tensor' object has no attribute 'assign'
I don't quite understand what is happening even after reading the documentation. weights is a Variable, after all. What can be done to make the training loop work correctly?
The tensor to which you're trying to assign a new value inside the while loop is the result of a sequence of operations and tensors (an operation is a node in the graph, while a tensor is a directed edge). In particular, the while loop will produce:
Variable/Read --> while/Enter --> while/Merge --> while/Switch --> while/Identity
What you're trying to assign to here is the tensor while/Identity, not the Variable itself.
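A tiny sketch makes this visible; the exact printed name is what TF 1.x typically produces, so treat it as an assumption:

import tensorflow as tf

w = tf.Variable(0.0)

def body(t):
    # At graph-construction time, t is a plain Tensor (e.g. 'while/Identity:0'),
    # not the tf.Variable that was passed in, so t.assign(...) cannot work.
    print(type(t), t.name)
    return t + 1.0

out = tf.while_loop(lambda t: t < 3.0, body, (w,))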
tf.while_loop is usually used to iterate over the dimensions of a tensor (including None, the unknown dimension). Here you're trying to iterate over variables that are fully defined, so you don't need tf.while_loop at all. Just create operations that update each variable and group these operations together:
update_ops = [w.assign(w + 0.001) for w in weights]
update_op = tf.group(update_ops)
Now, when you execute update_op through the tf.Session() interface, it will update all variables.
Example:
import tensorflow as tf

v1 = tf.Variable(tf.ones((1, 2), dtype=tf.float32))
v2 = tf.Variable(2*tf.ones((1, 3), dtype=tf.float32))

update_ops = [w.assign(w + 0.001) for w in [v1, v2]]
update_op = tf.group(update_ops)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('before update:')
    print(v1.eval(), v2.eval())
    print('after update:')
    sess.run(update_op)  # <-- update your variables
    print(v1.eval(), v2.eval())

# before update:
# [[1. 1.]] [[2. 2. 2.]]
# after update:
# [[1.001 1.001]] [[2.001 2.001 2.001]]
Turns out, all that was missing was the fact that one cannot assign to a variable inside the loop, as Vlad pointed out. Instead, one can return the new value of the variable.
def make_update_op(w):
    return w + 0.001

new_w = tf.while_loop(lambda _: True, make_update_op, (weights,), maximum_iterations=x.shape[0])
update_op = weights.assign(new_w)
To use more variables, one would need to return the same number of values from the function and unpack them in Python, but the principle is the same (see the self-contained sketch after the snippet below).
def make_update_op(w, d):
    return w + 0.001, d

new_w, _ = tf.while_loop(lambda *_: True, make_update_op, (weights, data), maximum_iterations=x.shape[0])
update_op = weights.assign(new_w)
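A self-contained version of that pattern might look like the following sketch; weights, weights2, and the iteration count here are hypothetical stand-ins, not values from the original post:

import tensorflow as tf

# Hypothetical variables for illustration only.
weights = tf.Variable(tf.zeros((10, 10), dtype=tf.float64))
weights2 = tf.Variable(tf.ones((5,), dtype=tf.float64))

def make_update_op(w1, w2):
    return w1 + 0.001, w2 + 0.002

new_w1, new_w2 = tf.while_loop(lambda *_: True, make_update_op,
                               (weights, weights2), maximum_iterations=150)
update_op = tf.group(weights.assign(new_w1), weights2.assign(new_w2))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(update_op)
    print(sess.run(weights)[0, 0], sess.run(weights2)[0])  # ~0.15 and ~1.3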

Why is this TensorFlow tutorial code not working?

I'm trying an LSTM tutorial from someone's book, but it doesn't work. What's the problem?
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
import pprint

pp = pprint.PrettyPrinter(indent=4)
sess = tf.InteractiveSession()

a = [1, 0, 0, 0]
b = [0, 1, 0, 0]
c = [0, 0, 1, 0]
d = [0, 0, 0, 1]

init = tf.global_variables_initializer()

with tf.variable_scope('one_cell') as scope:
    hidden_size = 2
    cell = tf.contrib.rnn.BasicRNNCell(num_units=hidden_size)
    print(cell.output_size, cell.state_size)

    x_data = np.array([[a]], dtype=np.float32)
    pp.pprint(x_data)

    outputs, _states = tf.nn.dynamic_rnn(cell, x_data, dtype=tf.float32)
    sess.run(init)
    pp.pprint(outputs.eval())
The error message is below. Please help me solve this problem.
Attempting to use uninitialized value one_cell/rnn/basic_rnn_cell/weights
[[Node: one_cell/rnn/basic_rnn_cell/weights/read = Identity[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"](one_cell/rnn/basic_rnn_cell/weights)]]
You haven't initialized some graph variables, as the error says. Reorder your code like this and it will work:
outputs, _states = tf.nn.dynamic_rnn(cell, x_data, dtype=tf.float32)
init = tf.global_variables_initializer()
sess.run(init)
Best practice is to create init right at the end of graph construction, just before sess.run(init).
EDIT: Refer to What does tf.global_variables_initializer() do under the hood? for more insights.
You define the operation init before creating your variables. This operation is therefore performed only on the variables that exist at that point, even if you run it after creating the others.
So just move the definition of init and you will be fine.
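A minimal sketch of this ordering pitfall, assuming TF 1.x:

import tensorflow as tf

tf.reset_default_graph()
init = tf.global_variables_initializer()  # captures no variables yet
v = tf.Variable(1.0)                      # created AFTER init was defined

with tf.Session() as sess:
    sess.run(init)
    # sess.run(v) here would raise FailedPreconditionError: v was never initialized.
    sess.run(tf.global_variables_initializer())  # re-created after v exists
    print(sess.run(v))  # 1.0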

Get value in placeholder array in TensorFlow

I have an error with while_loop when I try to read a value from a placeholder.
The needed packages:
import tensorflow as tf
import numpy as np

# Start a TensorFlow InteractiveSession
sess = tf.InteractiveSession()

# Define the input variables of the problem
input1 = tf.placeholder(tf.float32, shape=[None, 100], name='input1')

# Define the variable to train
L = tf.Variable(np.identity(100, dtype='float32'), trainable=False)
i0 = tf.placeholder(tf.int32)
We use a function to define the body and a lambda for the condition; that part is fine. But I have a problem because I want to get the values of input1 at some position. I tried using eval, i0.eval(), but this doesn't work.
def body(i0, L, input1):
    N_valu = input1[:, i0]  # Here is the error...
    L = L + N_valu*tf.matmul(tf.transpose(L), L)
    i0 = tf.add(i0, 1)
    return i0, L, input1

condition = lambda i0, L, input1: i0 < 100

sess.run(tf.initialize_all_variables())

i00, L0, input10 = tf.while_loop(condition, body, [i0, L, input1])

N1 = np.array(range(0, 100)).reshape([1, 100])
feed_dict = {input1: N1, i0: 0}

Lprueba = L0.eval(feed_dict=feed_dict)
print(i00.eval(feed_dict=feed_dict))
Any help?
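For reference, a minimal sketch of one way to read a placeholder column by a tensor index inside the loop body, assuming a TF 1.x release where tf.gather accepts an axis argument (1.3 or later):

import tensorflow as tf
import numpy as np

input1 = tf.placeholder(tf.float32, shape=[None, 100], name='input1')

def body(i, acc):
    col = tf.gather(input1, i, axis=1)  # column i of input1, shape [None]
    return i + 1, acc + tf.reduce_sum(col)

_, total = tf.while_loop(lambda i, _: i < 100, body,
                         [tf.constant(0), tf.constant(0.0)])

with tf.Session() as sess:
    N1 = np.arange(100, dtype=np.float32).reshape([1, 100])
    print(sess.run(total, feed_dict={input1: N1}))  # 4950.0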

TensorFlow: "Attempting to use uninitialized value" in variable initialization

I am trying to implement multivariate linear regression in Python using TensorFlow, but have run into some logical and implementation issues. My code throws the following error:
Attempting to use uninitialized value Variable
Caused by op u'Variable/read'
Ideally the weights output should be [2, 3]
def hypothesis_function(input_2d_matrix_trainingexamples,
                        output_matrix_of_trainingexamples,
                        initial_parameters_of_hypothesis_function,
                        learning_rate, num_steps):
    # calculate num attributes and num examples
    number_of_attributes = len(input_2d_matrix_trainingexamples[0])
    number_of_trainingexamples = len(input_2d_matrix_trainingexamples)

    # Graph inputs
    x = []
    for i in range(0, number_of_attributes, 1):
        x.append(tf.placeholder("float"))
    y_input = tf.placeholder("float")

    # Create model and set model weights
    parameters = []
    for i in range(0, number_of_attributes, 1):
        parameters.append(
            tf.Variable(initial_parameters_of_hypothesis_function[i]))

    # Construct linear model
    y = tf.Variable(parameters[0], "float")
    for i in range(1, number_of_attributes, 1):
        y = tf.add(y, tf.multiply(x[i], parameters[i]))

    # Minimize the mean squared errors
    loss = tf.reduce_mean(tf.square(y - y_input))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train = optimizer.minimize(loss)

    # Initialize the variables
    init = tf.initialize_all_variables()

    # launch the graph
    session = tf.Session()
    session.run(init)
    for step in range(1, num_steps + 1, 1):
        for i in range(0, number_of_trainingexamples, 1):
            feed = {}
            for j in range(0, number_of_attributes, 1):
                array = [input_2d_matrix_trainingexamples[i][j]]
                feed[j] = array
            array1 = [output_matrix_of_trainingexamples[i]]
            feed[number_of_attributes] = array1
            session.run(train, feed_dict=feed)

    for i in range(0, number_of_attributes - 1, 1):
        print(session.run(parameters[i]))

array = [[0.0, 1.0, 2.0], [0.0, 2.0, 3.0], [0.0, 4.0, 5.0]]
hypothesis_function(array, [8.0, 13.0, 23.0], [1.0, 1.0, 1.0], 0.01, 200)
Run this:
init = tf.global_variables_initializer()
sess.run(init)
Or (depending on the version of TF that you have):
init = tf.initialize_all_variables()
sess.run(init)
It's not 100% clear from the code example, but if the list initial_parameters_of_hypothesis_function is a list of tf.Variable objects, then the line session.run(init) will fail because TensorFlow isn't (yet) smart enough to figure out the dependencies in variable initialization. To work around this, you should change the loop that creates parameters to use initial_parameters_of_hypothesis_function[i].initialized_value(), which adds the necessary dependency:
parameters = []
for i in range(0, number_of_attributes, 1):
    parameters.append(tf.Variable(
        initial_parameters_of_hypothesis_function[i].initialized_value()))
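A minimal, self-contained sketch of the initialized_value() dependency trick, assuming TF 1.x:

import tensorflow as tf

a = tf.Variable(2.0)
b = tf.Variable(a.initialized_value() * 3.0)  # b's initializer now depends on a's initial value

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(b))  # 6.0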
There is another variant of the error, related to the order in which global variables are initialized. I had a code sample with a similar error, FailedPreconditionError (see above for traceback): Attempting to use uninitialized value W:
def linear(X, n_input, n_output, activation=None):
    W = tf.Variable(tf.random_normal([n_input, n_output], stddev=0.1), name='W')
    b = tf.Variable(tf.constant(0, dtype=tf.float32, shape=[n_output]), name='b')
    if activation != None:
        h = tf.nn.tanh(tf.add(tf.matmul(X, W), b), name='h')
    else:
        h = tf.add(tf.matmul(X, W), b, name='h')
    return h

from tensorflow.python.framework import ops
ops.reset_default_graph()
g = tf.get_default_graph()
print([op.name for op in g.get_operations()])

with tf.Session() as sess:
    # RUN INIT
    sess.run(tf.global_variables_initializer())
    # But W is not in the graph yet, so the initializer does not know about it.
    # EVAL then raises the error.
    print(linear(np.array([[1.0, 2.0, 3.0]]).astype(np.float32), 3, 3).eval())
You should change it to the following:
from tensorflow.python.framework import ops
ops.reset_default_graph()
g = tf.get_default_graph()
print([op.name for op in g.get_operations()])

with tf.Session() as sess:
    # NOT RUNNING YET, JUST BUILDING THE GRAPH
    l = linear(np.array([[1.0, 2.0, 3.0]]).astype(np.float32), 3, 3)
    # RUN INIT
    sess.run(tf.global_variables_initializer())
    print([op.name for op in g.get_operations()])
    # ONLY EVAL AFTER INIT
    print(l.eval(session=sess))
Normally there are two ways of initializing variables: 1) using sess.run(tf.global_variables_initializer()), as the previous answers noted; or 2) loading the graph from a checkpoint.
You can do like this:
sess = tf.Session(config=config)
saver = tf.train.Saver(max_to_keep=3)
try:
saver.restore(sess, tf.train.latest_checkpoint(FLAGS.model_dir))
# start from the latest checkpoint, the sess will be initialized
# by the variables in the latest checkpoint
except ValueError:
# train from scratch
init = tf.global_variables_initializer()
sess.run(init)
A third method is to use tf.train.Supervisor, whose session will:
Create a session on 'master', recovering or initializing the model as needed, or wait for a session to be ready.
sv = tf.train.Supervisor([parameters])
sess = sv.prepare_or_wait_for_session()
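As an aside, in later TF 1.x releases tf.train.MonitoredTrainingSession covers the same recover-or-initialize pattern and is the recommended replacement for Supervisor. A minimal sketch, with a hypothetical checkpoint directory:

import tensorflow as tf

v = tf.Variable(0.0)

# '/tmp/ckpt_demo' is a placeholder path for this example.
with tf.train.MonitoredTrainingSession(checkpoint_dir='/tmp/ckpt_demo') as sess:
    print(sess.run(v))  # variables are initialized or restored automatically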
I want to share my resolution: it works when I replace the line session = tf.Session() with sess = tf.InteractiveSession(). Hope this will be useful to others.
Run both:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
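The local initializer matters when the graph contains local variables, which ops such as tf.metrics.* create under the hood. A minimal sketch, assuming TF 1.x:

import tensorflow as tf

labels = tf.constant([1, 0, 1])
preds = tf.constant([1, 1, 1])
acc, acc_update = tf.metrics.accuracy(labels, preds)  # creates *local* total/count variables

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())  # without this, acc_update fails
    sess.run(acc_update)
    print(sess.run(acc))  # 0.6666667 (2 of 3 predictions correct)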

Tensorflow slicing based on variable

I've found that indexing still is an open issue in tensorflow (#206), so I'm wondering what I could use as a workaround at the moment. I want to index/slice a row/column of a matrix based on a variable that changes for every training example.
What I've tried so far:
Slicing based on placeholder (doesn't work)
The following (working) code slices based on a fixed number.
import tensorflow as tf
import numpy as np
x = tf.placeholder("float")
y = tf.slice(x,[0],[1])
#initialize
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
#run
result = sess.run(y, feed_dict={x:[1,2,3,4,5]})
print(result)
However, it seems that I can't simply replace one of these fixed numbers with a tf.placeholder. The following code gives me the error "TypeError: List of Tensors when single Tensor expected."
import tensorflow as tf
import numpy as np
x = tf.placeholder("float")
i = tf.placeholder("int32")
y = tf.slice(x,[i],[1])
#initialize
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
#run
result = sess.run(y, feed_dict={x:[1,2,3,4,5],i:0})
print(result)
This sounds like the brackets around [i] are one pair too many, but removing them doesn't help either. How can I use a placeholder/variable as an index?
Slicing based on a Python variable (doesn't backprop/update properly)
I've also tried using a normal Python variable as the index. This does not lead to an error, but the network doesn't learn anything while training. I suppose that because the changing variable is not properly registered, the graph is malformed and updates don't work?
Slicing via one-hot vector + multiplication (works, but is slow)
One workaround I found is using a one-hot vector: make a one-hot vector in numpy, pass it in using a placeholder, then do the slicing via matrix multiplication. This works, but is quite slow (see the sketch below for the idea).
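A minimal sketch of that one-hot workaround, reconstructed from the description above rather than taken from the original code:

import tensorflow as tf
import numpy as np

x = tf.placeholder(tf.float32, shape=[5])
onehot = tf.placeholder(tf.float32, shape=[5])
y = tf.reduce_sum(x * onehot)  # selects the element where onehot == 1

with tf.Session() as sess:
    mask = np.zeros(5, dtype=np.float32)
    mask[2] = 1.0  # select index 2
    print(sess.run(y, feed_dict={x: [1, 2, 3, 4, 5], onehot: mask}))  # 3.0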
Any ideas how to efficiently slice/index based on a variable?
Slicing based on a placeholder should work just fine. It looks like you are running into a type error, due to some subtle issues of shapes and types. Where you have the following:
x = tf.placeholder("float")
i = tf.placeholder("int32")
y = tf.slice(x,[i],[1])
...you should instead have:
x = tf.placeholder("float")
i = tf.placeholder("int32")
y = tf.slice(x,i,[1])
...and then you should feed i as [0] in the call to sess.run().
To make this a little clearer, I would recommend rewriting the code as follows:
import tensorflow as tf
import numpy as np
x = tf.placeholder(tf.float32, shape=[None]) # 1-D tensor
i = tf.placeholder(tf.int32, shape=[1])
y = tf.slice(x, i, [1])
#initialize
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
#run
result = sess.run(y, feed_dict={x: [1, 2, 3, 4, 5], i: [0]})
print(result)
The additional shape arguments to the tf.placeholder op help to ensure that the values you feed have the appropriate shapes, and also that TensorFlow will raise an error if the shapes are not correct.
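As a related aside, tf.gather also selects elements by a tensor index and generalizes to several indices at once; a minimal sketch, assuming TF 1.x:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None])
i = tf.placeholder(tf.int32, shape=[])  # a scalar index this time
y = tf.gather(x, i)

with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: [1, 2, 3, 4, 5], i: 0}))  # 1.0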
If you have an extra dimension, this works.
import tensorflow as tf
import numpy as np

def reorder0(e, i, length):
    '''
    e: a two dimensional tensor
    i: a one dimensional int32 tensor, of shape (e.shape[0])
    returns: a tensor of the same shape as e, where the jth entry is entry i[j] from e
    '''
    return tf.concat(
        [tf.expand_dims(e[i[j], :], axis=0) for j in range(length)],
        axis=0
    )

e = tf.placeholder(tf.float32, shape=(2, 3, 5), name='e')  # sentences, words, embedding
i = tf.placeholder(tf.int32, shape=(2, 3), name='i')       # for each word, index of parent

p = tf.concat(
    [tf.expand_dims(reorder0(e[k, :, :], i[k, :], 3), axis=0) for k in range(2)],
    axis=0,
    name='p'
)

# initialize
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)

# run
result = sess.run(p, feed_dict={
    e: [
        ((1.0, 1.1, 1.2, 1.3, 1.4), (2.0, 2.1, 2.2, 2.3, 2.4), (3.0, 3.1, 3.2, 3.3, 3.4)),
        ((21.0, 21.1, 21.2, 21.3, 21.4), (22.0, 22.1, 22.2, 22.3, 22.4), (23.0, 23.1, 23.2, 23.3, 23.4)),
    ],
    i: [(1, 1, 1), (2, 0, 2)]
})
print(result)
If the sizes are not known when building the model, use TensorArray.
e = tf.placeholder(tf.float32, shape=(3, 5))  # words, embedding
i = tf.placeholder(tf.int32, shape=(3))       # for each word, index of parent
# p = reorder0(e, i, 3)

a = tf.TensorArray(
    tf.float32,
    size=e.get_shape()[0],
    dynamic_size=True,
    infer_shape=True,
    element_shape=e.get_shape()[1],
    clear_after_read=False
)

# initialize
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)

# run
result = sess.run(
    a.unstack(e).gather(i),
    feed_dict={
        e: ((1.0, 1.1, 1.2, 1.3, 1.4), (2.0, 2.1, 2.2, 2.3, 2.4), (3.0, 3.1, 3.2, 3.3, 3.4)),
        # ((21.0, 21.1, 21.2, 21.3, 21.4), (22.0, 22.1, 22.2, 22.3, 22.4), (23.0, 23.1, 23.2, 23.3, 23.4)),
        i: (2, 0, 2)
    }
)
print(result)
