TensorFlow input variable error - Python

I am writing TensorFlow code and get an error when I try to run it with variables.
The base code is:
import tensorflow as tf
import numpy as np

graph = tf.Graph()
with graph.as_default():
    with tf.name_scope("variables"):
        # keep track of how many times the model has been run
        global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="global_step")
        # keep track of the sum of all outputs over time
        total_output = tf.Variable(0, dtype=tf.float32, trainable=False, name="total_output")
    with tf.name_scope("transformation"):
        # separate input layer
        with tf.name_scope("input"):
            # create input placeholder which takes in a vector
            a = tf.placeholder(tf.float32, shape=[None], name="input_placeholder_A")
        # separate the middle layer
        with tf.name_scope("middle"):
            b = tf.reduce_prod(a, name="product_b")
            c = tf.reduce_sum(a, name="sum_c")
        # separate the output layer
        with tf.name_scope("output"):
            output = tf.add(b, c, name="output")
    # separate the update layer and store the variables
    with tf.name_scope("update"):
        update_total = total_output.assign(output)
        increment_step = global_step.assign_add(1)
    # now create name scope summaries and store these in the summary
    with tf.name_scope("summaries"):
        avg = tf.divide(update_total, tf.cast(increment_step, tf.float32), name="average")
        # create summary for output node
        tf.summary.scalar("output_summary", output)
        tf.summary.scalar("total_summary", update_total)
        tf.summary.scalar("average_summary", avg)
    with tf.name_scope("global_ops"):
        init = tf.initialize_all_variables()
        merged_summaries = tf.summary.merge_all()

sess = tf.Session(graph=graph)
writer = tf.summary.FileWriter('./improved_graph', graph)
sess.run(init)

def run_graph(input_tensor):
    feed_dict = {a: input_tensor}
    _, step, summary = sess.run([output, increment_step, merged_summaries],
                                feed_dict=feed_dict)
    writer.add_summary(summary, global_step=step)
When I try to run the above code with
run_graph([2, 8])
I get the error:
InvalidArgumentError Traceback (most recent call last)
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'transformation_2/input/input_placeholder_A' with dtype float and shape [?]
[[Node: transformation_2/input/input_placeholder_A = Placeholder[dtype=DT_FLOAT, shape=[?], _device="/job:localhost/replica:0/task:0/device:CPU:0"]]]
I do not understand what I am doing wrong here, since the code has been corrected for the version of TensorFlow installed.

Your placeholder a is defined as being of type float32, but [2, 8] contains int values.
run_graph([2., 8.]) or run_graph(np.array([2, 8], dtype=np.float32)) should work.
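If you would rather not change every call site, a minimal sketch (assuming a, sess, writer, output, increment_step, and merged_summaries from the question) is to coerce the input inside the helper:

import numpy as np

def run_graph(input_tensor):
    # coerce whatever the caller passes to float32, so the feed
    # always matches the placeholder's dtype
    feed_dict = {a: np.asarray(input_tensor, dtype=np.float32)}
    _, step, summary = sess.run([output, increment_step, merged_summaries],
                                feed_dict=feed_dict)
    writer.add_summary(summary, global_step=step)

run_graph([2, 8])  # int inputs are now converted before being fed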

Related

How to assign new values to a tensorflow constant?

I am loading a TensorFlow model from a .pb file. I want to change the weights of all the layers. I am able to extract the weights, but I am not able to change them.
I converted the graph_def model to a TensorFlow graph, but even then I cannot assign new values to the weights, as the weights are stored in tensors of type "Const".
b = graph_tf.get_tensor_by_name("Variable_1:0")
tf.assign(b, np.ones((1,1,64,64)))
I am getting the following error:
AttributeError: 'Tensor' object has no attribute 'assign'
Please provide a way to resolve this issue. Thanks in advance.
Here is one way you can achieve something like that. You want to replace some constant operations with variables initialized to the value of those operations, so you can first extract the constant values and then create a graph where the variables are initialized with them. See the example below.
import tensorflow as tf

# Example graph
with tf.Graph().as_default():
    inp = tf.placeholder(tf.float32, [None, 3], name='Input')
    w = tf.constant([[1.], [2.], [3.]], tf.float32, name='W')
    out = tf.squeeze(inp @ w, 1, name='Output')
    gd = tf.get_default_graph().as_graph_def()

# Extract weight values
with tf.Graph().as_default():
    w, = tf.graph_util.import_graph_def(gd, return_elements=['W:0'])
    # Get the constant weight values
    with tf.Session() as sess:
        w_val = sess.run(w)
    # Alternatively, since it is a constant,
    # you can get the values from the operation attribute directly
    w_val = tf.make_ndarray(w.op.get_attr('value'))

# Make new graph
with tf.Graph().as_default():
    # Make variables initialized with stored values
    w = tf.Variable(w_val, name='W')
    init_op = tf.global_variables_initializer()
    # Import graph
    inp, out = tf.graph_util.import_graph_def(
        gd, input_map={'W:0': w},
        return_elements=['Input:0', 'Output:0'])
    # Change value operation
    w_upd = w[2].assign([5.])
    # Test
    with tf.Session() as sess:
        sess.run(init_op)
        print(sess.run(w))
        # [[1.]
        #  [2.]
        #  [3.]]
        sess.run(w_upd)
        print(sess.run(w))
        # [[1.]
        #  [2.]
        #  [5.]]
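The key piece here is the input_map argument to import_graph_def: it rewires every reference to the constant W:0 in the imported graph def to the new variable, so the rest of the graph is reused unchanged while the weights become assignable.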

TensorFlow Error: No gradients provided for any variable, check your graph for ops that do not support gradients

I am trying to use a derived class of TensorFlow's FIFOQueue. I override the enqueue function: it takes in the images and enqueues the output of the final dense layer in the queue.
I then dequeue the output tensor and try to calculate the cost function and minimize it using the Adam optimizer.
When I calculate the cost and minimize it inside the enqueue function itself, my code works fine. But as soon as I move the loss_op (i.e. my cost) outside the derived class, I get the error: "No gradients provided for any variable, check your graph for ops that do not support gradients"
Import
from tensorflow.python.ops.data_flow_ops import FIFOQueue
import tensorflow as tf
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_data_flow_ops
Read the data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
Y = mnist.train.labels
X = mnist.train.images
Derived Queue
class MyQueue(FIFOQueue):
    def enqueue(self, x, Y, name=None):
        # Reshape
        x = tf.reshape(x, shape=[-1, 28, 28, 1])
        # 1st conv_2d layer
        conv1_mp = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu, name='Q1_c1')
        # 1st max pool layer
        conv1 = tf.layers.max_pooling2d(conv1_mp, 2, 2, name='Q1_mp1')
        # 2nd conv_2d layer
        conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu, name='Q1_c2')
        # 2nd max pool layer
        conv2_mp = tf.layers.max_pooling2d(conv2, 2, 2, name='Q1_mp2')
        # Flatten
        flat = tf.contrib.layers.flatten(conv2_mp)
        # Dense 1
        dense_1 = tf.layers.dense(tf.reshape(flat, [-1, 1600]), 1024, name='Q2_D1')
        # Dropout = 0.8
        drop = tf.layers.dropout(dense_1, rate=0.8, training=True, name='Q2_Dp')
        # Output classes = 10
        out = tf.layers.dense(drop, n_classes, name='Q2_Op')
        # update vals to put "out" in the queue
        vals = out
        # Rest of the enqueue operation, which has not been changed
        with ops.name_scope(name, "%s_enqueue" % self._name,
                            self._scope_vals(vals)) as scope:
            vals = self._check_enqueue_dtypes(vals)
            # NOTE(mrry): Not using a shape function because
            # we need access to the `QueueBase` object.
            for val, shape in zip(vals, self._shapes):
                val.get_shape().assert_is_compatible_with(shape)
            if self._queue_ref.dtype == _dtypes.resource:
                return gen_data_flow_ops.queue_enqueue_v2(
                    self._queue_ref, vals, name=scope)
            else:
                return gen_data_flow_ops.queue_enqueue(
                    self._queue_ref, vals, name=scope)
Main
q_pred = MyQueue(capacity=1, dtypes=tf.float32)
enqueue_op = q_pred.enqueue(X, Y)
data_pred = q_pred.dequeue()
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    sess.run(enqueue_op)
    out = data_pred
    # Calculating cost
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        logits=out, labels=Y), name='Q2_loss')
    # Adam optimiser
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
    # Write in the graph
    writer = tf.summary.FileWriter("logs\MyDerivedQueue", sess.graph)
    ####### ERROR LINE ###################
    # Minimising the cost.
    train_op = optimizer.minimize(cost)
    correct_pred = tf.equal(tf.argmax(out, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
After a lot of trial and error, I came to the conclusion that this won't work, because backpropagation isn't under our control here. When using multiple GPUs, each GPU produces its own feed-forward output, and during backpropagation there is no way to know which weights/parameters should be updated.
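For comparison, here is a minimal sketch of the more usual structure, where the network, cost, and optimizer all live in the same differentiable path and a queue (if any) only carries input data rather than logits. The helper name build_network and the placeholders x_in/y_in are assumptions for illustration, not from the original post:

import tensorflow as tf

def build_network(x, n_classes=10):
    # hypothetical helper: the same layers as in MyQueue.enqueue, but
    # built directly in the training graph so gradients can reach them
    x = tf.reshape(x, [-1, 28, 28, 1])
    c1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
    p1 = tf.layers.max_pooling2d(c1, 2, 2)
    c2 = tf.layers.conv2d(p1, 64, 3, activation=tf.nn.relu)
    p2 = tf.layers.max_pooling2d(c2, 2, 2)
    flat = tf.reshape(p2, [-1, 1600])
    d1 = tf.layers.dense(flat, 1024)
    drop = tf.layers.dropout(d1, rate=0.8, training=True)
    return tf.layers.dense(drop, n_classes)

x_in = tf.placeholder(tf.float32, [None, 784])
y_in = tf.placeholder(tf.float32, [None, 10])
logits = build_network(x_in)
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_in, logits=logits))
# minimize can now find a gradient path from cost back to the variables
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)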

Pretrained TensorFlow model invalid argument error

I'm doing my project using TensorFlow with the pre-trained mobilenet_v2 model, which can be found at https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md
I wanted to get the hidden layer values, so I implemented the source code below and got an InvalidArgumentError.
import os

import numpy as np
import tensorflow as tf
from PIL import Image

if __name__ == '__main__':
    im = Image.open('./sample/maltiz.png')
    im3 = im.resize((300, 300))
    image = np.asarray(im)[:, :, :3]

    model_path = 'models/ssd_mobilenet_v2_coco_2018_03_29/'
    meta_path = os.path.join(model_path, 'model.ckpt.meta')
    model = tf.train.import_meta_graph(meta_path)
    sess = tf.Session()
    model.restore(sess, tf.train.latest_checkpoint(model_path))

    data = np.array([image])
    data = data.astype(np.uint8)
    X = tf.placeholder(tf.uint8, shape=[None, None, None, 3])

    graph = tf.get_default_graph()
    for i in graph.get_operations():
        if "Relu" in i.name:
            print(sess.run(i.values(), feed_dict={X: data}))
I got this error message
File "load_model.py", line 42, in <module>
print(sess.run(i.values(), feed_dict = { X : data}))
InvalidArgumentError: You must feed a value for placeholder tensor 'image_tensor' with dtype uint8 and shape [?,?,?,3]
[[node image_tensor (defined at load_model.py:24) ]]
I printed out the placeholder and the shape of the data.
The placeholder is uint8-typed with shape [?,?,?,3],
and the image has shape [1,300,300,3].
I don't know what the problem is. It looks like a perfect match with the type in the error message.
Please let me know what the problem is.
When you load the predefined graph and restore it from the latest checkpoint, the graph is already defined. But when you do
X = tf.placeholder(tf.uint8, shape=[None, None, None, 3])
you are creating an extra node in the graph. This node has nothing to do with the nodes you want to evaluate: the nodes from graph.get_operations() depend not on this extra node but on a different one, and since that other node never gets fed any values, the error says invalid argument.
The correct way is to get, from the predefined graph, the tensor that the nodes to be evaluated depend upon.
import os

import numpy as np
import tensorflow as tf
from PIL import Image

im = Image.open('./sample/maltiz.png')
im3 = im.resize((300, 300))
image = np.asarray(im)[:, :, :3]

model_path = 'models/ssd_mobilenet_v2_coco_2018_03_29/'
meta_path = os.path.join(model_path, 'model.ckpt.meta')
model = tf.train.import_meta_graph(meta_path)
sess = tf.Session()
model.restore(sess, tf.train.latest_checkpoint(model_path))

data = np.array([image])
data = data.astype(np.uint8)

graph = tf.get_default_graph()
# fetch the placeholder that the graph's nodes actually depend on
X = graph.get_tensor_by_name('image_tensor:0')

for i in graph.get_operations():
    if "Relu" in i.name:
        print(sess.run(i.values(), feed_dict={X: data}))
P.S.: I tried the above approach myself, but there is a TensorFlow (version 1.13.1) internal bug which stopped me from evaluating all of the nodes that have "Relu" in their names. Still, some nodes can be evaluated this way.

Input to tensorflow in_top_k should be rank 1 or rank 2?

I am experimenting with the in_top_k function to see what exactly it does, but I found some really confusing behavior.
First I coded as follows:
import numpy as np
import tensorflow as tf

target = tf.constant(np.random.randint(2, size=30).reshape(30, -1),
                     dtype=tf.int32, name="target")
pred = tf.constant(np.random.rand(30, 1), dtype=tf.float32, name="pred")
result = tf.nn.in_top_k(pred, target, 1)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    targetVal = target.eval()
    predVal = pred.eval()
    resultVal = result.eval()
Then it generates the following error:
ValueError: Shape must be rank 1 but is rank 2 for 'in_top_k/InTopKV2' (op: 'InTopKV2') with input shapes: [30,1], [30,1], [].
Then I changed my code to
import numpy as np
import tensorflow as tf

target = tf.constant(np.random.randint(2, size=30), dtype=tf.int32, name="target")
pred = tf.constant(np.random.rand(30, 1).reshape(-1), dtype=tf.float32, name="pred")
result = tf.nn.in_top_k(pred, target, 1)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    targetVal = target.eval()
    predVal = pred.eval()
    resultVal = result.eval()
But now the error becomes
ValueError: Shape must be rank 2 but is rank 1 for 'in_top_k/InTopKV2' (op: 'InTopKV2') with input shapes: [30], [30], [].
So should the input be rank 1 or rank 2?
For in_top_k, the targets need to be rank 1 (class indices) and the predictions rank 2 (scores for each class). This can be seen easily from the docs.
This means that the two error messages are actually complaining about different inputs each time (the targets the first time and the predictions the second time), which, funnily enough, isn't mentioned in the messages at all... Either way, the following snippet should be more like it:
import numpy as np
import tensorflow as tf

target = tf.constant(np.random.randint(2, size=30), dtype=tf.int32, name="target")
pred = tf.constant(np.random.rand(30, 1), dtype=tf.float32, name="pred")
result = tf.nn.in_top_k(pred, target, 1)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    targetVal = target.eval()
    predVal = pred.eval()
    resultVal = result.eval()
Here, we basically combine the "best of both snippets": predictions from the first one and targets from the second one. However, the way I understand the docs, even for binary classification we need two values for the predictions, one for each class. So, something like:
import numpy as np
import tensorflow as tf

target = tf.constant(np.random.randint(2, size=30), dtype=tf.int32, name="target")
pred = tf.constant(np.random.rand(30, 1), dtype=tf.float32, name="pred")
pred = tf.concat((1 - pred, pred), axis=1)
result = tf.nn.in_top_k(pred, target, 1)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    targetVal = target.eval()
    predVal = pred.eval()
    resultVal = result.eval()
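In both working versions, result is a boolean vector with one entry per example (here, shape [30]) that is True wherever the target class is among the top k predictions for that row.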

TensorFlow cost function placeholder error

I have the following code trying to optimize a linear model with two inputs and three parameters (m_1, m_2 and b). Initially, I had issues importing the data in a form that the feed_dict would accept, which I solved by putting it in a numpy array instead.
Now the optimizer function runs smoothly (and the outputs look roughly like it is optimizing the parameters), but as soon as I try to return the cost with the line at the end:
cost_val = sess.run(cost)
It returns the following error:
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_2' with dtype float and shape [?,1]
[[Node: Placeholder_2 = Placeholder[dtype=DT_FLOAT, shape=[?,1], _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
If I comment out that line alone, everything runs smoothly.
I tried changing the cost function from the more complicated one I was using to something simpler, but the error persists. I know this is probably related to the shape of the input data, but I can't figure out why the data works for the optimizer but not for the cost function.
import numpy as np
import tensorflow as tf

# reading in data
filename = tf.train.string_input_producer(["file.csv"])
reader = tf.TextLineReader(skip_header_lines=1)
key, value = reader.read(filename)
rec_def = [[1], [1], [1]]
input_1, input_2, col3 = tf.decode_csv(value, record_defaults=rec_def)

# parameters
learning_rate = 0.001
training_steps = 300

x = tf.placeholder(tf.float32, [None, 1])
x2 = tf.placeholder(tf.float32, [None, 1])
m = tf.Variable(tf.zeros([1, 1]))
m2 = tf.Variable(tf.zeros([1, 1]))
b = tf.Variable(tf.zeros([1]))
y_ = tf.placeholder(tf.float32, [None, 1])
y = tf.matmul(x, m) + tf.matmul(x2, m2) + b

# cost function
# cost = tf.reduce_mean(tf.log(1 + tf.exp(-y_ * y)))
cost = tf.reduce_sum(tf.pow((y_ - y), 2))

# Gradient descent optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# initializing variables
init = tf.global_variables_initializer()

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    sess.run(init)
    for i in range(training_steps):
        xs = np.array([[sess.run(input_1)]])
        ys = np.array([[sess.run(input_2)]])
        label = np.array([[sess.run(col3)]])
        feed = {x: xs, x2: ys, y_: label}
        sess.run(optimizer, feed_dict=feed)
    cost_val = sess.run(cost)
    coord.request_stop()
    coord.join(threads)
The cost tensor is a function of the placeholder tensors, and computing it requires them to have values. Since the call to sess.run(cost) isn't feeding those placeholders, you're seeing the error. (Put another way: what values of x and y_ do you want to compute the cost for?)
So you want to change the line:
cost_val = sess.run(cost)
to:
cost_val = sess.run(cost, feed_dict=feed)
Hope that helps.
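A minimal sketch (reusing the feed dict from the training loop above): evaluate the training op and the cost in a single call, so both see the same fed values and you get the cost at every step:

# inside the training loop, run both ops on the same fed batch
_, cost_val = sess.run([optimizer, cost], feed_dict=feed)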
