How is the gradient passed through tf.py_func? - python

This is a Faster R-CNN implementation in TensorFlow. The proposal_layer is implemented in Python, and I am curious whether the gradient can pass through tf.py_func.
The weights and biases keep changing during training, so I assumed the gradient is delivered back successfully.
Then I ran a small test:
import tensorflow as tf
import numpy as np

def addone(x):
    # print type(x)
    return x + 1

def pyfunc_test():
    # create data
    x_data = tf.placeholder(dtype=tf.float32, shape=[None])
    y_data = tf.placeholder(dtype=tf.float32, shape=[None])

    w = tf.Variable(tf.constant([0.5]))
    b = tf.Variable(tf.zeros([1]))

    y1 = tf.mul(w, x_data, name='y1')
    y2 = tf.py_func(addone, [y1], tf.float32)
    y = tf.add(y2, b)

    loss = tf.reduce_mean(tf.square(y - y_data))
    optimizer = tf.train.GradientDescentOptimizer(0.5)
    train = optimizer.minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for step in xrange(201):
            ran = np.random.rand(115).astype(np.float32)
            ans = ran * 1.5 + 3
            dic = {x_data: ran, y_data: ans}
            tt, yy, yy1 = sess.run([train, y1, y2], feed_dict=dic)
            if step % 20 == 0:
                print 'step {}'.format(step)
                print '{}, {}'.format(w.eval(), b.eval())

        test = sess.run(y, feed_dict={x_data: [1]})
        print 'test = {}'.format(test)

if __name__ == '__main__':
    pyfunc_test()
Variable b keeps changing, but w keeps its initial value and never changes.
sess.run(tf.gradients(loss, b), feed_dict=dic) returns a value, while
sess.run(tf.gradients(loss, w), feed_dict=dic) raises {TypeError} Fetch argument None has invalid type <type 'NoneType'>.
I know some answers suggest using tf.RegisterGradient and gradient_override_map, but I can't find these in the Faster R-CNN repo (linked at the top of this post).
Am I doing something wrong, or missing something, so that w is frozen?

The gradient of py_func is None (just check ops.get_gradient_function(y2.op)), which is why w never receives a gradient. There is a gist by @harpone which shows how to use a gradient override map for py_func.
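For example, a quick way to confirm this (a minimal check, assuming the graph from the question has already been built so that y2 exists):

from tensorflow.python.framework import ops

# py_func ops have no registered gradient function, so this prints None,
# and tf.gradients(loss, w) returns [None] for everything upstream of it.
print(ops.get_gradient_function(y2.op))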
Here is your example modified to use that recipe:
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops

def addone(x):
    # print(type(x))
    return x + 1

def addone_grad(op, grad):
    # Note: this returns the op's input as the "gradient"; for addone (x + 1)
    # the mathematically exact backward pass would simply return `grad`.
    x = op.inputs[0]
    return x

# Define custom py_func which takes also a grad op as argument:
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
    # Need to generate a unique name to avoid duplicates:
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
    tf.RegisterGradient(rnd_name)(grad)  # see _MySquareGrad for grad example
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)

def pyfunc_test():
    # create data
    x_data = tf.placeholder(dtype=tf.float32, shape=[None])
    y_data = tf.placeholder(dtype=tf.float32, shape=[None])

    w = tf.Variable(tf.constant([0.5]))
    b = tf.Variable(tf.zeros([1]))

    y1 = tf.mul(w, x_data, name='y1')
    y2 = py_func(addone, [y1], [tf.float32], grad=addone_grad)[0]
    y = tf.add(y2, b)

    loss = tf.reduce_mean(tf.square(y - y_data))
    optimizer = tf.train.GradientDescentOptimizer(0.01)
    train = optimizer.minimize(loss)

    print("Pyfunc grad", ops.get_gradient_function(y2.op))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for step in range(10):
            # ran = np.random.rand(115).astype(np.float32)
            ran = np.ones((115)).astype(np.float32)
            ans = ran * 1.5 + 3
            dic = {x_data: ran, y_data: ans}
            tt, yy, yy1 = sess.run([train, y1, y2], feed_dict=dic)
            if step % 1 == 0:
                print('step {}'.format(step))
                print('{}, {}'.format(w.eval(), b.eval()))

        test = sess.run(y, feed_dict={x_data: [1]})
        print('test = {}'.format(test))

if __name__ == '__main__':
    pyfunc_test()
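Not part of the original answer, but worth noting: later TensorFlow releases provide tf.custom_gradient, which removes the need for the gradient_override_map trick. A minimal sketch under TF 2.x eager mode (here the backward function returns the true gradient of x + 1, i.e. the incoming gradient unchanged):

import tensorflow as tf

@tf.custom_gradient
def addone(x):
    y = x + 1

    def grad(dy):
        # d(x + 1)/dx = 1, so the upstream gradient passes through unchanged
        return dy

    return y, grad

# Usage: gradients now flow through addone as expected.
x = tf.Variable([0.5])
with tf.GradientTape() as tape:
    loss = tf.reduce_mean(tf.square(addone(x) - 3.0))
print(tape.gradient(loss, x))  # a real tensor, not None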

Related

TensorFlow Linear Regression

I am trying to use TensorFlow to calculate a linear regression on some data.
I do not understand why it cannot predict a decent line.
Below is the result I am getting:
This is my code. I have tried changing different parameters, but nothing helps.
Any suggestion is welcome.
# Prepare the data
x = df["Attainment8_float"]
y = df["Progress8_float"]

# Check the data
plt.scatter(x, y)
plt.show()

# TensorFlow Model
# Config
num_epochs = 1000
learning_rate = 0.0001
# /Config

# Creating the graph
ops.reset_default_graph()
tf.disable_v2_behavior()

X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')

a = tf.get_variable('a', initializer=0.)
b = tf.get_variable('b', initializer=0.)

h = a * X + b

cost = tf.reduce_mean( (h - Y)**2 )
optimizer = tf.train.GradientDescentOptimizer(
    learning_rate=learning_rate
).minimize(cost)
init = tf.global_variables_initializer()

# Running the Model
found_a = 0
found_b = 0

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(num_epochs):
        _, costValue = sess.run(
            [optimizer, cost],
            feed_dict={
                X: x,
                Y: y,
            }
        )
        found_a = a.eval()
        found_b = b.eval()
        if epoch % (num_epochs/10) == 0:  # Every 10 percent
            print("... epoch: " + str(epoch))
            print(f"cost[{str(costValue)}] / a[{str(a.eval())}] / b[{str(b.eval())}]")

# Seeing the obtained values in a plot
xrange = np.linspace(x.min(), x.max(), 2)
# Plot points
plt.plot(x, y, 'ro')
# Plot resulting function
plt.plot(xrange, xrange * found_a + found_b, 'b')
plt.show()
When I run it with
a = tf.get_variable('a', initializer= 0.05)
b = tf.get_variable('b', initializer=-2.0)
I get
However, I did some data preprocessing. I removed entries with "." as you did, as far as I can see. Furthermore, I removed entries with "x", so the code looks like:
df = df[df.Attainment8 != "."]
df = df[df.Progress8 != "."]
df = df[df.Attainment8 != "x"]
df = df[df.Progress8 != "x"]
# convert object columns to float
df["Attainment8_float"] = df["Attainment8"].astype(float)
df["Progress8_float"]= df["Progress8"].astype(float)
When I additionally use (together with the initializers set to 0.05 and -2.0)
num_epochs = 2000
learning_rate = 0.000001
I get

tf.get_variable initializer Tensorflow

How important is it to choose the right value for the initializer in TensorFlow?
With this code:
a = tf.get_variable('a', initializer=0.1)
b = tf.get_variable('b', initializer=-3.0)
compared with:
a = tf.get_variable('a', initializer=0.1)
b = tf.get_variable('b', initializer=0.0)
Why doesn't TensorFlow manage to fit the data properly in the second example? Is there anything that can be done by changing num_epochs or learning_rate?
This is my code:
# TensorFlow Model
# Config
num_epochs = 2000
learning_rate = 0.0001
# /Config

# Creating the graph
ops.reset_default_graph()
tf.disable_v2_behavior()

X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')

a = tf.get_variable('a', initializer=0.1)
b = tf.get_variable('b', initializer=-3.0)

h = a * X + b

cost = tf.reduce_mean( (h - Y)**2 )
optimizer = tf.train.GradientDescentOptimizer(
    learning_rate=learning_rate
).minimize(cost)
init = tf.global_variables_initializer()

# Running the Model
found_a = 0
found_b = 0

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(num_epochs):
        _, costValue = sess.run(
            [optimizer, cost],
            feed_dict={
                X: x,
                Y: y,
            }
        )
        found_a = a.eval()
        found_b = b.eval()
        if epoch % (num_epochs/10) == 0:  # Every 10 percent
            print("... epoch: " + str(epoch))
            print(f"cost[{str(costValue)}] / a[{str(a.eval())}] / b[{str(b.eval())}]")

# Seeing the obtained values in a plot
xrange = np.linspace(x.min(), x.max(), 2)
# Plot points
plt.plot(x, y, 'ro')
# Plot resulting function
plt.plot(xrange, xrange * found_a + found_b, 'b')
plt.show()
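This is not in the original post, but one common way to make the fit insensitive to the initializer is to standardize the input before running gradient descent; then a = 0, b = 0 converges without hand-tuning num_epochs or learning_rate. A minimal NumPy-only sketch, assuming x and y hold the Attainment8/Progress8 float values used above:

import numpy as np

# Standardize x so the cost surface is well conditioned for gradient descent.
x_mean, x_std = x.mean(), x.std()
x_norm = (x - x_mean) / x_std

a_n, b_n = 0.0, 0.0
lr = 0.1
for _ in range(500):
    err = a_n * x_norm + b_n - y
    a_n -= lr * 2 * np.mean(err * x_norm)  # d(cost)/da
    b_n -= lr * 2 * np.mean(err)           # d(cost)/db

# Map the fitted parameters back to the original scale: h = a*x + b
a_fit = a_n / x_std
b_fit = b_n - a_n * x_mean / x_std
print(a_fit, b_fit)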

Tensorflow's placeholder initialization differs from tensorflow's constant initialization. Why?

I have written two functions that initialize TensorFlow's variables in different ways, and I don't know why the results are different. Here is the first function, which uses placeholders for initialization:
First function
import tensorflow as tf
import numpy as np

def linear_function():
    np.random.seed(1)

    X = tf.placeholder(dtype=tf.float64, name='X')
    W = tf.placeholder(dtype=tf.float64, name='W')
    b = tf.placeholder(dtype=tf.float64, name='b')

    Y = tf.add(tf.matmul(W, X), b)

    sess = tf.Session()
    result = sess.run(Y, feed_dict={W: np.random.randn(4, 3), X: np.random.randn(3, 1), b: np.random.randn(4, 1)})
    sess.close()
    return result

print("result = " + str(linear_function()))
And the result is:
result = [[-1.98748544]
[-2.76826248]
[-0.78635415]
[-2.77463846]]
Second function
The second function uses tf.constant to initialize the variables:
def linear_function():
    np.random.seed(1)

    X = tf.constant(np.random.randn(3, 1), name="X")
    W = tf.constant(np.random.randn(4, 3), name="X")
    b = tf.constant(np.random.randn(4, 1), name="X")

    Y = tf.add(tf.matmul(W, X), b)

    sess = tf.Session()
    result = sess.run(Y)
    sess.close()
    return result

print("result = " + str(linear_function()))
Result:
result = [[-2.15657382]
[ 2.95891446]
[-1.08926781]
[-0.84538042]]
What is the problem? Is it related to np.random.seed(1)?
Thanks.
In the first snippet, the feed_dict is:
{W:np.random.randn(4,3), X:np.random.randn(3,1), b:np.random.randn(4,1)}
So first a random value for W is produced, then one for X, and then one for b. In the second snippet, however, the random values are drawn in the order X, W, and b. Since the order in which the random numbers are generated is not the same, the values assigned to each tensor differ. If, for example, you change the order accordingly in the feed_dict of the first snippet, you will get the same result as the second one:
import tensorflow as tf
import numpy as np

def linear_function():
    np.random.seed(1)

    X = tf.placeholder(dtype=tf.float64, name='X')
    W = tf.placeholder(dtype=tf.float64, name='W')
    b = tf.placeholder(dtype=tf.float64, name='b')

    Y = tf.add(tf.matmul(W, X), b)

    sess = tf.Session()
    result = sess.run(Y, feed_dict={X: np.random.randn(3, 1), W: np.random.randn(4, 3), b: np.random.randn(4, 1)})
    sess.close()
    return result

print("result = " + str(linear_function()))
Output:
result = [[-2.15657382]
[ 2.95891446]
[-1.08926781]
[-0.84538042]]
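The same effect can be reproduced with NumPy alone; this small sketch only illustrates why the draw order after np.random.seed matters:

import numpy as np

# Same seed, different draw order: the values end up assigned to different names.
np.random.seed(1)
w_first = np.random.randn(4, 3)   # consumes the first 12 numbers of the stream
x_second = np.random.randn(3, 1)  # consumes the next 3

np.random.seed(1)
x_first = np.random.randn(3, 1)   # now X gets the first 3 numbers instead
w_second = np.random.randn(4, 3)

print(np.array_equal(x_first, x_second))  # False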

It is hard to use tf.train.batch for np.array data

import tensorflow as tf
import numpy as np

xy = np.loadtxt('data-01-test-score.csv', delimiter=',', dtype=np.float32)

# numpy array data is turned into tensors
x_imp_np = xy[:, :-1]
y_imp_np = xy[:, [-1]]

x_imp_ten = tf.constant(x_imp_np)
y_imp_ten = tf.constant(y_imp_np)

# make batches for data
x_batch, y_batch = tf.train.batch([x_imp_ten, y_imp_ten], batch_size=10)

x = tf.placeholder(tf.float32, shape=[None, 3])
y = tf.placeholder(tf.float32, shape=[None, 1])

w = tf.Variable(tf.random_normal([3, 1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')

hypothesis = tf.matmul(x, w) + b
cost = tf.reduce_mean(tf.square(hypothesis - y))

optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train = optimizer.minimize(cost)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

x_data, y_data = sess.run([x_imp_ten, y_imp_ten])

for step in range(2001):
    x_batch_tr, y_batch_tr = sess.run([x_batch, y_batch])
    _, cost_val, hypothesis_val = sess.run([train, cost, hypothesis], feed_dict={x: x_data, y: y_data})
    if step % 10 == 0:
        print(step, cost_val)
The code above is just a simple linear regression problem from Sung Kim's lecture. I have a problem with tf.train.batch: when a queue is used it works well, but if I don't use the queue it doesn't work. Is there any way to load the data without using a queue?
Here it takes so much time that it is almost useless.
I just want to batch that numpy array with simple array slicing instead of tf.train.batch.
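For reference, here is a minimal sketch of the plain array-indexing mini-batching the question describes, with no queues and no tf.train.batch. It assumes x_imp_np, y_imp_np, sess, train, cost, x, and y from the code above:

batch_size = 10
num_examples = x_imp_np.shape[0]

for step in range(2001):
    # pick a mini-batch by indexing the NumPy arrays directly
    idx = np.random.permutation(num_examples)[:batch_size]
    x_batch_tr, y_batch_tr = x_imp_np[idx], y_imp_np[idx]

    _, cost_val = sess.run([train, cost],
                           feed_dict={x: x_batch_tr, y: y_batch_tr})
    if step % 10 == 0:
        print(step, cost_val)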

TensorFlow: parameters do not update when training

I'm implementing a classification model using TensorFlow.
The problem that I'm facing is that my weights and error are not being updated when I run the training step. As a result, my network keeps returning the same results.
I've developed my model based on the MNIST example from the TensorFlow website.
import numpy as np
import tensorflow as tf

sess = tf.InteractiveSession()

# load dataset
dataset = np.loadtxt('char8k.txt', dtype='float', comments='#', delimiter=",")
Y = np.asmatrix(dataset[:, 0])
X = np.asmatrix(dataset[:, 1:1201])

m = 11527
labels = 26

# Y is expanded to 11527x26 (one-hot)
Yt = np.zeros((m, labels))
for i in range(0, m):
    index = Y[0, i] - 1
    Yt[i, index] = 1
Y = Yt
Y = np.asmatrix(Y)

# ------------------------------------------------------------------------------
# graph settings
x = tf.placeholder(tf.float32, shape=[None, 1200])
y_ = tf.placeholder(tf.float32, shape=[None, 26])

Wtest = tf.Variable(tf.truncated_normal([1200, 26], stddev=0.001))
W = tf.Variable(tf.truncated_normal([1200, 26], stddev=0.001))
b = tf.Variable(tf.zeros([26]))

sess.run(tf.initialize_all_variables())

y = tf.nn.softmax(tf.matmul(x, W) + b)
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

Wtest = W
for i in range(10):
    print("iteration:")
    print(i)
    Xbatch = X[np.random.randint(X.shape[0], size=100), :]
    Ybatch = Y[np.random.randint(Y.shape[0], size=100), :]
    train_step.run(feed_dict={x: Xbatch, y_: Ybatch})
    print("weight update")
    print(Wtest == W)  # monitor the weight updates
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("accuracy:")
    print(accuracy.eval(feed_dict={x: X, y_: Y}))
    print(" ")
    print(" ")
The issue probably arises from how you initialize the weight matrix, W. If it is initialized to all zeroes, all of the neurons will follow the same gradient in each step, which leads to the network not training. Replacing the line
W = tf.Variable(tf.zeros([1200,26]))
...with something like
W = tf.Variable(tf.truncated_normal([1200,26], stddev=0.001))
...should cause it to start training.
This question on the CrossValidated site has a good explanation of why you should not initialize all of your weights to zero.
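A side note on the Wtest == W check in the question: after Wtest = W, both names refer to the same graph object, so that comparison can never show whether the values changed. A minimal sketch of one way to verify that the weights actually move, reusing sess, W, train_step, and the batch feed from the question:

w_before = sess.run(W)                       # snapshot the current values
train_step.run(feed_dict={x: Xbatch, y_: Ybatch})
w_after = sess.run(W)

# prints False once the optimizer has changed the weights
print(np.allclose(w_before, w_after))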
