How important is it to choose the right value for the initializer in TensorFlow?
Compare this code:
a = tf.get_variable('a', initializer=0.1)
b = tf.get_variable('b', initializer=-3.0)
with this:
a = tf.get_variable('a', initializer=0.1)
b = tf.get_variable('b', initializer=0.0)
Why doesn't TensorFlow manage to fit the data properly in the second case? Is there anything that can be done by changing num_epochs or learning_rate?
This is my code:
# TensorFlow Model
# (imports and the x / y data preparation are the same as in the related question below)

# Config
num_epochs = 2000
learning_rate = 0.0001
# /Config

# Creating the graph
ops.reset_default_graph()
tf.disable_v2_behavior()

X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')

a = tf.get_variable('a', initializer=0.1)
b = tf.get_variable('b', initializer=-3.0)

h = a * X + b
cost = tf.reduce_mean((h - Y) ** 2)
optimizer = tf.train.GradientDescentOptimizer(
    learning_rate=learning_rate
).minimize(cost)

init = tf.global_variables_initializer()

# Running the Model
found_a = 0
found_b = 0
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(num_epochs):
        _, costValue = sess.run(
            [optimizer, cost],
            feed_dict={
                X: x,
                Y: y,
            }
        )
        found_a = a.eval()
        found_b = b.eval()
        if epoch % (num_epochs / 10) == 0:  # Every 10 percent
            print("... epoch: " + str(epoch))
            print(f"cost[{costValue}] / a[{a.eval()}] / b[{b.eval()}]")

# Seeing the obtained values in a plot
xrange = np.linspace(x.min(), x.max(), 2)
# Plot points
plt.plot(x, y, 'ro')
# Plot resulting function
plt.plot(xrange, xrange * found_a + found_b, 'b')
plt.show()
Related
I am trying to use TensorFlow to calculate a linear regression on some data.
I do not understand why it cannot predict a decent line.
Below is the result I am getting:
This is my code; I have tried changing different parameters, but nothing has worked.
Any suggestion is welcome.
# Imports (TF 1.x API; under TensorFlow 2 use the compat.v1 module as below)
import numpy as np
import matplotlib.pyplot as plt
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import ops

# Prepare the data (df is the DataFrame loaded beforehand, not shown here)
x = df["Attainment8_float"]
y = df["Progress8_float"]

# Check the data
plt.scatter(x, y)
plt.show()

# TensorFlow Model

# Config
num_epochs = 1000
learning_rate = 0.0001
# /Config

# Creating the graph
ops.reset_default_graph()
tf.disable_v2_behavior()

X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')

a = tf.get_variable('a', initializer=0.)
b = tf.get_variable('b', initializer=0.)

h = a * X + b
cost = tf.reduce_mean((h - Y) ** 2)
optimizer = tf.train.GradientDescentOptimizer(
    learning_rate=learning_rate
).minimize(cost)

init = tf.global_variables_initializer()

# Running the Model
found_a = 0
found_b = 0
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(num_epochs):
        _, costValue = sess.run(
            [optimizer, cost],
            feed_dict={
                X: x,
                Y: y,
            }
        )
        found_a = a.eval()
        found_b = b.eval()
        if epoch % (num_epochs / 10) == 0:  # Every 10 percent
            print("... epoch: " + str(epoch))
            print(f"cost[{costValue}] / a[{a.eval()}] / b[{b.eval()}]")

# Seeing the obtained values in a plot
xrange = np.linspace(x.min(), x.max(), 2)
# Plot points
plt.plot(x, y, 'ro')
# Plot resulting function
plt.plot(xrange, xrange * found_a + found_b, 'b')
plt.show()
When I run it with
a = tf.get_variable('a', initializer= 0.05)
b = tf.get_variable('b', initializer=-2.0)
I get the following result (plot omitted).
However, I did some data preprocessing first. I removed entries with "." (as you did, as far as I can see) and additionally removed entries with "x", so the code looks like:
df = df[df.Attainment8 != "."]
df = df[df.Progress8 != "."]
df = df[df.Attainment8 != "x"]
df = df[df.Progress8 != "x"]
# convert the object columns to float
df["Attainment8_float"] = df["Attainment8"].astype(float)
df["Progress8_float"]= df["Progress8"].astype(float)
When I additionally use (together with initializer set to 0.05 and -2.0)
num_epochs = 2000
learning_rate = 0.000001
I get the following result (plot omitted).
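A further option (a sketch I am adding here, not part of the original answer, following the same normalize-to-zero-mean-and-unit-variance advice that appears in the polynomial-regression answer further down): standardize x and y first; then a = 0.0 and b = 0.0 converge without hand-tuned starting values, and the slope and intercept can be mapped back to the original scale afterwards. The learning rate of 0.01 below is an assumption that works on standardized data.
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import ops

tf.disable_v2_behavior()
ops.reset_default_graph()

# x and y are the cleaned Attainment8_float / Progress8_float columns from above
x_n = (x - x.mean()) / x.std()
y_n = (y - y.mean()) / y.std()

X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')
a = tf.get_variable('a', initializer=0.0)
b = tf.get_variable('b', initializer=0.0)
cost = tf.reduce_mean((a * X + b - Y) ** 2)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        sess.run(train_op, feed_dict={X: x_n, Y: y_n})
    a_n, b_n = sess.run([a, b])

# Undo the standardization: y = y_mean + y_std * (a_n * (x - x_mean) / x_std + b_n)
found_a = a_n * y.std() / x.std()
found_b = y.mean() + y.std() * b_n - found_a * x.mean()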
I have written a script that demonstrates the linear regression algorithm, as follows:
import numpy as np
import tensorflow as tf

training_epochs = 100
learning_rate = 0.01

# the training set
x_train = np.linspace(0, 10, 100)
y_train = x_train + np.random.normal(0, 1, 100)

# set up placeholders for input and output
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# set up variables for weights
w0 = tf.Variable(0.0, name="w0")
w1 = tf.Variable(0.0, name="w1")
y_predicted = X * w1 + w0

# Define the cost function
costF = 0.5 * tf.square(Y - y_predicted)

# Define the operation that will be called on each iteration
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(costF)

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

# Loop through the data training
for epoch in range(training_epochs):
    for (x, y) in zip(x_train, y_train):
        sess.run(train_op, feed_dict={X: x, Y: y})

# get values of the final weights
w_val_0, w_val_1 = sess.run([w0, w1])
sess.close()
With the script above, I could compute w_val_1 and w_val_0 easily. But if I change y_predicted to this:
w0 = tf.Variable(0.0, name="w0")
w1 = tf.Variable(0.0, name="w1")
w2 = tf.Variable(0.0, name="w2")
y_predicted = X*X*w2 + X*w1 + w0
...
w_val_0,w_val_1,w_val_2 = sess.run([w0,w1,w2])
then I couldn't compute w_val_0, w_val_1, w_val_2. Please help me!
When you use X*X, the weights (w2, w1 and w0) increase rapidly, reaching inf, which results in nan values in the loss, and no training happens. As a rule of thumb, always normalize the data to zero mean and unit variance.
Fixed code
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

training_epochs = 100
learning_rate = 0.01

# the training set
x_train = np.linspace(0, 10, 100)
y_train = x_train + np.random.normal(0, 1, 100)

# Normalize the data to zero mean and unit variance
x_mean = np.mean(x_train)
x_std = np.std(x_train)
x_train_ = (x_train - x_mean) / x_std

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# set up variables for weights
w0 = tf.Variable(0.0, name="w0")
w1 = tf.Variable(0.0, name="w1")
w2 = tf.Variable(0.0, name="w3")
# note: here w1 multiplies the quadratic term and w2 the linear term
y_predicted = X * X * w1 + X * w2 + w0

# Define the cost function
costF = 0.5 * tf.square(Y - y_predicted)

# Define the operation that will be called on each iteration
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(costF)

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

# Loop through the data training, feeding the normalized inputs
for epoch in range(training_epochs):
    for (x, y) in zip(x_train_, y_train):
        sess.run(train_op, feed_dict={X: x, Y: y})

y_hat = sess.run(y_predicted, feed_dict={X: x_train_})
print(sess.run([w0, w1, w2]))
sess.close()

plt.plot(x_train, y_train)
plt.plot(x_train, y_hat)
plt.show()
output:
[4.9228806, -0.08735728, 3.029659]
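One practical follow-up (my own addition, not part of the original answer): the weights are learned against the standardized input, so a new x value has to go through the same (x - x_mean) / x_std transform before prediction. A small sketch, reusing the weights printed above (remember that in the fixed code w1 multiplies the quadratic term and w2 the linear term; the new x values below are just example numbers):
# Values printed above: [w0, w1, w2]
w0_, w1_, w2_ = 4.9228806, -0.08735728, 3.029659

x_new = np.array([2.5, 7.0], dtype=np.float32)        # new raw inputs (example values)
x_new_ = (x_new - x_mean) / x_std                      # reuse the training statistics
y_new = x_new_ * x_new_ * w1_ + x_new_ * w2_ + w0_     # same form as y_predicted
print(y_new)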
Problem Summary:
The issue is that even after running this code for multiple epochs, the cost isn't reducing much (I have tried this with a variety of starting_learning_rates). The equation that I am trying to optimize is (m * pow(length, u) * pow(start_y, t) + c), where length and start_y are the inputs and u, t, m and c are learnable parameters. I was able to observe (my dataset is quite small) that length * sqrt(start_y) is almost a constant, and I thought TensorFlow would be able to help me find the values of the variables.
This is my TensorFlow code. combined_vehicles is an array with 129 rows and 2 columns (2 features); combined_labels is an array with the label for each of the examples in combined_vehicles.
u = tf.Variable(0.0, dtype="float32")
t = tf.Variable(0.0, dtype="float32")
c = tf.Variable(0.0, dtype="float32")
m = tf.Variable(0.0, dtype="float32")

length = tf.placeholder(dtype="float32", shape=[combined_vehicles.shape[0], 1], name="length")
start_y = tf.placeholder(dtype="float32", shape=[combined_vehicles.shape[0], 1], name="start_y")
labels = tf.placeholder(dtype="float32", shape=[combined_vehicles.shape[0], 1], name="labels")

output = tf.add(tf.multiply(tf.multiply(tf.pow(length, u), tf.pow(start_y, t)), m), c)
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=output, labels=labels))

global_step = tf.Variable(0, trainable=False, name='global_step')
start_learning_rate = 0.0001
decay_steps = 100
learning_rate = tf.train.exponential_decay(start_learning_rate, global_step, decay_steps, 0.1, staircase=True)

result_output = output > 0.5
result_label = combined_labels > 0.5
correct_prediction = tf.equal(result_output, result_label)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost, global_step=global_step)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    epochs = 100
    sess.run(init)
    feed = {
        length: combined_vehicles[:, 0].reshape([combined_vehicles.shape[0], 1]),
        start_y: combined_vehicles[:, 1].reshape([combined_vehicles.shape[0], 1]),
        labels: combined_labels,
    }
    for i in range(epochs):
        _, cost_estimate = sess.run([optimizer, cost], feed_dict=feed)
    total_accuracy = accuracy.eval(feed_dict=feed)
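One detail worth flagging here (my observation, not something stated in the question): output is fed to sigmoid_cross_entropy_with_logits as a logit, so the predicted probability is sigmoid(output); comparing the raw logit against 0.5 mixes the two scales. A small sketch of the usual thresholding (this does not by itself explain the stalled cost, it only affects the reported accuracy):
# Threshold the probability rather than the raw logit; sigmoid(output) > 0.5
# is equivalent to output > 0.0 for the logit itself.
prediction = tf.sigmoid(output)
result_output = prediction > 0.5          # or, equivalently: output > 0.0
correct_prediction = tf.equal(result_output, combined_labels > 0.5)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))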
Related: Save Tensorflow graph for viewing in Tensorboard without summary operations
I am trying to use TensorBoard to analyse a TensorFlow graph with SummaryWriter. However, TensorFlow is not outputting a 'graph' folder with information. Perhaps I am missing a command, or it is not in the right place?
writer = tf.train.SummaryWriter(logs_path, graph=tf.get_default_graph());
is what I used. I think this may no longer work in TensorFlow 1.0 (just the SummaryWriter command).
import numpy as np
import tensorflow as tf
# %matplotlib inline
import matplotlib.pyplot as plt

# Global config variables
num_steps = 5  # number of truncated backprop steps ('n' in the discussion above)
batch_size = 200
num_classes = 2
state_size = 4
learning_rate = 0.1
logs_path = "./graph"

def gen_data(size=1000000):
    X = np.array(np.random.choice(2, size=(size,)))
    Y = []
    for i in range(size):
        threshold = 0.5
        if X[i-3] == 1:
            threshold += 0.5
        if X[i-8] == 1:
            threshold -= 0.25
        if np.random.rand() > threshold:
            Y.append(0)
        else:
            Y.append(1)
    return X, np.array(Y)

# adapted from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/rnn/ptb/reader.py
def gen_batch(raw_data, batch_size, num_steps):
    raw_x, raw_y = raw_data
    data_length = len(raw_x)

    # partition raw data into batches and stack them vertically in a data matrix
    batch_partition_length = data_length // batch_size
    data_x = np.zeros([batch_size, batch_partition_length], dtype=np.int32)
    data_y = np.zeros([batch_size, batch_partition_length], dtype=np.int32)
    for i in range(batch_size):
        data_x[i] = raw_x[batch_partition_length * i:batch_partition_length * (i + 1)]
        data_y[i] = raw_y[batch_partition_length * i:batch_partition_length * (i + 1)]

    # further divide batch partitions into num_steps for truncated backprop
    epoch_size = batch_partition_length // num_steps
    for i in range(epoch_size):
        x = data_x[:, i * num_steps:(i + 1) * num_steps]
        y = data_y[:, i * num_steps:(i + 1) * num_steps]
        yield (x, y)

def gen_epochs(n, num_steps):
    for i in range(n):
        yield gen_batch(gen_data(), batch_size, num_steps)

"""
Placeholders
"""
x = tf.placeholder(tf.int32, [batch_size, num_steps], name='input_placeholder')
y = tf.placeholder(tf.int32, [batch_size, num_steps], name='labels_placeholder')
init_state = tf.zeros([batch_size, state_size])

"""
Inputs
"""
x_one_hot = tf.one_hot(x, num_classes)
rnn_inputs = tf.unstack(x_one_hot, axis=1)

"""
RNN
"""
cell = tf.contrib.rnn.BasicRNNCell(state_size)
rnn_outputs, final_state = tf.contrib.rnn.static_rnn(cell, rnn_inputs, initial_state=init_state)

"""
Predictions, loss, training step
"""
with tf.variable_scope('softmax'):
    W = tf.get_variable('W', [state_size, num_classes])
    b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(0.0))
logits = [tf.matmul(rnn_output, W) + b for rnn_output in rnn_outputs]
predictions = [tf.nn.softmax(logit) for logit in logits]

y_as_list = [tf.squeeze(i, axis=[1]) for i in tf.split(axis=1, num_or_size_splits=num_steps, value=y)]

loss_weights = [tf.ones([batch_size]) for i in range(num_steps)]
losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(logits, y_as_list, loss_weights)
tf.scalar_summary("losses", losses)
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(learning_rate).minimize(total_loss)

# Not sure why this is not outputting a graph for tensorboard
writer = tf.train.SummaryWriter(logs_path, graph=tf.get_default_graph())

"""
Function to train the network
"""
def train_network(num_epochs, num_steps, state_size=4, verbose=True):
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        training_losses = []
        saved = gen_epochs(num_epochs, num_steps)
        for idx, epoch in enumerate(gen_epochs(num_epochs, num_steps)):
            training_loss = 0
            training_state = np.zeros((batch_size, state_size))
            if verbose:
                print("\nEPOCH", idx)
            for step, (X, Y) in enumerate(epoch):
                tr_losses, training_loss_, training_state, _ = \
                    sess.run([losses,
                              total_loss,
                              final_state,
                              train_step],
                             feed_dict={x: X, y: Y, init_state: training_state})
                training_loss += training_loss_
                if step % 100 == 0 and step > 0:
                    if verbose:
                        print("Average loss at step", step,
                              "for last 100 steps:", training_loss / 100)
                    training_losses.append(training_loss / 100)
                    training_loss = 0
        return training_losses

training_losses = train_network(1, num_steps)
plt.plot(training_losses)

# tensorboard --logdir="my_graph"
This worked for me:
writer = tf.summary.FileWriter(logdir='logdir', graph=tf.get_default_graph())
writer.flush()
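For completeness, here is a minimal end-to-end sketch (my own example, TF 1.x API; the directory name logdir is arbitrary) of writing a graph and then viewing it:
import tensorflow as tf

a = tf.constant(1.0, name='a')
b = tf.constant(2.0, name='b')
c = tf.add(a, b, name='c')

with tf.Session() as sess:
    # Writing the graph once is enough to populate the "Graphs" tab in TensorBoard.
    writer = tf.summary.FileWriter('logdir', graph=sess.graph)
    print(sess.run(c))
    writer.flush()
    writer.close()

# then run from a shell: tensorboard --logdir=logdir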
This is a Faster R-CNN implementation in TensorFlow. The proposal_layer is implemented in Python.
I am curious whether the gradient can pass through tf.py_func.
The weights and biases keep changing, so I think the gradient is delivered back successfully.
Then I did a small test:
import tensorflow as tf
import numpy as np

def addone(x):
    # print type(x)
    return x + 1

def pyfunc_test():
    # create data
    x_data = tf.placeholder(dtype=tf.float32, shape=[None])
    y_data = tf.placeholder(dtype=tf.float32, shape=[None])

    w = tf.Variable(tf.constant([0.5]))
    b = tf.Variable(tf.zeros([1]))

    y1 = tf.mul(w, x_data, name='y1')  # (tf.mul was renamed tf.multiply in TF 1.0)
    y2 = tf.py_func(addone, [y1], tf.float32)
    y = tf.add(y2, b)

    loss = tf.reduce_mean(tf.square(y - y_data))
    optimizer = tf.train.GradientDescentOptimizer(0.5)
    train = optimizer.minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for step in xrange(201):
            ran = np.random.rand(115).astype(np.float32)
            ans = ran * 1.5 + 3
            dic = {x_data: ran, y_data: ans}
            tt, yy, yy1 = sess.run([train, y1, y2], feed_dict=dic)
            if step % 20 == 0:
                print 'step {}'.format(step)
                print '{}, {}'.format(w.eval(), b.eval())

        test = sess.run(y, feed_dict={x_data: [1]})
        print 'test = {}'.format(test)

if __name__ == '__main__':
    pyfunc_test()
Variable b keeps changing, but w keeps its initial value and never changes.
sess.run(tf.gradients(loss, b), feed_dict=dic) returns a value.
sess.run(tf.gradients(loss, w), feed_dict=dic) raises {TypeError}Fetch argument None has invalid type <type 'NoneType'>.
I know some questions suggest using tf.RegisterGradient and gradient_override_map,
but I can't find these in the Faster R-CNN repo (link at the top of the post).
Am I doing something wrong, or missing something, so that w stays frozen?
The gradient of py_func is None (just check ops.get_gradient_function(y2.op)). There's a gist by @harpone which shows how to use a gradient override map for py_func.
Here's your example modified to use that recipe:
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops

def addone(x):
    # print(type(x))
    return x + 1

def addone_grad(op, grad):
    x = op.inputs[0]
    return x

# Define custom py_func which takes also a grad op as argument:
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
    # Need to generate a unique name to avoid duplicates:
    rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))

    tf.RegisterGradient(rnd_name)(grad)  # see _MySquareGrad for grad example
    g = tf.get_default_graph()
    with g.gradient_override_map({"PyFunc": rnd_name}):
        return tf.py_func(func, inp, Tout, stateful=stateful, name=name)

def pyfunc_test():
    # create data
    x_data = tf.placeholder(dtype=tf.float32, shape=[None])
    y_data = tf.placeholder(dtype=tf.float32, shape=[None])

    w = tf.Variable(tf.constant([0.5]))
    b = tf.Variable(tf.zeros([1]))

    y1 = tf.mul(w, x_data, name='y1')
    y2 = py_func(addone, [y1], [tf.float32], grad=addone_grad)[0]
    y = tf.add(y2, b)

    loss = tf.reduce_mean(tf.square(y - y_data))
    optimizer = tf.train.GradientDescentOptimizer(0.01)
    train = optimizer.minimize(loss)

    print("Pyfunc grad", ops.get_gradient_function(y2.op))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for step in range(10):
            # ran = np.random.rand(115).astype(np.float32)
            ran = np.ones((115)).astype(np.float32)
            ans = ran * 1.5 + 3
            dic = {x_data: ran, y_data: ans}
            tt, yy, yy1 = sess.run([train, y1, y2], feed_dict=dic)
            if step % 1 == 0:
                print('step {}'.format(step))
                print('{}, {}'.format(w.eval(), b.eval()))

        test = sess.run(y, feed_dict={x_data: [1]})
        print('test = {}'.format(test))

if __name__ == '__main__':
    pyfunc_test()
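A side note on the gradient function itself (my addition, not part of the original answer): addone_grad above returns the forward input x, which is enough to demonstrate that gradients now flow, but it is not the true derivative of addone. Since addone(x) = x + 1 has derivative 1, the chain rule says the override should simply pass the incoming gradient through:
def addone_grad(op, grad):
    # d(x + 1)/dx = 1, so the upstream gradient passes through unchanged
    return grad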