Tensorflow: Invalid calculations - python

I am new to TensorFlow and am trying to understand how the computation graph works. I am working through the very basic linear regression example on the TensorFlow website. I have the following piece of code:
import numpy as np
import tensorflow as tf
def manual_loss(_w, _b, _x, _y):
    _loss = 0.0
    n = len(_x)
    for j in range(n):
        _loss += (_w * _x[j] + _b - _y[j]) ** 2
    return _loss

def manual_grads(_w, _b, _x, _y):
    n = len(_x)
    g_w = 0.0
    g_b = 0.0
    for j in range(n):
        g_w += 2.0 * (_w * _x[j] + _b - _y[j]) * _x[j]
        g_b += 2.0 * (_w * _x[j] + _b - _y[j])
    return g_w, g_b
# Model parameters
W = tf.Variable([0.3], dtype=tf.float32)
b = tf.Variable([-0.3], dtype=tf.float32)
_W = 0.3
_b = -0.3
# Model input and output
x = tf.placeholder(tf.float32)
linear_model = W * x + b
y = tf.placeholder(tf.float32)
# loss
loss = tf.reduce_sum(tf.square(linear_model - y)) # sum of the squares
grads = tf.gradients(loss, [W, b])
# training data
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
lr = 0.001
for i in range(1000):
    results = sess.run([loss, W, b, grads], {x: x_train, y: y_train})
    loss_value = results[0]
    W_value = results[1]
    b_value = results[2]
    grad_W = results[3][0]
    grad_b = results[3][1]
    manual_loss_value = manual_loss(_w=_W, _b=_b, _x=x_train, _y=y_train)
    manual_grad_W, manual_grad_b = manual_grads(_w=_W, _b=_b, _x=x_train, _y=y_train)
    new_W_value = W_value - lr * grad_W
    new_b_value = b_value - lr * grad_b
    W = tf.assign(W, value=new_W_value)
    b = tf.assign(b, value=new_b_value)
    print("***********************")
    print("loss={0}".format(loss_value))
    print("manual_loss_value={0}".format(manual_loss_value))
    print("W={0}".format(W_value))
    print("b={0}".format(b_value))
    print("manual_W={0}".format(_W))
    print("manual_b={0}".format(_b))
    print("grad_W={0}".format(grad_W))
    print("grad_b={0}".format(grad_b))
    print("manual_grad_W={0}".format(manual_grad_W))
    print("manual_grad_b={0}".format(manual_grad_b))
    print("***********************")
    _W -= lr * manual_grad_W
    _b -= lr * manual_grad_b
I am just trying to apply gradient descent to a simple (w*X + b - Y)^2 model. I deliberately don't use TensorFlow's own optimizer, because I want to understand the underlying graph update mechanisms. To check that the system computes the correct gradients, I also implemented my own loss and gradient calculation functions for linear regression. Unfortunately, it seems that TensorFlow does not calculate the loss function and the gradients as expected. Here is what I get as output:
***********************
loss=23.65999984741211
manual_loss_value=23.659999999999997
W=[ 0.30000001]
b=[-0.30000001]
manual_W=0.3
manual_b=-0.3
grad_W=[ 52.]
grad_b=[ 15.59999943]
manual_grad_W=52.0
manual_grad_b=15.599999999999998
***********************
***********************
loss=23.65999984741211
manual_loss_value=20.81095744
W=[ 0.24800001]
b=[-0.31560001]
manual_W=0.248
manual_b=-0.3156
grad_W=[ 52.]
grad_b=[ 15.59999943]
manual_grad_W=48.568
manual_grad_b=14.4352
***********************
As you can see, in the second iteration TensorFlow reports the same loss and the same gradients for W and b as in the first iteration, which is incorrect. In some trials it only starts to diverge from the expected values in the third or fourth iteration, not always in the second. Am I doing something wrong here? As soon as I get the values of W and b and their gradients, I update them with tf.assign() in the training loop. Does the problem lie there; is this the wrong way to update variables in TensorFlow? It is really discouraging to run into such problems right at the start.

I think the problem is with the use of tf.assign. tf.assign creates assign nodes, which have to be run to take effect. You should change your code to something like
assign_W_placeholder = tf.placeholder(tf.float32)
assign_b_placeholder = tf.placeholder(tf.float32)
assign_W_node = tf.assign(W, assign_W_placeholder)
assign_b_node = tf.assign(b, assign_b_placeholder)
and then in the for loop, add something like
sess.run(assign_W_node, feed_dict={assign_W_placeholder: new_W_value})
sess.run(assign_b_node, feed_dict={assign_b_placeholder: new_b_value})
After these changes, the TensorFlow and the manual computations give the same results.
The complete code:
import numpy as np
import tensorflow as tf
def manual_loss(_w, _b, _x, _y):
    _loss = 0.0
    n = len(_x)
    for j in range(n):
        _loss += (_w * _x[j] + _b - _y[j]) ** 2
    return _loss

def manual_grads(_w, _b, _x, _y):
    n = len(_x)
    g_w = 0.0
    g_b = 0.0
    for j in range(n):
        g_w += 2.0 * (_w * _x[j] + _b - _y[j]) * _x[j]
        g_b += 2.0 * (_w * _x[j] + _b - _y[j])
    return g_w, g_b
# Model parameters
W = tf.Variable([0.3], dtype=tf.float32)
b = tf.Variable([-0.3], dtype=tf.float32)
_W = 0.3
_b = -0.3
# Model input and output
x = tf.placeholder(tf.float32)
linear_model = W * x + b
y = tf.placeholder(tf.float32)
assign_W_placeholder = tf.placeholder(tf.float32)
assign_b_placeholder = tf.placeholder(tf.float32)
assign_W_node = tf.assign(W, assign_W_placeholder)
assign_b_node = tf.assign(b, assign_b_placeholder)
# loss
loss = tf.reduce_sum(tf.square(linear_model - y)) # sum of the squares
grads = tf.gradients(loss, [W, b])
# training data
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
lr = 0.001
for i in range(1000):
    results = sess.run([loss, W, b, grads], {x: x_train, y: y_train})
    loss_value = results[0]
    W_value = results[1]
    b_value = results[2]
    grad_W = results[3][0]
    grad_b = results[3][1]
    manual_loss_value = manual_loss(_w=_W, _b=_b, _x=x_train, _y=y_train)
    manual_grad_W, manual_grad_b = manual_grads(_w=_W, _b=_b, _x=x_train, _y=y_train)
    new_W_value = W_value - lr * grad_W
    new_b_value = b_value - lr * grad_b
    sess.run([assign_W_node, assign_b_node],
             feed_dict={assign_W_placeholder: new_W_value, assign_b_placeholder: new_b_value})
    print("***********************")
    print("loss={0}".format(loss_value))
    print("manual_loss_value={0}".format(manual_loss_value))
    print("W={0}".format(W_value))
    print("b={0}".format(b_value))
    print("manual_W={0}".format(_W))
    print("manual_b={0}".format(_b))
    print("grad_W={0}".format(grad_W))
    print("grad_b={0}".format(grad_b))
    print("manual_grad_W={0}".format(manual_grad_W))
    print("manual_grad_b={0}".format(manual_grad_b))
    print("***********************")
    _W -= lr * manual_grad_W
    _b -= lr * manual_grad_b
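As an alternative, the placeholder round-trip could also be avoided by building the update ops once from the gradient tensors, for example with tf.assign_sub, and simply running them each iteration; this is roughly what tf.train.GradientDescentOptimizer would build for you. A minimal sketch, reusing W, b, grads, loss, lr, sess, x, y, x_train and y_train from the code above (one possible variant, not from the original answer):
# Sketch only: in-graph SGD update ops, built once outside the loop.
update_W = tf.assign_sub(W, lr * grads[0])
update_b = tf.assign_sub(b, lr * grads[1])
for i in range(1000):
    # read loss/gradients at the current parameters, then apply the update;
    # the feed is still needed because grads depends on the placeholders
    loss_value, grad_values = sess.run([loss, grads], {x: x_train, y: y_train})
    sess.run([update_W, update_b], {x: x_train, y: y_train})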

I think you have a numeric precision problem. NumPy uses double-precision floats (64 bits) by default, while you are declaring your tensors as tf.float32. Try changing them to tf.float64.
Edit: I think the difference is due to the exponentiation in the loss function. Try changing it to a multiplication, as in:
_loss += (_w * _x[j] + _b - _y[j]) * (_w * _x[j] + _b - _y[j])
import numpy as np
import tensorflow as tf
def manual_loss(_w, _b, _x, _y):
    _loss = 0.0
    n = len(_x)
    for j in range(n):
        diff = (_w * _x[j] + _b - _y[j])
        _loss += diff * diff
    return _loss

def manual_grads(_w, _b, _x, _y):
    n = len(_x)
    g_w = 0.0
    g_b = 0.0
    for j in range(n):
        g_w += 2.0 * (_w * _x[j] + _b - _y[j]) * _x[j]
        g_b += 2.0 * (_w * _x[j] + _b - _y[j])
    return g_w, g_b
# Model parameters
W = tf.Variable([0.3], dtype=tf.float64)
b = tf.Variable([-0.3], dtype=tf.float64)
_W = 0.3
_b = -0.3
# Model input and output
x = tf.placeholder(tf.float64)
linear_model = W * x + b
y = tf.placeholder(tf.float64)
# loss
loss = tf.reduce_sum(tf.square(linear_model - y)) # sum of the squares
grads = tf.gradients(loss, [W, b])
# training data
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
lr = 0.001
for i in range(10):
    with tf.device('cpu:0'):
        results = sess.run([loss, grads], {x: x_train, y: y_train})
    loss_value = results[0]
    grad_W = results[1][0]
    grad_b = results[1][1]
    manual_loss_value = manual_loss(_w=_W, _b=_b, _x=x_train, _y=y_train)
    manual_grad_W, manual_grad_b = manual_grads(_w=_W, _b=_b, _x=x_train, _y=y_train)
    new_W_value = (W - lr * grad_W).eval(session=sess)
    new_b_value = (b - lr * grad_b).eval(session=sess)
    tf.assign(W, value=new_W_value).eval(session=sess)
    tf.assign(b, value=new_b_value).eval(session=sess)
    print("***********************")
    print("loss={0}".format(loss_value))
    print("manual_loss_value={0}".format(manual_loss_value))
    print("W={0}".format(W.eval(session=sess)))
    print("b={0}".format(b.eval(session=sess)))
    print("manual_W={0}".format(_W))
    print("manual_b={0}".format(_b))
    print("grad_W={0}".format(grad_W))
    print("grad_b={0}".format(grad_b))
    print("manual_grad_W={0}".format(manual_grad_W))
    print("manual_grad_b={0}".format(manual_grad_b))
    print("***********************")
    _W -= lr * manual_grad_W
    _b -= lr * manual_grad_b

Related

Neural network gives same prediction for each data point for diabetes.csv

I have a neural network in Python, but it gives almost exactly the same prediction for every data point and I can't work out why. I have tried altering the features I use to make the predictions, but I get the same issue. Thanks for any help.
I have a data file which looks like this:
Pregnancies,Glucose,BloodPressure,SkinThickness,Insulin,BMI,DiabetesPedigreeFunction,Age,Outcome
6,148,72,35,0,33.6,0.627,50,1
1,85,66,29,0,26.6,0.351,31,0
8,183,64,0,0,23.3,0.672,32,1
1,89,66,23,94,28.1,0.167,21,0
from Kaggle.
My neural network code is this:
import numpy as np
import pandas as pd
data = pd.read_csv("diabetes.csv", header=0)
print(data.head())
training_examples = data[["BloodPressure", "Glucose", "Outcome"]]
X = training_examples[["BloodPressure", "Glucose"]].to_numpy()
y = training_examples[["Outcome"]].to_numpy()
DIMENSIONS = 2
HIDDEN_LAYER = 20
# Set up the training data
# X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
# y = np.array([[0], [1], [1], [0]])
# Set the number of epochs and the learning rate
num_epochs = 10
learning_rate = 0.1
# Initialize the weights and biases
w1 = np.random.randn(DIMENSIONS, HIDDEN_LAYER)
b1 = np.zeros((1, HIDDEN_LAYER))
w2 = np.random.randn(HIDDEN_LAYER, 1)
b2 = np.zeros((1, 1))
# Define the sigmoid activation function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Define the derivative of the sigmoid function
def sigmoid_derivative(x):
    return x * (1 - x)
# Train the network
for epoch in range(num_epochs):
    # Forward pass
    z1 = np.dot(X, w1) + b1
    a1 = sigmoid(z1)
    z2 = np.dot(a1, w2) + b2
    a2 = sigmoid(z2)
    # Calculate the loss
    loss = np.mean((a2 - y)**2)
    # Print the loss every 100 epochs
    if epoch % 100 == 0:
        print(f'Epoch {epoch}: loss = {loss}')
    # Backpropagation
    dz2 = a2 - y
    dw2 = np.dot(a1.T, dz2)
    db2 = np.sum(dz2, axis=0)
    da1 = np.dot(dz2, w2.T)
    dz1 = da1 * sigmoid_derivative(a1)
    dw1 = np.dot(X.T, dz1)
    db1 = np.sum(dz1, axis=0)
    # Update the weights and biases
    w1 -= learning_rate * dw1
    b1 -= learning_rate * db1
    w2 -= learning_rate * dw2
    b2 -= learning_rate * db2
# Make predictions on the test data
predictions = a2
# Print the predictions
print(predictions)

TensorFlow weighted_cross_entropy_with_logits produces wrong result

I am trying to use the tf.nn.weighted_cross_entropy_with_logits API, but I find I just cannot get the right result when the weight is not 1.0 (1.0 means no weighting).
import tensorflow as tf
import numpy as np
def my_binary_crossentropy_np(labels, output, weight=10.0):
    """
    Weighted binary crossentropy between an output tensor
    and a target tensor.
    """
    # transform back to logits
    epsilon = 1e-08
    np.clip(output, epsilon, 1.0 - epsilon, out=output)
    output = np.log(output / (1.0 - output))
    # https://www.tensorflow.org/api_docs/python/tf/nn/weighted_cross_entropy_with_logits
    # l = 1 + (q - 1) * z
    # (1 - z) * x + l * (log(1 + exp(-abs(x))) + max(-x, 0))
    l = 1.0 + (weight - 1.0) * labels
    loss1 = np.multiply(1.0 - labels, output)
    loss2 = np.multiply(l, np.log(1.0 + np.exp(-abs(output))))
    loss3 = np.maximum(-output, 0)
    loss = loss1 + loss2 + loss3
    return np.mean(loss)

def my_binary_crossentropy_tf(labels, output, weight=1.0):
    """
    Weighted binary crossentropy between an output tensor
    and a target tensor.
    """
    epsilon = 1e-08
    output = tf.clip_by_value(output, epsilon, 1.0 - epsilon)
    output = tf.log(output / (1.0 - output))
    # compute weighted loss
    #loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=output)
    loss = tf.nn.weighted_cross_entropy_with_logits(targets=labels, logits=output, pos_weight=weight)
    return tf.reduce_mean(loss)
# generate random test data and random label
predict = np.random.rand(10, 8)
label = np.random.rand(10, 8)
label[label >= 0.5] = 1
label[label < 0.5] = 0
loss1 = my_binary_crossentropy_np(label, predict, 1.0)
print('loss1 = ', loss1)
loss1 = my_binary_crossentropy_np(label, predict, 10.0)
print('loss1 = ', loss1)
predict_tf = tf.convert_to_tensor(predict)
loss2 = my_binary_crossentropy_tf(label, predict_tf, 1.0)
loss2 = tf.Session().run(loss2)
print('loss2 = ', loss2)
loss2 = my_binary_crossentropy_tf(label, predict_tf, 10.0)
loss2 = tf.Session().run(loss2)
print('loss2 = ', loss2)
running result:
loss1 = 1.02193164517
loss1 = 1.96332399324
loss2 = 1.02193164517
loss2 = 4.80529539791
The implementation of my_binary_crossentropy_np is wrong.
Here is the right one:
l = (weight - 1.0) * labels + 1.0
loss1 = np.multiply(1.0 - labels, output)
loss2 = np.multiply(l, np.log(1.0 + np.exp(-abs(output))) + np.maximum(-output, 0))
loss = loss1 + loss2
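For reference, a self-contained corrected NumPy version might look like the sketch below (the function name weighted_bce_np and its argument names are illustrative, not from the question); it follows the formula quoted in the question's comments from the tf.nn.weighted_cross_entropy_with_logits documentation:
import numpy as np

def weighted_bce_np(labels, output, pos_weight=10.0, epsilon=1e-08):
    # transform probabilities back to logits
    output = np.clip(output, epsilon, 1.0 - epsilon)
    x = np.log(output / (1.0 - output))
    z = labels
    # l = 1 + (q - 1) * z
    # loss = (1 - z) * x + l * (log(1 + exp(-|x|)) + max(-x, 0))
    l = 1.0 + (pos_weight - 1.0) * z
    loss = (1.0 - z) * x + l * (np.log1p(np.exp(-np.abs(x))) + np.maximum(-x, 0))
    return np.mean(loss)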

Splitting ndarray gives unexpected results (TensorFlow RNN tutorial)

I am following a tutorial on RNNs in TensorFlow, but I have a question concerning the input formats.
They take raw_x (a one-hot vector) and basically first partition it into batch_size (200) pieces to form data_x. That part is fine.
Then they further cut data_x into pieces of length 5 (num_steps, or graph width) with:
for i in range(epoch_size):
x = data_x[:, i * num_steps:(i + 1) * num_steps]
y = data_y[:, i * num_steps:(i + 1) * num_steps]
yield (x, y)
However, if I look at the data, the slices of x do not match data_x. The first one does, but then they diverge.
Am I misunderstanding the above code? I would like to understand how x is being created and what it is supposed to look like.
I had expected the second item to be 0 1 0 1 0.
Also, I thought an epoch is when you go through the data completely; from this it seems that they split the data into 1000 parts (epoch_size)?
If it helps, this is my full code. I am trying to figure out what is going on with x at line 48:
import numpy as np
import tensorflow as tf
# %matplotlib inline
import matplotlib.pyplot as plt
# Global config variables
num_steps = 5 # number of truncated backprop steps ('n' in the discussion above)
batch_size = 200
num_classes = 2
state_size = 4
learning_rate = 0.1
def gen_data(size=1000000):
    print('generating data')
    X = np.array(np.random.choice(2, size=(size,)))
    Y = []
    for i in range(size):
        threshold = 0.5
        if X[i-3] == 1:
            threshold += 0.5
        if X[i-8] == 1:
            threshold -= 0.25
        if np.random.rand() > threshold:
            Y.append(0)
        else:
            Y.append(1)
    return X, np.array(Y)

# adapted from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/rnn/ptb/reader.py
def gen_batch(raw_data, batch_size, num_steps):
    print('generating batches')
    raw_x, raw_y = raw_data
    data_length = len(raw_x)
    # partition raw data into batches and stack them vertically in a data matrix
    batch_partition_length = data_length // batch_size
    data_x = np.zeros([batch_size, batch_partition_length], dtype=np.int32)
    data_y = np.zeros([batch_size, batch_partition_length], dtype=np.int32)
    for i in range(batch_size):
        data_x[i] = raw_x[batch_partition_length * i:batch_partition_length * (i + 1)]
        data_y[i] = raw_y[batch_partition_length * i:batch_partition_length * (i + 1)]
    # further divide batch partitions into num_steps for truncated backprop
    epoch_size = batch_partition_length // num_steps
    for i in range(epoch_size):
        x = data_x[:, i * num_steps:(i + 1) * num_steps]
        y = data_y[:, i * num_steps:(i + 1) * num_steps]
        yield (x, y)

def gen_epochs(n, num_steps):
    for i in range(n):
        yield gen_batch(gen_data(), batch_size, num_steps)
"""
Placeholders
"""
x = tf.placeholder(tf.int32, [batch_size, num_steps], name='input_placeholder')
y = tf.placeholder(tf.int32, [batch_size, num_steps], name='labels_placeholder')
init_state = tf.zeros([batch_size, state_size])
"""
RNN Inputs
"""
# Turn our x placeholder into a list of one-hot tensors:
# rnn_inputs is a list of num_steps tensors with shape [batch_size, num_classes]
x_one_hot = tf.one_hot(x, num_classes)
rnn_inputs = tf.unstack(x_one_hot, axis=1)
"""
Definition of rnn_cell
This is very similar to the __call__ method on Tensorflow's BasicRNNCell. See:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/rnn_cell.py
"""
with tf.variable_scope('rnn_cell'):
    W = tf.get_variable('W', [num_classes + state_size, state_size])
    b = tf.get_variable('b', [state_size], initializer=tf.constant_initializer(0.0))

def rnn_cell(rnn_input, state):
    with tf.variable_scope('rnn_cell', reuse=True):
        W = tf.get_variable('W', [num_classes + state_size, state_size])
        b = tf.get_variable('b', [state_size], initializer=tf.constant_initializer(0.0))
    return tf.tanh(tf.matmul(tf.concat(axis=1, values=[rnn_input, state]), W) + b)
"""
Adding rnn_cells to graph
This is a simplified version of the "rnn" function from Tensorflow's api. See:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/rnn.py
"""
state = init_state
rnn_outputs = []
for rnn_input in rnn_inputs:
    state = rnn_cell(rnn_input, state)
    rnn_outputs.append(state)
final_state = rnn_outputs[-1]
"""
Predictions, loss, training step
Losses and total_loss are simlar to the "sequence_loss_by_example" and "sequence_loss"
functions, respectively, from Tensorflow's api. See:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/seq2seq.py
"""
#logits and predictions
with tf.variable_scope('softmax'):
    W = tf.get_variable('W', [state_size, num_classes])
    b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(0.0))
logits = [tf.matmul(rnn_output, W) + b for rnn_output in rnn_outputs]
predictions = [tf.nn.softmax(logit) for logit in logits]
# Turn our y placeholder into a list labels
y_as_list = [tf.squeeze(i, axis=[1]) for i in tf.split(axis=1, num_or_size_splits=num_steps, value=y)]
#losses and train_step
losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit,labels=label) for \
logit, label in zip(logits, y_as_list)]
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(learning_rate).minimize(total_loss)
"""
Function to train the network
"""
def train_network(num_epochs, num_steps, state_size=4, verbose=True):
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        training_losses = []
        for idx, epoch in enumerate(gen_epochs(num_epochs, num_steps)):
            training_loss = 0
            training_state = np.zeros((batch_size, state_size))
            if verbose:
                print("\nEPOCH", idx)
            for step, (X, Y) in enumerate(epoch):
                tr_losses, training_loss_, training_state, _ = \
                    sess.run([losses,
                              total_loss,
                              final_state,
                              train_step],
                             feed_dict={x: X, y: Y, init_state: training_state})
                training_loss += training_loss_
                if step % 100 == 0 and step > 0:
                    if verbose:
                        print("Average loss at step", step,
                              "for last 250 steps:", training_loss/100)
                    training_losses.append(training_loss/100)
                    training_loss = 0
    return training_losses
training_losses = train_network(1,num_steps)
plt.plot(training_losses)
It seems the batches are effectively transposed.
So the first row of the x matrix (200 x 5) holds the first 5 elements of raw_x.
Only in the next iteration do elements 5-10 of raw_x land in that same first row of x.
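To make this concrete, here is a small NumPy sketch (toy sizes of my own choosing, not from the tutorial) of the same partitioning with batch_size=2, num_steps=3 and 12 data points:
import numpy as np

raw_x = np.arange(12)                                 # [0, 1, ..., 11]
batch_size, num_steps = 2, 3
batch_partition_length = len(raw_x) // batch_size     # 6
# data_x[i] = raw_x[6*i : 6*(i+1)], which is just a reshape for this toy case
data_x = raw_x.reshape(batch_size, batch_partition_length)
for i in range(batch_partition_length // num_steps):
    x = data_x[:, i * num_steps:(i + 1) * num_steps]
    print(x)
# first yield:  [[0 1 2]      row 0 continues with 3 4 5 in the *next* yield,
#                [6 7 8]]     not further down within the same batch
# second yield: [[ 3  4  5]
#                [ 9 10 11]]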

SummaryWriter not outputting graph in TensorFlow [duplicate]

This question already has answers here:
Save Tensorflow graph for viewing in Tensorboard without summary operations
(5 answers)
Closed 5 years ago.
I am trying to use TensorBoard to analyse a graph in TensorFlow with SummaryWriter. However, TensorFlow is not outputting a 'graph' folder with information. Perhaps I am missing a command, or it is not in the right place?
writer = tf.train.SummaryWriter(logs_path, graph=tf.get_default_graph());
is what I used. I think this may no longer work in TensorFlow 1.0 (just the SummaryWriter command).
import numpy as np
import tensorflow as tf
# %matplotlib inline
import matplotlib.pyplot as plt
# Global config variables
num_steps = 5 # number of truncated backprop steps ('n' in the discussion above)
batch_size = 200
num_classes = 2
state_size = 4
learning_rate = 0.1
logs_path = "./graph"
def gen_data(size=1000000):
    X = np.array(np.random.choice(2, size=(size,)))
    Y = []
    for i in range(size):
        threshold = 0.5
        if X[i-3] == 1:
            threshold += 0.5
        if X[i-8] == 1:
            threshold -= 0.25
        if np.random.rand() > threshold:
            Y.append(0)
        else:
            Y.append(1)
    return X, np.array(Y)

# adapted from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/rnn/ptb/reader.py
def gen_batch(raw_data, batch_size, num_steps):
    raw_x, raw_y = raw_data
    data_length = len(raw_x)
    # partition raw data into batches and stack them vertically in a data matrix
    batch_partition_length = data_length // batch_size
    data_x = np.zeros([batch_size, batch_partition_length], dtype=np.int32)
    data_y = np.zeros([batch_size, batch_partition_length], dtype=np.int32)
    for i in range(batch_size):
        data_x[i] = raw_x[batch_partition_length * i:batch_partition_length * (i + 1)]
        data_y[i] = raw_y[batch_partition_length * i:batch_partition_length * (i + 1)]
    # further divide batch partitions into num_steps for truncated backprop
    epoch_size = batch_partition_length // num_steps
    for i in range(epoch_size):
        x = data_x[:, i * num_steps:(i + 1) * num_steps]
        y = data_y[:, i * num_steps:(i + 1) * num_steps]
        yield (x, y)

def gen_epochs(n, num_steps):
    for i in range(n):
        yield gen_batch(gen_data(), batch_size, num_steps)
"""
Placeholders
"""
x = tf.placeholder(tf.int32, [batch_size, num_steps], name='input_placeholder')
y = tf.placeholder(tf.int32, [batch_size, num_steps], name='labels_placeholder')
init_state = tf.zeros([batch_size, state_size])
"""
Inputs
"""
x_one_hot = tf.one_hot(x, num_classes)
rnn_inputs = tf.unstack(x_one_hot, axis=1)
"""
RNN
"""
cell = tf.contrib.rnn.BasicRNNCell(state_size)
rnn_outputs, final_state = tf.contrib.rnn.static_rnn(cell, rnn_inputs, initial_state=init_state)
"""
Predictions, loss, training step
"""
with tf.variable_scope('softmax'):
    W = tf.get_variable('W', [state_size, num_classes])
    b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(0.0))
logits = [tf.matmul(rnn_output, W) + b for rnn_output in rnn_outputs]
predictions = [tf.nn.softmax(logit) for logit in logits]
y_as_list = [tf.squeeze(i, axis=[1]) for i in tf.split(axis=1, num_or_size_splits=num_steps, value=y)]
loss_weights = [tf.ones([batch_size]) for i in range(num_steps)]
losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(logits, y_as_list, loss_weights)
tf.scalar_summary("losses", losses)
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(learning_rate).minimize(total_loss)
# Not sure why this is not outputting a graph for tensorboard
writer = tf.train.SummaryWriter(logs_path, graph=tf.get_default_graph());
"""
Function to train the network
"""
def train_network(num_epochs, num_steps, state_size=4, verbose=True):
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        training_losses = []
        saved = gen_epochs(num_epochs, num_steps)
        for idx, epoch in enumerate(gen_epochs(num_epochs, num_steps)):
            training_loss = 0
            training_state = np.zeros((batch_size, state_size))
            if verbose:
                print("\nEPOCH", idx)
            for step, (X, Y) in enumerate(epoch):
                tr_losses, training_loss_, training_state, _ = \
                    sess.run([losses,
                              total_loss,
                              final_state,
                              train_step],
                             feed_dict={x: X, y: Y, init_state: training_state})
                training_loss += training_loss_
                if step % 100 == 0 and step > 0:
                    if verbose:
                        print("Average loss at step", step,
                              "for last 250 steps:", training_loss/100)
                    training_losses.append(training_loss/100)
                    training_loss = 0
    return training_losses
training_losses = train_network(1,num_steps)
plt.plot(training_losses)
# tensorboard --logdir="my_graph"
This worked for me:
writer = tf.summary.FileWriter(logdir='logdir', graph=tf.get_default_graph())
writer.flush()
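For context, in the TensorFlow 1.x API the old summary calls were renamed: tf.train.SummaryWriter became tf.summary.FileWriter and tf.scalar_summary became tf.summary.scalar. A minimal sketch of the updated pattern, assuming a log directory of ./graph as in the question (the stand-in loss tensor is only for illustration):
import tensorflow as tf

logs_path = "./graph"
total_loss = tf.constant(0.0)                    # stand-in for the real loss tensor
tf.summary.scalar("total_loss", total_loss)      # replaces tf.scalar_summary

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(logs_path, graph=sess.graph)
    writer.flush()                               # force the event file to be written
# then: tensorboard --logdir=./graph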

Using Gradient Descent on Linear Regression Yields an Incorrect Bias

I've got a toy example of a linear regression model with one input variable and one output variable. The problem I'm encountering is that the learned bias is far off from the one used to generate the data. If I manually set the bias, it produces a weight and bias close enough to the original.
I've written two pieces of code: gen_data, which generates the data, and gradientDescent2, which runs the gradient descent algorithm to find the weight and bias.
def gen_data(num_points=50, slope=1, bias=10, x_max=50):
    f = lambda z: slope * z + bias
    x = np.zeros(shape=(num_points, 1))
    y = np.zeros(shape=(num_points, 1))
    for i in range(num_points):
        x_temp = np.random.uniform()*x_max
        x[i] = x_temp
        y[i] = f(x_temp) + np.random.normal(scale=3.0)
    return (x, y)

# \mathbb{R}^1 with no regularization
def gradientDescent2(x, y, learning_rate=0.0001, epochs=100):
    theta = np.random.rand()
    bias = np.random.rand()
    for i in range(0, epochs):
        loss = (theta * x + bias) - y
        cost = np.mean(loss**2) / 2
        # print('Iteration {} | Cost: {}'.format(i, cost))
        grad_b = np.mean(loss)
        grad_t = np.mean(loss*x)
        # updates
        bias -= learning_rate * grad_b
        theta -= learning_rate * grad_t
    return (theta, bias)
1. If you want to use batch updates, don't set your batch_size equal to your sample size. (I also believe that batch updates are not very suitable for this case.)
2. Your gradient calculation and parameter update are incorrect; the gradient should be:
grad_b = 1
grad_t = x
For the parameter update, you should always try to minimize the loss, so it should be
if loss > 0:
    bias -= learning_rate * grad_b
    theta -= learning_rate * grad_t
elif loss < 0:
    bias += learning_rate * grad_b
    theta += learning_rate * grad_t
Finally, below is the modified code, which works well.
import numpy as np
import sys
def gen_data(num_points=500, slope=1, bias=10, x_max=50):
    f = lambda z: slope * z + bias
    x = np.zeros(shape=(num_points))
    y = np.zeros(shape=(num_points))
    for i in range(num_points):
        x_temp = np.random.uniform()*x_max
        x[i] = x_temp
        y[i] = f(x_temp)  #+ np.random.normal(scale=3.0)
        #print('x:',x[i],' y:',y[i])
    return (x, y)

def gradientDescent2(x, y, learning_rate=0.001, epochs=100):
    theta = np.random.rand()
    bias = np.random.rand()
    for i in range(0, epochs):
        for j in range(len(x)):
            loss = (theta * x[j] + bias) - y[j]
            cost = np.mean(loss**2) / 2
            # print('Iteration {} | Cost: {}'.format(i, cost))
            grad_b = 1
            grad_t = x[j]
            if loss > 0:
                bias -= learning_rate * grad_b
                theta -= learning_rate * grad_t
            elif loss < 0:
                bias += learning_rate * grad_b
                theta += learning_rate * grad_t
    return (theta, bias)

def main():
    x, y = gen_data()
    ta, bias = gradientDescent2(x, y)
    print('theta:', ta)
    print('bias:', bias)

if __name__ == '__main__':
    sys.exit(int(main() or 0))
