Computing weights in linear regression problem - python

I have written a script that demonstrates the linear regression algorithm as follows:
import numpy as np
import tensorflow as tf

training_epochs = 100
learning_rate = 0.01

# the training set
x_train = np.linspace(0, 10, 100)
y_train = x_train + np.random.normal(0, 1, 100)

# set up placeholders for input and output
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# set up variables for the weights
w0 = tf.Variable(0.0, name="w0")
w1 = tf.Variable(0.0, name="w1")
y_predicted = X*w1 + w0

# define the cost function
costF = 0.5*tf.square(Y - y_predicted)

# define the operation that will be called on each iteration
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(costF)

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

# loop through the training data
for epoch in range(training_epochs):
    for (x, y) in zip(x_train, y_train):
        sess.run(train_op, feed_dict={X: x, Y: y})

# get the values of the final weights
w_val_0, w_val_1 = sess.run([w0, w1])
sess.close()
With the script above I can compute w_val_0 and w_val_1 easily. But if I change y_predicted to fit a quadratic instead:
w0 = tf.Variable(0.0, name="w0")
w1 = tf.Variable(0.0, name="w1")
w2 = tf.Variable(0.0, name="w2")
y_predicted = X*X*w2 + X*w1 + w0
...
w_val_0,w_val_1,w_val_2 = sess.run([w0,w1,w2])
then I couldn't compute w_val_0, w_val_1, w_val_2. Please help me!

When you include the X*X term, the weights (w2, w1 and w0) increase rapidly, reaching inf, which results in nan values in the loss, and no training happens. As a rule of thumb, always normalize the data to zero mean and unit variance.
Fixed code
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

training_epochs = 100
learning_rate = 0.01

# the training set
x_train = np.linspace(0, 10, 100)
y_train = x_train + np.random.normal(0, 1, 100)

# normalize the input to zero mean and unit variance
x_mean = np.mean(x_train)
x_std = np.std(x_train)
x_train_ = (x_train - x_mean)/x_std

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# set up variables for the weights
w0 = tf.Variable(0.0, name="w0")
w1 = tf.Variable(0.0, name="w1")
w2 = tf.Variable(0.0, name="w2")
# note: in this version w1 multiplies X*X and w2 multiplies X
y_predicted = X*X*w1 + X*w2 + w0

# define the cost function
costF = 0.5*tf.square(Y - y_predicted)

# define the operation that will be called on each iteration
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(costF)

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)

# loop through the training data
for epoch in range(training_epochs):
    for (x, y) in zip(x_train_, y_train):
        sess.run(train_op, feed_dict={X: x, Y: y})

# predictions on the normalized inputs, and the final weights
y_hat = sess.run(y_predicted, feed_dict={X: x_train_})
print(sess.run([w0, w1, w2]))
sess.close()

plt.plot(x_train, y_train)
plt.plot(x_train, y_hat)
plt.show()
output:
[4.9228806, -0.08735728, 3.029659]
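The printed weights live in the normalized x scale. If you want the polynomial coefficients back in the original x scale, you can expand the normalization algebraically; a minimal sketch, assuming the ordering from the fixed code above (w1 multiplies X*X, w2 multiplies X):

import numpy as np

# statistics of the original training inputs
x_train = np.linspace(0, 10, 100)
x_mean, x_std = np.mean(x_train), np.std(x_train)

# weights printed above, fitted against x_ = (x - x_mean)/x_std
w0_, w1_, w2_ = 4.9228806, -0.08735728, 3.029659

# substitute x_ into  y = w1_*x_**2 + w2_*x_ + w0_  and collect powers of x
a = w1_ / x_std**2                                          # coefficient of x**2
b = w2_ / x_std - 2 * w1_ * x_mean / x_std**2               # coefficient of x
c = w0_ + w1_ * (x_mean / x_std)**2 - w2_ * x_mean / x_std  # intercept

print(a, b, c)  # approximately recovers y ~ x (a near 0, b near 1), up to noise and incomplete convergence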

Related

LSTM+FFN performs more poorly than FFN

I am building several simple networks to predict bike rentals at 500 stations in the upcoming hour, given rentals at all stations in the previous 24 hours. I am working with two architectures: one with a graph convolution (which amounts to updating each station with a learned linear combination of the other stations, at each hour) followed by an FNN layer for prediction, and a second with a graph convolution -> LSTM -> FNN for prediction.
Before I describe more: I am getting poorer performance from the model that includes the LSTM unit, which is confusing me.
See these two images for a description of each architecture. For each architecture I also add hourly metadata (weather, time, etc.) as a variation; it is shown in red in the images and is not relevant to my question. Image links are at the bottom of the post.
[Architecture 1: GCNN + FNN][1]
[Architecture 2: GCNN + LSTM + FNN][2]
Confusingly, the test RMSE for the first model is 3.46, while for the second model it is 3.57. Could someone please explain why the second would not be lower, given that it seems to run the exact same process except with an additional LSTM unit?
Here are relevant snippets of my code for the GCNN+FNN model:
def gcnn_ddgf(hidden_layer, node_num, feature_in, horizon, learning_rate, beta, batch_size, early_stop_th, training_epochs, X_training, Y_training, X_val, Y_val, X_test, Y_test, scaler, display_step):
    n_output_vec = node_num * horizon  # length of output vector at the final layer
    early_stop_k = 0  # early stop patience
    best_val = 10000
    traing_error = 0
    test_error = 0
    pred_Y = []

    tf.reset_default_graph()
    batch_size = batch_size
    early_stop_th = early_stop_th
    training_epochs = training_epochs

    # tf Graph input and output
    X = tf.placeholder(tf.float32, [None, node_num, feature_in])  # X is the input signal
    Y = tf.placeholder(tf.float32, [None, n_output_vec])  # y is the regression output

    # define dictionaries to store layers weight & bias
    weights_hidden = {}
    weights_A = {}
    biases = {}

    vec_length = feature_in
    weights_hidden['h1'] = tf.Variable(tf.random_normal([vec_length, hidden_layer], stddev=0.5))
    biases['b1'] = tf.Variable(tf.random_normal([1, hidden_layer], stddev=0.5))
    weights_A['A1'] = tf.Variable(tf.random_normal([node_num, node_num], stddev=0.5))
    weights_hidden['out'] = tf.Variable(tf.random_normal([hidden_layer, horizon], stddev=0.5))
    biases['bout'] = tf.Variable(tf.random_normal([1, horizon], stddev=0.5))

    # Construct model
    pred = gcn(X, weights_hidden, weights_A, biases, node_num, horizon)  # see below
    pred = scaler.inverse_transform(pred)
    Y_original = scaler.inverse_transform(Y)
    cost = tf.sqrt(tf.reduce_mean(tf.pow(pred - Y_original, 2)))

    # optimizer = tf.train.RMSPropOptimizer(learning_rate, decay).minimize(cost)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta).minimize(cost)

    # Initializing the variables
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)

        for epoch in range(training_epochs):
            avg_cost_sq = 0.
            num_train = X_training.shape[0]
            total_batch = int(num_train/batch_size)

            for i in range(total_batch):
                _, c = sess.run([optimizer, cost], feed_dict={X: X_training[i*batch_size:(i+1)*batch_size,],
                                                              Y: Y_training[i*batch_size:(i+1)*batch_size,]})
                avg_cost_sq += np.square(c) * batch_size  # / total_batch

            # rest part of training dataset
            if total_batch * batch_size != num_train:
                _, c = sess.run([optimizer, cost], feed_dict={X: X_training[total_batch*batch_size:num_train,],
                                                              Y: Y_training[total_batch*batch_size:num_train,]})
                avg_cost_sq += np.square(c) * (num_train - total_batch*batch_size)

            avg_cost = np.sqrt(avg_cost_sq / num_train)

            # validation
            c_val, = sess.run([cost], feed_dict={X: X_val, Y: Y_val})
            if c_val < best_val:
                # testing
                c_tes, preds, Y_true = sess.run([cost, pred, Y_original], feed_dict={X: X_test, Y: Y_test})
                best_val = c_val
                test_error = c_tes
                traing_error = avg_cost
                pred_Y = preds
                early_stop_k = 0  # reset to 0

            # update early stopping patience
            if c_val >= best_val:
                early_stop_k += 1

            # threshold
            if early_stop_k == early_stop_th:
                break

            if epoch % display_step == 0:
                print("Epoch:", '%04d' % (epoch+1), "Training RMSE: ", "{:.9f}".format(avg_cost))
                print("Validation RMSE: ", c_val)
                print("Lowest test RMSE: ", test_error)

        print("epoch is ", epoch)
        print("training RMSE is ", traing_error)
        print("Optimization Finished! the lowest validation RMSE is ", best_val)
        print("The test RMSE is ", test_error)

    return best_val, pred_Y, Y_true, test_error

# code that creates the model
def gcn(signal_in, weights_hidden, weights_A, biases, node_num, horizon):
    signal_in = tf.transpose(signal_in, [1, 0, 2])  # node_num, batch, feature_in
    feature_len = signal_in.shape[2]  # feature vector length at the node of the input graph
    signal_in = tf.reshape(signal_in, [node_num, -1])  # node_num, batch*feature_in

    Adj = 0.5*(weights_A['A1'] + tf.transpose(weights_A['A1']))
    Adj = normalize_adj(Adj)
    Z = tf.matmul(Adj, signal_in)  # node_num, batch*feature_in
    Z = tf.reshape(Z, [-1, int(feature_len)])  # node_num * batch, feature_in

    signal_output = tf.add(tf.matmul(Z, weights_hidden['h1']), biases['b1'])
    signal_output = tf.nn.relu(signal_output)  # node_num * batch, hidden_vec

    final_output = tf.add(tf.matmul(signal_output, weights_hidden['out']), biases['bout'])  # node_num * batch, horizon
    # final_output = tf.nn.relu(final_output)
    final_output = tf.reshape(final_output, [node_num, -1, horizon])  # node_num, batch, horizon
    final_output = tf.transpose(final_output, [1, 0, 2])  # batch, node_num, horizon
    final_output = tf.reshape(final_output, [-1, node_num*horizon])  # batch, node_num*horizon
    return final_output
And the code for the GCNN+LSTM+FNN model:
def gcnn_ddgf_lstm(node_num, feature_in, learning_rate, beta, batch_size, early_stop_th, training_epochs,
                   X_training, Y_training, X_val, Y_val, X_test, Y_test, scaler, lstm_layer):
    n_output_vec = node_num  # length of output vector at the final layer
    early_stop_k = 0  # early stop patience
    display_step = 1  # frequency of printing results
    best_val = 10000
    traing_error = 0
    test_error = 0
    predic_res = []

    tf.reset_default_graph()
    batch_size = batch_size
    early_stop_th = early_stop_th
    training_epochs = training_epochs

    # tf Graph input and output
    X = tf.placeholder(tf.float32, [None, node_num, feature_in])  # X is the input signal
    Y = tf.placeholder(tf.float32, [None, n_output_vec])  # y is the regression output

    lstm_cell = tf.nn.rnn_cell.LSTMCell(lstm_layer, state_is_tuple=True)

    # define dictionaries to store layers weight & bias
    weights_hidden = {}
    weights_A = {}
    biases = {}

    weights_A['A1'] = tf.Variable(tf.random_normal([node_num, node_num], stddev=0.5))
    weights_hidden['h1'] = tf.Variable(tf.random_normal([lstm_layer, node_num], stddev=0.5))
    biases['h1'] = tf.Variable(tf.random_normal([1, node_num], stddev=0.5))
    weights_hidden['out'] = tf.Variable(tf.random_normal([node_num, node_num], stddev=0.5))
    biases['bout'] = tf.Variable(tf.random_normal([1, node_num], stddev=0.5))

    # Construct model
    pred = gcn_lstm(X, weights_hidden, weights_A, biases, node_num, lstm_cell)
    # pred = scaler.inverse_transform(pred)
    # Y_original = scaler.inverse_transform(Y)
    cost = tf.sqrt(tf.reduce_mean(tf.pow(pred - Y, 2)))

    # optimizer = tf.train.RMSPropOptimizer(learning_rate, decay).minimize(cost)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta).minimize(cost)

    # Initializing the variables
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)

        for epoch in range(training_epochs):
            avg_cost_sq = 0.
            num_train = X_training.shape[0]
            total_batch = int(num_train/batch_size)

            for i in range(total_batch):
                _, c = sess.run([optimizer, cost], feed_dict={X: X_training[i*batch_size:(i+1)*batch_size,],
                                                              Y: Y_training[i*batch_size:(i+1)*batch_size,]})
                avg_cost_sq += np.square(c) * batch_size  # / total_batch

            # rest part of training dataset
            if total_batch * batch_size != num_train:
                _, c = sess.run([optimizer, cost], feed_dict={X: X_training[total_batch*batch_size:num_train,],
                                                              Y: Y_training[total_batch*batch_size:num_train,]})
                avg_cost_sq += np.square(c) * (num_train - total_batch*batch_size)

            avg_cost = np.sqrt(avg_cost_sq / num_train)

            # validation
            c_val, = sess.run([cost], feed_dict={X: X_val, Y: Y_val})
            if c_val < best_val:
                c_tes, preds = sess.run([cost, pred], feed_dict={X: X_test, Y: Y_test})
                best_val = c_val
                # save model
                # saver.save(sess, './bikesharing_gcnn_ddgf')
                test_error = c_tes
                traing_error = avg_cost
                early_stop_k = 0  # reset to 0

            # update early stopping patience
            if c_val >= best_val:
                early_stop_k += 1

            # threshold
            if early_stop_k == early_stop_th:
                pred_Y = scaler.inverse_transform(preds)
                Y_true = scaler.inverse_transform(Y_test)
                test_err = tf.sqrt(tf.reduce_mean(tf.pow(pred_Y - Y_true, 2)))
                break

            if epoch % display_step == 0:
                print("Epoch:", '%04d' % (epoch+1), "Training RMSE: ", "{:.9f}".format(avg_cost))
                print("Validation RMSE: ", c_val)
                print("Lowest test RMSE: ", test_error)

        print("epoch is ", epoch)
        print("training RMSE is ", traing_error)
        print("Optimization Finished! the lowest validation RMSE is ", best_val)
        print("The scaled test RMSE is ", test_error)

    return pred_Y, Y_true

def gcn_lstm(signal_in, weights_hidden, weights_A, biases, node_num, lstm_cell):
    signal_in = tf.transpose(signal_in, [1, 0, 2])  # node_num, batch, feature_in
    feature_len = signal_in.shape[2]  # feature vector length at the node of the input graph
    signal_in = tf.reshape(signal_in, [node_num, -1])  # node_num, batch*feature_in

    Adj = 0.5*(weights_A['A1'] + tf.transpose(weights_A['A1']))
    Adj = normalize_adj(Adj)
    Z = tf.matmul(Adj, signal_in)  # node_num, batch*feature_in
    Z = tf.reshape(Z, [node_num, -1, int(feature_len)])  # node_num, batch, feature_in
    Z = tf.transpose(Z, [1, 2, 0])  # batch, feature_in, node_num

    # init_state = cell.zero_state(batch_size, tf.float32)
    _, Z = tf.nn.dynamic_rnn(lstm_cell, Z, dtype=tf.float32)  # init_state?
    dense_output = tf.add(tf.matmul(Z[1], weights_hidden['h1']), biases['h1'])
    dense_output = tf.nn.relu(dense_output)

    final_output = tf.add(tf.matmul(dense_output, weights_hidden['out']), biases['bout'])  # batch, node_num*horizon
    return final_output
In particular, should I be wary that _, Z = tf.nn.dynamic_rnn(lstm_cell, Z, dtype=tf.float32) causes my variables defined elsewhere not to train?
Thanks a lot for any help :)
[1]: https://i.stack.imgur.com/MAO2t.png
[2]: https://i.stack.imgur.com/UDjHw.png
I resolved this.
I have three years of bike-use data to make the prediction, and was using roughly the last three months as my validation/test set. Those last few months were winter, with lower bike use. I got the expected results (GCNN+LSTM outperforms GCNN, though not by much) when I shuffled my training data prior to allocating it to sets (with the 24-hour sequences preserved for the LSTM).
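A minimal sketch of that kind of split, assuming each sample is already a self-contained (24-hour window, next-hour target) pair, so shuffling whole samples keeps every sequence the LSTM sees intact (the shuffled_split helper and the fractions are illustrative, not from the original post):

import numpy as np

def shuffled_split(X, Y, val_frac=0.1, test_frac=0.1, seed=0):
    """Shuffle whole (window, target) samples, then split into train/val/test.

    X: array of shape (num_samples, node_num, feature_in) -- each row is a full
       24-hour window, so permuting rows does not break any sequence.
    Y: array of shape (num_samples, node_num) -- next-hour targets.
    """
    rng = np.random.RandomState(seed)
    idx = rng.permutation(len(X))
    X, Y = X[idx], Y[idx]

    n_test = int(len(X) * test_frac)
    n_val = int(len(X) * val_frac)
    X_train, Y_train = X[:-(n_val + n_test)], Y[:-(n_val + n_test)]
    X_val, Y_val = X[-(n_val + n_test):-n_test], Y[-(n_val + n_test):-n_test]
    X_test, Y_test = X[-n_test:], Y[-n_test:]
    return X_train, Y_train, X_val, Y_val, X_test, Y_test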

tf.get_variable initializer Tensorflow

How important is it to choose the right value for the initializer in TensorFlow?
Compare this code:
a = tf.get_variable('a', initializer=0.1)
b = tf.get_variable('b', initializer=-3.0)
with this:
a = tf.get_variable('a', initializer=0.1)
b = tf.get_variable('b', initializer=0.0)
Why, in the second example, does TensorFlow not manage to fit the data properly? Is there anything that can be done by changing num_epochs or learning_rate?
This is my code:
# TensorFlow Model

# Config
num_epochs = 2000
learning_rate = 0.0001
# /Config

# Creating the graph
ops.reset_default_graph()
tf.disable_v2_behavior()

X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')

a = tf.get_variable('a', initializer=0.1)
b = tf.get_variable('b', initializer=-3.0)

h = a * X + b

cost = tf.reduce_mean((h - Y)**2)
optimizer = tf.train.GradientDescentOptimizer(
    learning_rate=learning_rate
).minimize(cost)
init = tf.global_variables_initializer()

# Running the Model
found_a = 0
found_b = 0

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(num_epochs):
        _, costValue = sess.run(
            [optimizer, cost],
            feed_dict={
                X: x,
                Y: y,
            }
        )
        found_a = a.eval()
        found_b = b.eval()
        if epoch % (num_epochs/10) == 0:  # Every 10 percent
            print("... epoch: " + str(epoch))
            print(f"cost[{str(costValue)}] / a[{str(a.eval())}] / b[{str(b.eval())}]")

# Seeing the obtained values in a plot
xrange = np.linspace(x.min(), x.max(), 2)
# Plot points
plt.plot(x, y, 'ro')
# Plot resulting function
plt.plot(xrange, xrange * found_a + found_b, 'b')
plt.show()

Tensorflow - Nan loss and constant accuracy when training

As the title says, I am trying to train a neural network to predict outcomes, and I can't figure out what is wrong with my model. I keep getting the exact same accuracy level, and the loss is NaN. I'm confused; I have looked at other similar questions and still can't seem to get it working. My code for the model and training is below:
import numpy as np
import pandas as pd
import tensorflow as tf
import urllib.request as request
import matplotlib.pyplot as plt
from FlowersCustom import get_MY_data

def get_data():
    IRIS_TRAIN_URL = "http://download.tensorflow.org/data/iris_training.csv"
    IRIS_TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"

    names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'species']
    train = pd.read_csv(IRIS_TRAIN_URL, names=names, skiprows=1)
    test = pd.read_csv(IRIS_TEST_URL, names=names, skiprows=1)

    # Train and test input data
    Xtrain = train.drop("species", axis=1)
    Xtest = test.drop("species", axis=1)

    # Encode target values into binary ('one-hot' style) representation
    ytrain = pd.get_dummies(train.species)
    ytest = pd.get_dummies(test.species)

    return Xtrain, Xtest, ytrain, ytest

def create_graph(hidden_nodes):
    # Reset the graph
    tf.reset_default_graph()

    # Placeholders for input and output data
    X = tf.placeholder(shape=Xtrain.shape, dtype=tf.float64, name='X')
    y = tf.placeholder(shape=ytrain.shape, dtype=tf.float64, name='y')

    # Variables for the two groups of weights between the three layers of the network
    print(Xtrain.shape, ytrain.shape)
    W1 = tf.Variable(np.random.rand(Xtrain.shape[1], hidden_nodes), dtype=tf.float64)
    W2 = tf.Variable(np.random.rand(hidden_nodes, ytrain.shape[1]), dtype=tf.float64)

    # Create the neural net graph
    A1 = tf.sigmoid(tf.matmul(X, W1))
    y_est = tf.sigmoid(tf.matmul(A1, W2))

    # Define a loss function
    deltas = tf.square(y_est - y)
    loss = tf.reduce_sum(deltas)

    # Define a train operation to minimize the loss
    # optimizer = tf.train.GradientDescentOptimizer(0.005)
    optimizer = tf.train.AdamOptimizer(0.001)
    opt = optimizer.minimize(loss)

    return opt, X, y, loss, W1, W2, y_est

def train_model(hidden_nodes, num_iters, opt, X, y, loss, W1, W2, y_est):
    # Initialize variables and run session
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    losses = []

    # Go through num_iters iterations
    for i in range(num_iters):
        sess.run(opt, feed_dict={X: Xtrain, y: ytrain})
        local_loss = sess.run(loss, feed_dict={X: Xtrain.values, y: ytrain.values})
        losses.append(local_loss)
        weights1 = sess.run(W1)
        weights2 = sess.run(W2)

        y_est_np = sess.run(y_est, feed_dict={X: Xtrain.values, y: ytrain.values})
        correct = [estimate.argmax(axis=0) == target.argmax(axis=0)
                   for estimate, target in zip(y_est_np, ytrain.values)]
        acc = 100 * sum(correct) / len(correct)

        if i % 10 == 0:
            print('Epoch: %d, Accuracy: %.2f, Loss: %.2f' % (i, acc, local_loss))

    print("loss (hidden nodes: %d, iterations: %d): %.2f" % (hidden_nodes, num_iters, losses[-1]))
    sess.close()

    return weights1, weights2

def test_accuracy(weights1, weights2):
    X = tf.placeholder(shape=Xtest.shape, dtype=tf.float64, name='X')
    y = tf.placeholder(shape=ytest.shape, dtype=tf.float64, name='y')
    W1 = tf.Variable(weights1)
    W2 = tf.Variable(weights2)
    A1 = tf.sigmoid(tf.matmul(X, W1))
    y_est = tf.sigmoid(tf.matmul(A1, W2))

    # Calculate the predicted outputs
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        y_est_np = sess.run(y_est, feed_dict={X: Xtest, y: ytest})

    # Calculate the prediction accuracy
    correct = [estimate.argmax(axis=0) == target.argmax(axis=0)
               for estimate, target in zip(y_est_np, ytest.values)]
    accuracy = 100 * sum(correct) / len(correct)
    print('final accuracy: %.2f%%' % accuracy)

def get_inputs_and_outputs(train, test, output_column_name):
    Xtrain = train.drop(output_column_name, axis=1)
    Xtest = test.drop(output_column_name, axis=1)
    ytrain = pd.get_dummies(getattr(train, output_column_name))
    ytest = pd.get_dummies(getattr(test, output_column_name))

    return Xtrain, Xtest, ytrain, ytest

if __name__ == '__main__':
    train, test = get_MY_data('output')
    Xtrain, Xtest, ytrain, ytest = get_inputs_and_outputs(train, test, 'output')  # get_data()
    # Xtrain, Xtest, ytrain, ytest = get_data()

    hidden_layers = 10
    num_epochs = 500

    opt, X, y, loss, W1, W2, y_est = create_graph(hidden_layers)
    w1, w2 = train_model(hidden_layers, num_epochs, opt, X, y, loss, W1, W2, y_est)
    # test_accuracy(w1, w2)
Here is a screenshot of what the training is printing out:
And this is a screenshot of the Pandas Dataframe that I am using for the input data (5 columns of floats):
And finally, here is the Pandas Dataframe that I am using for the expected outputs (1 column of either -1 or 1):
This is almost always a problem with the input data.
I would suggest inspecting in detail the values you are feeding into the model to make sure the model is receiving what you think it is.
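A minimal sketch of that kind of check, assuming the Xtrain/ytrain DataFrames from the code above (the inspect_inputs helper is illustrative, not part of the original post):

import numpy as np
import pandas as pd

def inspect_inputs(X: pd.DataFrame, y: pd.DataFrame):
    """Print basic diagnostics that commonly explain NaN losses."""
    # NaN/inf values anywhere in the features or targets?
    print("NaNs in X:", X.isna().values.sum(), " NaNs in y:", y.isna().values.sum())
    print("infs in X:", np.isinf(X.values).sum())

    # Ranges and scales -- very large values or wildly different column scales
    # often push sigmoid activations into saturation.
    print(X.describe().loc[['min', 'max', 'mean', 'std']])

    # Do the targets look like what the loss expects (e.g. one-hot rows)?
    print("first few unique target rows:", np.unique(y.values, axis=0)[:5])

# inspect_inputs(Xtrain, ytrain)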

nan on loss function tensorflow

I am trying to build a model that makes predictions from my data, and I want to watch the LOSS function during training:
loss = tf.reduce_mean(-(y_ * tf.log(y) + (1 - y_) * tf.log(1 - y)))
But as of now I only get NaN for the prediction, and the loss function also prints NaN.
import numpy as np
import tensorflow as tf

np_labels = np.array(labels)
np_labels = np_labels.reshape([np_labels.shape[0], 1])

features = 910
hidden_layer_nodes = 100

x = tf.placeholder(tf.float32, [None, features])
y_ = tf.placeholder(tf.float32, [None, 1])

W1 = tf.Variable(tf.truncated_normal([features, hidden_layer_nodes], stddev=0.1))
b1 = tf.Variable(tf.constant(0.1, shape=[hidden_layer_nodes]))
z1 = tf.add(tf.matmul(x, W1), b1)
a1 = tf.nn.relu(z1)

W2 = tf.Variable(tf.truncated_normal([hidden_layer_nodes, 1], stddev=0.1))
b2 = tf.Variable(0.)
z2 = tf.matmul(a1, W2) + b2
y = 1 / (1.0 + tf.exp(-z2))

loss = tf.reduce_mean(-(y_ * tf.log(y) + (1 - y_) * tf.log(1 - y)))
update = tf.train.AdamOptimizer(0.01).minimize(loss)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(0, 50):
    sess.run(update, feed_dict={x: fvecs, y_: np_labels})
    print(sess.run(loss, feed_dict={x: fvecs, y_: np_labels}))
    # sess.run(update, feed_dict = {x:data_x, y_:data_y})
    # print(sess.run(loss, feed_dict={x: data_x, y_: data_y}))

print('prediction: ', y.eval(session=sess, feed_dict = {x:[[493.9, 702.6, .....
I want to print the loss.
Thanks
This is not a TensorFlow issue. It results from the very bad idea of implementing the loss function yourself.
import tensorflow as tf

z2 = tf.random_normal([8, 10]) * 20
y_ = tf.random_uniform([8, 1], minval=0, maxval=10, dtype=tf.float32)
y = 1 / (1.0 + tf.exp(-z2))
loss = tf.reduce_mean(-(y_ * tf.log(y) + (1 - y_) * tf.log(1 - y)))

with tf.Session() as sess:
    print(sess.run(loss))  # will fail (nan/inf) with high probability
This will give inf simply because it misses the log-sum-exp trick, so the implementation fails due to numerical instability (a folklore example that produces overflows). Just run this code several times and you will get either NaN or inf.
The solution would be:
replace y = 1 / (1.0 + tf.exp(-z2)) (the hand-rolled sigmoid) by y = tf.identity(z2) to just get the untransformed logits
replace loss = ... by loss = tf.nn.sigmoid_cross_entropy_with_logits(...) to use the numerically stable formulation
See the docs of sigmoid_cross_entropy_with_logits, which explicitly describe this issue.
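Applied to the question's network, a minimal sketch of those two replacements (variable names follow the question's code; only the loss construction changes):

# keep the raw logits instead of squashing them by hand
z2 = tf.matmul(a1, W2) + b2   # logits
y = tf.sigmoid(z2)            # only used to read out probabilities/predictions

# numerically stable cross-entropy computed from the logits, not from y
loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=y_, logits=z2))
update = tf.train.AdamOptimizer(0.01).minimize(loss)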

TensorFlow MLP always returns 0 or 1 when float values between 0 and 1 are expected

I am a beginner in TensorFlow. I implemented a TensorFlow MLP network to predict values between 0 and 1. The input values are floats between 0 and 1 and the weights are random floats between 0 and 1. But the output always returns 0 or 1, where I expect float values between 0 and 1 to be returned. The code is given below.
import tensorflow as tf
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from scipy.io import loadmat
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler

RANDOM_SEED = 42
tf.set_random_seed(RANDOM_SEED)

def init_weights(shape):
    """ Weight initialization """
    weights = tf.random_normal(shape, stddev=0.01)
    return tf.Variable(weights)

def forwardprop(X, w_1, w_2):
    h = tf.nn.sigmoid(tf.matmul(X, w_1))  # The \sigma function
    yhat = tf.matmul(h, w_2)              # The \varphi function
    return yhat

def load_data():
    dw = loadmat('dw.mat')
    dv = loadmat('dv.mat')
    dw_2 = loadmat('test_dw.mat')
    dv_2 = loadmat('test_dv.mat')

    train_dw = dw['dw']
    train_dv = dv['dv']
    test_dw = dw_2['test_dw']
    test_dv = dv_2['test_dv']

    scaler = MinMaxScaler()
    train_dw = scaler.fit_transform(train_dw)
    train_dv = scaler.fit_transform(train_dv)
    test_dw = scaler.fit_transform(test_dw)
    test_dv = scaler.fit_transform(test_dv)

    rows = len(train_dw)
    # train_input = dv(t),dw(t),dw(t-1),dw(t-2),dw_pred_neighbor1(t),dw_pred_neighbor2(t)
    train_input = np.column_stack((np.ones((rows-3)), train_dv[2:(rows-1),1], train_dw[2:(rows-1),1], train_dw[1:(rows-2),1], train_dw[0:(rows-3),1], train_dw[2:(rows-1),2], train_dw[2:(rows-1),4]))
    # train_target = dw(t+1)
    train_target = np.column_stack((np.ones((rows-3)), train_dw[3:rows,1]))

    test_rows = len(test_dw)
    # test_input = dv(t),dw(t),dw(t-1),dw(t-2),dw_pred_neighbor1(t),dw_pred_neighbor2(t)
    test_input = np.column_stack((np.ones((test_rows-3)), test_dv[2:(test_rows-1),1], test_dw[2:(test_rows-1),1], test_dw[1:(test_rows-2),1], test_dw[0:(test_rows-3),1], test_dw[2:(test_rows-1),2], test_dw[2:(test_rows-1),4]))
    # test_target = dw(t+1)
    test_target = np.column_stack((np.ones((test_rows-3)), test_dw[3:test_rows,1]))

    return train_input, test_input, train_target, test_target

def main():
    train_X, test_X, train_y, test_y = load_data()

    # Layer's sizes
    x_size = train_X.shape[1]  # Number of input nodes
    h_size = 10                # Number of hidden nodes
    y_size = train_y.shape[1]  # Number of outputs

    # Symbols
    X = tf.placeholder(dtype=tf.float32, shape=[None, x_size])
    y = tf.placeholder(dtype=tf.float32, shape=[None, y_size])

    # Weight initializations
    w_1 = init_weights((x_size, h_size))
    w_2 = init_weights((h_size, y_size))

    # Forward propagation
    yhat = forwardprop(X, w_1, w_2)
    predict = tf.argmax(yhat, axis=1)

    # Backward propagation
    # cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=yhat))
    cost = tf.losses.mean_squared_error(y, yhat)
    updates = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)

    # Run SGD
    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)

    cost_history = np.empty(shape=[1], dtype=float)
    for epoch in range(1000):
        # Train with each example
        for i in range(len(train_X)):
            sess.run(updates, feed_dict={X: train_X, y: train_y})

        pred = sess.run(predict, feed_dict={X: train_X})
        print(pred)
        plt.plot(range(len(pred)), pred)
        plt.show()

        cost_history = np.append(cost_history, sess.run(cost, feed_dict={X: train_X, y: train_y}))

        train_accuracy = np.mean(np.argmax(train_y, axis=0) ==
                                 sess.run(predict, feed_dict={X: train_X, y: train_y}))
        test_accuracy = np.mean(np.argmax(test_y, axis=0) ==
                                sess.run(predict, feed_dict={X: test_X, y: test_y}))

        print("Epoch = %d, train accuracy = %.2f%%, test accuracy = %.2f%%"
              % (epoch + 1, 100. * train_accuracy, 100. * test_accuracy))

    plt.plot(range(len(cost_history)), cost_history)
    plt.axis([0, epoch, 0, np.max(cost_history)])
    plt.show()

    sess.close()

if __name__ == '__main__':
    main()
tf.argmax returns the index in the vector which has the max value, which is why predict only ever gives you 0 or 1 here.
If you want the predicted float values themselves, evaluate yhat directly (or use tf.reduce_max to get the maximum value rather than its index).
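A minimal sketch of that change against the question's code (names follow the question; only what is fetched from the session changes, not the model):

# predict = tf.argmax(yhat, axis=1)           # index of the max -> always 0 or 1 here
predict_value = tf.reduce_max(yhat, axis=1)   # the maximum value itself

# or simply fetch the raw network output as the float prediction
pred = sess.run(yhat, feed_dict={X: train_X})
pred_max = sess.run(predict_value, feed_dict={X: train_X})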
