TensorFlow confusing output - python

What I am trying to do is predict credit risk from a dataset with 20 inputs and 1 binary output.
Since this is a classification problem (two classes: positive and negative credit risk), I took a TensorFlow classification code example and modified it to work with a CSV dataset.
So in the end I am expecting an output of 1 or 0. However, I get numbers which are not even close, for example: [[-561.7623291]]
PS: The label column is the first one.
Here is my Python TensorFlow code:
from __future__ import print_function
import tensorflow as tf
import csv
import numpy as np
# Import MNIST data
#from tensorflow.examples.tutorials.mnist import input_data
#mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
#INPUT
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
col8 = []
col9 = []
col10 = []
col11 = []
col12 = []
col13 = []
col14 = []
col15 = []
col16 = []
col17 = []
col18 = []
col19 = []
col20 = []
col21 = []
col0 = []
mycsv = csv.reader(open("german_credit_for_mp.csv"))
for row in mycsv:
    col1.append(row[0])
    col2.append(row[1])
    col3.append(row[2])
    col4.append(row[3])
    col5.append(row[4])
    col6.append(row[5])
    col7.append(row[6])
    col8.append(row[7])
    col9.append(row[8])
    col10.append(row[9])
    col11.append(row[10])
    col12.append(row[11])
    col13.append(row[12])
    col14.append(row[13])
    col15.append(row[14])
    col16.append(row[15])
    col17.append(row[16])
    col18.append(row[17])
    col19.append(row[18])
    col20.append(row[19])
    col21.append(row[20])
    col0.append(0)
#INPUT
# Parameters
learning_rate = 0.1
training_epochs = 100
batch_size = 100
display_step = 10
# Network Parameters
n_hidden_1 = 2 # 1st layer number of features
n_hidden_2 = 2 # 2nd layer number of features
n_input = 20
n_classes = 1
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Hidden layer with RELU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Output layer with linear activation
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer
# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
pred = multilayer_perceptron(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = 1000  # int(mnist.train.num_examples/batch_size)
        # Loop over all batches
        for xr in range(1000):
            # batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            feed_y = np.reshape(([col1[xr]]), (-1, 1))
            feed_x = np.reshape(([col2[xr], col3[xr], col4[xr], col5[xr], col6[xr], col7[xr], col8[xr], col9[xr], col10[xr], col11[xr], col12[xr], col13[xr], col14[xr], col15[xr], col16[xr], col17[xr], col18[xr], col19[xr], col20[xr], col21[xr]]), (-1, 20))
            _, c = sess.run([optimizer, cost], feed_dict={x: feed_x, y: feed_y})
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
            print("Eval:", pred.eval({x: np.reshape([1,30,2,2,6350,5,5,4,3,1,4,2,31,3,2,1,3,1,1,1],(-1,20))}))
    print("Optimization Finished!")
    # Test model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("Eval:", pred.eval({x: np.reshape([1,30,2,2,6350,5,5,4,3,1,4,2,31,3,2,1,3,1,1,1],(-1,20))}))
    #32,2,46,38,7,0,0,33,0,20,53,3,0,0

For binary classification, you can use a scheme like the following (I haven't let this run):
...
output_activation = tf.matmul(layer_2, weights['out']) + biases['out'] # this is what is often called the "logit"
prediction = tf.round(tf.nn.sigmoid(output_activation))
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=y, logits=output_activation))
train = optimizer.minimize(loss)
sigmoid_cross_entropy_with_logits does several things at once: it is equivalent to applying a sigmoid to the logits (which gives the usual output of a classification network) and then computing the cross-entropy between that and the given targets, but it does this in a more efficient and numerically stable way.
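To make that equivalence concrete, a naive (and numerically less safe) sketch of the same loss would be:
# equivalent in value, but can hit log(0) when the sigmoid saturates
p = tf.nn.sigmoid(output_activation)
naive_loss = tf.reduce_mean(-(y * tf.log(p) + (1 - y) * tf.log(1 - p)))
The fused op avoids that log(0) problem, which is why it is preferred.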
The accuracy should then also be calculated on prediction, of course.
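For completeness, a minimal sketch of that accuracy calculation, assuming the prediction and y tensors from the snippet above (both holding 0/1 values of shape [batch, 1]):
# compare the rounded sigmoid output against the 0/1 labels
correct_prediction = tf.equal(prediction, y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))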

Related

Multilayer perceptron with sigmoid activation produces straight line on sin(2x) regression

I'm trying to approximate noisy data from the sin(2x) function using a multilayer perceptron:
# Get data
datasets = gen_datasets()
# Add noise
datasets["ysin_train"] = add_noise(datasets["ysin_train"])
datasets["ysin_test"] = add_noise(datasets["ysin_test"])
# Extract wanted data
patterns_train = datasets["x_train"]
targets_train = datasets["ysin_train"]
patterns_test = datasets["x_test"]
targets_test = datasets["ysin_test"]
# Reshape to fit model
patterns_train = patterns_train.reshape(62, 1)
targets_train = targets_train.reshape(62, 1)
patterns_test = patterns_test.reshape(62, 1)
targets_test = targets_test.reshape(62, 1)
# Parameters
learning_rate = 0.001
training_epochs = 10000
batch_size = patterns_train.shape[0]
display_step = 1
# Network Parameters
n_hidden_1 = 2
n_hidden_2 = 2
n_input = 1
n_classes = 1
# tf Graph input
X = tf.placeholder("float", [None, n_input])
Y = tf.placeholder("float", [None, n_classes])
# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
# Create model
def multilayer_perceptron(x):
    # Hidden fully connected layer with 2 neurons
    layer_1 = tf.sigmoid(tf.add(tf.matmul(x, weights['h1']), biases['b1']))
    # Hidden fully connected layer with 2 neurons
    layer_2 = tf.sigmoid(tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']))
    # Output fully connected layer
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer
# Construct model
logits = multilayer_perceptron(X)
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.losses.absolute_difference(labels = Y, predictions = logits, reduction=tf.losses.Reduction.NONE))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Initializing the variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # Training Cycle
    for epoch in range(training_epochs):
        _ = sess.run(train_op, feed_dict={X: patterns_train,
                                          Y: targets_train})
        c = sess.run(loss_op, feed_dict={X: patterns_test,
                                         Y: targets_test})
        if epoch % display_step == 0:
            print("Epoch: {0: 4} cost={1:9}".format(epoch+1, c))
    print("Optimization finished!")
    outputs = sess.run(logits, feed_dict={X: patterns_test})
    print("outputs: {0}".format(outputs.T))
    plt.plot(patterns_test, outputs, "r.", label="outputs")
    plt.plot(patterns_test, targets_test, "b.", label="targets")
    plt.legend()
    plt.show()
When I plot this at the end, I get a straight line, as if I had a linear network. Take a look at the plot:
This is a correct minimization of the error for a linear network. But I shouldn't have a linear network, because I'm using the sigmoid function in my multilayer_perceptron() function! Why is my network behaving like this?
The default value of stddev=1.0 in tf.random_normal, which you use for the weight & bias initialization, is huge. Try an explicit stddev=0.01 for the weights; as for the biases, common practice is to initialize them to zero.
As an initial approach, I would also try a higher learning_rate of 0.01 (or maybe not - see the answer to a related question here).
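A minimal sketch of what that initialization change could look like, reusing the same weights/biases names as in the question (the stddev value is just the suggested 0.01, not something tuned):
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=0.01)),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], stddev=0.01)),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes], stddev=0.01))
}
biases = {
    'b1': tf.Variable(tf.zeros([n_hidden_1])),
    'b2': tf.Variable(tf.zeros([n_hidden_2])),
    'out': tf.Variable(tf.zeros([n_classes]))
}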

Inferior performance of Tensorflow compared to sklearn

I'm comparing the performance of Tensorflow with sklearn on two datasets:
A toy dataset in sklearn
MNIST dataset
Here is my code (Python):
from __future__ import print_function
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
import tensorflow as tf
from sklearn.datasets import load_digits
import numpy as np
# digits = load_digits()
# data = digits.data
# labels = digits.target
# convert to binary labels
# y = np.zeros((labels.shape[0],10))
# y[np.arange(labels.shape[0]),labels] = 1
x_train = mnist.train.images
y_train = mnist.train.labels
x_test = mnist.test.images
y_test = mnist.test.labels
n_train = mnist.train.images.shape[0]
# import pdb;pdb.set_trace()
# Parameters
learning_rate = 1e-3
lambda_val = 1e-5
training_epochs = 30
batch_size = 200
display_step = 1
# Network Parameters
n_hidden_1 = 300 # 1st layer number of neurons
n_input = x_train.shape[1] # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
X = tf.placeholder("float", [None, n_input])
Y = tf.placeholder("float", [None, n_classes])
# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'out': tf.Variable(tf.random_normal([n_hidden_1, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
# Create model
def multilayer_perceptron(x):
    # Hidden fully connected layer with n_hidden_1 (300) neurons
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    # Activation
    layer_1_relu = tf.nn.relu(layer_1)
    # Output fully connected layer with a neuron for each class
    out_layer = tf.matmul(layer_1_relu, weights['out']) + biases['out']
    return out_layer
# Construct model
logits = multilayer_perceptron(X)
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y)) + lambda_val*tf.nn.l2_loss(weights['h1']) + lambda_val*tf.nn.l2_loss(weights['out'])
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Test model
pred = tf.nn.softmax(logits) # Apply softmax to logits
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(Y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# Initializing the variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(n_train/batch_size)
        # Loop over all batches
        ptr = 0
        for i in range(total_batch):
            next_ptr = ptr + batch_size
            if next_ptr > len(x_train):
                next_ptr = len(x_train)
            batch_x, batch_y = x_train[ptr:next_ptr], y_train[ptr:next_ptr]
            ptr += batch_size
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([train_op, loss_op], feed_dict={X: batch_x,
                                                            Y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost={:.9f}".format(avg_cost))
    print("Optimization Finished!")
    print("Accuracy on training set: ", accuracy.eval({X: x_train, Y: y_train}))
    print("Accuracy on testing set:", accuracy.eval({X: x_test, Y: y_test}))
print("Experimenting sklearn...")
# now experiment with sklearn
from sklearn.datasets import load_digits
import numpy as np
from sklearn.neural_network import MLPClassifier
import time
# use MLP
t_start = time.time()
print('fitting MLP...')
clf = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(300,),max_iter=training_epochs)
clf.fit(x_train,y_train)
print('fitted MLP in {:.2f} seconds'.format(time.time() - t_start))
print('predicting...')
labels_predicted = clf.predict(x_test)
print('accuracy: {:.2f} %'.format(np.mean(np.argmax(y_test,axis=1) == np.argmax(labels_predicted,axis=1)) * 100))
The code is adapted from a github repository. For this testing, I'm using a traditional neural network (MLP) with only one hidden layer of size 300.
Here are the results for both datasets:
sklearn digits: ~83% (tensorflow), ~90% (sklearn)
MNIST: ~94% (tensorflow), ~97% (sklearn)
I'm using the same model for both libraries. All the parameters (number of hidden layers, number of hidden units, learning_rate, L2 regularization constant, number of training epochs, batch size) and the optimization algorithm are the same (Adam optimizer, the same beta parameters for Adam, no momentum, etc).
I wonder whether sklearn has some magic in its implementation that tensorflow lacks? Can anyone help explain this?
Thank you very much.

Neural network for regression not learning in tensorflow

I've modified a TensorFlow example to fit my data, given here: data
But my neural network is not learning at all. I tried different numbers of hidden layers, learning rates and optimization functions, but it didn't help. My code is given below:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import learn
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn import datasets, linear_model
from sklearn import cross_validation
from sklearn import preprocessing
import numpy as np
filename_queue = tf.train.string_input_producer(["file0.csv"])
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
# Default values, in case of empty columns. Also specifies the type of the
# decoded result.
record_defaults = [[0], [0], [0], [0]]
col1, col2, col3, col4 = tf.decode_csv(
    value, record_defaults=record_defaults)
features = tf.stack([col1, col2, col3])
with tf.Session() as sess:
    # Start populating the filename queue.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    x = np.zeros(shape=(1813, 3))
    y = np.zeros(shape=(1813))
    for i in range(1813):
        # Retrieve a single instance:
        x1, y1 = sess.run([features, col4])
        x[i] = x1
        y[i] = y1
    coord.request_stop()
    coord.join(threads)
#standard_scaler = preprocessing.StandardScaler()
#x = standard_scaler.fit_transform(x)
# Split in test and train data
X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(x, y, test_size=0.2)
total_len = X_train.shape[0]
# Parameters
learning_rate = 0.001
training_epochs = 500
batch_size = 5
display_step = 1
# Network Parameters
n_hidden_1 = 50
n_input = X_train.shape[1]
n_classes = 1
# tf Graph input
x = tf.placeholder("float", [None, 3])
y = tf.placeholder("float", [None])
# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Output layer with linear activation
    out_layer = tf.matmul(layer_1, weights['out']) + biases['out']
    return out_layer
# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], 0, 0.1)),
    'out': tf.Variable(tf.random_normal([n_hidden_1, n_classes], 0, 0.1))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1], 0, 0.1)),
    'out': tf.Variable(tf.random_normal([n_classes], 0, 0.1))
}
# Construct model
pred = multilayer_perceptron(x, weights, biases)
#reshape(pred, [-1])
tf.shape(pred)
tf.shape(y)
print("Prediction matrix:", pred)
print("Output matrix:", y)
# Define loss and optimizer
cost = tf.reduce_mean(tf.square(pred-y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Launch the graph
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(total_len/batch_size)
        print(total_batch)
        # Loop over all batches
        for i in range(total_batch-1):
            batch_x = X_train[i*batch_size:(i+1)*batch_size]
            batch_y = Y_train[i*batch_size:(i+1)*batch_size]
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c, p = sess.run([optimizer, cost, pred], feed_dict={x: batch_x,
                                                                   y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch
        # sample prediction
        label_value = batch_y
        estimate = p
        err = label_value-estimate
        print("num batch:", total_batch)
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=",
                  "{:.9f}".format(avg_cost))
            print("[*]----------------------------")
            for i in xrange(5):
                print("label value:", label_value[i],
                      "estimated value:", estimate[i])
            print("[*]============================")
    print("Optimization Finished!")
    # Test model
    correct_prediction = tf.equal(tf.argmax(pred), tf.argmax(y))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Accuracy:", accuracy.eval({x: X_test, y: Y_test}))
and the result looks like this (label value = expected result):
[result screenshot]

Neural network with a CSV file in TensorFlow

I am using two tutorials to figure out how to take a CSV file of the format:
feature1,feature2....feature20,label
feature1,feature2....feature20,label
...
and train a neural network on it. What I do in the code below is read in the CSV file and group 100 lines at a time into batches: x_batch and y_batch. Next, I try to have the NN learn in batches. However, I get the following error:
"ValueError: Cannot feed value of shape (99,) for Tensor 'Placeholder_1:0', which has shape '(?, 4)'"
I am wondering what I am doing wrong and what another approach might be.
import tensorflow as tf
filename_queue = tf.train.string_input_producer(["VOL_TRAIN.csv"])
line_reader = tf.TextLineReader(skip_header_lines=1)
_, csv_row = line_reader.read(filename_queue)
# Type information and column names based on the decoded CSV.
[[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[""]]
record_defaults = [[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0.0],[0]]
in1,in2,in3,in4,in5,in6,in7,in8,in9,in10,in11,in12,in13,in14,in15,in16,in17,in18,in19,in20,out = \
    tf.decode_csv(csv_row, record_defaults=record_defaults)
# Turn the features back into a tensor.
features = tf.pack([in1,in2,in3,in4,in5,in6,in7,in8,in9,in10,in11,in12,in13,in14,in15,in16,in17,in18,in19,in20])
# Parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1
num_examples= 33500
# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 256 # 2nd layer number of features
n_input = 20 # number of input features per line
n_classes = 4 # number of output classes
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Hidden layer with RELU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Output layer with linear activation
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer
# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
pred = multilayer_perceptron(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
    #tf.initialize_all_variables().run()
    sess.run(init)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(num_examples/batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x = []
            batch_y = []
            for iteration in range(1, batch_size):
                example, label = sess.run([features, out])
                batch_x.append(example)
                batch_y.append(label)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                          y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=",
                  "{:.9f}".format(avg_cost))
    print("Optimization Finished!")
    coord.request_stop()
    coord.join(threads)
Your placeholder y specifies that you will feed an array of unknown length whose elements are arrays of length n_classes (which is 4). In your feed_dict you pass batch_y, which is a flat array of 99 plain numbers (one per example collected in the inner loop - note that range(1, batch_size) runs batch_size-1 times).
What you want to do is change your batch_y variable to contain one-hot vectors instead. Please let me know if this works!
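For example, a rough sketch of turning the collected integer labels into one-hot rows before feeding them (this assumes the labels are integers in [0, n_classes) and uses numpy, which the snippet above does not import yet):
import numpy as np  # needed for the one-hot conversion below

# inside the batch loop, after batch_y has been filled with integer labels:
batch_y_onehot = np.eye(n_classes)[np.asarray(batch_y, dtype=int)]  # shape (len(batch_y), n_classes)
_, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                              y: batch_y_onehot})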

nan cost in tensorflow training perceptron

I am trying to train a single layer perceptron (basing my code on this) on the following data file in TensorFlow:
1,1,0.05,-1.05
1,1,0.1,-1.1
....
where the last column is the label (a function of the three parameters) and the first three columns are the function arguments. The code that reads the data and trains the model (I have simplified it for readability):
import tensorflow as tf
... # some basics to read the data
example, label = read_file_format(filename_queue)
... # model construction and parameter setting
n_hidden_1 = 4 # 1st layer number of features
n_input = 3
n_output = 1
...
# calls a function which produces a prediction
pred = multilayer_perceptron(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        _, c = sess.run([optimizer, cost], feed_dict={x: example.reshape(1,3), y: label.reshape(-1,1)})
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "Cost:", c)
but when I run it, something seems to be very wrong:
('Epoch:', '0001', 'Cost:', nan)
('Epoch:', '0002', 'Cost:', nan)
....
('Epoch:', '0015', 'Cost:', nan)
This is the complete code for the multilayer_perceptron function, etc.:
# Parameters
learning_rate = 0.001
training_epochs = 15
display_step = 1
# Network Parameters
n_hidden_1 = 4 # 1st layer number of features
n_input = 3
n_output = 1
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_output])
# Create model
def multilayer_perceptron(x, weights, biases):
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Output layer with linear activation
    out_layer = tf.matmul(layer_1, weights['out']) + biases['out']
    return out_layer
# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'out': tf.Variable(tf.random_normal([n_hidden_1, n_output]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'out': tf.Variable(tf.random_normal([n_output]))
}
Is this training on one example at a time? I would switch to batches and increase the batch size to 128 or similar, for as long as you are getting nans.
When I am getting nans it is usually one of these three:
- batch size too small (in your case then just 1)
- log(0) somewhere
- learning rate too high and uncapped gradients
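If the last point is the suspect, here is a small sketch of capping the gradients instead of calling minimize directly (the clip value of 5.0 is just illustrative, not tuned):
# build the optimizer, compute gradients, clip them, then apply
opt = tf.train.GradientDescentOptimizer(learning_rate)
grads_and_vars = opt.compute_gradients(cost)
capped = [(tf.clip_by_norm(g, 5.0), v) for g, v in grads_and_vars if g is not None]
optimizer = opt.apply_gradients(capped)  # use this op in sess.run as before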
