Getting a very poor accuracy rate whenever I train any network - python

I am trying to train a network on the Abalone dataset downloaded from the "UCI Machine Learning Repository" site. The dataset looks like:
M,0.455,0.365,0.095,0.514,0.2245,0.101,0.15,15
M,0.35,0.265,0.09,0.2255,0.0995,0.0485,0.07,7
F,0.53,0.42,0.135,0.677,0.2565,0.1415,0.21,9
M,0.44,0.365,0.125,0.516,0.2155,0.114,0.155,10
I,0.33,0.255,0.08,0.205,0.0895,0.0395,0.055,7
I have given exactly the same column names as mentioned on the site. But whenever I train a neural network on it, it always gives a very poor accuracy rate of only about 50%.
I am new to the field, so I don't know whether I am using the wrong activation function, executing wrong code, or haven't preprocessed the data well.
So please help me find the mistake I have made.
Here's my whole code:
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
def read_dataset():
    df = pd.read_csv("abalone.data.txt")
    X = np.array(df.drop("Sex", 1))
    y = np.array(df["Sex"])
    encoder = LabelEncoder()
    encoder.fit(y)
    y = encoder.transform(y)
    Y = one_hot_encode(y)
    # print(X.shape)
    return X, Y

def one_hot_encode(label):
    n_label = len(label)
    n_unique_label = len(np.unique(label))
    one_hot_encode = np.zeros((n_label, n_unique_label))
    one_hot_encode[np.arange(n_label), label] = 1
    return one_hot_encode
X, y = read_dataset()
train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.2)
n_nodes_1 = 60
n_nodes_2 = 60
n_nodes_3 = 60
n_nodes_4 = 60
model_path = "C:\\Users\Kashif\Projects\DeepLearning-Tensorflow\Learnings\AlaboneDetection\AlaboneModel"
n_class = 3
input_size = X.shape[1]
x = tf.placeholder(tf.float32, [None, input_size])
y = tf.placeholder(tf.float32, [None, n_class])
def neural_network(x):
    hidden_1 = {"weights": tf.Variable(tf.random_normal([input_size, n_nodes_1])),
                "biases": tf.Variable(tf.random_normal([n_nodes_1]))}
    hidden_2 = {"weights": tf.Variable(tf.random_normal([n_nodes_1, n_nodes_2])),
                "biases": tf.Variable(tf.random_normal([n_nodes_2]))}
    hidden_3 = {"weights": tf.Variable(tf.random_normal([n_nodes_2, n_nodes_3])),
                "biases": tf.Variable(tf.random_normal([n_nodes_3]))}
    hidden_4 = {"weights": tf.Variable(tf.random_normal([n_nodes_3, n_nodes_4])),
                "biases": tf.Variable(tf.random_normal([n_nodes_4]))}
    out_layer = {"weights": tf.Variable(tf.random_normal([n_nodes_4, n_class])),
                 "biases": tf.Variable(tf.random_normal([n_class]))}

    # (input * weights) + biases
    layer_1 = tf.add(tf.matmul(x, hidden_1["weights"]), hidden_1["biases"])
    layer_1 = tf.nn.relu(layer_1)
    layer_2 = tf.add(tf.matmul(layer_1, hidden_2["weights"]), hidden_2["biases"])
    layer_2 = tf.nn.relu(layer_2)
    layer_3 = tf.add(tf.matmul(layer_2, hidden_3["weights"]), hidden_3["biases"])
    layer_3 = tf.nn.relu(layer_3)
    layer_4 = tf.add(tf.matmul(layer_3, hidden_4["weights"]), hidden_4["biases"])
    layer_4 = tf.nn.relu(layer_4)
    output = tf.matmul(layer_4, out_layer["weights"]) + out_layer["biases"]
    return output
def train_neural_network(x):
    prediction = neural_network(x)
    cost_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost_function)
    init = tf.global_variables_initializer()

    loss_trace = []
    accuracy_trace = []
    #saver = tf.train.Saver()
    epochs = 1000

    with tf.Session() as sess:
        sess.run(init)
        for i in range(epochs):
            sess.run(optimizer, feed_dict={x: train_X, y: train_y})
            loss = sess.run(cost_function, feed_dict={x: train_X, y: train_y})
            accuracy = np.mean(np.argmax(sess.run(prediction, feed_dict={x: train_X, y: train_y}), axis=1) == np.argmax(train_y, axis=1))
            loss_trace.append(loss)
            accuracy_trace.append(accuracy)
            print('Epoch:', (i + 1), 'loss:', loss, 'accuracy:', accuracy)
        #saver.save(sess, model_path)
        print('Final training result:', 'loss:', loss, 'accuracy:', accuracy)
        loss_test = sess.run(cost_function, feed_dict={x: test_X, y: test_y})
        test_pred = np.argmax(sess.run(prediction, feed_dict={x: test_X, y: test_y}), axis=1)
        accuracy_test = np.mean(test_pred == np.argmax(test_y, axis=1))
        print('Results on test dataset:', 'loss:', loss_test, 'accuracy:', accuracy_test)

train_neural_network(x)
And here are the results of the last few epochs and the final accuracy:
Epoch: 997 loss: 24.625622 accuracy: 0.518407662376534
Epoch: 998 loss: 22.168245 accuracy: 0.48757856929063154
Epoch: 999 loss: 21.896841 accuracy: 0.5001496557916791
Epoch: 1000 loss: 22.28085 accuracy: 0.4968572283747381
Final training result: loss: 22.28085 accuracy: 0.4968572283747381
Results on test dataset: loss: 23.206755 accuracy: 0.4688995215311005

I am new to TensorFlow. Maybe you can try two things:
1. Decrease the learning rate, for example to 0.0001, because your loss is oscillating.
2. Increase the number of layers, because your model may be under-fitting.
If the above doesn't solve your problem, print your data and check whether train_X and train_y are correct.
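For example, the learning rate can be passed to tf.train.AdamOptimizer explicitly. A minimal sketch against the question's code (the value 0.0001 is only illustrative):
# Sketch: give Adam an explicit, smaller learning rate instead of the default 0.001
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost_function)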

Related

Tensorflow NN accuracy/predictions not changing

I'm attempting to create a NN with TensorFlow that predicts whether or not an NBA shot was made, but the model seems to just predict that the shot misses every time and the accuracy does not change from epoch to epoch.
I'm not sure whether it is due to the data itself or the gradient descent function. It worked with the iris dataset and I cannot figure out what changes when the data changed.
import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
RANDOM_SEED = 42
tf.set_random_seed(RANDOM_SEED)
def init_weights(shape):
    """ Weight initialization """
    weights = tf.random_normal(shape, stddev=0.1)
    return tf.Variable(weights)

def forwardprop(X, w_1, w_2):
    """
    Forward-propagation.
    IMPORTANT: yhat is not softmax since TensorFlow's softmax_cross_entropy_with_logits() does that internally.
    """
    h = tf.nn.sigmoid(tf.matmul(X, w_1))  # The \sigma function
    yhat = tf.matmul(h, w_2)              # The \varphi function
    return yhat
def get_iris_data():
    """ Read the iris data set and split them into training and test sets """
    my_data = pd.read_csv("shot_logs.csv").sample(2000)
    my_data["GAME_CLOCK"] = pd.to_numeric(my_data["GAME_CLOCK"].str.split(":").str[0]) * 60 + pd.to_numeric(
        my_data["GAME_CLOCK"].str.split(":").str[1])
    my_data["LOCATION"] = my_data["LOCATION"].map({
        "A": 0,
        "H": 1
    })
    data = my_data[["LOCATION", "SHOT_NUMBER", "PERIOD", "GAME_CLOCK", "SHOT_CLOCK", "DRIBBLES", "TOUCH_TIME",
                    "SHOT_DIST", "CLOSE_DEF_DIST"]]
    target = my_data["FGM"]

    # Prepend the column of 1s for bias
    N, M = data.shape
    all_X = np.ones((N, M + 1))
    all_X[:, 1:] = data

    # Convert into one-hot vectors
    num_labels = len(np.unique(target))
    print(num_labels)
    print(np.eye(num_labels))
    all_Y = np.eye(num_labels)[target]  # One liner trick!
    return train_test_split(all_X, all_Y, test_size=0.33, random_state=RANDOM_SEED)
def main():
    train_X, test_X, train_y, test_y = get_iris_data()

    # Layer's sizes
    x_size = train_X.shape[1]  # Number of input nodes: 4 features and 1 bias
    h_size = 256               # Number of hidden nodes
    y_size = train_y.shape[1]  # Number of outcomes (3 iris flowers)
    print(y_size)

    # Symbols
    X = tf.placeholder("float", shape=[None, x_size])
    y = tf.placeholder("float", shape=[None, y_size])

    # Weight initializations
    w_1 = init_weights((x_size, h_size))
    w_2 = init_weights((h_size, y_size))

    # Forward propagation
    yhat = forwardprop(X, w_1, w_2)
    predict = tf.argmax(yhat, axis=1)

    # Backward propagation
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=yhat))
    updates = tf.train.GradientDescentOptimizer(0.1).minimize(cost)

    # Run SGD
    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)

    for epoch in range(100):
        # Train with each example
        for i in range(len(train_X)):
            sess.run(updates, feed_dict={X: train_X[i: i + 1], y: train_y[i: i + 1]})

        train_accuracy = np.mean(np.argmax(train_y, axis=1) ==
                                 sess.run(predict, feed_dict={X: train_X, y: train_y}))
        test_accuracy = np.mean(np.argmax(test_y, axis=1) ==
                                sess.run(predict, feed_dict={X: test_X, y: test_y}))

        print("Epoch = %d, train accuracy = %.2f%%, test accuracy = %.2f%%"
              % (epoch + 1, 100. * train_accuracy, 100. * test_accuracy))

    sess.close()

if __name__ == '__main__':
    main()
Here is what the output looks like:
Epoch = 1, train accuracy = 54.85%, test accuracy = 54.55%
Epoch = 2, train accuracy = 54.85%, test accuracy = 54.55%
Epoch = 3, train accuracy = 54.85%, test accuracy = 54.55%
Epoch = 4, train accuracy = 54.85%, test accuracy = 54.55%
Epoch = 5, train accuracy = 54.85%, test accuracy = 54.55%
Epoch = 6, train accuracy = 54.85%, test accuracy = 54.55%
Epoch = 7, train accuracy = 54.85%, test accuracy = 54.55%
Epoch = 8, train accuracy = 54.85%, test accuracy = 54.55%
Epoch = 9, train accuracy = 54.85%, test accuracy = 54.55%
Epoch = 10, train accuracy = 54.85%, test accuracy = 54.55%
Epoch = 11, train accuracy = 54.85%, test accuracy = 54.55%

Suspiciously high accuracy for binary classification problem

Based on the layer function
def neuron_layer(X, n_neurons, name, activation_fn=None):
    with tf.name_scope(name):
        n_inputs = int(X.get_shape()[1])
        stddev = 2 / np.sqrt(n_inputs)
        init = tf.truncated_normal((n_inputs, n_neurons), stddev=stddev)
        W = tf.Variable(init, name="kernel")
        b = tf.Variable(tf.zeros([n_neurons]), name="bias")
        Z = tf.matmul(X, W) + b
        if activation_fn is not None:
            return activation_fn(Z)
        else:
            return Z
The following network for a binary classification problem is constructed:
n_hidden1 = 100
n_hidden2 = 120
n_outputs = 1 # single value prediction
n_inputs = X_test.shape[1]
reset_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.float32, shape=(None), name="y")
layer1 = neuron_layer(X, n_hidden1, "layer1", activation_fn=tf.nn.relu)
layer2 = neuron_layer(layer1, n_hidden2, "layer2", activation_fn=tf.nn.relu)
prediction = neuron_layer(layer2, n_outputs, "output",activation_fn=tf.nn.sigmoid)
cost = tf.losses.log_loss(y,prediction)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
init = tf.global_variables_initializer()
The training routine
learning_rate = 0.01
n_epochs = 20
batch_size = 60
num_rec = X_train.shape[0]
n_batches = int(np.ceil(num_rec / batch_size))
acc_test = 0. # assign the result of accuracy testing to this variable
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        for batch_index in range(n_batches):
            X_batch, y_batch = random_batch(X_train, Y_train, batch_size)
            _, opt = sess.run([optimizer, cost], feed_dict={X: X_batch, y: y_batch})
        loss, acc = sess.run([cost, accuracy], feed_dict={X: X_batch, y: y_batch})
        print("epoch " + str(epoch) + ", Loss= " + \
              "{:.6f}".format(loss) + ", Training Accuracy= " + \
              "{:.5f}".format(acc))
        print("Optimization Finished!")
    _, acc_test = sess.run([cost, accuracy], feed_dict={X: X_test, y: Y_test})
generates the following output:
epoch 0, Loss= -6.756775, Training Accuracy= 1.00000
Optimization Finished!
[. . .]
epoch 19, Loss= -6.769919, Training Accuracy= 1.00000
Optimization Finished!
and the accuracy on the test set acc_test is 1.0.
The batches are generated by
def random_batch(X_train, y_train, batch_size):
    np.random.seed(42)
    rnd_indices = np.random.randint(0, len(X_train), batch_size)
    X_batch = X_train[rnd_indices]
    y_batch = y_train[rnd_indices]
    return X_batch, y_batch
the input shapes are
print(X_batch.shape,y_batch.shape,X_test.shape,Y_test.shape)
>(60, 3) (60, 1) (2500, 3) (2500, 1)
Obviously, the accuracy on the training and test sets can't be correct. Where could the problem be in the network, training or evaluation procedure?
The model is overfitting, which is why you are getting abnormally high accuracy at the initial epochs. To avoid overfitting you can use regularization methods or increase the dataset by augmenting it. Use ImageDataGenerator for augmentation; it will provide images to the model in batches. Try setting dropout to 0.2. Enable early stopping in the callbacks; it will terminate training when model performance degrades. Try playing with the patience in early stopping.
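A minimal Keras-style sketch of the dropout and early-stopping suggestions (the model, layer sizes and patience value here are assumptions for illustration, not the question's graph):

from tensorflow import keras

# Hypothetical model mirroring the question's layer sizes, with dropout added
model = keras.Sequential([
    keras.layers.Dense(100, activation="relu", input_shape=(3,)),
    keras.layers.Dropout(0.2),   # dropout rate suggested above
    keras.layers.Dense(120, activation="relu"),
    keras.layers.Dropout(0.2),
    keras.layers.Dense(1, activation="sigmoid"),
])
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

# Early stopping: terminate training when validation loss stops improving
early_stop = keras.callbacks.EarlyStopping(monitor="val_loss", patience=3)
# model.fit(X_train, Y_train, validation_split=0.2, epochs=20, callbacks=[early_stop])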

tf.metrics.accuracy and hand-written accuracy function give different results

I am trying to see how tf.metrics.accuracy works. I want to compare batch accuracy results of the function given below
with tf.name_scope('Accuracy1'):
    correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(y, 1))
    accuracy1 = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="accuracy")
with
with tf.name_scope('Accuracy2'):
    accuracy2, accuracy_op = tf.metrics.accuracy(labels=tf.argmax(y, 1), predictions=tf.argmax(predictions, 1))
A minimal working example is provided below:
import numpy as np
import pandas as pd
import tensorflow as tf
import math
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
num_steps=28
num_inputs = 28
num_classes = 10
num_neurons = 128
num_layers = 3
batch_size = 500
graph = tf.Graph()
with graph.as_default():
    with tf.name_scope("graph_inputs"):
        X = tf.placeholder(tf.float32, [None, num_steps, num_inputs], name='input_placeholder')
        y = tf.placeholder(tf.float32, [None, num_classes], name='labels_placeholder')
        output_keep_prob = tf.placeholder_with_default(1.0, shape=(), name="output_dropout")

    def build_lstm_cell(num_neurons, output_keep_prob):
        """Returns a dropout-wrapped LSTM-cell.
        See https://stackoverflow.com/a/44882273/2628369 for why this local function is necessary.
        Returns:
            tf.contrib.rnn.DropoutWrapper: The dropout-wrapped LSTM cell.
        """
        initializer = tf.contrib.layers.xavier_initializer()
        lstm_cell = tf.contrib.rnn.LSTMCell(num_units=num_neurons, initializer=initializer, forget_bias=1.0, state_is_tuple=True, name='LSTM_cell')
        lstm_cell_drop = tf.contrib.rnn.DropoutWrapper(lstm_cell, output_keep_prob=output_keep_prob)
        return lstm_cell_drop

    with tf.name_scope("LSTM"):
        with tf.name_scope("Cell"):
            multi_layer_cell = tf.contrib.rnn.MultiRNNCell([build_lstm_cell(num_neurons, output_keep_prob) for _ in range(num_layers)], state_is_tuple=True)
        with tf.name_scope("Model"):
            outputs, states = tf.nn.dynamic_rnn(cell=multi_layer_cell, inputs=X, swap_memory=False, time_major=False, dtype=tf.float32)  # [Batch_size, time_steps, num_neurons]

    with tf.name_scope("Graph_Outputs"):
        outputs = tf.transpose(outputs, [1, 0, 2])  # [num_timesteps, batch_size, num_neurons]
        outputs = tf.gather(outputs, int(outputs.get_shape()[0]) - 1)  # [batch_size, num_neurons]

    with tf.variable_scope('Softmax'):
        logits = tf.layers.dense(inputs=outputs, units=num_classes, name="logits")  # [Batch_size, num_classes]

    with tf.name_scope('Predictions'):
        predictions = tf.nn.softmax(logits, name="predictions")  # [Batch_size, num_classes]

    with tf.name_scope('Accuracy1'):
        correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(y, 1))
        accuracy1 = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="accuracy")

    with tf.name_scope('Accuracy2'):
        accuracy2, accuracy_op = tf.metrics.accuracy(labels=tf.argmax(y, 1), predictions=tf.argmax(predictions, 1))

    with tf.name_scope('Loss'):
        xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y)
        loss = tf.reduce_mean(xentropy, name="loss")

    with tf.name_scope('Train'):
        optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)
        trainer = optimizer.minimize(loss, name="training_op")

with tf.Session(graph=graph) as sess:
    tf.global_variables_initializer().run()
    total_batch = mnist.train.num_examples // batch_size
    for batch in range(total_batch):
        tf.local_variables_initializer().run()
        xBatch, yBatch = mnist.train.next_batch(batch_size)
        xBatch = xBatch.reshape((batch_size, num_steps, num_inputs))
        sess.run(trainer, feed_dict={X: xBatch, y: yBatch, output_keep_prob: 0.5})

        miniBatchAccuracy1 = sess.run(accuracy1, feed_dict={X: xBatch, y: yBatch, output_keep_prob: 0.5})
        print('[hand-written] Batch {} accuracy: {}'.format(batch, miniBatchAccuracy1))

        accuracy_op_val = sess.run(accuracy_op, feed_dict={X: xBatch, y: yBatch, output_keep_prob: 0.5})
        miniBatchAccuracy2 = sess.run(accuracy2)
        print("[tf.metrics.accuracy] Batch {} accuracy: {}".format(batch, miniBatchAccuracy2))

    sess.close()
I print the accuracy values of each batch using these two approaches and they are different. Shouldn't the results be the same?
[hand-written] Batch 0 accuracy: 0.09600000083446503
[tf.metrics.accuracy] Batch 0 accuracy: 0.09399999678134918
[hand-written] Batch 1 accuracy: 0.1120000034570694
[tf.metrics.accuracy] Batch 1 accuracy: 0.07800000160932541
[hand-written] Batch 2 accuracy: 0.10199999809265137
[tf.metrics.accuracy] Batch 2 accuracy: 0.09600000083446503
[hand-written] Batch 3 accuracy: 0.12999999523162842
[tf.metrics.accuracy] Batch 3 accuracy: 0.12800000607967377
[hand-written] Batch 4 accuracy: 0.1379999965429306
[tf.metrics.accuracy] Batch 4 accuracy: 0.10199999809265137
[hand-written] Batch 5 accuracy: 0.16200000047683716
[tf.metrics.accuracy] Batch 5 accuracy: 0.1340000033378601
[hand-written] Batch 6 accuracy: 0.1340000033378601
[tf.metrics.accuracy] Batch 6 accuracy: 0.12600000202655792
[hand-written] Batch 7 accuracy: 0.12999999523162842
[tf.metrics.accuracy] Batch 7 accuracy: 0.16200000047683716
...
...
...
...
When measuring the accuracy for both cases, you are passing the dropout rate as 0.5. This is the reason it's giving two different values. Set the dropout value to 1.0 and you should see similar values for both cases.
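Since output_keep_prob is defined with tf.placeholder_with_default(1.0, ...) in the question's code, simply leaving it out of the feed_dict during evaluation gives a dropout-free run for both metrics. A minimal sketch of the evaluation calls:

miniBatchAccuracy1 = sess.run(accuracy1, feed_dict={X: xBatch, y: yBatch})
print('[hand-written] Batch {} accuracy: {}'.format(batch, miniBatchAccuracy1))
accuracy_op_val = sess.run(accuracy_op, feed_dict={X: xBatch, y: yBatch})
miniBatchAccuracy2 = sess.run(accuracy2)
print("[tf.metrics.accuracy] Batch {} accuracy: {}".format(batch, miniBatchAccuracy2))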

accuracy function when labels are not one-hot encoded in NN tensorflow

I am doing binary classification in a TensorFlow NN without one-hot encoding the labels. Everything is fine except that the accuracy function always returns 1.0.
Output is
Epoch 0 completed out of 10 loss: 5536.991802096367
Epoch 1 completed out of 10 loss: 1777.5951525866985
Epoch 2 completed out of 10 loss: 1442.1777643710375
Epoch 3 completed out of 10 loss: 1315.4084038436413
.
.
.
Epoch 9 completed out of 10 loss: 968.3492169082165
Accuracy: 1.0
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.dates as mdates
import numpy as np
from matplotlib import style
import csv,math,time
from sklearn import preprocessing, cross_validation
import tensorflow as tf
df2=pd.read_csv("Book.csv",encoding="latin-1",index_col=0)
df2['iday'].replace(0,1,inplace=True)
df2['imonth'].replace(0,1,inplace=True)
df2['Datetime'] = pd.to_datetime(dict(year=df2.iyear, month=df2.imonth, day=df2.iday))
print(df2.tail());
df=df2[['Datetime','country','longitude','latitude','suicide','attacktype1','targtype1','nkill','nwound','weaptype1','success']]
df.set_index('Datetime', inplace=True)
df.fillna(value=-99999, inplace=True) #cleaning data!!!
XX = np.array(df.drop(['success'], 1)) #
XX = preprocessing.scale(XX) #dividing dataset into features and label
#df.dropna(inplace=True) #dropping those rows which contain nulls
yy = np.array(df['success'])
yy=yy.reshape(-1,1)
X_train, X_test, y_train, y_test = cross_validation.train_test_split(XX, yy,random_state=2) #training
n_nodes_hl1 = 100
n_nodes_hl2 = 100
n_nodes_hl3 = 100
n_input=X_train.shape[1]
print(X_train.shape)
print(yy.shape)
n_classes=y_train.shape[1]
batch_size = 100
x = tf.placeholder('float', [None, n_input])
y = tf.placeholder('float')
def neural_network_model(data):
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([n_input, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)
    output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']
    return output
def train_neural_network(x):
    prediction = neural_network_model(x)
    # OLD VERSION:
    #cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(prediction,y) )
    # NEW:
    cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 10

    with tf.Session() as sess:
        # OLD:
        #sess.run(tf.initialize_all_variables())
        # NEW:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            total_batch = int(len(X_train) / batch_size)
            print(total_batch)
            x_batches = np.array_split(X_train, total_batch)
            y_batches = np.array_split(y_train, total_batch)
            epoch_loss = 0
            for i in range(total_batch):
                epoch_x, epoch_y = x_batches[i], y_batches[i]
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: X_test, y: y_test}))

train_neural_network(x)

Neural network for regression not learning in tensorflow

I've modified a TensorFlow example to fit my data, given here: data
But my neural network is not learning at all. I tried different numbers of hidden layers, learning rates and optimization functions, but it didn't help. My code is given below:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import learn
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn import datasets, linear_model
from sklearn import cross_validation
from sklearn import preprocessing
import numpy as np
filename_queue = tf.train.string_input_producer(["file0.csv"])
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
# Default values, in case of empty columns. Also specifies the type of the
# decoded result.
record_defaults = [[0], [0], [0], [0]]
col1, col2, col3, col4 = tf.decode_csv(
    value, record_defaults=record_defaults)
features = tf.stack([col1, col2, col3])
with tf.Session() as sess:
    # Start populating the filename queue.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    x = np.zeros(shape=(1813, 3))
    y = np.zeros(shape=(1813))
    for i in range(1813):
        # Retrieve a single instance:
        x1, y1 = sess.run([features, col4])
        x[i] = x1
        y[i] = y1
    coord.request_stop()
    coord.join(threads)

#standard_scaler = preprocessing.StandardScaler()
#x = standard_scaler.fit_transform(x)

# Split in test and train data
X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(x, y, test_size=0.2)
total_len = X_train.shape[0]
# Parameters
learning_rate = 0.001
training_epochs = 500
batch_size = 5
display_step = 1
# Network Parameters
n_hidden_1 = 50
n_input = X_train.shape[1]
n_classes = 1
# tf Graph input
x = tf.placeholder("float", [None, 3])
y = tf.placeholder("float", [None])
# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Output layer with linear activation
    out_layer = tf.matmul(layer_1, weights['out']) + biases['out']
    return out_layer
# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], 0, 0.1)),
    'out': tf.Variable(tf.random_normal([n_hidden_1, n_classes], 0, 0.1))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1], 0, 0.1)),
    'out': tf.Variable(tf.random_normal([n_classes], 0, 0.1))
}
# Construct model
pred = multilayer_perceptron(x, weights, biases)
#reshape(pred, [-1])
tf.shape(pred)
tf.shape(y)
print("Prediction matrix:", pred)
print("Output matrix:", y)
# Define loss and optimizer
cost = tf.reduce_mean(tf.square(pred-y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Launch the graph
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(total_len/batch_size)
        print(total_batch)
        # Loop over all batches
        for i in range(total_batch-1):
            batch_x = X_train[i*batch_size:(i+1)*batch_size]
            batch_y = Y_train[i*batch_size:(i+1)*batch_size]
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c, p = sess.run([optimizer, cost, pred], feed_dict={x: batch_x,
                                                                   y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch
            # sample prediction
            label_value = batch_y
            estimate = p
            err = label_value-estimate
            print("num batch:", total_batch)

        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1), "cost=", \
                  "{:.9f}".format(avg_cost))
            print("[*]----------------------------")
            for i in xrange(5):
                print("label value:", label_value[i], \
                      "estimated value:", estimate[i])
            print("[*]============================")

    print("Optimization Finished!")

    # Test model
    correct_prediction = tf.equal(tf.argmax(pred), tf.argmax(y))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print("Accuracy:", accuracy.eval({x: X_test, y: Y_test}))
and the result looks like this (label value = expected result):
[result image]
