tensorflow - relating the accuracy back to the graph - python

This is from a tutorial, but there's a large piece of it that I can't see the logic in - and I'd rather not just accept it.
It's a tensorflow example to train and test against mnist images of digits.
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_classes = 10
batch_size = 100

x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')

def neural_network_model(data):
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([784, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)
    output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']
    return output

def train_neural_network(x):
    prediction = neural_network_model(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 10

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

train_neural_network(x)
My question is specifically about these three lines:
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
print('Accuracy:',accuracy.eval({x:mnist.test.images, y:mnist.test.labels}))
How does 'y' get its data - unless it merely holds the values for the last batch of the last epoch?
How do these 3 lines tie together and connect back to the graph created?
If the variable 'correct' is already an assessment of the output prediction against 'y', why is the apparent result of that comparison used to test against test data?
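(For what it's worth, the mechanism as I understand it: accuracy.eval(feed_dict) is just shorthand for sess.run(accuracy, feed_dict) on the default session, and the feed_dict supplies fresh values for the placeholders x and y for that single run. So y does not keep the last batch's values; at evaluation time it is fed the full set of test labels, and correct/accuracy are simply two more nodes in the same graph hanging off prediction and y. A minimal sketch of the same wiring, independent of MNIST, with a made-up stand-in for the prediction node:)

import tensorflow as tf

# Sketch only: placeholders take their values from whatever feed_dict is passed
# to each individual run/eval call; nothing is remembered between calls.
x = tf.placeholder('float', [None, 2])
y = tf.placeholder('float', [None, 2])
prediction = x * 2.0  # stands in for the network's output node
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))

with tf.Session() as sess:
    print(accuracy.eval({x: [[1., 0.]], y: [[0., 1.]]}))  # 0.0 -- fed labels disagree
    print(accuracy.eval({x: [[1., 0.]], y: [[1., 0.]]}))  # 1.0 -- fed labels agree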

Related

Tensorflow Loss is always 0.0

I've done the tutorial from sentdex, but when I execute the program the loss is always 0.0.
Epoch 0 completed out of 10 loss: 0.0
Epoch 1 completed out of 10 loss: 0.0
Epoch 2 completed out of 10 loss: 0.0
Epoch 3 completed out of 10 loss: 0.0
Epoch 4 completed out of 10 loss: 0.0
Epoch 5 completed out of 10 loss: 0.0
Epoch 6 completed out of 10 loss: 0.0
Epoch 7 completed out of 10 loss: 0.0
Epoch 8 completed out of 10 loss: 0.0
Epoch 9 completed out of 10 loss: 0.0
Accuracy: 0.0
I am not able to find any solutions.
import numpy as np
import tensorflow as tf

old_v = tf.logging.get_verbosity()
tf.logging.set_verbosity(tf.logging.ERROR)

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_classes = 10
batch_size = 100

x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')

def neural_network_model(data):
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([784, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)
    output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']
    return output

def train_neural_network(x):
    prediction = neural_network_model(x)
    # OLD VERSION:
    # cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(prediction, y) )
    # NEW:
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 10

    with tf.Session() as sess:
        # OLD:
        # sess.run(tf.initialize_all_variables())
        # NEW:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

train_neural_network(x)
This is the full code. To be sure I wrote everything correctly, I copied the code from the website.
I get no errors, but the loss value never changes.
Can you please help me?
Elias
The loss isn't zero. Even with the code you pasted, where you accumulate the loss (epoch_loss += c), it prints a non-zero accumulated loss for me.
Below is a slightly modified version of your code that also plots the loss:
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

old_v = tf.logging.get_verbosity()
tf.logging.set_verbosity(tf.logging.ERROR)

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_classes = 10
batch_size = 100

x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')

def neural_network_model(data):
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([784, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)
    output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']
    return output

def train_neural_network(x):
    prediction = neural_network_model(x)
    # OLD VERSION:
    # cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(prediction, y) )
    # NEW:
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
    hm_epochs = 10

    with tf.Session() as sess:
        # OLD:
        # sess.run(tf.initialize_all_variables())
        # NEW:
        sess.run(tf.global_variables_initializer())
        epoch_loss = []
        for epoch in range(hm_epochs):
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss.append(c)
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', c)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

    plt.subplot(1, 2, 1)
    plt.plot(epoch_loss)
    plt.title('Epoch Loss')
    plt.show()

train_neural_network(x)
Your algorithm seems to work: I just copy-pasted your code and it runs as expected for me (screenshot omitted).
My configuration:
tensorflow 1.8.0

Neural Network - ValueError: Cannot feed value of shape

I'm new to Python and TensorFlow. To begin with, I watched the MNIST tutorial and understood it so far.
But now I have to create a new neural network with numerical input data.
I got a dataset which delivers an input_data and v_data.
If I run input_data.shape -> (1000,25,4)
If I run v_data.shape -> (1000,2)
What I tried to do is to split the data for (Training + Validation) and Testing.
Training + Validation = 90% of train_data (90% of the input.pkl)
Testing data = the remaining 10%
And then I divided that 90% of the input_data into training and validation (70% training, 30% validation).
The network should correctly predict based on v_data, but I still get an error. See the code and the error below.
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Imports
import tensorflow as tf
import pickle as pkl
import numpy as np

# load data
with open('input.pkl', 'rb') as f:
    input_data = pkl.load(f)
f.close()

X_train, y_train = input_data

# split data into train / validation and test
X_input = X_train[0:900]
y_input = y_train[0:900]
#print (X_input.shape)
#print (y_input.shape)

X_train_data = X_input[0:630]
X_test_data = X_input[630:900]
y_train_data = y_input[0:630]
y_test_data = y_input[630:900]

# Variables
hidden_layer_1_nodes = 300
hidden_layer_2_nodes = 100
output_layer_nodes = 100
epochs = 10
classes = 2
epoch_errors = []
stddev = 0.035
learning_rate = 0.08
batch_size = 100

#print (X_train_data[0])

# TF Placeholders
X = tf.placeholder('float', [25, 4], name='X')
y = tf.placeholder('float', name='y')

# Weights Matrices
W1 = tf.Variable(tf.truncated_normal([4, hidden_layer_1_nodes], stddev=stddev), name='W1')
W2 = tf.Variable(tf.truncated_normal([hidden_layer_1_nodes, hidden_layer_2_nodes], stddev=stddev), name='W2')
W3 = tf.Variable(tf.truncated_normal([hidden_layer_2_nodes, output_layer_nodes], stddev=stddev), name='W3')
W4 = tf.Variable(tf.truncated_normal([output_layer_nodes, classes], stddev=stddev), name='W4')

# Biases Vectors
b1 = tf.Variable(tf.truncated_normal([hidden_layer_1_nodes], stddev=stddev), name='b1')
b2 = tf.Variable(tf.truncated_normal([hidden_layer_2_nodes], stddev=stddev), name='b2')
b3 = tf.Variable(tf.truncated_normal([output_layer_nodes], stddev=stddev), name='b3')
b4 = tf.Variable(tf.truncated_normal([classes], stddev=stddev), name='b4')

# Define the Neural Network
def nn_model(X):
    input_layer = {'weights': W1, 'biases': b1}
    hidden_layer_1 = {'weights': W2, 'biases': b2}
    hidden_layer_2 = {'weights': W3, 'biases': b3}
    output_layer = {'weights': W4, 'biases': b4}

    input_layer_sum = tf.add(tf.matmul(X, input_layer['weights']), input_layer['biases'])
    input_layer_sum = tf.nn.relu(input_layer_sum)

    hidden_layer_1_sum = tf.add(tf.matmul(input_layer_sum, hidden_layer_1['weights']), hidden_layer_1['biases'])
    hidden_layer_1_sum = tf.nn.relu(hidden_layer_1_sum)

    hidden_layer_2_sum = tf.add(tf.matmul(hidden_layer_1_sum, hidden_layer_2['weights']), hidden_layer_2['biases'])
    hidden_layer_2_sum = tf.nn.relu(hidden_layer_2_sum)

    output_layer_sum = tf.add(tf.matmul(hidden_layer_2_sum, output_layer['weights']), output_layer['biases'])
    return output_layer_sum

# Train the Neural Network
def nn_train(X):
    pred = nn_model(X)
    pred = tf.identity(pred)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
    optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate).minimize(cost)

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        #saver = tf.train.Saver()
        sess.run(init_op)

        for epoch in range(epochs):
            epoch_loss = 0.0
            i = 0
            while i < len(X_train_data):
                start = i
                end = i + batch_size
                batch_x = np.array(X_train_data[start:end])
                batch_y = np.array(y_train_data[start:end])
                _, c = sess.run([optimizer, cost], feed_dict={X: batch_x, y: batch_y})
                epoch_loss += c
                i += batch_size
            epoch_errors.append(epoch_loss)
            print('Epoch ', epoch + 1, ' of ', epochs, ' with loss: ', epoch_loss)

        correct_result = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_result, 'float'))
        print('Acc: ', accuracy.eval({X: X_test_data, y: y_test_data}))

if __name__ == "__main__":
    nn_train(X)
The following error
ValueError: Cannot feed value of shape (100, 25, 4) for Tensor 'X:0', which has shape '(25, 4)'
occurs in line 105
_, c = sess.run([optimizer, cost], feed_dict={X: batch_x, y: batch_y})
The placeholder you have defined for the input X has the shape (25,4)
tf.placeholder('float', [25, 4], name='X')
But the input you are providing is of the shape (100, 25, 4) where 100 is your batch size.
Change the definition to
tf.placeholder('float', [None, 25, 4], name='X')
and the error should be gone. Here, None takes care of the batch size automatically.
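(A tiny, self-contained illustration of that, assuming TensorFlow 1.x: once the leading dimension is None, the same placeholder accepts batches of any size. The placeholder name and the reduce_sum op are only for this demo.)

import numpy as np
import tensorflow as tf

# Demo only: a None first dimension accepts any batch size.
X_demo = tf.placeholder('float', [None, 25, 4], name='X_demo')
total = tf.reduce_sum(X_demo)

with tf.Session() as sess:
    print(sess.run(total, feed_dict={X_demo: np.zeros((100, 25, 4))}))  # batch of 100 -> 0.0
    print(sess.run(total, feed_dict={X_demo: np.zeros((7, 25, 4))}))    # batch of 7   -> 0.0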
Update: Sorry, I didn't go through the whole code. Your code needs a few fixes.
The correct syntax for feeding data to a placeholder is:
X = tf.placeholder(tf.float32, [None, input_dim], name='X')
Now, if you are dealing with images, your input_dim will be the length of the flattened array for one example, i.e. if your image has dimensions 25x4, the input_dim should be 25*4=100. It should be equal to the first dimension of your first weight matrix, W1.
Also, before feeding your batch you will need to reshape it.
Below is the fixed code (changes are commented):

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Imports
import tensorflow as tf
import pickle as pkl
import numpy as np

# load data
with open('input.pkl', 'rb') as f:
    input_data = pkl.load(f)
f.close()

X_train, y_train = input_data

# split data into train / validation and test
X_input = X_train[0:900]
y_input = y_train[0:900]
#print (X_input.shape)
#print (y_input.shape)

X_train_data = X_input[0:630]
X_test_data = X_input[630:900]
y_train_data = y_input[0:630]
y_test_data = y_input[630:900]

# Variables
hidden_layer_1_nodes = 300
hidden_layer_2_nodes = 100
output_layer_nodes = 100
epochs = 10
classes = 2
epoch_errors = []
stddev = 0.035
learning_rate = 0.08
batch_size = 100

#print (X_train_data[0])

# TF Placeholders
# input data should be of the shape (batch_size, flattened data for one example); also the correct shape of y
X = tf.placeholder(tf.float32, [None, 25 * 4], name='X')
y = tf.placeholder(tf.float32, [None, classes], name='y')

# Weights Matrices. First dimension of W1 == second dimension of X
W1 = tf.Variable(tf.truncated_normal([25 * 4, hidden_layer_1_nodes], stddev=stddev), name='W1')
W2 = tf.Variable(tf.truncated_normal([hidden_layer_1_nodes, hidden_layer_2_nodes], stddev=stddev), name='W2')
W3 = tf.Variable(tf.truncated_normal([hidden_layer_2_nodes, output_layer_nodes], stddev=stddev), name='W3')
W4 = tf.Variable(tf.truncated_normal([output_layer_nodes, classes], stddev=stddev), name='W4')

# Biases Vectors
b1 = tf.Variable(tf.truncated_normal([hidden_layer_1_nodes], stddev=stddev), name='b1')
b2 = tf.Variable(tf.truncated_normal([hidden_layer_2_nodes], stddev=stddev), name='b2')
b3 = tf.Variable(tf.truncated_normal([output_layer_nodes], stddev=stddev), name='b3')
b4 = tf.Variable(tf.truncated_normal([classes], stddev=stddev), name='b4')

# Define the Neural Network
def nn_model(X):
    input_layer = {'weights': W1, 'biases': b1}
    hidden_layer_1 = {'weights': W2, 'biases': b2}
    hidden_layer_2 = {'weights': W3, 'biases': b3}
    output_layer = {'weights': W4, 'biases': b4}

    input_layer_sum = tf.add(tf.matmul(X, input_layer['weights']), input_layer['biases'])
    input_layer_sum = tf.nn.relu(input_layer_sum)

    hidden_layer_1_sum = tf.add(tf.matmul(input_layer_sum, hidden_layer_1['weights']), hidden_layer_1['biases'])
    hidden_layer_1_sum = tf.nn.relu(hidden_layer_1_sum)

    hidden_layer_2_sum = tf.add(tf.matmul(hidden_layer_1_sum, hidden_layer_2['weights']), hidden_layer_2['biases'])
    hidden_layer_2_sum = tf.nn.relu(hidden_layer_2_sum)

    output_layer_sum = tf.add(tf.matmul(hidden_layer_2_sum, output_layer['weights']), output_layer['biases'])
    return output_layer_sum

# Train the Neural Network
def nn_train(X):
    pred = nn_model(X)
    pred = tf.identity(pred)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
    optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate).minimize(cost)

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        #saver = tf.train.Saver()
        sess.run(init_op)

        for epoch in range(epochs):
            epoch_loss = 0.0
            i = 0
            while i < len(X_train_data):
                start = i
                end = i + batch_size
                # reshape before feeding.
                batch_x = np.array(X_train_data[start:end]).reshape(batch_size, 25 * 4)
                batch_y = np.array(y_train_data[start:end]).reshape(batch_size, classes)
                _, c = sess.run([optimizer, cost], feed_dict={X: batch_x, y: batch_y})
                epoch_loss += c
                i += batch_size
            epoch_errors.append(epoch_loss)
            print('Epoch ', epoch + 1, ' of ', epochs, ' with loss: ', epoch_loss)

        correct_result = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_result, 'float'))
        print('Acc: ', accuracy.eval({X: X_test_data.reshape(-1, 25 * 4), y: y_test_data.reshape(-1, classes)}))

if __name__ == "__main__":
    nn_train(X)
UPDATE: Sorry, I posted the wrong error.
@Kumar, I changed the batch size to 30 (so 21*30 = 630). It now prints the epochs, but in a weird way:
Epoch 1 of 10 with loss: 1680690.2648780346
Epoch 2 of 10 with loss: 2382142.9208984375
Epoch 3 of 10 with loss: 4215628.857421875
Epoch 4 of 10 with loss: 9046892.295166016
Epoch 5 of 10 with loss: 23961644.453125
Epoch 6 of 10 with loss: 31733882.34375
Epoch 7 of 10 with loss: 46124696.609375
Epoch 8 of 10 with loss: 61760446.28125
Epoch 9 of 10 with loss: 89145610.59375
Epoch 10 of 10 with loss: 121249417.25
And then I received the next error for:
print('Acc: ', accuracy.eval({X:X_test_data, y:y_test_data}))
ValueError: Cannot feed value of shape (270, 25, 4) for Tensor 'X:0', which has shape '(?, 100)'
Process finished with exit code 1
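(That last error suggests the evaluation call is still feeding the unreshaped (270, 25, 4) test arrays into the flattened (?, 100) placeholder. Reshaping the test data the same way the training batches are reshaped in the fixed code above should make the shapes match, for example:)

# Flatten the held-out data before feeding it, just like the training batches.
print('Acc: ', accuracy.eval({X: X_test_data.reshape(-1, 25 * 4),
                              y: y_test_data.reshape(-1, classes)}))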

accuracy function when labels are not one-hot encoded in NN tensorflow

I am doing binary classification in a TensorFlow NN without one-hot encoding the labels. Everything is fine except that the accuracy function always returns 1.0.
Output is
Epoch 0 completed out of 10 loss: 5536.991802096367
Epoch 1 completed out of 10 loss: 1777.5951525866985
Epoch 2 completed out of 10 loss: 1442.1777643710375
Epoch 3 completed out of 10 loss: 1315.4084038436413
.
.
.
Epoch 9 completed out of 10 loss: 968.3492169082165
Accuracy: 1.0
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.dates as mdates
import numpy as np
from matplotlib import style
import csv, math, time
from sklearn import preprocessing, cross_validation
import tensorflow as tf

df2 = pd.read_csv("Book.csv", encoding="latin-1", index_col=0)
df2['iday'].replace(0, 1, inplace=True)
df2['imonth'].replace(0, 1, inplace=True)
df2['Datetime'] = pd.to_datetime(dict(year=df2.iyear, month=df2.imonth, day=df2.iday))
print(df2.tail())

df = df2[['Datetime', 'country', 'longitude', 'latitude', 'suicide', 'attacktype1',
          'targtype1', 'nkill', 'nwound', 'weaptype1', 'success']]
df.set_index('Datetime', inplace=True)
df.fillna(value=-99999, inplace=True)  # cleaning data!!!

XX = np.array(df.drop(['success'], 1))
XX = preprocessing.scale(XX)  # dividing dataset into features and label
#df.dropna(inplace=True)  # dropping those rows which contain nulls
yy = np.array(df['success'])
yy = yy.reshape(-1, 1)

X_train, X_test, y_train, y_test = cross_validation.train_test_split(XX, yy, random_state=2)  # training

n_nodes_hl1 = 100
n_nodes_hl2 = 100
n_nodes_hl3 = 100
n_input = X_train.shape[1]
print(X_train.shape)
print(yy.shape)
n_classes = y_train.shape[1]
batch_size = 100

x = tf.placeholder('float', [None, n_input])
y = tf.placeholder('float')

def neural_network_model(data):
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([n_input, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)
    output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']
    return output

def train_neural_network(x):
    prediction = neural_network_model(x)
    # OLD VERSION:
    # cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(prediction, y) )
    # NEW:
    cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 10

    with tf.Session() as sess:
        # OLD:
        # sess.run(tf.initialize_all_variables())
        # NEW:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            total_batch = int(len(X_train) / batch_size)
            print(total_batch)
            x_batches = np.array_split(X_train, total_batch)
            y_batches = np.array_split(y_train, total_batch)
            epoch_loss = 0
            for i in range(total_batch):
                epoch_x, epoch_y = x_batches[i], y_batches[i]
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: X_test, y: y_test}))

train_neural_network(x)
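(For context on why the reported accuracy is 1.0: with yy reshaped to (-1, 1), n_classes is 1, so both tf.argmax(prediction, 1) and tf.argmax(y, 1) are taken over a single column and are always 0, which makes every example count as "correct". A sketch of an accuracy calculation that fits non-one-hot 0/1 labels, assuming prediction is the single sigmoid logit and thresholding it at 0.5, would go inside the session in place of the argmax-based lines:)

# Sketch for 0/1 labels in a (batch, 1) column: threshold the sigmoid of the
# single logit instead of taking argmax over one column.
predicted_class = tf.cast(tf.greater(tf.nn.sigmoid(prediction), 0.5), 'float')
correct = tf.equal(predicted_class, y)
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
print('Accuracy:', accuracy.eval({x: X_test, y: y_test}))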

tf.nn.softmax_cross_entropy_with_logits() error: logits and labels must be same size

I am new to TensorFlow and am trying to write an algorithm to classify images in the CIFAR-10 dataset. I am getting this error:
InvalidArgumentError (see above for traceback): logits and labels must be same size: logits_size=[10000,10] labels_size=[1,10000]
[[Node: SoftmaxCrossEntropyWithLogits = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"](Reshape, Reshape_1)]]
Here is my code:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import cPickle

n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_classes = 10
batch_size = 100
image_size = 32*32*3  # because 3 channels

x = tf.placeholder('float', shape=(None, image_size))
y = tf.placeholder('float')

def neural_network_model(data):
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([image_size, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    # input_data * weights + biases
    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    # activation function
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)
    output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']
    return output

def train_neural_network(x):
    prediction = neural_network_model(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(prediction, y))  # THIS IS LINE 48 WHERE THE ERROR OCCURS
    # learning rate = 0.001
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 10

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for i in range(5):
                with open('data_batch_' + str(i+1), 'rb') as f:
                    train_data = cPickle.load(f)
                print train_data
                print prediction.get_shape()
                #print len(y)
                _, c = sess.run([optimizer, cost], feed_dict={x: train_data['data'], y: train_data['labels']})
                epoch_loss += c
            print 'Epoch ' + str(epoch) + ' completed out of ' + str(hm_epochs) + ' loss: ' + str(epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        with open('test_batch', 'rb') as f:
            test_data = cPickle.load(f)
        accuracy = accuracy.eval({x: test_data['data'], y: test_data['labels']})
        print 'Accuracy: ' + str(accuracy)

train_neural_network(x)
I'm pretty sure this means that in line 48 (shown above) prediction and y are not the same shape, but I don't understand TensorFlow well enough to know how to fix it. I don't even really understand where y is being set; I got most of this code from a tutorial and fiddled with it to apply it to a different dataset. How can I fix this error?
The tf.nn.softmax_cross_entropy_with_logits(logits, labels) op expects its logits and labels arguments to be tensors with the same shape. Furthermore, the logits and labels arguments should be 2-D tensors (matrices) with batch_size rows, and num_classes columns.
From the error message and the size of logits, I'm guessing that batch_size is 10000, and num_classes is 10. From the size of labels, I'm guessing that your labels are encoded as a list of integers, where the integer represent the index of the class for the corresponding input example. (I'd have expected this to be a tf.int32 value, rather than tf.float32 as it appears to be in your program, but perhaps there is some automatic conversion going on.)
In TensorFlow, you can use tf.nn.sparse_softmax_cross_entropy_with_logits() to compute cross-entropy on data in this form. In your program, you could do this by replacing the cost calculation with:
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
    prediction, tf.squeeze(y)))
Note that the tf.squeeze() op is needed to convert y into a vector of length batch_size (in order to be a valid argument to tf.nn.sparse_softmax_cross_entropy_with_logits()).
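(If it helps to see the shapes concretely, here is a minimal sketch, assuming TensorFlow 1.x keyword arguments and shapes matching the error message, of what the squeeze does: labels of shape [1, batch_size] are flattened to [batch_size] so each entry lines up with one row of the [batch_size, num_classes] logits.)

import numpy as np
import tensorflow as tf

logits = tf.placeholder(tf.float32, [None, 10])  # [batch_size, num_classes]
labels = tf.placeholder(tf.int32, [1, None])     # [1, batch_size], as in the error message
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=tf.squeeze(labels)))  # squeeze -> shape [batch_size]

with tf.Session() as sess:
    print(sess.run(loss, feed_dict={logits: np.random.randn(4, 10),
                                    labels: [[0, 3, 9, 1]]}))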
Here are some updates to the code to support TensorFlow version 1.0:
def train_neural_network(x):
    prediction = neural_network_model(x)
    # OLD VERSION:
    # cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(prediction, y) )
    # NEW:
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 10

    with tf.Session() as sess:
        # OLD:
        # sess.run(tf.initialize_all_variables())
        # NEW:
        sess.run(tf.global_variables_initializer())

MNIST classification in tensorflow, RecursionError: maximum recursion depth exceeded

I ran a neural network model for MNIST classification and received the error:
RecursionError: maximum recursion depth exceeded
I checked some of the issues on Stack Overflow and tried to increase the recursion limit to 1500, but that did not work.
How should I increase the limit? And how do I know what limit will cause a stack overflow?
I followed the tutorial from here
I have Anaconda 3.5 distribution on my windows 10 machine.
The full code is here-
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_classes = 10
batch_size = 100

# height x weight
x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')

def neural_network_model(data):
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([784, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    # our model = (input_data x weights) + biases
    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)
    output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']
    return output

def train_neural_network(x):
    prediction = train_neural_network(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(prediction, y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)  # default learning rate for AdamOptimizer = 0.001
    hm_epochs = 5

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print(('Epoch', epoch), ('completed out of', hm_epochs), ('loss:', epoch_loss))

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print(('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels})))

train_neural_network(x)
I don't know what the exact code is supposed to be, but I'm quite sure the following lines are wrong:
def train_neural_network(x):
    prediction = train_neural_network(x)
This will cause an infinite recursion, and increasing the recursion limit will not solve the problem.
Looks like you're using code from PythonProgramming.net's deep learning course
In your code, you're doing:
def train_neural_network(x):
    prediction = train_neural_network(x)
You're calling the function from within itself, which is why it never ends.
Consider your objectives here. Should prediction be the return of train_neural_network, or the return from the neural_network_model?
def train_neural_network(x):
    prediction = neural_network_model(x)
