Neural Network - ValueError: Cannot feed value of shape - python

I'm new to Python and TensorFlow. To start, I worked through the MNIST tutorial and understood it so far.
But now I have to create a new neural network with numerical input data.
I have a dataset that provides input_data and v_data.
If I run input_data.shape -> (1000, 25, 4)
If I run v_data.shape -> (1000, 2)
What I tried to do is split the data into (training + validation) and testing:
Training + validation = 90% of the data (90% of input.pkl)
Testing data = the remaining 10%
Then I divided that 90% of input_data into training and validation (70% training, 30% validation).
The network should correctly predict based on v_data, but I still get an error. See the code and the error below.
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Imports
import tensorflow as tf
import pickle as pkl
import numpy as np

# load data
with open('input.pkl', 'rb') as f:
    input_data = pkl.load(f)
f.close()

X_train, y_train = input_data

# split data into train / validation and test
X_input = X_train[0:900]
y_input = y_train[0:900]
#print (X_input.shape)
#print (y_input.shape)

X_train_data = X_input[0:630]
X_test_data = X_input[630:900]
y_train_data = y_input[0:630]
y_test_data = y_input[630:900]

# Variables
hidden_layer_1_nodes = 300
hidden_layer_2_nodes = 100
output_layer_nodes = 100
epochs = 10
classes = 2
epoch_errors = []
stddev = 0.035
learning_rate = 0.08
batch_size = 100

#print (X_train_data[0])

# TF Placeholders
X = tf.placeholder('float', [25, 4], name='X')
y = tf.placeholder('float', name='y')

# Weights Matrices
W1 = tf.Variable(tf.truncated_normal([4, hidden_layer_1_nodes], stddev=stddev), name='W1')
W2 = tf.Variable(tf.truncated_normal([hidden_layer_1_nodes, hidden_layer_2_nodes], stddev=stddev), name='W2')
W3 = tf.Variable(tf.truncated_normal([hidden_layer_2_nodes, output_layer_nodes], stddev=stddev), name='W3')
W4 = tf.Variable(tf.truncated_normal([output_layer_nodes, classes], stddev=stddev), name='W4')

# Biases Vectors
b1 = tf.Variable(tf.truncated_normal([hidden_layer_1_nodes], stddev=stddev), name='b1')
b2 = tf.Variable(tf.truncated_normal([hidden_layer_2_nodes], stddev=stddev), name='b2')
b3 = tf.Variable(tf.truncated_normal([output_layer_nodes], stddev=stddev), name='b3')
b4 = tf.Variable(tf.truncated_normal([classes], stddev=stddev), name='b4')

# Define the Neural Network
def nn_model(X):
    input_layer = {'weights': W1, 'biases': b1}
    hidden_layer_1 = {'weights': W2, 'biases': b2}
    hidden_layer_2 = {'weights': W3, 'biases': b3}
    output_layer = {'weights': W4, 'biases': b4}

    input_layer_sum = tf.add(tf.matmul(X, input_layer['weights']), input_layer['biases'])
    input_layer_sum = tf.nn.relu(input_layer_sum)

    hidden_layer_1_sum = tf.add(tf.matmul(input_layer_sum, hidden_layer_1['weights']), hidden_layer_1['biases'])
    hidden_layer_1_sum = tf.nn.relu(hidden_layer_1_sum)

    hidden_layer_2_sum = tf.add(tf.matmul(hidden_layer_1_sum, hidden_layer_2['weights']), hidden_layer_2['biases'])
    hidden_layer_2_sum = tf.nn.relu(hidden_layer_2_sum)

    output_layer_sum = tf.add(tf.matmul(hidden_layer_2_sum, output_layer['weights']), output_layer['biases'])
    return output_layer_sum

# Train the Neural Network
def nn_train(X):
    pred = nn_model(X)
    pred = tf.identity(pred)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
    optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate).minimize(cost)

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        #saver = tf.train.Saver()
        sess.run(init_op)

        for epoch in range(epochs):
            epoch_loss = 0.0
            i = 0
            while i < len(X_train_data):
                start = i
                end = i + batch_size
                batch_x = np.array(X_train_data[start:end])
                batch_y = np.array(y_train_data[start:end])
                _, c = sess.run([optimizer, cost], feed_dict={X: batch_x, y: batch_y})
                epoch_loss += c
                i += batch_size
            epoch_errors.append(epoch_loss)
            print('Epoch ', epoch + 1, ' of ', epochs, ' with loss: ', epoch_loss)

        correct_result = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_result, 'float'))
        print('Acc: ', accuracy.eval({X: X_test_data, y: y_test_data}))

if __name__ == "__main__":
    nn_train(X)
The following error
ValueError: Cannot feed value of shape (100, 25, 4) for Tensor 'X:0', which has shape '(25, 4)'
occurs at line 105:
_, c = sess.run([optimizer, cost], feed_dict={X: batch_x, y: batch_y})

The placeholder you have defined for the input X has the shape (25,4)
tf.placeholder('float', [25, 4], name='X')
But the input you are providing is of the shape (100, 25, 4) where 100 is your batch size.
Change the definition to
tf.placeholder('float', [None, 25, 4], name='X')
and the error should be gone. Here, None takes care of the batch size automatically.
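For illustration, a minimal sketch (TF 1.x; the tensor name is hypothetical) showing that a None first dimension accepts batches of any size:
import numpy as np
import tensorflow as tf

# The leading None dimension lets the same placeholder take any batch size.
X_demo = tf.placeholder(tf.float32, [None, 25, 4], name='X_demo')
total = tf.reduce_sum(X_demo)

with tf.Session() as sess:
    for batch in (np.zeros((100, 25, 4)), np.zeros((30, 25, 4))):
        print(sess.run(total, feed_dict={X_demo: batch}))   # both batch sizes are accepted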
Update: Sorry, I didn't go through the whole code at first. Your code needs a few fixes.
The correct way to define the input placeholder is:
X = tf.placeholder(tf.float32, [None, input_dim], name='X')
Now, if you are dealing with images, input_dim will be the length of the flattened array for one example, i.e. if your image has dimensions 25x4, input_dim should be 25*4 = 100. It must equal the first dimension of your first weight matrix W1.
Also, before feeding your batch you will need to reshape it.
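For example, flattening each (25, 4) example into a 100-element vector with NumPy (the values here are just placeholders):
import numpy as np

# Hypothetical batch with the shapes from the question: (batch_size, 25, 4)
batch_x = np.zeros((100, 25, 4))
flat_x = batch_x.reshape(-1, 25 * 4)
print(flat_x.shape)   # (100, 100) -- matches a (None, 100) placeholder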
Below is the fixed code (changes are commented):
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Imports
import tensorflow as tf
import pickle as pkl
import numpy as np

# load data
with open('input.pkl', 'rb') as f:
    input_data = pkl.load(f)
f.close()

X_train, y_train = input_data

# split data into train / validation and test
X_input = X_train[0:900]
y_input = y_train[0:900]
#print (X_input.shape)
#print (y_input.shape)

X_train_data = X_input[0:630]
X_test_data = X_input[630:900]
y_train_data = y_input[0:630]
y_test_data = y_input[630:900]

# Variables
hidden_layer_1_nodes = 300
hidden_layer_2_nodes = 100
output_layer_nodes = 100
epochs = 10
classes = 2
epoch_errors = []
stddev = 0.035
learning_rate = 0.08
batch_size = 100

#print (X_train_data[0])

# TF Placeholders
# Input data should be of shape (batch_size, flattened size of one example).
# Note also the corrected shape of y.
X = tf.placeholder(tf.float32, [None, 25 * 4], name='X')
y = tf.placeholder(tf.float32, [None, classes], name='y')

# Weights Matrices. First dimension of W1 == second dimension of X
W1 = tf.Variable(tf.truncated_normal([25 * 4, hidden_layer_1_nodes], stddev=stddev), name='W1')
W2 = tf.Variable(tf.truncated_normal([hidden_layer_1_nodes, hidden_layer_2_nodes], stddev=stddev), name='W2')
W3 = tf.Variable(tf.truncated_normal([hidden_layer_2_nodes, output_layer_nodes], stddev=stddev), name='W3')
W4 = tf.Variable(tf.truncated_normal([output_layer_nodes, classes], stddev=stddev), name='W4')

# Biases Vectors
b1 = tf.Variable(tf.truncated_normal([hidden_layer_1_nodes], stddev=stddev), name='b1')
b2 = tf.Variable(tf.truncated_normal([hidden_layer_2_nodes], stddev=stddev), name='b2')
b3 = tf.Variable(tf.truncated_normal([output_layer_nodes], stddev=stddev), name='b3')
b4 = tf.Variable(tf.truncated_normal([classes], stddev=stddev), name='b4')

# Define the Neural Network
def nn_model(X):
    input_layer = {'weights': W1, 'biases': b1}
    hidden_layer_1 = {'weights': W2, 'biases': b2}
    hidden_layer_2 = {'weights': W3, 'biases': b3}
    output_layer = {'weights': W4, 'biases': b4}

    input_layer_sum = tf.add(tf.matmul(X, input_layer['weights']), input_layer['biases'])
    input_layer_sum = tf.nn.relu(input_layer_sum)

    hidden_layer_1_sum = tf.add(tf.matmul(input_layer_sum, hidden_layer_1['weights']), hidden_layer_1['biases'])
    hidden_layer_1_sum = tf.nn.relu(hidden_layer_1_sum)

    hidden_layer_2_sum = tf.add(tf.matmul(hidden_layer_1_sum, hidden_layer_2['weights']), hidden_layer_2['biases'])
    hidden_layer_2_sum = tf.nn.relu(hidden_layer_2_sum)

    output_layer_sum = tf.add(tf.matmul(hidden_layer_2_sum, output_layer['weights']), output_layer['biases'])
    return output_layer_sum

# Train the Neural Network
def nn_train(X):
    pred = nn_model(X)
    pred = tf.identity(pred)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
    optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate).minimize(cost)

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        #saver = tf.train.Saver()
        sess.run(init_op)

        for epoch in range(epochs):
            epoch_loss = 0.0
            i = 0
            while i < len(X_train_data):
                start = i
                end = i + batch_size
                # reshape before feeding.
                batch_x = np.array(X_train_data[start:end]).reshape(batch_size, 25 * 4)
                batch_y = np.array(y_train_data[start:end]).reshape(batch_size, classes)
                _, c = sess.run([optimizer, cost], feed_dict={X: batch_x, y: batch_y})
                epoch_loss += c
                i += batch_size
            epoch_errors.append(epoch_loss)
            print('Epoch ', epoch + 1, ' of ', epochs, ' with loss: ', epoch_loss)

        correct_result = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_result, 'float'))
        print('Acc: ', accuracy.eval({X: X_test_data.reshape(-1, 25 * 4), y: y_test_data.reshape(-1, classes)}))

if __name__ == "__main__":
    nn_train(X)
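One caveat about the reshape in the loop above: reshape(batch_size, ...) assumes every batch is full, but 630 training examples with batch_size = 100 leave a final slice of only 30, which then fails to reshape. A safer variant (a small sketch reusing the loop's variables) lets NumPy infer the batch dimension:
# Inside the while loop: -1 lets NumPy infer the (possibly smaller) batch size.
batch_x = np.array(X_train_data[start:end]).reshape(-1, 25 * 4)
batch_y = np.array(y_train_data[start:end]).reshape(-1, classes)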

UPDATE: Sorry, I posted the wrong error.
@Kumar, I changed the batch size to 30 (so 21*30 = 630). It now prints the epochs, but in a weird way:
Epoch 1 of 10 with loss: 1680690.2648780346
Epoch 2 of 10 with loss: 2382142.9208984375
Epoch 3 of 10 with loss: 4215628.857421875
Epoch 4 of 10 with loss: 9046892.295166016
Epoch 5 of 10 with loss: 23961644.453125
Epoch 6 of 10 with loss: 31733882.34375
Epoch 7 of 10 with loss: 46124696.609375
Epoch 8 of 10 with loss: 61760446.28125
Epoch 9 of 10 with loss: 89145610.59375
Epoch 10 of 10 with loss: 121249417.25
And I then received the next error for:
print('Acc: ', accuracy.eval({X:X_test_data, y:y_test_data}))
ValueError: Cannot feed value of shape (270, 25, 4) for Tensor 'X:0', which has shape '(?, 100)'
Process finished with exit code 1
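For completeness: that second error comes from evaluating with the unflattened test set; the accuracy line from the fixed code above already applies the same reshape:
print('Acc: ', accuracy.eval({X: X_test_data.reshape(-1, 25 * 4),
                              y: y_test_data.reshape(-1, classes)}))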

Related

tf.nn.in_top_k(logits,y,1) out of range error but equal actually

I'm building my first neural network, a binary classifier, but I get an error when I try to evaluate the model with:
correct = tf.nn.in_top_k(logits,y,1)
where
the logits tensor is the predictions: shape [batch_size = 52, num_classes = 1], type float32
the y tensor is the targets: shape [batch_size = 52], type int32
I got this error :
targets[1] is out of range
[[{{node in_top_k/InTopKV2}}]]
After some debugging, I understood that the values of my tensor y must be strictly less than num_classes, so the first value of y, which is equal to 1, is considered out of range, even though the parameter num_classes = 1.
How can I allow my tensor values to be equal to num_classes rather than only strictly less than it? Or is there another way?
In my opinion, num_classes should equal 1, because it's a binary classification, so only 1 output neuron is needed.
EDIT
Here's my full code :
import tensorflow as tf
import numpy as np
n_inputs = 28
n_hidden1 = 15
n_hidden2 = 5
n_outputs = 1
reset_graph()
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y") #None => any
def neuron_layer(X, n_neurons, name, activation=None):
with tf.name_scope(name):
n_inputs = int(X.shape[1])
stddev = 2 / np.sqrt(n_inputs)
init = tf.truncated_normal((n_inputs, n_neurons), stddev=stddev) #matrice n_inputs x n_neurons values proche de 0
W = tf.Variable(init,name="kernel") #weights random
b = tf.Variable(tf.zeros([n_neurons]), name="bias")
Z = tf.matmul(X, W) + b
tf.cast(Z,tf.int32)
if activation is not None:
return activation(Z)
else:
return Z
def to_one_hot(y):
n_classes = y.max() + 1
m = len(y)
Y_one_hot = np.zeros((m, n_classes))
Y_one_hot[np.arange(m), y] = 1
return Y_one_hot
hidden1 = neuron_layer(X, n_hidden1, name="hidden1",
activation=tf.nn.relu)
hidden2 = neuron_layer(hidden1, n_hidden2, name="hidden2",
activation=tf.nn.relu)
logits = neuron_layer(hidden2, n_outputs, name="outputs")
xentropy = tf.keras.backend.binary_crossentropy(tf.to_float(y),logits)
loss = tf.reduce_mean(xentropy)
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits,y,1)
labels_max = tf.reduce_max(y)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 40
batch_size = 50
def shuffle_batch(X, y, batch_size): #Homogeneisation et decoupage en paquets(n_batches)
rnd_idx = np.random.permutation(len(X))
n_batches = len(X) // batch_size
for batch_idx in np.array_split(rnd_idx, n_batches):
X_batch, y_batch = X[batch_idx], y[batch_idx]
yield X_batch, y_batch
with tf.Session() as sess:
init.run()
X_temp,Y_temp = X_batch,y_batch
feed_dict={X: X_batch, y: y_batch}
print("feed",feed_dict)
print("\n y_batch :",y_batch,y_batch.dtype)
print("\n X_batch :",X_batch,X_batch.dtype,X_batch.shape)
for epoch in range(n_epochs):
for X_batch, y_batch in shuffle_batch(X_train, Y_train, batch_size):
y_batch=y_batch.astype(np.int32)
X_batch=X_batch.astype(np.float32)
sess.run(training_op,feed_dict={X: X_batch, y: y_batch})
#acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
#acc_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
#print(epoch, "Batch accuracy:", acc_batch, "Val accuracy:", acc_val)
save_path = saver.save(sess, "./my_model_final.ckpt")
#some tests
print("y eval :",y.eval(feed_dict={X:X_temp,y:Y_temp}).shape)
y_one_hot=to_one_hot(y.eval(feed_dict={X:X_temp,y:Y_temp}))
print("y_one_hot :",y_one_hot.shape)
print("logits eval : ",logits.eval(feed_dict={X:X_temp,y:Y_temp}))
#print(correct.eval(feed_dict={X:X_temp,y:Y_temp}))
print(labels_max.eval(feed_dict={X:X_temp,y:Y_temp}))
As per the documentation here, tf.nn.in_top_k(predictions, targets, k) has arguments:
predictions: A Tensor of type float32. A batch_size x classes tensor.
targets: A Tensor. Must be one of the following types: int32, int64. A batch_size vector of class ids.
k: An int. Number of top elements to look at for computing precision.
As you are performing binary classification, i.e. you have two classes, the shape of the logits tensor in your case should be (52, 2) while the shape of y should be (52,). Here, logits is basically a one-hot-style score tensor with one column per class. This is the reason why you are getting the above error.
Consider the below example:
Example 1:
res = tf.nn.in_top_k([[0,1], [1,0], [0,1], [1, 0], [0, 1]], [0, 1, 1, 1, 1], 1)
Here, the shape of logits is (5, 2) while y is (5,). If you do tf.reduce_max(y), you get 1, which is less than the number of classes and hence okay.
This works fine and outputs [False False True False True].
Example 2:
res = tf.nn.in_top_k([[0,1], [1,0], [0,1], [1, 0], [0, 1]], [0, 2, 1, 1, 1], 1)
If you do tf.reduce_max(y), you get 2, which is equal to the number of classes.
This raises an error: InvalidArgumentError: targets[1] is out of range
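A minimal sketch for running these examples yourself (TF 1.x session API); Example 2 would raise the InvalidArgumentError shown above:
import tensorflow as tf

# Example 1 from above: logits (5, 2), targets (5,), k = 1
logits = [[0., 1.], [1., 0.], [0., 1.], [1., 0.], [0., 1.]]
res = tf.nn.in_top_k(logits, [0, 1, 1, 1, 1], 1)

with tf.Session() as sess:
    print(sess.run(res))   # [False False  True False  True]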
EDIT: In your above code, make the following modifications:
change n_outputs = 1 to n_outputs = 2
change sess.run(training_op,feed_dict={X: X_batch, y: y_batch}) to _, cost, acc = sess.run([training_op, loss, accuracy], feed_dict={X: X_batch, y: to_one_hot(y_batch)})
change correct = tf.nn.in_top_k(logits, y, 1) to correct = tf.nn.in_top_k(logits, tf.argmax(y, 1), 1)
Code (random data used):
import numpy as np
import tensorflow as tf
n_inputs = 28
n_hidden1 = 15
n_hidden2 = 5
n_outputs = 2
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None, 2), name="y") #None => any
def neuron_layer(X, n_neurons, name, activation=None):
with tf.name_scope(name):
n_inputs = int(X.shape[1])
stddev = 2 / np.sqrt(n_inputs)
init = tf.truncated_normal((n_inputs, n_neurons), stddev=stddev) #matrice n_inputs x n_neurons values proche de 0
W = tf.Variable(init,name="kernel") #weights random
b = tf.Variable(tf.zeros([n_neurons]), name="bias")
Z = tf.matmul(X, W) + b
tf.cast(Z,tf.int32)
if activation is not None:
return activation(Z)
else:
return Z
def to_one_hot(y):
n_classes = y.max() + 1
m = len(y)
Y_one_hot = np.zeros((m, n_classes))
Y_one_hot[np.arange(m), y] = 1
return Y_one_hot
hidden1 = neuron_layer(X, n_hidden1, name="hidden1",
activation=tf.nn.relu)
hidden2 = neuron_layer(hidden1, n_hidden2, name="hidden2",
activation=tf.nn.relu)
logits = neuron_layer(hidden2, n_outputs, name="outputs")
xentropy = tf.keras.backend.binary_crossentropy(tf.to_float(y),logits)
loss = tf.reduce_mean(xentropy)
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits,tf.argmax(y, 1),1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 1
X_train = np.random.rand(100, 28)
X_train = X_train.astype(np.float32)
Y_train = np.random.randint(low = 0, high = 2, size = 100, dtype=np.int32)
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
_, cost, corr, acc = sess.run([training_op, loss, correct, accuracy], feed_dict={X: X_train, y: to_one_hot(Y_train)})
print(corr)
print('Loss: {} Accuracy: {}'.format(cost, acc))
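As an aside, if you really want a single output neuron for binary classification (as the question suggests), the usual pattern is sigmoid cross-entropy instead of in_top_k. A minimal sketch of that alternative (random data, hypothetical layer sizes; this is not the approach used above):
import numpy as np
import tensorflow as tf

# Single-logit binary classifier: labels are 0/1 floats of shape (batch, 1).
X = tf.placeholder(tf.float32, shape=(None, 28), name="X")
y = tf.placeholder(tf.float32, shape=(None, 1), name="y")

hidden = tf.layers.dense(X, 15, activation=tf.nn.relu)
logits = tf.layers.dense(hidden, 1)                      # one output neuron
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits))
predicted = tf.cast(tf.nn.sigmoid(logits) > 0.5, tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, y), tf.float32))
training_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    X_batch = np.random.rand(52, 28).astype(np.float32)
    y_batch = np.random.randint(0, 2, size=(52, 1)).astype(np.float32)
    _, acc = sess.run([training_op, accuracy], feed_dict={X: X_batch, y: y_batch})
    print('Batch accuracy:', acc)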

My model got a loss value of 0, but it just classifies all the input into the same class, what's wrong?

I trained this model to classify the images in the fashion-mnist dataset. Before the weights have been trained, the loss value seems normal, but after the first epoch the loss drops to 0 and all the input images are classified into class 0.
If regularization is added, the weights update more slowly, but I eventually get the same result: all the images classified into class 0 with a loss value of 0.
import tensorflow as tf
from tensorflow import keras
import numpy as np
EPOCH = 10
BATCH_SIZE = 30
DATA_SIZE = 60000
REGULARIZER = 0.001
def main():
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_images = train_images / 255.0
test_images = test_images / 255.0
train_labels = train_labels.reshape((60000, 1))
train_images = train_images.reshape((60000, 784))
test_images = test_images.reshape((10000, 784))
judge_labels = test_labels.reshape((10000, 1))
x = tf.placeholder(tf.float32, (None, 784))
y_ = tf.placeholder(tf.float32, (None, 1))
w1 = tf.Variable(np.random.rand(784 * 24).reshape([784, 24]) * 10, dtype=tf.float32)
# tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(REGULARIZER)(w1))
w2 = tf.Variable(np.random.rand(24 * 24).reshape([24, 24]) * 10, dtype=tf.float32)
# tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(REGULARIZER)(w2))
w3 = tf.Variable(np.random.rand(24 * 10).reshape([24, 10]) * 10, dtype=tf.float32)
# tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(REGULARIZER)(w3))
bias1 = tf.constant(1, shape=(24,), dtype=tf.float32)
bias2 = tf.constant(1, shape=(24,), dtype=tf.float32)
y1 = tf.nn.relu(tf.matmul(x, w1) + bias1)
y2 = tf.nn.relu(tf.matmul(y1, w2) + bias2)
y = tf.matmul(y2, w3)
predict = tf.argmax(y, axis=1)
y_spy = tf.nn.softmax(y, axis=1)
ce = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.argmax(y_, 1), logits=y)
# loss = tf.reduce_mean(ce) + tf.add_n(tf.get_collection('losses'))
loss = tf.reduce_mean(ce)
train_step = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
print('current out loss: ', end='')
print(sess.run(loss, feed_dict={x: test_images, y_: judge_labels}))
pre = sess.run(predict, feed_dict={x: test_images})
miss = pre - test_labels
print('right number: ', end='')
print((np.sum(miss == 0)))
for epoch in range(EPOCH):
for i in range(DATA_SIZE // BATCH_SIZE):
start = i * BATCH_SIZE
end = (i + 1) * BATCH_SIZE
_ = sess.run([train_step], feed_dict={x: train_images[start:end],
y_: train_labels[start:end]})
print('epochs %d :' % epoch)
print('current in loss: ', end='')
print(sess.run(loss, feed_dict={x: train_images[start:end],
y_: train_labels[start:end]}))
print('current out loss: ', end='')
print(sess.run(loss, feed_dict={x: test_images, y_: judge_labels}))
miss = sess.run(predict, feed_dict={x: test_images}) - test_labels
print('right number: ', end='')
print((np.sum(miss == 0)))
if __name__ == "__main__":
main()
Mistake 1: The loss function should be
ce = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.reshape(y_, [-1]), logits=y)
because labels are flat class ids for this loss function (also change the y_ placeholder to int32 type).
Mistake 2: The weights are initialized to very large values.
GradientDescentOptimizer is a very slow optimizer here; use AdamOptimizer instead.
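To make the label-shape requirement from Mistake 1 concrete, here is a small standalone sketch (random values, TF 1.x):
import numpy as np
import tensorflow as tf

# sparse_softmax_cross_entropy_with_logits expects integer class ids of shape
# (batch,) and logits of shape (batch, num_classes) -- no one-hot encoding.
logits = tf.constant(np.random.rand(4, 10), dtype=tf.float32)   # 4 examples, 10 classes
labels = tf.constant([3, 0, 7, 1], dtype=tf.int32)              # flat class ids

ce = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)

with tf.Session() as sess:
    print(sess.run(ce).shape)   # (4,) -- one loss value per example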
Fixed code:
import tensorflow as tf
from tensorflow import keras
import numpy as np
EPOCH = 10
BATCH_SIZE = 64
DATA_SIZE = 60000
REGULARIZER = 0.001
def main():
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_images = train_images / 255.0
test_images = test_images / 255.0
train_labels = train_labels.reshape((60000, 1))
train_images = train_images.reshape((60000, 784))
test_images = test_images.reshape((10000, 784))
judge_labels = test_labels.reshape((10000, 1))
x = tf.placeholder(tf.float32, (None, 784))
y_ = tf.placeholder(tf.int32, (None, 1))
w1 = tf.Variable(np.random.rand(784 * 24).reshape([784, 24]), dtype=tf.float32)
# tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(REGULARIZER)(w1))
w2 = tf.Variable(np.random.rand(24 * 24).reshape([24, 24]), dtype=tf.float32)
# tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(REGULARIZER)(w2))
w3 = tf.Variable(np.random.rand(24 * 10).reshape([24, 10]), dtype=tf.float32)
# tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(REGULARIZER)(w3))
bias1 = tf.constant(1, shape=(24,), dtype=tf.float32)
bias2 = tf.constant(1, shape=(24,), dtype=tf.float32)
y1 = tf.nn.relu(tf.matmul(x, w1) + bias1)
y2 = tf.nn.relu(tf.matmul(y1, w2) + bias2)
y = tf.matmul(y2, w3)
predict = tf.argmax(y, axis=1)
y_spy = tf.nn.softmax(y, axis=1)
ce = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.reshape(y_, [-1]), logits=y)
# loss = tf.reduce_mean(ce) + tf.add_n(tf.get_collection('losses'))
loss = tf.reduce_mean(ce)
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
print('current out loss: ', end='')
print(sess.run(loss, feed_dict={x: test_images, y_: judge_labels}))
pre = sess.run(predict, feed_dict={x: test_images})
miss = pre - test_labels
print('right number: ', end='')
print((np.sum(miss == 0)))
for epoch in range(EPOCH):
for i in range(DATA_SIZE // BATCH_SIZE):
start = i * BATCH_SIZE
end = (i + 1) * BATCH_SIZE
_ = sess.run([train_step], feed_dict={x: train_images[start:end],
y_: train_labels[start:end]})
print('epochs %d :' % epoch)
print('current in loss: ', end='')
print(sess.run(loss, feed_dict={x: train_images[start:end],
y_: train_labels[start:end]}))
print('current out loss: ', end='')
print(sess.run(loss, feed_dict={x: test_images, y_: judge_labels}))
miss = sess.run(predict, feed_dict={x: test_images}) - test_labels
print('right number: ', end='')
print((np.sum(miss == 0)))
miss = sess.run(predict, feed_dict={x: test_images})
print (miss[0:10], test_labels[0:10])
if __name__ == "__main__":
main()
Output (selective):
...
Sample predictions: [9 2 4 3 2 4 4 4 7 7], Actual: [9 2 1 1 6 1 4 6 5 7]
...
Sample predictions: [9 2 1 1 6 1 4 6 1 7], Actual: [9 2 1 1 6 1 4 6 5 7]
...
Sample predictions: [7 2 1 1 6 1 4 6 1 7], Actual: [9 2 1 1 6 1 4 6 5 7]
...
Sample predictions: [9 2 1 1 6 1 4 6 1 7], Actual: [9 2 1 1 6 1 4 6 5 7]
...
Code with train and test loss, train and test accuracy, and shuffling of the training data each epoch:
import tensorflow as tf
from tensorflow import keras
import numpy as np
from sklearn.metrics import classification_report, accuracy_score
EPOCH = 30
BATCH_SIZE = 64
DATA_SIZE = 60000
REGULARIZER = 0.001
def main():
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_images = train_images / 255.0
test_images = test_images / 255.0
train_labels = train_labels.reshape((60000, 1))
train_images = train_images.reshape((60000, 784))
test_images = test_images.reshape((10000, 784))
judge_labels = test_labels.reshape((10000, 1))
x = tf.placeholder(tf.float32, (None, 784))
y_ = tf.placeholder(tf.int32, (None, 1))
w1 = tf.Variable(np.random.rand(784 * 24).reshape([784, 24]), dtype=tf.float32)
tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(REGULARIZER)(w1))
w2 = tf.Variable(np.random.rand(24 * 24).reshape([24, 24]), dtype=tf.float32)
tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(REGULARIZER)(w2))
w3 = tf.Variable(np.random.rand(24 * 10).reshape([24, 10]), dtype=tf.float32)
tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(REGULARIZER)(w3))
bias1 = tf.constant(1, shape=(24,), dtype=tf.float32)
bias2 = tf.constant(1, shape=(24,), dtype=tf.float32)
y1 = tf.nn.relu(tf.matmul(x, w1) + bias1)
y2 = tf.nn.relu(tf.matmul(y1, w2) + bias2)
y = tf.matmul(y2, w3)
predict = tf.argmax(y, axis=1)
ce = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.reshape(y_, [-1]), logits=y)
loss = tf.reduce_mean(ce)
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
idx = np.arange(DATA_SIZE)
for epoch in range(EPOCH):
train_loss = list()
train_predict = list()
np.random.shuffle(idx)
train_images = train_images[idx]
train_labels = train_labels[idx]
for i in range(DATA_SIZE // BATCH_SIZE):
start = i * BATCH_SIZE
end = (i + 1) * BATCH_SIZE
_, loss_, p_ = sess.run([train_step, loss, predict], feed_dict={x: train_images[start:end],
y_: train_labels[start:end]})
train_loss.append(loss_)
train_predict.extend(p_)
test_loss, test_predict = sess.run([loss, predict], feed_dict={x: test_images,
y_: judge_labels})
print ("Epoch: {}, Train Loss: {:.3f}, Test Loss: {:.3f},"\
"Train Acc: {:.3f}, Test Acc: {:.3f}".format(
epoch+1, np.mean(train_loss), test_loss,
accuracy_score(train_labels[0:len(train_predict)], train_predict),
accuracy_score(judge_labels, test_predict)))
if __name__ == "__main__":
main()
Output:
....
Epoch: 27, Train Loss: 0.842, Test Loss: 1.015,Train Acc: 0.816, Test Acc: 0.798
Epoch: 28, Train Loss: 0.832, Test Loss: 0.880,Train Acc: 0.816, Test Acc: 0.806
Epoch: 29, Train Loss: 0.788, Test Loss: 0.886,Train Acc: 0.820, Test Acc: 0.805
Epoch: 30, Train Loss: 0.704, Test Loss: 0.742,Train Acc: 0.826, Test Acc: 0.815
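Since classification_report is already imported above, a per-class breakdown of the final test predictions is a natural follow-up (a small sketch using the test_predict and judge_labels arrays from the last epoch):
# Per-class precision/recall/F1 on the test set.
print(classification_report(judge_labels.ravel(), test_predict))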

tensorflow - relating the accuracy back to the graph

This is from a tutorial, but there's a large piece of it that I can't see the logic in - and I'd rather not just accept it.
It's a TensorFlow example that trains and tests against MNIST images of digits.
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot = True)
n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_classes = 10
batch_size = 100
x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')
def neural_network_model(data):
hidden_1_layer = {'weights':tf.Variable(tf.random_normal([784, n_nodes_hl1])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl1]))}
hidden_2_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl2]))}
hidden_3_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl3]))}
output_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
'biases':tf.Variable(tf.random_normal([n_classes])),}
l1 = tf.add(tf.matmul(data,hidden_1_layer['weights']), hidden_1_layer['biases'])
l1 = tf.nn.relu(l1)
l2 = tf.add(tf.matmul(l1,hidden_2_layer['weights']), hidden_2_layer['biases'])
l2 = tf.nn.relu(l2)
l3 = tf.add(tf.matmul(l2,hidden_3_layer['weights']), hidden_3_layer['biases'])
l3 = tf.nn.relu(l3)
output = tf.matmul(l3,output_layer['weights']) + output_layer['biases']
return output
def train_neural_network(x):
prediction = neural_network_model(x)
cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y) )
optimizer = tf.train.AdamOptimizer().minimize(cost)
hm_epochs = 10
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(hm_epochs):
epoch_loss = 0
for _ in range(int(mnist.train.num_examples/batch_size)):
epoch_x, epoch_y = mnist.train.next_batch(batch_size)
_, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
epoch_loss += c
print('Epoch', epoch, 'completed out of',hm_epochs,'loss:',epoch_loss)
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
print('Accuracy:',accuracy.eval({x:mnist.test.images, y:mnist.test.labels}))
train_neural_network(x)
My question is specifically about these three lines:
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
print('Accuracy:',accuracy.eval({x:mnist.test.images, y:mnist.test.labels}))
How does 'y' get its data - unless it merely holds the values for the last batch of the last epoch?
How do these 3 lines tie together and connect back to the graph created?
If the variable 'correct' is already an assessment of the output prediction against 'y', why is the apparent result of that comparison used to test against test data?
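For what it's worth, correct and accuracy are just nodes in the same graph as prediction; the placeholder y holds no data of its own, so accuracy.eval({x: ..., y: ...}) simply re-runs that graph with the test images and labels fed in. A tiny sketch of the mechanism (hypothetical tensors, TF 1.x):
import tensorflow as tf

# Placeholders have no data of their own; each eval/run supplies fresh values.
a = tf.placeholder(tf.float32, name='a_demo')
b = a * 2.0   # a derived graph node, like `correct` or `accuracy` above

with tf.Session() as sess:
    print(sess.run(b, feed_dict={a: 3.0}))   # 6.0  -- "training" feed
    print(b.eval(feed_dict={a: 10.0}))       # 20.0 -- same node, new feed (like test data)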

Placeholder shape doesn't match

I'm trying to implement a 1D convolutional neural network in TensorFlow. This is the code used for creating the placeholders, the convolution layers and the max-pooling layers:
import tensorflow as tf
import math
import numpy as np
try:
from tqdm import tqdm
except ImportError:
def tqdm(x, *args, **kwargs):
return x
sess = tf.InteractiveSession()
# These will be inputs
## Input pixels, image with one channel (gray)
length=458
x = tf.placeholder("float", [None, length])
# Note that -1 is for reshaping
x_im = tf.reshape(x, [-1,length,1])
## Known labels
# None works during variable creation to be
# unspecified size
y_ = tf.placeholder("float", [None,2])
# Conv layer 1
num_filters1 = 2
winx1 = 3
W1 = tf.Variable(tf.truncated_normal(
[winx1, 1 , num_filters1],
stddev=1./math.sqrt(winx1)))
b1 = tf.Variable(tf.constant(0.1,
shape=[num_filters1]))
# 5 convolution, pad with zeros on edges
xw = tf.nn.conv1d(x_im, W1,
stride=5,
padding='SAME')
h1 = tf.nn.relu(xw + b1)
# 2 Max pooling, no padding on edges
p1 = tf.layers.max_pooling1d(h1, pool_size=2,
strides=1, padding='VALID')
# Conv layer 2
num_filters2 = 2
winx2 = 3
W2 = tf.Variable(tf.truncated_normal(
[winx2, num_filters1, num_filters2],
stddev=1./math.sqrt(winx2)))
b2 = tf.Variable(tf.constant(0.1,
shape=[num_filters2]))
# 3 convolution, pad with zeros on edges
p1w2 = tf.nn.conv1d(p1, W2,
stride=3, padding='SAME')
h1 = tf.nn.relu(p1w2 + b2)
# 2 Max pooling, no padding on edges
p2 = tf.layers.max_pooling1d(h1, pool_size=2,
strides=1, padding='VALID')
# Need to flatten convolutional output
p2_size = np.product(
[s.value for s in p2.get_shape()[1:]])
p2f = tf.reshape(p2, [-1, p2_size ])
# Dense layer
num_hidden = 2
W3 = tf.Variable(tf.truncated_normal(
[p2_size, num_hidden],
stddev=2./math.sqrt(p2_size)))
b3 = tf.Variable(tf.constant(0.2,
shape=[num_hidden]))
h3 = tf.nn.relu(tf.matmul(p2f,W3) + b3)
# Drop out training
keep_prob = tf.placeholder("float")
h3_drop = tf.nn.dropout(h3, keep_prob)
# Output Layer
W4 = tf.Variable(tf.truncated_normal(
[num_hidden, 2],
stddev=1./math.sqrt(num_hidden)))
b4 = tf.Variable(tf.constant(0.1,shape=[2]))
# Just initialize
sess.run(tf.global_variables_initializer())
# Define model
y = tf.nn.softmax(tf.matmul(h3_drop,W4) + b4)
### End model specification, begin training code
After constructing the model, it's time to define the loss function as follows:
# Climb on cross-entropy
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(
logits=y + 1e-50, labels=y_))
# How we train
train_step = tf.train.GradientDescentOptimizer(
0.01).minimize(cross_entropy)
# Define accuracy
correct_prediction = tf.equal(tf.argmax(y,1),
tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(
correct_prediction, "float"))
But when I try to train the model using following code:
# Actually train
epochs = 10
train_acc = np.zeros(epochs//10)
test_acc = np.zeros(epochs//10)
for i in tqdm(range(epochs), ascii=True):
# Record summary data, and the accuracy
if i % 10 == 0:
# Check accuracy on train set
A = accuracy.eval(feed_dict={x: train,
y_: onehot_train, keep_prob: 1.0})
train_acc[i//10] = A
# And now the validation set
A = accuracy.eval(feed_dict={x: test,
y_: onehot_test, keep_prob: 1.0})
test_acc[i//10] = A
train_step.run(feed_dict={x: train,\
y_: onehot_train, keep_prob: 0.5})
It returns an error:
ValueError: Cannot feed value of shape (7487, 458) for Tensor
'Placeholder_8:0', which has shape '(?, 1, 458)'
I have 7487 one-dimensional signals, each of length 458. Can somebody help me?
You just need to reshape your input:
train= np.reshape(train,[-1,length,1])
test= np.reshape(test,[-1,length,1])
And you're good to go!

AttributeError: module 'tensorflow.contrib.rnn' has no attribute 'BasicLSTMCell'

I got the attribute error below when I ran the recurrent neural network TensorFlow code for a simple classification task on the MNIST dataset. I could not figure out what is wrong with the code. It seems to me everything is right. Help wanted, thank you!
My tensorflow version is 0.12.1
Here is the message I got:
AttributeError: module 'tensorflow.contrib.rnn' has no attribute 'BasicLSTMCell'
Below is the tensorflow code:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# set random seed for comparing the two result calculations
tf.set_random_seed(1)
# this is data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# hyperparameters
lr = 0.001
training_iters = 100000
batch_size = 128
n_inputs = 28 # MNIST data input (img shape: 28*28)
n_steps = 28 # time steps
n_hidden_units = 128 # neurons in hidden layer
n_classes = 10 # MNIST classes (0-9 digits)
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_classes])
# Define weights
weights = {
# (28, 128)
'in': tf.Variable(tf.random_normal([n_inputs, n_hidden_units])),
# (128, 10)
'out': tf.Variable(tf.random_normal([n_hidden_units, n_classes]))
}
biases = {
# (128, )
'in': tf.Variable(tf.constant(0.1, shape=[n_hidden_units, ])),
# (10, )
'out': tf.Variable(tf.constant(0.1, shape=[n_classes, ]))
}
def RNN(X, weights, biases):
# hidden layer for input to cell
########################################
# transpose the inputs shape from
# X ==> (128 batch * 28 steps, 28 inputs)
X = tf.reshape(X, [-1, n_inputs])
# into hidden
# X_in = (128 batch * 28 steps, 128 hidden)
X_in = tf.matmul(X, weights['in']) + biases['in']
# X_in ==> (128 batch, 28 steps, 128 hidden)
X_in = tf.reshape(X_in, [-1, n_steps, n_hidden_units])
# cell
##########################################
# basic LSTM Cell.
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units, forget_bias=1.0, state_is_tuple=True)
else:
lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden_units)
# lstm cell is divided into two parts (c_state, h_state)
init_state = lstm_cell.zero_state(batch_size, dtype=tf.float32)
outputs, final_state = tf.nn.dynamic_rnn(lstm_cell, X_in, initial_state=init_state, time_major=False)
# unpack to list [(batch, outputs)..] * steps
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
outputs = tf.unpack(tf.transpose(outputs, [1, 0, 2])) # states is the last outputs
else:
outputs = tf.unstack(tf.transpose(outputs, [1,0,2]))
results = tf.matmul(outputs[-1], weights['out']) + biases['out'] # shape = (128, 10)
return results
pred = RNN(x, weights, biases)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
train_op = tf.train.AdamOptimizer(lr).minimize(cost)
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
with tf.Session() as sess:
# tf.initialize_all_variables() no long valid from
# 2017-03-02 if using tensorflow >= 0.12
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
init = tf.initialize_all_variables()
else:
init = tf.global_variables_initializer()
sess.run(init)
step = 0
while step * batch_size < training_iters:
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
batch_xs = batch_xs.reshape([batch_size, n_steps, n_inputs])
sess.run([train_op], feed_dict={
x: batch_xs,
y: batch_ys,
})
if step % 20 == 0:
print(sess.run(accuracy, feed_dict={
x: batch_xs,
y: batch_ys,
}))
step += 1
Try to replace
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units, forget_bias=1.0, state_is_tuple=True)
else:
lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden_units)
with just
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units, forget_bias=1.0, state_is_tuple=True)
It seems that this is the correct line in both versions.
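If a particular install exposes only one of the two names, a try/except fallback avoids hard-coding the version check (a sketch, reusing n_hidden_units from the code above):
# Fall back to whichever location exists in the installed TensorFlow version.
try:
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units, forget_bias=1.0, state_is_tuple=True)
except AttributeError:
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden_units, forget_bias=1.0, state_is_tuple=True)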
