'Tensor' object is not iterable - Python

I get the message "'Tensor' object is not iterable." when trying to run my code. Can someone tell me where my mistake is?
The whole problem started because I need to normalize the data that I am feeding in, so the trouble comes from this part of my code:
for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(total_len / batch_size)
    # Loop over all batches
    for i in range(total_batch):
        batch_x = tf.train.batch(X_train_norm, batch_size)
        batch_y = tf.train.batch(Y_train, batch_size)
        # Run optimization op (backprop) and cost op (to get loss value)
        _, c, p = sess.run([optimizer, cost, pred], feed_dict={x: batch_x, y: batch_y})
        # Compute average loss
        avg_cost += c / total_batch
        # sample prediction
        label_value = batch_y
        estimate = p
        err = label_value - estimate
        print("num batch:", total_batch)
Here is my whole code:
import tensorflow as tf
import numpy as np
X_train = np.genfromtxt('data/train500X.csv', delimiter=',', dtype=float)
Y_train = np.genfromtxt('/data/train500Y.csv', delimiter=',', dtype=float)
X_test = np.genfromtxt('data/test100X.csv', delimiter=',', dtype=float,usecols=(14, 7, 33, 13, 32, 60, 16, 50, 18, 61, 17, 34, 26, 59, 85, 53))
Y_test = np.genfromtxt('data/test100Y.csv', delimiter=',', dtype=float)
X_train_norm = tf.nn.l2_normalize(X_train, 0, epsilon=1e-12)
# Parameters
learning_rate = 0.001
training_epochs = 5000
batch_size = 50
display_step = 1
# Network Parameters
n_hidden_1 = 100 # 1st layer number of features
n_hidden_2 = 200 # 2nd layer number of features
n_hidden_3 = 200 # 3rd layer number of features
n_hidden_4 = 256 # 4th layer number of features
n_out = 1
n_input = X_train.shape[1]
total_len = X_train.shape[0]
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None])
# Create model
def multilayer_perceptron(x, weights, biases):
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Hidden layer with RELU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Hidden layer with RELU activation
    layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
    layer_3 = tf.nn.relu(layer_3)
    # Hidden layer with RELU activation
    layer_4 = tf.add(tf.matmul(layer_3, weights['h4']), biases['b4'])
    layer_4 = tf.nn.relu(layer_4)
    # Output layer with linear activation
    out_layer = tf.matmul(layer_4, weights['out']) + biases['out']
    return out_layer
# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], 0, 0.1)),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], 0, 0.1)),
    'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3], 0, 0.1)),
    'h4': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4], 0, 0.1)),
    'out': tf.Variable(tf.random_normal([n_hidden_4, n_out], 0, 0.1))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1], 0, 0.1)),
    'b2': tf.Variable(tf.random_normal([n_hidden_2], 0, 0.1)),
    'b3': tf.Variable(tf.random_normal([n_hidden_3], 0, 0.1)),
    'b4': tf.Variable(tf.random_normal([n_hidden_4], 0, 0.1)),
    'out': tf.Variable(tf.random_normal([n_out], 0, 0.1))
}
# Construct model
pred = multilayer_perceptron(x, weights, biases)
# Define loss and optimizer
# cost = tf.reduce_mean(tf.square(pred - y))
cost = tf.reduce_mean(tf.square(tf.transpose(pred) - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Launch the graph
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(total_len / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_x = tf.train.batch(X_train_norm, batch_size)
            batch_y = tf.train.batch(Y_train, batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c, p = sess.run([optimizer, cost, pred], feed_dict={x: batch_x, y: batch_y})
            # Compute average loss
            avg_cost += c / total_batch
            # sample prediction
            label_value = batch_y
            estimate = p
            err = label_value - estimate
            print("num batch:", total_batch)
        # Display logs per epoch step
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
            print("[*]----------------------------")
            for i in range(3):
                print("label value:", label_value[i], "estimated value:", estimate[i])
            print("[*]============================")
    print("Optimization Finished!")
Thank you in advance for your help.

I found out that if I put the result in a session run, it gives me what I needed:
batch_x_norm = tf.Session().run(tf.nn.l2_normalize(batch_x, 0, epsilon=1e-12))
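The underlying problem is most likely that tf.train.batch expects a list of input tensors fed from a queue and itself returns Tensors, which cannot be passed through feed_dict. Below is a minimal sketch, assuming X_train and Y_train are the NumPy arrays loaded above, of normalizing once and then slicing plain NumPy batches instead:

import numpy as np

# Roughly equivalent to tf.nn.l2_normalize(X_train, 0, epsilon=1e-12),
# done once in NumPy so the training loop works with ordinary arrays.
square_sum = np.sum(np.square(X_train), axis=0, keepdims=True)
X_train_norm_np = X_train / np.sqrt(np.maximum(square_sum, 1e-12))

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(total_len / batch_size)
        for i in range(total_batch):
            # NumPy slices can be fed through feed_dict; Tensors cannot.
            batch_x = X_train_norm_np[i * batch_size:(i + 1) * batch_size]
            batch_y = Y_train[i * batch_size:(i + 1) * batch_size]
            _, c, p = sess.run([optimizer, cost, pred],
                               feed_dict={x: batch_x, y: batch_y})
            avg_cost += c / total_batch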

Related

Generate Histogram of TensorFlow Predictions

I wish to log the predictions every N epochs/iterations and generate a histogram for each class. My question is: how do I log the predictions, together with the labels, into an array so I can generate the histograms?
And how do I make sure this happens every N epochs/iterations?
I have edited the post to add the code so you can see what I am talking about. The last two code chunks should somehow be used for what I requested.
Thanks in advance!
import tensorflow as tf
import numpy as np
import math
from random import random
from array import array
from ROOT import TFile, TTree, TH1D, TH2D, TBranch, vector
NUM_EXAMPLES = 1.6e4
TRAIN_SPLIT = .8
MINI_BATCH_SIZE = 1000
#NUM_EPOCHS = 3500
F_PATH = "/home/cauchy/Documents/Machine_Learning"
F_TEST = []
F_TEST += ["d3pd-ckt12rmd2030pp-G_ww_qqqq_%d%d00.root" % (1,2)]
F_TEST += ["d3pd-ckt12rmd2030pp-pyj%d.root" % (4)]
F_TEST += ["d3pd-ckt12rmd2030pp-pyj%d.root" % (5)]
F_TEST += ["d3pd-ckt12rmd2030pp-pyj%d.root" % (6)]
F_TEST += ["d3pd-ckt12rmd2030pp-pyj%d.root" % (7)]
#CALIBRATION_TARGET = "pt" # you can use pt,m,eta
INPUTS = ['m', 'grootau21', 'ysfilt', 'ungrngtrk'] # Removed pt
PT_MIN = 450 #for file 1200
PT_MAX = 730 #for file 1200
F_OUTPUT = "G1200_signaltobackground_from_pt_mass_ysfilt_grootau21_ungrngtrk.root"
N_INPUTS = len(INPUTS)
#============== inputs / target ====================================
jet_features = []
target = []
#=================== branches for training and validation ===========
pt = []
m = []
grootau21 =[]
ysfilt = []
ungrngtrk = []
#weight = []
#================ Prepare the dataset ========================
# I need to change the data to include the multiplication by the weight (constant)
for fi in F_TEST: #Should it include background AND signal files? Yes.
current_e = 0
f = TFile(F_PATH + '/' + fi, 'read')
t = TTree()
f.GetObject("dibjet", t) # Changed from "Tree" to "dibjet"
for entry in t:
current_e += 1
if current_e > NUM_EXAMPLES: # NUM_EXAMPLES should change for the different files
break
if (t.jet1_pt > PT_MAX or t.jet1_pt < PT_MIN):
continue
tmp = []
if 'pt' in INPUTS: tmp += [t.jet1_pt / MAX_PT] #for file 1200
if 'm' in INPUTS: tmp += [t.jet1_m / 500] #for file 1200
if 'grootau21' in INPUTS: tmp += [t.jet1_grootau21]
if 'ysfilt' in INPUTS: tmp += [t.jet1_ysfilt]
if 'ungrngtrk' in INPUTS: tmp += [t.jet1_ungrngtrk / 110] #for file 1200
# We need only look at the class {background, signal} of the entry in terms of target
jet_features += [tmp]
# One-hot encoder
if fi == 'd3pd-ckt12rmd2030pp-G_ww_qqqq_1200.root': target += [[1, 0]]
else: target += [[0, 1]]
pt += [t.jet1_pt]
m += [t.jet1_m]
grootau21 += [t.jet1_grootau21]
ysfilt += [t.jet1_ysfilt]
ungrngtrk += [t.jet1_ungrngtrk]
#weight += [t.weight]
######################################
###### prepare inputs for NN #########
trainset = list(zip(jet_features, target)) # remove ref_target?
np.random.shuffle(trainset)
jet_features, target = zip(*trainset) # What does this line do? Rearranges jetmoments\target...
total_sample = len(target)
train_size = int(total_sample*TRAIN_SPLIT)
all_x = np.float32((jet_features)) # Converts the list type? Why double paranthesis?
all_y = np.float32(target)
train_x = all_x[:train_size] # Create training\testing partitions?
test_x = all_x[train_size:]
train_y = all_y[:train_size]
test_y = all_y[train_size:]
# Define important parameters and variable to work with the tensors
learning_rate = 0.3
training_epochs = 500
cost_history = np.empty(shape=[1], dtype=float)
n_dim = N_INPUTS
#print("n_dim", n_dim)
n_class = 2
model_path = "/home/cauchy/Documents/TensorFlow/Cuts_W" # Forgot what this path is used for
# Define the number of hidden layers and number of neurons for each layer
n_hidden_1 = 10
n_hidden_2 = 10
n_hidden_3 = 10
n_hidden_4 = 10
x = tf.placeholder(tf.float32, [None, n_dim])
W = tf.Variable(tf.zeros([n_dim, n_class]))
b = tf.Variable(tf.zeros([n_class]))
y_ = tf.placeholder(tf.float32, [None, n_class]) # Should we use a vector instead with 1 for signal and 0 for background?
# Define the model
def multilayer_perceptron(x, weights, biases):
# Hidden layer with sigmoid activation
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.sigmoid(layer_1)
# Hidden layer with sigmoid activation
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.sigmoid(layer_2)
# Hidden layer with sigmoid activation
layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
layer_3 = tf.nn.sigmoid(layer_3)
# Hidden layer with ReLU activation
layer_4 = tf.add(tf.matmul(layer_3, weights['h4']), biases['b4'])
layer_4 = tf.nn.relu(layer_4)
# Output layer with linear activation
out_layer = tf.matmul(layer_4, weights['out']) + biases['out']
return out_layer
# Define the weights and the biases for each layer
weights = {
'h1': tf.Variable(tf.truncated_normal([n_dim, n_hidden_1])),
'h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2])),
'h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3])),
'h4': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_4])),
'out': tf.Variable(tf.truncated_normal([n_hidden_4, n_class]))
}
biases = {
'b1': tf.Variable(tf.truncated_normal([n_hidden_1])),
'b2': tf.Variable(tf.truncated_normal([n_hidden_2])),
'b3': tf.Variable(tf.truncated_normal([n_hidden_3])),
'b4': tf.Variable(tf.truncated_normal([n_hidden_4])),
'out': tf.Variable(tf.truncated_normal([n_class]))
}
# Initialize all the variables
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# Call your model defined
y = multilayer_perceptron(x, weights, biases)
# Define the cost function and optimizer
cost_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
training_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)
sess = tf.Session
sess.run(init)
# Calculate the cost and the accuracy for each epoch
mse_history = [] # mean squared error
accuracy_history = []
for epoch in range(training_epochs):
sess.run(training_step, feed_dict={x: train_x, y_: train_y})
cost = sess.run(cost_function, feed_dict={x: train_x, y_: train_y})
cost_history = np.append(cost_history, cost)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# print("Accuracy: ", (sess.run(accuracy, feed_dict={x:test_x, y_:test_y})))
pred_y = sess.run(y, feed_dict={x: test_x})
mse = tf.reduce_mean(tf.square(pred_y - test_y))
mse_ = sess.run(mse)
mse_history.append(mse_)
accuracy = (sess.run(accuracy, feed_dict={x: train_x, y_: train_y}))
accuracy_history.append(accuracy)
print('epoch: ', epoch, ' - ','cost: ', cost, " - MSE: ", mse_, "- Train Accuracy: ", accuracy)
save_path = saver.save(sess, model_path)
print("Model saved in file: %s" % save_path)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("Test Accuracy: ", (sess.run(accuracy, feed_dict={x: test_x, y_: test_y})))
# Print the final mean square error
pred_y = sess.run(y, feed_dict={x: test_x})
mse = tf.reduce_mean(tf.square(pred_y - test_y))
print("MSE: $.4f" % sess.run(mse))
predictions = {
# Generate predictions (for PREDICT and EVAL mode)
"classes": tf.argmax(input=logits, axis=1),
# Add `softmax_tensor` to the graph. It is used for PREDICT and by the
# `logging_hook`.
"probabilities": tf.nn.softmax(logits, name="softmax_tensor")
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Set up logging for predictions
# Log the values in the "Softmax" tensor with label "probabilities"
tensors_to_log = {"probabilities": "softmax_tensor"}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=50)
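One way to do this (a sketch, not a posted answer): keep a Python list of (epoch, predictions, labels) snapshots, append to it every N epochs inside the training loop, and build the per-class histograms afterwards. log_every is a hypothetical name; sess is assumed to be a live tf.Session() with variables initialized, and x, y_, y, training_step, train_x, train_y, test_x, test_y, n_class come from the code above.

import numpy as np
import matplotlib.pyplot as plt

log_every = 50                      # hypothetical: log every N epochs
probs = tf.nn.softmax(y)            # defined once, outside the loop
logged = []                         # list of (epoch, probabilities, labels)

for epoch in range(training_epochs):
    sess.run(training_step, feed_dict={x: train_x, y_: train_y})
    if epoch % log_every == 0:
        pred_test = sess.run(probs, feed_dict={x: test_x})
        logged.append((epoch, pred_test, test_y))

# Per-class histogram of the predicted signal probability at the last snapshot.
epoch_, pred_test, labels = logged[-1]
for cls in range(n_class):
    mask = labels[:, cls] == 1      # entries whose true class is `cls`
    plt.hist(pred_test[mask, 0], bins=50, alpha=0.5, label="true class %d" % cls)
plt.xlabel("predicted signal probability")
plt.legend()
plt.savefig("pred_hist_epoch_%d.png" % epoch_)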

How to solve: "ValueError: Validation size should be between 0 and 0. Received: 5000."?

I am trying to make a character recognition classifier for Bangla alphabets. The images are of size 50x50 and there are 50 classes in total. I am using the CNN model below to train, but I am encountering this error: "ValueError: Validation size should be between 0 and 0. Received: 5000."
How do I resolve this?
MODEL
# Python 3.6.0
# tensorflow 1.1.0
import os
import os.path as path
import tensorflow as tf
from tensorflow.python.tools import freeze_graph
from tensorflow.python.tools import optimize_for_inference_lib
from tensorflow.examples.tutorials.mnist import input_data
MODEL_NAME = 'mnist_convnet'
NUM_STEPS = 3000
BATCH_SIZE = 16
def model_input(input_node_name, keep_prob_node_name):
x = tf.placeholder(tf.float32, shape=[None, 50*50], name=input_node_name)
keep_prob = tf.placeholder(tf.float32, name=keep_prob_node_name)
y_ = tf.placeholder(tf.float32, shape=[None, 50])
return x, keep_prob, y_
def build_model(x, keep_prob, y_, output_node_name):
x_image = tf.reshape(x, [-1, 50, 50, 1])
# 50*50*1
conv1 = tf.layers.conv2d(x_image, 64, 3, 1, 'same', activation=tf.nn.relu)
# 50*50*64
pool1 = tf.layers.max_pooling2d(conv1, 2, 2, 'same')
# 14*14*64
conv2 = tf.layers.conv2d(pool1, 128, 3, 1, 'same', activation=tf.nn.relu)
# 14*14*128
pool2 = tf.layers.max_pooling2d(conv2, 2, 2, 'same')
# 7*7*128
conv3 = tf.layers.conv2d(pool2, 256, 3, 1, 'same', activation=tf.nn.relu)
# 7*7*256
pool3 = tf.layers.max_pooling2d(conv3, 2, 2, 'same')
# 4*4*256
flatten = tf.reshape(pool3, [-1, 4*4*256])
fc = tf.layers.dense(flatten, 1024, activation=tf.nn.relu)
dropout = tf.nn.dropout(fc, keep_prob)
logits = tf.layers.dense(dropout, 50)
outputs = tf.nn.softmax(logits, name=output_node_name)
# loss
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
# train step
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
# accuracy
correct_prediction = tf.equal(tf.argmax(outputs, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar("loss", loss)
tf.summary.scalar("accuracy", accuracy)
merged_summary_op = tf.summary.merge_all()
return train_step, loss, accuracy, merged_summary_op
def train(x, keep_prob, y_, train_step, loss, accuracy,
merged_summary_op, saver):
print("training start...")
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
tf.train.write_graph(sess.graph_def, 'out',
MODEL_NAME + '.pbtxt', True)
# op to write logs to Tensorboard
summary_writer = tf.summary.FileWriter('logs/',
graph=tf.get_default_graph())
for step in range(NUM_STEPS):
batch = mnist.train.next_batch(BATCH_SIZE)
if step % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={
x: batch[0], y_: batch[1], keep_prob: 1.0})
print('step %d, training accuracy %f' % (step, train_accuracy))
_, summary = sess.run([train_step, merged_summary_op],
feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
summary_writer.add_summary(summary, step)
saver.save(sess, 'out/' + MODEL_NAME + '.chkp')
test_accuracy = accuracy.eval(feed_dict={x: mnist.test.images,
y_: mnist.test.labels,
keep_prob: 1.0})
print('test accuracy %g' % test_accuracy)
print("training finished!")
def export_model(input_node_names, output_node_name):
freeze_graph.freeze_graph('out/' + MODEL_NAME + '.pbtxt', None, False,
'out/' + MODEL_NAME + '.chkp', output_node_name, "save/restore_all",
"save/Const:0", 'out/frozen_' + MODEL_NAME + '.pb', True, "")
input_graph_def = tf.GraphDef()
with tf.gfile.Open('out/frozen_' + MODEL_NAME + '.pb', "rb") as f:
input_graph_def.ParseFromString(f.read())
output_graph_def = optimize_for_inference_lib.optimize_for_inference(
input_graph_def, input_node_names, [output_node_name],
tf.float32.as_datatype_enum)
with tf.gfile.FastGFile('out/opt_' + MODEL_NAME + '.pb', "wb") as f:
f.write(output_graph_def.SerializeToString())
print("graph saved!")
def main():
if not path.exists('out'):
os.mkdir('out')
input_node_name = 'input'
keep_prob_node_name = 'keep_prob'
output_node_name = 'output'
x, keep_prob, y_ = model_input(input_node_name, keep_prob_node_name)
train_step, loss, accuracy, merged_summary_op = build_model(x, keep_prob, y_, output_node_name)
saver = tf.train.Saver()
train(x, keep_prob, y_, train_step, loss, accuracy, merged_summary_op, saver)
export_model([input_node_name, keep_prob_node_name], output_node_name)
if __name__ == '__main__':
main()
ERROR
ValueError Traceback (most recent call last)
<ipython-input-2-2015e0ea466d> in <module>()
136
137 if __name__ == '__main__':
--> 138 main()
<ipython-input-2-2015e0ea466d> in main()
131 saver = tf.train.Saver()
132
--> 133 train(x, keep_prob, y_, train_step, loss, accuracy, merged_summary_op, saver)
134
135 export_model([input_node_name, keep_prob_node_name], output_node_name)
<ipython-input-2-2015e0ea466d> in train(x, keep_prob, y_, train_step, loss, accuracy, merged_summary_op, saver)
67 print("training start...")
68
---> 69 mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
70
71 init_op = tf.global_variables_initializer()
/anaconda3/envs/nlpTFnltk/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py in read_data_sets(train_dir, fake_data, one_hot, dtype, reshape, validation_size)
247 raise ValueError(
248 'Validation size should be between 0 and {}. Received: {}.'
--> 249 .format(len(train_images), validation_size))
250
251 validation_images = train_images[:validation_size]
ValueError: Validation size should be between 0 and 0. Received: 5000.
You're using the MNIST tutorial code, which calls read_data_sets from tensorflow/contrib/learn/python/learn/datasets/mnist.py (see the traceback); note that the validation_size of 5000 comes from that function's default parameters. It expects to get data from the following files:
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
Normally it would try to download those files if it doesn't find them, but the fact that you're getting a validation_size of 0 suggests it isn't doing so. This wouldn't help you anyway, since you don't want to use the MNIST data.
Even if you rename your train and test files to match the filenames above, your code won't work, because the MNIST code also calls extract_labels, which has a default parameter num_classes=10, while you need 50. Your best bet is probably to get rid of the MNIST import completely and read up on how to set up your own input pipeline; it's not difficult compared to what you've done already.
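As a rough sketch of what that could look like (hypothetical names; it assumes you have already loaded your Bangla character images into NumPy arrays, train_images of shape (N, 50*50) and one-hot train_labels of shape (N, 50)):

import numpy as np

def next_batch(images, labels, batch_size):
    # Random mini-batch from in-memory NumPy arrays, replacing
    # mnist.train.next_batch in the training loop.
    idx = np.random.choice(len(images), batch_size, replace=False)
    return images[idx], labels[idx]

# inside train():
#     batch_xs, batch_ys = next_batch(train_images, train_labels, BATCH_SIZE)
#     sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})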

tensorflow multi-gpu mnist example, loss does not decrease

I'm trying to write my own MNIST example that can use both GPUs of one machine.
It is a simple multi-layer perceptron.
Here is my code; you can run it directly.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
import tensorflow as tf
learning_rate = 0.001
training_steps = 100000
batch_size = 100
display_step = 100
n_hidden_1 = 256
n_hidden_2 = 256
n_input = 784
n_classes = 10
def _variable_on_cpu(name, shape, initializer):
with tf.device('/cpu:0'):
dtype = tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def build_model():
def multilayer_perceptron(x, weights, biases):
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
return out_layer
with tf.variable_scope('aaa'):
weights = {
'h1': _variable_on_cpu('h1',[n_input, n_hidden_1],tf.constant_initializer(0.0)),
'h2': _variable_on_cpu('h2',[n_hidden_1, n_hidden_2],tf.constant_initializer(0.0)),
'out': _variable_on_cpu('out_w',[n_hidden_2, n_classes],tf.constant_initializer(0.0))
}
biases = {
'b1': _variable_on_cpu('b1',[n_hidden_1],tf.constant_initializer(0.0)),
'b2': _variable_on_cpu('b2',[n_hidden_2],tf.constant_initializer(0.0)),
'out': _variable_on_cpu('out_b',[n_classes],tf.constant_initializer(0.0))
}
pred = multilayer_perceptron(x, weights, biases)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
return cost
def average_gradients(tower_grads):
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for g,_ in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
with tf.Graph().as_default(), tf.device('/cpu:0'):
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
tower_grads = []
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
with tf.variable_scope(tf.get_variable_scope()):
for i in xrange(2):
with tf.device('/gpu:%d' % i):
cost = build_model()
tf.get_variable_scope().reuse_variables()
grads = optimizer.compute_gradients(cost)
tower_grads.append(grads)
grads = average_gradients(tower_grads)
apply_gradient_op = optimizer.apply_gradients(grads)
train_op = apply_gradient_op
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for step in range(training_steps):
image_batch, label_batch = mnist.train.next_batch(batch_size)
_, cost_print = sess.run([train_op, cost],
{x:image_batch,
y:label_batch})
if step % display_step == 0:
print("step=%04d" % (step+1)+ " cost=" + str(cost_print))
print("Optimization Finished!")
sess.close()
The printed output looks like this:
step=0001 cost=2.30258
step=0101 cost=2.30246
step=0201 cost=2.30128
step=0301 cost=2.30376
step=0401 cost=2.29817
step=0501 cost=2.2992
step=0601 cost=2.3104
step=0701 cost=2.29995
step=0801 cost=2.29802
step=0901 cost=2.30524
step=1001 cost=2.29673
step=1101 cost=2.30016
step=1201 cost=2.31057
step=1301 cost=2.29815
step=1401 cost=2.29669
step=1501 cost=2.30345
step=1601 cost=2.29811
step=1701 cost=2.30867
step=1801 cost=2.30757
step=1901 cost=2.29716
step=2001 cost=2.30394
The loss doesn't decrease, and I don't know how to fix it.
By the way, GPU utilization is only about 26% on each GPU. How can I increase that as well?
The problem is that I should have used tf.constant_initializer(0.1) for the weights instead of tf.constant_initializer(0.0): with all weights initialized to zero, every layer outputs zero, so essentially no gradient reaches the hidden-layer weights and the loss stays stuck near ln(10) ≈ 2.3026.
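Applied to the code above, a sketch of the changed initialization; a random initializer such as tf.truncated_normal_initializer is the more usual choice, since a constant still leaves every hidden unit in a layer identical, and the biases can stay at zero:

weights = {
    'h1': _variable_on_cpu('h1', [n_input, n_hidden_1],
                           tf.truncated_normal_initializer(stddev=0.1)),
    'h2': _variable_on_cpu('h2', [n_hidden_1, n_hidden_2],
                           tf.truncated_normal_initializer(stddev=0.1)),
    'out': _variable_on_cpu('out_w', [n_hidden_2, n_classes],
                            tf.truncated_normal_initializer(stddev=0.1))
}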

Why does my TensorFlow network predict different results each run?

I have a TensorFlow network that currently predicts a value from 1 to 5 given an input of 14 numbers. Right now it is set up with the input test_X = np.array([1,1,1,1,1,1,1,1,1,1,1,1,1,1]) and, despite seeding it, the network returns a different classification every run. I am also unsure of how to obtain the confidence of the prediction (I'm assuming this should be a float value between 0 and 1). My code can be found below:
import tensorflow as tf
import pandas as pd
import numpy as np
df = pd.read_csv('/Users/zach/desktop/export.csv')
data_ = df.drop(['ID','Species'], axis=1)
n_classes = data_["Phylum"].nunique()
dim = 14
learning_rate = 0.0001
display_step = 10
n_hidden_1 = 2000
n_hidden_2 = 1500
n_hidden_3 = 1000
n_hidden_4 = 500
X = tf.placeholder(tf.float32, [None, dim])
train_set = data_.sample(frac=0.7)
test_set = data_.loc[~data_.index.isin(train_set.index)]
train_size = train_set.size
inputY_test = pd.get_dummies(test_set['Phylum'])
inputY_train = pd.get_dummies(train_set['Phylum'])
train_X = train_set.iloc[:train_size, 5:].as_matrix()
train_X = pd.DataFrame(data=train_X)
train_X = train_X.fillna(value=0).as_matrix()
train_Y = inputY_train.as_matrix()
train_Y = pd.DataFrame(data=train_Y)
train_Y = train_Y.fillna(value=0).as_matrix()
#test_X = test_set.iloc[:, 5:].as_matrix()
#test_X = pd.DataFrame(data=test_X)
#test_X = test_X.fillna(value=0).as_matrix()
test_X = np.array([1,1,1,1,1,1,1,1,1,1,1,1,1,1])
test_X = pd.DataFrame(data=test_X)
test_X = test_X.fillna(value=0).as_matrix()
test_X.resize(1,14)
test_Y = inputY_test.as_matrix()
test_Y = pd.DataFrame(data=test_Y)
test_Y = test_Y.fillna(value=0).as_matrix()
n_samples = train_Y.size
total_len = train_X.shape[0]
n_input = train_X.shape[1]
batch_size = 5
W = tf.Variable(tf.zeros([dim, n_classes]))
b = tf.Variable(tf.zeros([n_classes]))
def multilayer_perceptron(x, weights, biases):
# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with RELU activation
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
# Hidden layer with RELU activation
layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
layer_3 = tf.nn.relu(layer_3)
# Hidden layer with RELU activation
layer_4 = tf.add(tf.matmul(layer_3, weights['h4']), biases['b4'])
layer_4 = tf.nn.relu(layer_4)
# Output layer with linear activation
out_layer = tf.matmul(layer_4, weights['out']) + biases['out']
return out_layer
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], 0, 0.1)),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], 0, 0.1)),
'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3], 0, 0.1)),
'h4': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4], 0, 0.1)),
'out': tf.Variable(tf.random_normal([n_hidden_4, n_classes], 0, 0.1))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1], 0, 0.1)),
'b2': tf.Variable(tf.random_normal([n_hidden_2], 0, 0.1)),
'b3': tf.Variable(tf.random_normal([n_hidden_3], 0, 0.1)),
'b4': tf.Variable(tf.random_normal([n_hidden_4], 0, 0.1)),
'out': tf.Variable(tf.random_normal([n_classes], 0, 0.1))
}
# Construct model
pred = multilayer_perceptron(X, weights, biases)
pred1 = tf.argmax(pred,1)
y = tf.placeholder(tf.float32, [None, n_classes])
#cost = -tf.reduce_sum(y*tf.log(tf.clip_by_value(pred,1e-10,1.0)))
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
hm_epochs = 20
tf.set_random_seed(1234)
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
for epoch in range(hm_epochs):
avg_cost = 0
total_batch = int(total_len/batch_size)
for i in range(total_batch-1):
batch_x = train_X[i*batch_size:(i+1)*batch_size]
batch_y = train_Y[i*batch_size:(i+1)*batch_size]
_, c, p = sess.run([optimizer, cost, pred], feed_dict={X: batch_x,
y: batch_y})
avg_cost += c / total_batch
label_value = batch_y
estimate = p
err = label_value-estimate
if epoch % display_step == 0:
print "Epoch:", '%04d' % (epoch+1), "cost=", \
"{:.9f}".format(avg_cost)
print "[*]----------------------------------------------------"
for i in xrange(3):
print "label value:", label_value[i], \
"estimated value:", estimate[i]
print "======================================================="
print "Optimization Finished!"
#Test model
#correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
#Calculate accuracy
#accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
#print ("Accuracy:", accuracy.eval({X: test_X, y: test_Y}))
feed_dict = {X: test_X}
classification = pred.eval(feed_dict)
print "Network Prediction:", classification
print "Classification: ", np.argmax(classification) + 1
The network returns something like this, with the "Network Prediction" and "Classification" values differing significantly from run to run:
Epoch: 0001 cost= 18.784451194
[*]----------------------------------------------------
label value: [1 0 0 0 0] estimated value: [ 7.22390413 14.61596966 -0.08044712 41.44412231 -2.90992975]
label value: [1 0 0 0 0] estimated value: [-18.6686306 51.33540726 65.4961853 67.72460938 -3.51442194]
label value: [0 0 0 1 0] estimated value: [-32.95540619 22.96526909 -18.13385201 70.66561127 -28.7767086 ]
=======================================================
Epoch: 0011 cost= 0.798838628
[*]----------------------------------------------------
label value: [1 0 0 0 0] estimated value: [ 11.22842598 -3.67433786 -14.22806835 1.45541549 -7.12817001]
label value: [1 0 0 0 0] estimated value: [ 24.90147591 16.51822853 23.89280701 24.66276169 21.37914276]
label value: [0 0 0 1 0] estimated value: [ 0.13968639 7.37386036 -21.62825203 21.48152542 -23.18427277]
=======================================================
Optimization Finished!
Network Prediction: [[ 3.85959864 0.32653514 0.26948914 -0.70163095 -2.03481865]]
Classification: 1
How can I stabilize the predictions of the network so it predicts the same outcome each time the input remains constant? Also, how would I display the confidence of this prediction?
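No answer was posted here, but two things in the code above are worth noting; the sketch below rests on those observations and is not a confirmed fix. tf.set_random_seed(1234) is called after the weight Variables are created, so their random_normal initializers are not covered by the seed, and df.sample(frac=0.7) is also unseeded, so the train/test split changes each run. A confidence value can be read off a softmax of the logits:

tf.set_random_seed(1234)   # seed before the weight/bias Variables are built
np.random.seed(1234)       # df.sample uses NumPy's global random state

# ... define weights, biases, pred exactly as above ...

probs = tf.nn.softmax(pred)          # each row sums to 1

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    # ... training loop as above ...
    p = sess.run(probs, feed_dict={X: test_X})
    print "Classification:", np.argmax(p) + 1
    print "Confidence:", np.max(p)   # float in [0, 1]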

TensorFlow: the same training accuracy continues

I'm stuck on a CNN model in TensorFlow.
My code is below.
Libraries
# -*- coding: utf-8 -*-
import tensorflow as tf
import time
import json
import numpy as np
import matplotlib.pyplot as plt
import random
import multiprocessing as mp
import glob
import os
Model
def inference(images_placeholder, keep_prob):
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
# convolution
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
# X2 pooling
def max_pool_2x128(x):
return tf.nn.max_pool(x, ksize=[1, 2, 1, 1],strides=[1, 2, 1, 1], padding='VALID')
# X4 pooling
def max_pool_4x128(x):
return tf.nn.max_pool(x, ksize=[1, 4, 1, 1],strides=[1, 4, 1, 1], padding='VALID')
x_image = tf.reshape(images_placeholder, [-1,599,1,128])
#1st conv
with tf.name_scope('conv1') as scope:
W_conv1 = weight_variable([4, 1, 128, 256])
b_conv1 = bias_variable([256])
print "image変形後のshape"
print tf.Tensor.get_shape(x_image)
print "conv1の形"
print tf.Tensor.get_shape(conv2d(x_image, W_conv1))
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
#1st pooling X4
with tf.name_scope('pool1') as scope:
h_pool1 = max_pool_4x128(h_conv1)
print "h_pool1の形"
print tf.Tensor.get_shape(h_pool1)
#2nd conv
with tf.name_scope('conv2') as scope:
W_conv2 = weight_variable([4, 1, 256, 256])
b_conv2 = bias_variable([256])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
#2nd pooling X2
with tf.name_scope('pool2') as scope:
h_pool2 = max_pool_2x128(h_conv2)
print "h_pool2の形"
print tf.Tensor.get_shape(h_pool2)
#3rd conv
with tf.name_scope('conv3') as scope:
W_conv3 = weight_variable([4, 1, 256, 512])
b_conv3 = bias_variable([512])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
#3rd pooling X2
with tf.name_scope('pool3') as scope:
h_pool3 = max_pool_2x128(h_conv3)
print "h_pool3の形"
print tf.Tensor.get_shape(h_pool3)
#flatten + 1st fully connected
with tf.name_scope('fc1') as scope:
W_fc1 = weight_variable([37 * 1 * 512, 2048])
b_fc1 = bias_variable([2048])
h_pool3_flat = tf.reshape(h_pool3, [-1, 37 * 1 * 512])
h_fc1 = tf.nn.relu(tf.matmul(h_pool3_flat, W_fc1) + b_fc1)
# dropout layer
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
#2nd fully connected
with tf.name_scope('fc2') as scope:
W_fc2 = weight_variable([2048, NUM_CLASSES])
b_fc2 = bias_variable([NUM_CLASSES])
#softmax output
with tf.name_scope('softmax') as scope:
y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
return y_conv
Loss
def loss(logits, labels):
# cross entropy
cross_entropy = -tf.reduce_sum(labels*tf.log(tf.clip_by_value(logits,1e-10,1.0)))
# TensorBoard
tf.scalar_summary("cross_entropy", cross_entropy)
return cross_entropy
Training
def training(loss, learning_rate):
train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
return train_step
Accuracy
def accuracy(logits, labels):
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
tf.scalar_summary("accuracy", accuracy)
return accuracy
Main
if __name__ == '__main__':
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('train_dir', '/tmp/data', 'Directory to put the training data.')
flags.DEFINE_integer('max_steps', , 'Number of steps to run trainer.')
flags.DEFINE_integer('batch_size', 10, 'Batch size'
'Must divide evenly into the dataset sizes.')
flags.DEFINE_float('learning_rate', 1e-4, 'Initial learning rate.')
#num output
NUM_CLASSES = 5
#num frame
IMAGE_SIZE = 599
#tensor shape
IMAGE_PIXELS = IMAGE_SIZE*1*128
##################
#modify the data #
##################
#number of training data
train_num = 70
#loading data limit
data_limit = 100
flatten_data = []
flatten_label = []
# format the data
filenames = glob.glob(os.path.join('/Users/kosukefukui/Qosmo/WASABEAT/song_features/*.json'))
filenames = filenames[0:data_limit]
print "----loading data---"
for file_path in filenames:
data = json.load(open(file_path))
data = np.array(data)
for_flat = np.array(data)
assert for_flat.flatten().shape == (IMAGE_PIXELS,)
flatten_data.append(for_flat.flatten().tolist())
# format the labels
f2 = open("id_information.txt")
print "---loading labels----"
for line in f2:
line = line.rstrip()
l = line.split(",")
tmp = np.zeros(NUM_CLASSES)
tmp[int(l[4])] = 1
flatten_label.append(tmp)
flatten_label = flatten_label[0:data_limit]
print "データ数 %s" % len(flatten_data)
print "ラベルデータ数 %s" % len(flatten_label)
#train data
train_image = np.asarray(flatten_data[0:train_num], dtype=np.float32)
train_label = np.asarray(flatten_label[0:train_num],dtype=np.float32)
print "訓練データ数 %s" % len(train_image)
#test data
test_image = np.asarray(flatten_data[train_num:data_limit], dtype=np.float32)
test_label = np.asarray(flatten_label[train_num:data_limit],dtype=np.float32)
print "テストデータ数 %s" % len(test_image)
print "599×128 = "
print len(train_image[0])
f2.close()
if 1==1:
# Image Tensor
images_placeholder = tf.placeholder("float", shape=(None, IMAGE_PIXELS))
# Label Tensor
labels_placeholder = tf.placeholder("float", shape=(None, NUM_CLASSES))
# dropout Tensor
keep_prob = tf.placeholder("float")
# construct model
logits = inference(images_placeholder, keep_prob)
# calculate loss
loss_value = loss(logits, labels_placeholder)
# training
train_op = training(loss_value, FLAGS.learning_rate)
# accuracy
acc = accuracy(logits, labels_placeholder)
saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.initialize_all_variables())
# for TensorBoard
summary_op = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph_def)
# Training
for step in range(FLAGS.max_steps):
for i in range(len(train_image)/FLAGS.batch_size):
# train for batch_size
batch = FLAGS.batch_size*i
sess.run(train_op, feed_dict={
images_placeholder: train_image[batch:batch+FLAGS.batch_size],
labels_placeholder: train_label[batch:batch+FLAGS.batch_size],
keep_prob: 0.5})
# calculate accuracy at each step
train_accuracy = sess.run(acc, feed_dict={
images_placeholder: train_image,
labels_placeholder: train_label,
keep_prob: 1.0})
print "step %d, training accuracy %g"%(step, train_accuracy)
# add value for Tensorboard at each step
summary_str = sess.run(summary_op, feed_dict={
images_placeholder: train_image,
labels_placeholder: train_label,
keep_prob:1.0})
summary_writer.add_summary(summary_str, step)
# show accuracy for test data
print "test accuracy %g"%sess.run(acc, feed_dict={
images_placeholder: test_image,
labels_placeholder: test_label,
keep_prob: 1.0})
# save the last model
save_path = saver.save(sess, "model.ckpt")
However, I get the same training accuracy at every step, as shown below. How can I fix this problem?
step 0, training accuracy 0.142857
step 1, training accuracy 0.142857
step 2, training accuracy 0.142857
step 3, training accuracy 0.142857
step 4, training accuracy 0.142857
step 5, training accuracy 0.142857
step 6, training accuracy 0.142857
step 7, training accuracy 0.142857
step 8, training accuracy 0.142857
step 9, training accuracy 0.142857
test accuracy 0.133333
I referred to the following model, and my TensorBoard graph is shown below.
Could it be that you are not minimizing the right tensor?
You are minimizing cross_entropy (a sum over the batch), but you should be minimizing its mean, cross_entropy_mean; in your code a reduce_mean only appears in accuracy.
Basically, use the following logic:
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
logits, ground_truth_placeholder)
cross_entropy_mean = tf.reduce_mean(cross_entropy)
train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(
cross_entropy_mean)
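Applied to the loss() function in the question, that would look roughly like the sketch below; note that inference() would then have to return the raw pre-softmax logits (tf.matmul(h_fc1_drop, W_fc2) + b_fc2), because softmax_cross_entropy_with_logits applies the softmax itself.

def loss(logits, labels):
    # `logits` are the raw outputs of the last fully connected layer.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, labels)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    tf.scalar_summary("cross_entropy", cross_entropy_mean)
    return cross_entropy_mean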
