tensorflow logits and labels must be same size - python

I'm quite new to TensorFlow and Python, and I'm currently trying to modify the MNIST for Experts tutorial for 240x320x3 images. I have 2 .py scripts:
tfrecord_reader.py
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

data_path = 'train.tfrecords'  # path to the TFRecords file

def read_data():
    with tf.Session() as sess:
        feature = {'train/image': tf.FixedLenFeature([], tf.string),
                   'train/label': tf.FixedLenFeature([], tf.int64)}
        # Create a list of filenames and pass it to a queue
        filename_queue = tf.train.string_input_producer([data_path], num_epochs=1)
        # Define a reader and read the next record
        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(filename_queue)
        # Decode the record read by the reader
        features = tf.parse_single_example(serialized_example, features=feature)
        # Convert the image data from string back to the numbers
        image = tf.decode_raw(features['train/image'], tf.float32)
        # Cast label data into int32
        label = tf.cast(features['train/label'], tf.int32)
        # Reshape image data into the original shape
        image = tf.reshape(image, [240, 320, 3])
    return image, label

def next_batch(image, label, batchSize):
    imageBatch, labelBatch = tf.train.shuffle_batch([image, label], batch_size=batchSize, capacity=30,
                                                    num_threads=1, min_after_dequeue=10)
    return imageBatch, labelBatch
train.py
import tensorflow as tf
from random import shuffle
import glob
import sys
#import cv2
from tfrecord_reader import read_data, next_batch
import argparse # For passing arguments
import numpy as np
import math
import time
IMAGE_WIDTH = 240
IMAGE_HEIGHT = 320
IMAGE_DEPTH = 3
IMAGE_SIZE = 240*320*3
NUM_CLASSES = 5
BATCH_SIZE = 50
# Creates a weight tensor sized by shape
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

# Creates a bias tensor sized by shape
def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
def main(argv):
    # Perform training
    x = tf.placeholder(tf.float32, [None, IMAGE_SIZE])  # 240*320*3 = 230400
    W = tf.Variable(tf.zeros([IMAGE_SIZE, NUM_CLASSES]))
    b = tf.Variable(tf.zeros([NUM_CLASSES]))
    y = tf.matmul(x, W) + b
    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, NUM_CLASSES])  # Desired output
    # First convolutional layer
    W_conv1 = weight_variable([5, 5, IMAGE_DEPTH, 32])
    b_conv1 = bias_variable([32])
    x_image = tf.reshape(x, [-1, IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_DEPTH])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    # Second convolutional layer
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    # First fully connected layer; two rounds of 2x2 pooling bring 240x320 down to 60x80
    W_fc1 = weight_variable([60 * 80 * 64, 1024])
    b_fc1 = bias_variable([1024])
    # Flatten the layer
    h_pool2_flat = tf.reshape(h_pool2, [-1, 60 * 80 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    # Dropout layer
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    # Second fully connected layer
    W_fc2 = weight_variable([1024, NUM_CLASSES])
    b_fc2 = bias_variable([NUM_CLASSES])
    # Output layer
    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    # print(y_conv.shape)
    # print(y_conv.get_shape)
    # Get the loss
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
    # Minimize the loss
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    # Read all data from the tfrecord file
    imageList, labelList = read_data()
    imageBatch, labelBatch = next_batch(imageList, labelList, BATCH_SIZE)
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        train_images, train_labels = sess.run([imageBatch, labelBatch])
        train_images = np.reshape(train_images, (-1, IMAGE_SIZE))
        train_labels = np.reshape(train_labels, (-1, NUM_CLASSES))
        sess.run(train_step, feed_dict={x: train_images, y_: train_labels, keep_prob: 1.0})
        coord.request_stop()
        coord.join(threads)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
When I run the program, I'm getting
InvalidArgumentError (see above for traceback): logits and labels must be same size: logits_size=[50,5] labels_size=[10,5]
[[Node: SoftmaxCrossEntropyWithLogits = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"](Reshape_2, Reshape_3)]]
I've spent several hours searching for this problem, but I can't see why the logits are not matching the label size. If I change the batch size to 10, the labels size becomes [2, 5], as if it's always being divided by 5. Can someone help me out here?

Most likely your labels are single integer values rather than one-hot vectors, so your labelBatch is a vector of size [50] containing single numbers like "1" or "4". Now, when you reshape them using train_labels = np.reshape(train_labels, (-1, NUM_CLASSES))
you're changing the shape to [10, 5].
The tf.nn.softmax_cross_entropy_with_logits function expects labels to be "one-hot" encodings of the labels (this means that a label of 3 translates into a vector of size 5 with a 1 in position 3 and zeros elsewhere). You can achieve this using the tf.one_hot op, but an easier way is to use tf.nn.sparse_softmax_cross_entropy_with_logits instead, which is designed to work with these single-valued labels. To achieve this, you'll need to change these lines:
y_ = tf.placeholder(tf.int32, [None])  # Desired output: integer class labels (the sparse op requires an integer dtype)
cross_entropy = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
And get rid of the train_labels = np.reshape(train_labels, (-1, NUM_CLASSES)) line.
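If you'd rather keep the dense tf.nn.softmax_cross_entropy_with_logits, here's a minimal sketch of the one-hot route instead (illustrative only; labels_int is a hypothetical placeholder for the integer labels):
labels_int = tf.placeholder(tf.int32, [None])   # e.g. [3, 0, 4, ...]
y_ = tf.one_hot(labels_int, depth=NUM_CLASSES)  # shape [batch_size, NUM_CLASSES]
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))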
(By the way, you don't actually need to use placeholders when reading data in this way - you can just directly use the output tensors.)

Related

Tensorflow model saving and loading

How can I save a TensorFlow model together with its graph, the way we do in Keras?
Instead of defining the whole graph again in the prediction file, can we save the whole model (weights and graph) and import it later?
In Keras:
checkpoint = ModelCheckpoint('RightLane-{epoch:03d}.h5', monitor='val_loss', verbose=0, save_best_only=False, mode='auto')
will give one h5 file that we can use for prediction:
model = load_model("RightLane-030.h5")
How do I do the same in native TensorFlow?
Method 1: Freeze graph and weights in one file (retraining might not be possible)
This option shows how to save the graph and weights in one file. Its intended use case is for deploying/sharing a model after it has been trained. To this end, we will use the protobuf (pb) format.
Given a tensorflow session (and graph), you can generate a protobuf with
# freeze variables
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def =sess.graph.as_graph_def(),
output_node_names=['myMode/conv/output'])
# write protobuf to disk
with tf.gfile.GFile('graph.pb', "wb") as f:
f.write(output_graph_def.SerializeToString())
where output_node_names expects a list of name strings for the result nodes of the graph (cf. tensorflow documentation).
Then, you can load the protobuf and get the graph with its weights to perform forward passes easily, e.g. with a small helper:
def load_graph(path_to_pb):
    with tf.gfile.GFile(path_to_pb, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name='')
    return graph
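Once loaded, you can fetch tensors by name and run a forward pass. A minimal sketch, assuming the helper above and hypothetical tensor names (inspect graph.get_operations() to find the real names in your graph; batch_of_inputs stands in for your actual data):
graph = load_graph('graph.pb')
input_tensor = graph.get_tensor_by_name('input:0')                # hypothetical input name
output_tensor = graph.get_tensor_by_name('myMode/conv/output:0')  # matches output_node_names above
with tf.Session(graph=graph) as sess:
    predictions = sess.run(output_tensor, feed_dict={input_tensor: batch_of_inputs})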
Method 2: Restoring metagraph and checkpoint (easy retraining)
If you want to be able to continue training the model, you might need to restore the full graph, i.e. the weights, but also the loss function, some gradient information (for the Adam optimiser, for instance), etc.
You need the meta and the checkpoint files generated by tensorflow when you use
saver = tf.train.Saver(...variables...)
saver.save(sess, 'my-model')
This will generate two files, my-model and my-model.meta.
From these two files, you can load the graph with:
new_saver = tf.train.import_meta_graph('my-model.meta')
new_saver.restore(sess, 'my-model')
For more details, you can look at the official documentation.
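As a rough sketch of what resuming looks like afterwards (the tensor and op names here are hypothetical; they must match whatever names or collections the original graph defined):
with tf.Session() as sess:
    new_saver = tf.train.import_meta_graph('my-model.meta')
    new_saver.restore(sess, 'my-model')
    graph = tf.get_default_graph()
    x = graph.get_tensor_by_name('x:0')            # hypothetical input name
    y_ = graph.get_tensor_by_name('labels:0')      # hypothetical label name
    train_op = graph.get_operation_by_name('adam_optimizer/Adam')  # hypothetical op name
    sess.run(train_op, feed_dict={x: batch_images, y_: batch_labels})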
This is a complete example based on the tensorflow github. I copied it from another reply I posted elsewhere on SO. There are probably other/better ways to do this.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import pandas as pd
import argparse
import sys
import tempfile

from tensorflow.examples.tutorials.mnist import input_data

import tensorflow as tf

FLAGS = None


def deepnn(x):
    """deepnn builds the graph for a deep net for classifying digits.

    Args:
        x: an input tensor with the dimensions (N_examples, 784), where 784 is the
        number of pixels in a standard MNIST image.

    Returns:
        A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values
        equal to the logits of classifying the digit into one of 10 classes (the
        digits 0-9). keep_prob is a scalar placeholder for the probability of
        dropout.
    """
    # Reshape to use within a convolutional neural net.
    # Last dimension is for "features" - there is only one here, since images are
    # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
    with tf.name_scope('reshape'):
        x_image = tf.reshape(x, [-1, 28, 28, 1])

    # First convolutional layer - maps one grayscale image to 32 feature maps.
    with tf.name_scope('conv1'):
        W_conv1 = weight_variable([5, 5, 1, 32])
        b_conv1 = bias_variable([32])
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)

    # Pooling layer - downsamples by 2X.
    with tf.name_scope('pool1'):
        h_pool1 = max_pool_2x2(h_conv1)

    # Second convolutional layer -- maps 32 feature maps to 64.
    with tf.name_scope('conv2'):
        W_conv2 = weight_variable([5, 5, 32, 64])
        b_conv2 = bias_variable([64])
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)

    # Second pooling layer.
    with tf.name_scope('pool2'):
        h_pool2 = max_pool_2x2(h_conv2)

    # Fully connected layer 1 -- after 2 rounds of downsampling, our 28x28 image
    # is down to 7x7x64 feature maps -- maps this to 1024 features.
    with tf.name_scope('fc1'):
        W_fc1 = weight_variable([7 * 7 * 64, 1024])
        b_fc1 = bias_variable([1024])

        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    # Dropout - controls the complexity of the model, prevents co-adaptation of
    # features.
    keep_prob = tf.placeholder_with_default(1.0, ())
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # Map the 1024 features to 10 classes, one for each digit
    with tf.name_scope('fc2'):
        W_fc2 = weight_variable([1024, 10])
        b_fc2 = bias_variable([10])

        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    return y_conv, keep_prob


def conv2d(x, W):
    """conv2d returns a 2d convolution layer with full stride."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    """max_pool_2x2 downsamples a feature map by 2X."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')


def weight_variable(shape):
    """weight_variable generates a weight variable of a given shape."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    """bias_variable generates a bias variable of a given shape."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
# Import data
mnist = input_data.read_data_sets("/tmp")

# Create the model
x = tf.placeholder(tf.float32, [None, 784], name="x")
# Define loss and optimizer
y_ = tf.placeholder(tf.int64, [None])
# Build the graph for the deep net
y_conv, keep_prob = deepnn(x)

with tf.name_scope('loss'):
    cross_entropy = tf.losses.sparse_softmax_cross_entropy(
        labels=y_, logits=y_conv)
    cross_entropy = tf.reduce_mean(cross_entropy)

with tf.name_scope('adam_optimizer'):
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

with tf.name_scope('accuracy'):
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), y_)
    correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)

graph_location = tempfile.mkdtemp()
print('Saving graph to: %s' % graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(1000):
        batch = mnist.train.next_batch(50)
        if i % 100 == 0:
            train_accuracy = accuracy.eval(feed_dict={
                x: batch[0], y_: batch[1], keep_prob: 1.0})
            print('step %d, training accuracy %g' % (i, train_accuracy))
        train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

    print('test accuracy %g' % accuracy.eval(feed_dict={
        x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
    simg = np.reshape(mnist.test.images[0], (-1, 784))
    output = sess.run(y_conv, feed_dict={x: simg, keep_prob: 1.0})
    print(tf.argmax(output, 1).eval())
    saver = tf.train.Saver()
    saver.save(sess, "/tmp/network")
Restore from a new python run:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import argparse
import sys
import tempfile
from tensorflow.examples.tutorials.mnist import input_data
sess = tf.Session()
saver = tf.train.import_meta_graph('/tmp/network.meta')
saver.restore(sess,tf.train.latest_checkpoint('/tmp'))
graph = tf.get_default_graph()
mnist = input_data.read_data_sets("/tmp")
simg = np.reshape(mnist.test.images[0],(-1,784))
op_to_restore = graph.get_tensor_by_name("fc2/MatMul:0")
x = graph.get_tensor_by_name("x:0")
output = sess.run(op_to_restore,feed_dict= {x:simg})
print("Result = ", np.argmax(output))

Tensorflow MNIST classification on a trained model

Here's my modified version of Tensorflow MNIST example:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib.learn.python.learn.datasets.mnist import DataSet
import tensorflow as tf
import numpy as np
FLAGS = None
def deepnn(x, numclasses):
    """deepnn builds the graph for a deep net for classifying digits.

    Args:
        x: an input tensor with the dimensions (N_examples, 784), where 784 is the
        number of pixels in a standard MNIST image.

    Returns:
        A tuple (y, keep_prob). y is a tensor of shape (N_examples, numclasses),
        with values equal to the logits of classifying the input into one of
        numclasses classes. keep_prob is a scalar placeholder for the probability
        of dropout.
    """
    # Reshape to use within a convolutional neural net.
    # Last dimension is for "features" - there is only one here, since images are
    # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
    with tf.name_scope('reshape'):
        x_image = tf.reshape(x, [-1, 28, 28, 1])
    # First convolutional layer - maps one grayscale image to 32 feature maps.
    with tf.name_scope('conv1'):
        W_conv1 = weight_variable([5, 5, 1, 32])
        b_conv1 = bias_variable([32])
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    # Pooling layer - downsamples by 2X.
    with tf.name_scope('pool1'):
        h_pool1 = max_pool_2x2(h_conv1)
    # Second convolutional layer -- maps 32 feature maps to 64.
    with tf.name_scope('conv2'):
        W_conv2 = weight_variable([5, 5, 32, 64])
        b_conv2 = bias_variable([64])
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    # Second pooling layer.
    with tf.name_scope('pool2'):
        h_pool2 = max_pool_2x2(h_conv2)
    # Fully connected layer 1 -- after 2 rounds of downsampling, our 28x28 image
    # is down to 7x7x64 feature maps -- maps this to 1024 features.
    with tf.name_scope('fc1'):
        W_fc1 = weight_variable([7 * 7 * 64, 1024])
        b_fc1 = bias_variable([1024])
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    # Dropout - controls the complexity of the model, prevents co-adaptation of
    # features.
    with tf.name_scope('dropout'):
        keep_prob = tf.placeholder(tf.float32)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    # Map the 1024 features to numclasses classes
    with tf.name_scope('fc2'):
        W_fc2 = weight_variable([1024, numclasses])
        b_fc2 = bias_variable([numclasses])
        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    return y_conv, keep_prob
def conv2d(x, W):
    """conv2d returns a 2d convolution layer with full stride."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    """max_pool_2x2 downsamples a feature map by 2X."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')

def weight_variable(shape):
    """weight_variable generates a weight variable of a given shape."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    """bias_variable generates a bias variable of a given shape."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
def main(_):
    # Import data
    images = np.load("../rwclassi/db/images.npy")
    labels = np.load("../rwclassi/db/labels.npy")
    train = DataSet(images, labels, reshape=True)
    numpixels = images.shape[1] * images.shape[2] * images.shape[3]
    numclasses = labels.shape[1]
    #test = train
    #mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    # Create the model
    x = tf.placeholder(tf.float32, [None, numpixels])
    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, numclasses])
    # Build the graph for the deep net
    y_conv, keep_prob = deepnn(x, numclasses)
    with tf.name_scope('loss'):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
                                                                logits=y_conv)
        cross_entropy = tf.reduce_mean(cross_entropy)
    with tf.name_scope('adam_optimizer'):
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        correct_prediction = tf.cast(correct_prediction, tf.float32)
        accuracy = tf.reduce_mean(correct_prediction)
    graph_location = tempfile.mkdtemp()
    print('Saving graph to: %s' % graph_location)
    train_writer = tf.summary.FileWriter(graph_location)
    train_writer.add_graph(tf.get_default_graph())
    saver = tf.train.Saver()
    resume = True
    with tf.Session() as sess:
        if resume:
            saver.restore(sess, "model.ckpt")
            print("Model restored.")
        else:
            sess.run(tf.global_variables_initializer())
        for i in range(20000):
            batch = train.next_batch(100)
            if i % 100 == 0:
                train_accuracy = accuracy.eval(feed_dict={
                    x: batch[0], y_: batch[1], keep_prob: 1.0})
                print('step %d, training accuracy %g' % (i, train_accuracy))
            if i % 1000 == 0:
                saver.save(sess, "model.ckpt")
                print("Model saved")
            train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
        print('test accuracy %g' % accuracy.eval(feed_dict={
            x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--modelfile', type=str,
                        default='model.ckpt',
                        help='Model file')
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
How do I predict/classify using this trained model? sess.run(???)? argmax?
Figured it out myself.
answer = sess.run(y_conv, feed_dict={x: [train.images[5230]], keep_prob: 1.0})
print (answer)
The line
y_conv, keep_prob = deepnn(x, numclasses)
gets the network structure, where y_conv is the output and keep_prob is a scalar placeholder for the probability of dropout.
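Since y_conv holds raw logits, taking the argmax of the output gives the predicted class; a minimal sketch of that last step (np is already imported in the script above):
answer = sess.run(y_conv, feed_dict={x: [train.images[5230]], keep_prob: 1.0})
predicted_class = np.argmax(answer, axis=1)[0]  # index of the largest logit
print("Predicted class:", predicted_class)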

Tensorflow MNIST: terminate called after throwing an instance of 'std::bad_alloc'

I am trying to implement a Convolutional Neural Network on Tensorflow, using their default MNIST data set.
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})
    return result

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    # stride [1, x_movement, y_movement, 1]
    # Must have strides[0] = strides[3] = 1
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    # stride [1, x_movement, y_movement, 1]
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# define placeholders for inputs to the network
xs = tf.placeholder(tf.float32, [None, 784])  # 28x28
ys = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32)
x_image = tf.reshape(xs, [-1, 28, 28, 1])
# print(x_image.shape)  # [n_samples, 28, 28, 1]

## conv1 layer ##
W_conv1 = weight_variable([5, 5, 1, 32])  # patch 5x5, in size 1, out size 32
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)  # output size 28x28x32
h_pool1 = max_pool_2x2(h_conv1)                           # output size 14x14x32

## conv2 layer ##
W_conv2 = weight_variable([5, 5, 32, 64])  # patch 5x5, in size 32, out size 64
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)  # output size 14x14x64
h_pool2 = max_pool_2x2(h_conv2)                           # output size 7x7x64

## fc1 layer ##
W_fc1 = weight_variable([7*7*64, 1024])
b_fc1 = bias_variable([1024])
# [n_samples, 7, 7, 64] ->> [n_samples, 7*7*64]
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

## fc2 layer ##
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

# the error between prediction and real data
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
                                              reduction_indices=[1]))  # loss
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for i in range(100):
    batch_xs, batch_ys = mnist.train.next_batch(10)
    sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})
    if i % 10 == 0:
        print(compute_accuracy(
            mnist.test.images, mnist.test.labels))
On executing, python crashes with this message:
terminate called after throwing an instance of 'std::bad_alloc'
what(): std::bad_alloc
I was able to pin down that this happens when I call the compute_accuracy function or, in general, when I load the whole set of mnist.test images and labels.
Any suggestions on what can be done, given that I want to use this data? I have been able to work with the images as a whole in a different case.
I think you're running out of memory. It runs okay on my machine (6GB graphics card). Try decreasing the batch size, or using a smaller fully connected layer.
I had the same issue. I resolved it by reducing the number of test images used to compute the accuracy, e.g. I replaced
print(compute_accuracy(mnist.test.images, mnist.test.labels))
With something similar to
batch_test = mnist.test.next_batch(5000)
print(compute_accuracy(batch_test[0], batch_test[1]))
I hope this helps.
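If you want accuracy over the whole test set without holding it in memory at once, a minimal sketch that averages over fixed-size chunks (this assumes the chunk size evenly divides the number of test examples, as it does for MNIST's 10000):
chunk_size = 1000
num_chunks = mnist.test.num_examples // chunk_size
accs = []
for _ in range(num_chunks):
    xs_chunk, ys_chunk = mnist.test.next_batch(chunk_size)
    accs.append(compute_accuracy(xs_chunk, ys_chunk))
print(sum(accs) / len(accs))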

Tensorflow Model Evaluation is based on batch size

I have a graph in TensorFlow which I've trained with a batch size of 32 observations over hundreds of epochs. I now want to predict some new data with the trained graph, so I've saved and reloaded it, but I'm forced to always pass in the same number of observations as my batch size, because I declared a placeholder in my graph whose shape depends on the batch size. How can I make my graph accept any number of observations?
How should I configure this so that I can train on any number of observations and then run a different number later?
Below is an excerpt of some of the important parts of the code.
Building the graph:
graph = tf.Graph()
with graph.as_default():
    x = tf.placeholder(tf.float32, shape=[batch_size, self.image_height, self.image_width, 1], name="data")
    y_ = tf.placeholder(tf.float32, shape=[batch_size, num_labels], name="labels")

    # Layer 1
    W_conv1 = weight_variable([patch_size, patch_size, 1, depth], name="weight_1")
    b_conv1 = bias_variable([depth], name="bias_1")
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1, name="conv_1") + b_conv1, name="relu_1")
    h_pool1 = max_pool_2x2(h_conv1, name="pool_1")

    # Layer 2
    #W_conv2 = weight_variable([patch_size, patch_size, depth, depth*2])
    #b_conv2 = bias_variable([depth*2])
    #h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    #h_pool2 = max_pool_2x2(h_conv2)

    # Densely Connected Layer
    W_fc1 = weight_variable([self.image_height/4 * self.image_width/2 * depth*2, depth], name="weight_2")
    b_fc1 = bias_variable([depth], name="bias_2")
    h_pool2_flat = tf.reshape(h_pool1, [-1, self.image_height/2 * self.image_width/2 * depth], name="reshape_1")
    h_fc1 = tf.nn.relu(tf.nn.xw_plus_b(h_pool2_flat, W_fc1, b_fc1), name="relu_2")

    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob, name="drop_1")

    W_fc2 = weight_variable([depth, num_labels], name="dense_weight")
    b_fc2 = bias_variable([num_labels], name="dense_bias")

    logits = tf.nn.xw_plus_b(h_fc1_drop, W_fc2, b_fc2)
    tf.add_to_collection("logits", logits)
    y_conv = tf.nn.softmax(logits, name="softmax_1")
    tf.add_to_collection("y_conv", y_conv)

    with tf.name_scope("cross-entropy") as scope:
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_conv, y_, name="cross_entropy_1"))
        ce_summ = tf.scalar_summary("cross entropy", cross_entropy, name="cross_entropy")

    optimizer = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy, name="min_adam_1")

    with tf.name_scope("prediction") as scope:
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        accuracy_summary = tf.scalar_summary("accuracy", accuracy, name="accuracy_summary")

    merged = tf.merge_all_summaries()
Loading and running new data
with tf.Session() as sess:
    new_saver = tf.train.import_meta_graph('./simple_model/one-layer-50.meta')
    new_saver.restore(sess, './simple_model/one-layer-50')
    logger.info("Model restored")
    image, _ = tf_nn.reformat(images, None, 3)
    x_image = tf.placeholder(tf.float32, shape=[image.shape[0], 28, 28, 1],
                             name="data")
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    feed_dict = {x_image: image, keep_prob: .01}
    y_ = tf.get_collection("y_")
    prediction = sess.run(y_, feed_dict=feed_dict)
You can define your placeholders to have flexible size on one of the dimensions by using None instead of a specific number like this:
x = tf.placeholder(tf.float32, shape=[None, self.image_height, self.image_width, 1], name="data")
y_ = tf.placeholder(tf.float32, shape=[None, num_labels], name="labels")
Edit: There's a section in the TensorFlow faq about this.
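For example, after rebuilding the graph with the None-shaped placeholders above, the same session can train and predict with different batch sizes; a minimal sketch using the names from the question (the arrays fed in are illustrative):
with tf.Session(graph=graph) as sess:
    sess.run(tf.initialize_all_variables())
    # Train on batches of 32 observations...
    sess.run(optimizer, feed_dict={x: images_32, y_: labels_32, keep_prob: 0.5})
    # ...then predict on a single observation (or 100) with the same graph
    predictions = sess.run(y_conv, feed_dict={x: images_1, keep_prob: 1.0})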
My approach would have been to define batch_size as a tf.Variable and then feed in the value of the batch size you want to use when running the session. This has worked fine for me in the past, but I guess the solution by Stryke is more elegant.

Having problems feeding data to tensorflow graph

I am trying to adapt the MNIST2 example from the TensorFlow tutorials to train a neural network using my own images, but I am having problems feeding data to the graph.
My code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os.path
import time

import numpy as np
import tensorflow as tf

from tensorflow.examples.tutorials.mnist import mnist

# Basic model parameters as external flags.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_epochs', 2, 'Number of epochs to run trainer.')
flags.DEFINE_integer('batch_size', 100, 'Batch size.')
flags.DEFINE_string('train_dir', '/root/data', 'Directory with the training data.')

# Constants used for dealing with the files, matches convert_to_records.
TRAIN_FILE = 'train.tfrecords'
VALIDATION_FILE = 'validation.tfrecords'

# Package set-up
sess = tf.InteractiveSession()

def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        dense_keys=['image_raw', 'label'],
        # Defaults are not specified since both keys are required.
        dense_types=[tf.string, tf.int64])
    # Convert from a scalar string tensor (whose single string has
    # length mnist.IMAGE_PIXELS) to a uint8 tensor with shape
    # [mnist.IMAGE_PIXELS].
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image.set_shape([mnist.IMAGE_PIXELS])
    # OPTIONAL: Could reshape into a 28x28 image and apply distortions
    # here. Since we are not applying any distortions in this
    # example, and the next step expects the image to be flattened
    # into a vector, we don't bother.
    # Convert from [0, 255] -> [-0.5, 0.5] floats.
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    # Convert label from a scalar uint8 tensor to an int32 scalar.
    label = tf.cast(features['label'], tf.int32)
    return image, label

def inputs(train, batch_size, num_epochs):
    """Reads input data num_epochs times.

    Args:
        train: Selects between the training (True) and validation (False) data.
        batch_size: Number of examples per returned batch.
        num_epochs: Number of times to read the input data, or 0/None to
            train forever.

    Returns:
        A tuple (images, labels), where:
        * images is a float tensor with shape [batch_size, 30, 26, 1]
          in the range [-0.5, 0.5].
        * labels is an int32 tensor with shape [batch_size] with the true label,
          a number in the range [0, number of character classes).

    Note that a tf.train.QueueRunner is added to the graph, which
    must be run using e.g. tf.train.start_queue_runners().
    """
    if not num_epochs: num_epochs = None
    filename = os.path.join(FLAGS.train_dir,
                            TRAIN_FILE if train else VALIDATION_FILE)
    with tf.name_scope('input'):
        filename_queue = tf.train.string_input_producer(
            [filename], num_epochs=num_epochs)
        # Even when reading in multiple threads, share the filename queue.
        image, label = read_and_decode(filename_queue)
        # Shuffle the examples and collect them into batch_size batches.
        # (Internally uses a RandomShuffleQueue.)
        # We run this in two threads to avoid being a bottleneck.
        images, sparse_labels = tf.train.shuffle_batch(
            [image, label], batch_size=batch_size, num_threads=2,
            capacity=1000 + 3 * batch_size,
            # Ensures a minimum amount of shuffling of examples.
            min_after_dequeue=1000)
    return images, sparse_labels
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')

# Variables
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 36])

# Layer 1
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

# Layer 2
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

# Densely Connected Layer
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# Dropout - reduces overfitting
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# Readout layer
W_fc2 = weight_variable([1024, 36])
b_fc2 = bias_variable([36])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

# Train and evaluate
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

sess.run(tf.initialize_all_variables())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

for i in range(20000):
    batch = inputs(train=True, batch_size=FLAGS.batch_size, num_epochs=FLAGS.num_epochs)
    if i % 100 == 0:
        print(batch[0])
        print(type(batch[0]))
        print(tf.shape(batch[0], name=None))
        a = np.reshape(batch[0], (100, 784))
        #batch[1]=np.reshape(batch[1],[1])
        train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

test = inputs(train=False, batch_size=2000)
print("test accuracy %g" % accuracy.eval(feed_dict={x: test[0], y_: test[1], keep_prob: 1.0}))
coord.join(threads)
sess.close()
The program outputs the following error:
Traceback (most recent call last):
  File "4_Treino_Rede_Neural.py", line 158, in <module>
    train_accuracy = accuracy.eval(feed_dict={x:batch[0], y_: batch[1], keep_prob: 1.0})
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 460, in eval
    return _eval_using_default_session(self, feed_dict, self.graph, session)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2910, in _eval_using_default_session
    return session.run(tensors, feed_dict)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 357, in run
    np_val = np.array(subfeed_val, dtype=subfeed_t.dtype.as_numpy_dtype)
ValueError: setting an array element with a sequence.
I am not sure how to fix this issue. Could anyone point me in the right direction?
Thanks,
Marcelo V
You are trying to feed TensorFlow tensors through the feed_dict argument. TensorFlow then tries to convert these tf.Tensor objects to numpy arrays, cannot, and raises the error you see.
Since you are using an input queue, you don't need a feed_dict at all.
Instead of:
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 36])
Just use:
x, y_ = inputs(train=True, batch_size=FLAGS.batch_size, num_epochs=FLAGS.num_epochs)
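Putting it together, a rough sketch of the queue-driven loop (note that inputs(...) builds graph nodes, so it must be called once before the loop, not inside it; and if your loss expects one-hot labels you may need tf.one_hot on the label tensor first):
# Build the input pipeline ONCE; x and y_ are graph tensors, not numpy arrays
x, y_ = inputs(train=True, batch_size=FLAGS.batch_size, num_epochs=FLAGS.num_epochs)
# ... build the model, cross_entropy and train_step on top of x and y_ ...

sess.run(tf.initialize_all_variables())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(20000):
    # Each run dequeues a fresh shuffled batch; no feed_dict needed
    sess.run(train_step)
coord.request_stop()
coord.join(threads)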
