TensorFlow model evaluation is tied to batch size - python

I have a graph in TensorFlow which I've trained with a batch size of 32 observations over hundreds of epochs. I now want to predict some new data with the trained graph, so I've saved it and reloaded it, but I'm forced to always pass in the same number of observations as my batch size because I've declared a placeholder in my graph whose shape depends on the batch size. How can I make my graph accept any number of observations?
How should I set this up so that I can train on any number of observations and then run a different number later?
Below is an excerpt of the important parts of the code.
Building the graph:
graph = tf.Graph()
with graph.as_default():
    x = tf.placeholder(tf.float32, shape=[batch_size, self.image_height, self.image_width, 1], name="data")
    y_ = tf.placeholder(tf.float32, shape=[batch_size, num_labels], name="labels")

    # Layer 1
    W_conv1 = weight_variable([patch_size, patch_size, 1, depth], name="weight_1")
    b_conv1 = bias_variable([depth], name="bias_1")
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1, name="conv_1") + b_conv1, name="relu_1")
    h_pool1 = max_pool_2x2(h_conv1, name="pool_1")

    # Layer 2
    #W_conv2 = weight_variable([patch_size, patch_size, depth, depth*2])
    #b_conv2 = bias_variable([depth*2])
    #h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    #h_pool2 = max_pool_2x2(h_conv2)

    # Densely Connected Layer
    W_fc1 = weight_variable([self.image_height/4 * self.image_width/2 * depth*2, depth], name="weight_2")
    b_fc1 = bias_variable([depth], name="bias_2")
    h_pool2_flat = tf.reshape(h_pool1, [-1, self.image_height/2 * self.image_width/2 * depth], name="reshape_1")
    h_fc1 = tf.nn.relu(tf.nn.xw_plus_b(h_pool2_flat, W_fc1, b_fc1), name="relu_2")

    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob, name="drop_1")

    W_fc2 = weight_variable([depth, num_labels], name="dense_weight")
    b_fc2 = bias_variable([num_labels], name="dense_bias")

    logits = tf.nn.xw_plus_b(h_fc1_drop, W_fc2, b_fc2)
    tf.add_to_collection("logits", logits)
    y_conv = tf.nn.softmax(logits, name="softmax_1")
    tf.add_to_collection("y_conv", y_conv)

    with tf.name_scope("cross-entropy") as scope:
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_conv, y_, name="cross_entropy_1"))
        ce_summ = tf.scalar_summary("cross entropy", cross_entropy, name="cross_entropy")

    optimizer = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy, name="min_adam_1")

    with tf.name_scope("prediction") as scope:
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        accuracy_summary = tf.scalar_summary("accuracy", accuracy, name="accuracy_summary")

    merged = tf.merge_all_summaries()
Loading and running new data:
with tf.Session() as sess:
    new_saver = tf.train.import_meta_graph('./simple_model/one-layer-50.meta')
    new_saver.restore(sess, './simple_model/one-layer-50')
    logger.info("Model restored")
    image, _ = tf_nn.reformat(images, None, 3)
    x_image = tf.placeholder(tf.float32, shape=[image.shape[0], 28, 28, 1],
                             name="data")
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    feed_dict = {x_image: image, keep_prob: .01}
    y_ = tf.get_collection("y_")
    prediction = sess.run(y_, feed_dict=feed_dict)

You can define your placeholders to have a flexible size along one of the dimensions by using None instead of a specific number, like this:
x = tf.placeholder(tf.float32, shape=[None, self.image_height, self.image_width, 1], name="data")
y_ = tf.placeholder(tf.float32, shape=[None, num_labels], name="labels")
Edit: There's a section in the TensorFlow FAQ about this.
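With None in the batch dimension, the restored graph will accept any number of rows at feed time. A minimal sketch to illustrate the point (TF 1.x style; the reduce_mean op is just a hypothetical stand-in for the real network):
import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name="data")
per_image_mean = tf.reduce_mean(x, axis=[1, 2, 3])  # stands in for the real network

with tf.Session() as sess:
    # A training-sized batch of 32 and a prediction batch of 7 both work with the same placeholder.
    print(sess.run(per_image_mean, feed_dict={x: np.zeros((32, 28, 28, 1), np.float32)}).shape)  # (32,)
    print(sess.run(per_image_mean, feed_dict={x: np.zeros((7, 28, 28, 1), np.float32)}).shape)   # (7,)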

My approach would have been to define batch_size as a tf.Variable and then feed the value of the batch size you want to use when running the session. This has worked fine for me in the past, but I guess the solution by Stryke would be more elegant.
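For completeness, a minimal sketch of that idea, assuming TF 1.x and that the batch size is only needed explicitly in a reshape (the names here are hypothetical, not from the question's code):
import numpy as np
import tensorflow as tf

# batch_size is fed at run time instead of being baked into the graph.
batch_size = tf.placeholder_with_default(tf.constant(32), shape=[], name="batch_size")
x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name="data")
flat = tf.reshape(x, tf.stack([batch_size, 28 * 28]))  # dynamic shape built from the fed value

with tf.Session() as sess:
    out = sess.run(flat, feed_dict={x: np.zeros((8, 28, 28, 1), np.float32), batch_size: 8})
    print(out.shape)  # (8, 784)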

Related

TensorBoard: compute time of a high-level node is not the same as the sum of the compute times of its sub-nodes

Following the TensorFlow tutorial, I am trying to understand run-time statistics using TensorBoard.
I find that the compute time of a high-level node representing a name scope is not equal to the sum of the compute times of its sub-nodes. Why isn't it the same?
For example, in the attached snapshot:
Compute time of ConvLayer2 = 75.5 ms, while the
Sub-nodes compute time = 55.2 (conv) + 1.73 (add) + 1 (other nodes) = 57.9 ms
[Snapshot of ConvLayer2]
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
g = tf.Graph()
with g.as_default():
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding = "SAME")
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides = [1, 2, 2, 1], padding = "SAME")
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
x = tf.placeholder(tf.float32, shape=[100, 784], name = "X_input") # Input layer
y_= tf.placeholder(tf.float32, shape=[100, 10], name = "Y_labels")
# Reshape input vector into a 4d tensor
x_image = tf.reshape(x, [-1, 28, 28, 1])
# Layer 1
with tf.name_scope('ConvLayer1'):
W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1), name = "Weights_L1")
b_conv1 = tf.Variable(tf.constant(0.1, shape = [32]), name = "Bias_L1")
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# Layer 2
with tf.name_scope('ConvLayer2'):
W_conv2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1), name = "Weights_L2")
b_conv2 = tf.Variable(tf.constant(0.1, shape = [64]), name = "Bias_L2")
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# Layer 3 : Fully Connected Layer
with tf.name_scope('FullyConnectLayer1'):
w_fc1 = tf.Variable(tf.truncated_normal([7*7*64, 1024], stddev=0.1), name = "Weights_fc1")
b_fc1 = tf.Variable(tf.constant(0.1, shape = [1024]), name = "Bias_fc1")
# Flatten
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
# Dropout to reduce overfitting
with tf.name_scope('performDropout'):
keep_probability = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_probability)
# Layer 4: Readout layer
with tf.name_scope('FullyConnectLayer2'):
w_fc2 = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1), name = "Weights_fc2")
b_fc2 = tf.Variable(tf.constant(0.1, shape = [10]), name = "Bias_fc2")
y_out = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
# loss function
with tf.name_scope('xEntropy'):
loss_crossEntropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = y_, logits = y_out), name="xent")
tf.summary.scalar("xEntropy", loss_crossEntropy)
with tf.name_scope('Train_AdamOptim'):
optimizer = tf.train.AdamOptimizer(learning_rate = 1e-4)
train_step = optimizer.minimize(loss_crossEntropy)
with tf.name_scope('accuracy'):
correct_predict = tf.equal(tf.argmax(y_out ,1), tf.argmax(y_ ,1))
accuracy = tf.reduce_mean(tf.cast(correct_predict, tf.float32))
tf.summary.scalar("accuracy", accuracy)
# Merge all summary ops into a single op
summary = tf.summary.merge_all()
# Operation: Initialize variables
var_init = tf.global_variables_initializer()
#### Add trace and metadata calls.
run_options = tf.RunOptions(trace_level = tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
# Run the computational graph
with tf.Session() as sess:
# Initialize the variables that were created while building the computational graph
sess.run(var_init)
# Write the current session graph events onto a file using a summary FileWriter, to be visualized in TensorBoard.
writer = tf.summary.FileWriter("/tmp/mnist_demo/1")
writer.add_graph(sess.graph)
# Run the training step "required" number of times -- here, 20000 timesteps
for i in range(101): #20000
batch = mnist.train.next_batch(100)
# Training
sess.run(train_step, feed_dict={x:batch[0], y_:batch[1], keep_probability:0.5})
# validation
if i % 50 == 0:
[val_accuracy, s] = sess.run([accuracy, summary], feed_dict={x:mnist.validation.images[0:100, :],
y_:mnist.validation.labels[0:100, :], keep_probability: 1.0},
options=run_options, run_metadata=run_metadata)
writer.add_run_metadata(run_metadata, 'step %d' % i)
writer.add_summary(s, i)
writer.flush()
print("step %d, validation accuracy %g" % (i, val_accuracy))
The TensorFlow tutorial doesn't provide any information on how the high-level node's compute time is aggregated. Any help is greatly appreciated.

TensorFlow MNIST classification on a trained model

Here's my modified version of the TensorFlow MNIST example:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib.learn.python.learn.datasets.mnist import DataSet
import tensorflow as tf
import numpy as np
FLAGS = None
def deepnn(x, numclasses):
"""deepnn builds the graph for a deep net for classifying digits.
Args:
x: an input tensor with the dimensions (N_examples, 784), where 784 is the
number of pixels in a standard MNIST image.
Returns:
A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values
equal to the logits of classifying the digit into one of 10 classes (the
digits 0-9). keep_prob is a scalar placeholder for the probability of
dropout.
"""
# Reshape to use within a convolutional neural net.
# Last dimension is for "features" - there is only one here, since images are
# grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
with tf.name_scope('reshape'):
x_image = tf.reshape(x, [-1, 28, 28, 1])
# First convolutional layer - maps one grayscale image to 32 feature maps.
with tf.name_scope('conv1'):
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
# Pooling layer - downsamples by 2X.
with tf.name_scope('pool1'):
h_pool1 = max_pool_2x2(h_conv1)
# Second convolutional layer -- maps 32 feature maps to 64.
with tf.name_scope('conv2'):
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
# Second pooling layer.
with tf.name_scope('pool2'):
h_pool2 = max_pool_2x2(h_conv2)
# Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
# is down to 7x7x64 feature maps -- maps this to 1024 features.
with tf.name_scope('fc1'):
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Dropout - controls the complexity of the model, prevents co-adaptation of
# features.
with tf.name_scope('dropout'):
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Map the 1024 features to 10 classes, one for each digit
with tf.name_scope('fc2'):
W_fc2 = weight_variable([1024, numclasses])
b_fc2 = bias_variable([numclasses])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
return y_conv, keep_prob
def conv2d(x, W):
"""conv2d returns a 2d convolution layer with full stride."""
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
"""max_pool_2x2 downsamples a feature map by 2X."""
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def weight_variable(shape):
"""weight_variable generates a weight variable of a given shape."""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
"""bias_variable generates a bias variable of a given shape."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def main(_):
# Import data
images = np.load("../rwclassi/db/images.npy")
labels = np.load("../rwclassi/db/labels.npy")
train = DataSet(images, labels, reshape=True)
numpixels = images.shape[1] * images.shape[2] * images.shape[3]
numclasses = labels.shape[1]
#test = train
#mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
# Create the model
x = tf.placeholder(tf.float32, [None, numpixels])
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, numclasses])
# Build the graph for the deep net
y_conv, keep_prob = deepnn(x, numclasses)
with tf.name_scope('loss'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
logits=y_conv)
cross_entropy = tf.reduce_mean(cross_entropy)
with tf.name_scope('adam_optimizer'):
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
correct_prediction = tf.cast(correct_prediction, tf.float32)
accuracy = tf.reduce_mean(correct_prediction)
graph_location = tempfile.mkdtemp()
print('Saving graph to: %s' % graph_location)
train_writer = tf.summary.FileWriter(graph_location)
train_writer.add_graph(tf.get_default_graph())
saver = tf.train.Saver()
resume = True
with tf.Session() as sess:
if resume:
saver.restore(sess, "model.ckpt")
print("Model restored.")
else:
sess.run(tf.global_variables_initializer())
for i in range(20000):
batch = train.next_batch(100)
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={
x: batch[0], y_: batch[1], keep_prob: 1.0})
print('step %d, training accuracy %g' % (i, train_accuracy))
if i % 1000 == 0:
saver.save(sess,"model.ckpt")
print ("Model saved")
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print('test accuracy %g' % accuracy.eval(feed_dict={
x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--modelfile', type=str,
default='model.ckpt',
help='Model file')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
How do I predict/classify using this trained model? sess.run(???)? argmax?
Figured it out myself.
answer = sess.run(y_conv, feed_dict={x: [train.images[5230]], keep_prob: 1.0})
print (answer)
The line
y_conv, keep_prob = deepnn(x, numclasses)
gets the network structure, where y_conv is the output and keep_prob is a scalar placeholder for the probability of dropout.
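To turn those logits into an actual class label (and, if you want, probabilities), something along these lines should work; this is only a sketch, reusing sess, y_conv, x, keep_prob and train as defined in the code above:
feed = {x: [train.images[5230]], keep_prob: 1.0}
logits = sess.run(y_conv, feed_dict=feed)                         # raw scores, shape (1, numclasses)
predicted_class = np.argmax(logits, axis=1)                       # index of the highest-scoring class
probabilities = sess.run(tf.nn.softmax(y_conv), feed_dict=feed)   # optional: normalized class scores
print(predicted_class, probabilities)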

TensorFlow: logits and labels must be the same size

I'm quite new to TensorFlow and Python, and I'm currently trying to modify the MNIST for Experts tutorial for 240x320x3 images. I have two .py scripts:
tfrecord_reader.py
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
data_path = 'train.tfrecords' # address to save the hdf5 file
def read_data():
with tf.Session() as sess:
feature = {'train/image': tf.FixedLenFeature([], tf.string),
'train/label': tf.FixedLenFeature([], tf.int64)}
# Create a list of filenames and pass it to a queue
filename_queue = tf.train.string_input_producer([data_path], num_epochs=1)
# Define a reader and read the next record
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
# Decode the record read by the reader
features = tf.parse_single_example(serialized_example, features=feature)
# Convert the image data from string back to the numbers
image = tf.decode_raw(features['train/image'], tf.float32)
# Cast label data into int32
label = tf.cast(features['train/label'], tf.int32)
# Reshape image data into the original shape
image = tf.reshape(image, [240, 320, 3])
sess.close()
return image, label
def next_batch(image, label, batchSize):
imageBatch, labelBatch = tf.train.shuffle_batch([image, label], batch_size=batchSize, capacity=30, num_threads=1,
min_after_dequeue=10)
return imageBatch, labelBatch
train.py
import tensorflow as tf
from random import shuffle
import glob
import sys
#import cv2
from tfrecord_reader import read_data, next_batch
import argparse # For passing arguments
import numpy as np
import math
import time
IMAGE_WIDTH = 240
IMAGE_HEIGHT = 320
IMAGE_DEPTH = 3
IMAGE_SIZE = 240*320*3
NUM_CLASSES = 5
BATCH_SIZE = 50
# Creates a weight tensor sized by shape
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
# Creates a bias tensor sized by shape
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def main(argv):
# Perform training
x = tf.placeholder(tf.float32, [None, IMAGE_SIZE]) # 240*320=76800
W = tf.Variable(tf.zeros([IMAGE_SIZE, NUM_CLASSES]))
b = tf.Variable(tf.zeros([NUM_CLASSES]))
y = tf.matmul(x, W) + b
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, NUM_CLASSES]) # Desired output
# First convolutional layer
W_conv1 = weight_variable([5, 5, IMAGE_DEPTH, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1, IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_DEPTH])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# Second convolutional layer
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# First fully connected layer
W_fc1 = weight_variable([60 * 80 * 64, 1024])
b_fc1 = bias_variable([1024])
# Flatten the layer
h_pool2_flat = tf.reshape(h_pool2, [-1, 60 * 80 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Drop out layer
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Second fully connected layer
W_fc2 = weight_variable([1024, NUM_CLASSES])
b_fc2 = bias_variable([NUM_CLASSES])
# Output layer
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# print(y_conv.shape)
# print(y_conv.get_shape)
# Get the loss
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
# Minimize the loss
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# Read all data from tfrecord file
imageList, labelList = read_data()
imageBatch, labelBatch = next_batch(imageList, labelList, BATCH_SIZE)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
train_images, train_labels = sess.run([imageBatch, labelBatch])
train_images = np.reshape(train_images, (-1, IMAGE_SIZE))
train_labels = np.reshape(train_labels, (-1, NUM_CLASSES))
sess.run(train_step, feed_dict = {x: train_images, y_: train_labels, keep_prob: 1.0})
coord.request_stop()
coord.join(threads)
sess.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
When I run the program, I'm getting
InvalidArgumentError (see above for traceback): logits and labels must be same size: logits_size=[50,5] labels_size=[10,5]
[[Node: SoftmaxCrossEntropyWithLogits = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"](Reshape_2, Reshape_3)]]
I've done several hours of searching on this problem, but could not see why the logits don't match the label size. If I change the batch size to 10, the label size becomes [2,5], as if it's always being divided by 5. Can someone help me out here?
Most likely your labels are single integer values rather than one-hot vectors, so your labelBatch is a vector of size [50] containing single numbers like "1" or "4". Now, when you reshape them using train_labels = np.reshape(train_labels, (-1, NUM_CLASSES))
you're changing the shape to [10, 5].
The tf.nn.softmax_cross_entropy_with_logits function expects the labels to be "one-hot" encodings (meaning a label of 3 translates into a vector of size 5 with a 1 in position 3 and zeros elsewhere). You can achieve this using the tf.one_hot function, but an easier way is to use tf.nn.sparse_softmax_cross_entropy_with_logits, which is designed to work with these single-valued labels. To achieve this, you'll need to change these lines:
y_ = tf.placeholder(tf.int64, [None]) # Desired output (integer class indices)
cross_entropy = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
And get rid of the train_labels = np.reshape(train_labels, (-1, NUM_CLASSES)) line.
(By the way, you don't actually need to use placeholders when reading data in this way - you can just directly use the output tensors.)
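If you would rather keep tf.nn.softmax_cross_entropy_with_logits, an alternative sketch (assuming labelBatch holds integer class indices in the range [0, NUM_CLASSES)) is to expand the labels yourself with tf.one_hot:
y_ = tf.placeholder(tf.int64, [None])            # integer class indices, one per example
y_one_hot = tf.one_hot(y_, depth=NUM_CLASSES)    # shape [batch, NUM_CLASSES]
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_one_hot, logits=y_conv))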

AttributeError while using a summary operation

I built a CNN using TensorFlow. The network worked fine, but I had a problem: I couldn't visualize and plot graphs from the learning process.
Therefore I implemented the necessary commands in order to use TensorBoard, following this tutorial.
However, when I run the code I get the following error message:
AttributeError: 'module' object has no attribute 'scalar'
Referring to the following commands (specifically the lines marked with **):
in the main function:
W_conv1 = weight_variable([first_conv_kernel_size, first_conv_kernel_size, 1, first_conv_output_channels])
with tf.name_scope('weights'):
**variable_summaries(W_conv1)**
in variable_summaries function:
def variable_summaries(var):
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
**tf.summary.scalar('mean', mean)**
What does this error message mean? I followed the tutorial step by step and couldn't find the mistake.
I'd appreciate your help, thanks! :)
The whole code:
import build_database_tuple
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
# few functions to initialize the weights of the layers properly (positive etc.)
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
# convolution and pooling layers definition
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
# from the previous code (mnist):
print('START')
# INTIAL PARAMETERS
# database:
data_home_dir='/home/dir/to/data/'
validation_ratio=(1.0/8)
patch_size=32
test_images_num=5000*1 # csv_batchsize*number of test batches files
train_images_num=78000+78000-test_images_num # posnum + negnum
# model parameters:
first_conv_kernel_size=5
first_conv_output_channels=32
sec_conv_kernel_size=5
sec_conv_output_channels=64
fc_vec_size=512
# train and test parameters
train_epoches_num=5
train_batch_size=100
test_batch_size=100
learning_rate=1*(10**(-4))
summaries_dir='/dir/to/log/files/'
# load data
folds = build_database_tuple.load_data(data_home_dir=data_home_dir,validation_ratio=validation_ratio,patch_size=patch_size)
# starting the session. using InteractiveSession we avoid building the entire computational graph before starting the session
sess = tf.InteractiveSession()
# start building the computational graph
# 'None' leaves the number of examples (the batch size) open for now
x = tf.placeholder(tf.float32, shape=[None, patch_size**2]) #input images - 28x28=784
y_ = tf.placeholder(tf.float32, shape=[None, 2]) #output classes (using one-hot vectors)
# the variables for the linear layer
W = tf.Variable(tf.zeros([(patch_size**2),2])) #weights - 784 input features and 10 outputs
b = tf.Variable(tf.zeros([2])) #biases - 10 classes
# initialize all the variables using the session, in order they could be used in it
sess.run(tf.initialize_all_variables())
# implementation of the regression model
y = tf.nn.softmax(tf.matmul(x,W) + b)
# Done!
# FIRST LAYER:
with tf.name_scope('layer1'):
# build the first layer
W_conv1 = weight_variable([first_conv_kernel_size, first_conv_kernel_size, 1, first_conv_output_channels]) # 5x5 patch, 1 input channel, 32 output channels (features)
b_conv1 = bias_variable([first_conv_output_channels])
x_image = tf.reshape(x, [-1,patch_size,patch_size,1]) # reshape x to a 4d tensor. 2,3 are the image dimensions, 4 is one color channel
# apply the layers
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
with tf.name_scope('weights'):
variable_summaries(W_conv1)
with tf.name_scope('biases'):
variable_summaries(b_conv1)
# SECOND LAYER:
with tf.name_scope('layer2'):
# 64 features each 5x5 patch
W_conv2 = weight_variable([sec_conv_kernel_size, sec_conv_kernel_size, patch_size, sec_conv_output_channels])
b_conv2 = bias_variable([sec_conv_output_channels])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
with tf.name_scope('weights'):
variable_summaries(W_conv2)
with tf.name_scope('biases'):
variable_summaries(b_conv2)
# FULLY CONNECTED LAYER:
with tf.name_scope('fc'):
# 1024 neurons, 8x8 - new size after 2 pooling layers
W_fc1 = weight_variable([(patch_size/4) * (patch_size/4) * sec_conv_output_channels, fc_vec_size])
b_fc1 = bias_variable([fc_vec_size])
h_pool2_flat = tf.reshape(h_pool2, [-1, (patch_size/4) * (patch_size/4) * sec_conv_output_channels])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# dropout layer - meant to reduce over-fitting
with tf.name_scope('dropout'):
keep_prob = tf.placeholder(tf.float32)
tf.summary.scalar('dropout_keep_probability', keep_prob)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
with tf.name_scope('weights'):
variable_summaries(W_fc1)
with tf.name_scope('biases'):
variable_summaries(b_fc1)
# READOUT LAYER:
with tf.name_scope('softmax'):
# softmax regression
W_fc2 = weight_variable([fc_vec_size, 2])
b_fc2 = bias_variable([2])
y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
with tf.name_scope('weights'):
variable_summaries(W_fc2)
with tf.name_scope('biases'):
variable_summaries(b_fc2)
# TRAIN AND EVALUATION:
with tf.name_scope('cross_entropy'):
# cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1])) # can be numerically unstable. old working calculation
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_conv, y_))
tf.summary.scalar('cross_entropy', cross_entropy)
with tf.name_scope('train'):
train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)
# Merge all the summaries and write them out to /tmp/mnist_logs (by default)
merged = tf.summary.merge_all()
train_writer = tf.train.SummaryWriter(summaries_dir + '/train', sess.graph)
test_writer = tf.train.SummaryWriter(summaries_dir + '/test')
#tf.global_variables_initializer().run()
sess.run(tf.initialize_all_variables())
# variables for the plotting process
p11 = []
p12 = []
p21 = []
p22 = []
f0 = plt.figure()
f1 = plt.figure()
train_accuracy=0
# starting the training process
for i in range(((train_images_num*train_epoches_num)/train_batch_size)):
if i%50 == 0: # for every 100 iterations
#train_accuracy = accuracy.eval(feed_dict={x:batch[0], y_: batch[1], keep_prob: 1.0})
# calculate test accuracy
val_batch = folds.validation.next_batch(train_batch_size)
#val_accuracy = accuracy.eval(feed_dict={x: val_batch[0], y_: val_batch[1], keep_prob: 1.0})
summary, val_accuracy = sess.run([merged, accuracy], feed_dict={x: val_batch[0], y_: val_batch[1], keep_prob: 1.0})
test_writer.add_summary(summary, i)
print('Accuracy at step %s: %s' % (i, val_accuracy))
# The train step
else:
summary, _ = sess.run([merged, train_step], feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
train_writer.add_summary(summary, i)
# Save Network
saver = tf.train.Saver()
save_path = saver.save(sess,'/dir/to/model/files/model.ckpt')
print("Model saved in file: %s" % save_path)
Following sunside's comment, I updated my TensorFlow version and the problem was solved.
Apparently, tf.scalar_summary() worked in TensorFlow 0.10, but was renamed to tf.summary.scalar() in newer versions (0.12, at least).
pip install -U tensorflow in the terminal solved the problem immediately :)
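For anyone stuck updating an older script, the renames introduced around 0.12 map roughly like this (from memory, so double-check against your installed version):
# old API (<= 0.11)               new API (>= 0.12)
# tf.scalar_summary(...)       -> tf.summary.scalar(...)
# tf.histogram_summary(...)    -> tf.summary.histogram(...)
# tf.merge_all_summaries()     -> tf.summary.merge_all()
# tf.train.SummaryWriter(...)  -> tf.summary.FileWriter(...)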

TensorFlow MNIST: terminate called after throwing an instance of 'std::bad_alloc'

I am trying to implement a convolutional neural network in TensorFlow, using its default MNIST data set.
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
def compute_accuracy(v_xs, v_ys):
global prediction
y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})
correct_prediction = tf.equal(tf.argmax(y_pre,1), tf.argmax(v_ys,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})
return result
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
# stride [1, x_movement, y_movement, 1]
# Must have strides[0] = strides[3] = 1
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
# stride [1, x_movement, y_movement, 1]
return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None, 784]) # 28x28
ys = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32)
x_image = tf.reshape(xs, [-1, 28, 28, 1])
# print(x_image.shape) # [n_samples, 28,28,1]
## conv1 layer ##
W_conv1 = weight_variable([5,5, 1,32]) # patch 5x5, in size 1, out size 32
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) # output size 28x28x32
h_pool1 = max_pool_2x2(h_conv1) # output size 14x14x32
## conv2 layer ##
W_conv2 = weight_variable([5,5, 32, 64]) # patch 5x5, in size 32, out size 64
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) # output size 14x14x64
h_pool2 = max_pool_2x2(h_conv2) # output size 7x7x64
## fc1 layer ##
W_fc1 = weight_variable([7*7*64, 1024])
b_fc1 = bias_variable([1024])
# [n_samples, 7, 7, 64] ->> [n_samples, 7*7*64]
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
## fc2 layer ##
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# the error between prediction and real data
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
reduction_indices=[1])) # loss
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(100):
batch_xs, batch_ys = mnist.train.next_batch(10)
sess.run(train_step, feed_dict={xs: batch_xs, ys: batch_ys, keep_prob: 0.5})
if i % 10 == 0:
print(compute_accuracy(
mnist.test.images, mnist.test.labels))
On execution, Python crashes with this message:
terminate called after throwing an instance of 'std::bad_alloc'
what(): std::bad_alloc
I was able to pin down that this happens when I call the compute_accuracy function, or in general, when I load the whole set of mnist.test images and labels.
Any suggestions on what can be done, given that I wish to use this data? I have been able to work with the images as a whole in a different case.
I think you're running out of memory. It runs okay on my machine (6GB graphics card). Try decreasing the batch size, or using a smaller fully connected layer.
I had the same issue. I resolved it by reducing the number of test images used to compute accuracy, e.g. I replaced
print(compute_accuracy(mnist.test.images, mnist.test.labels))
with something similar to:
batch_test = mnist.test.next_batch(5000)
print(compute_accuracy(batch_test[0], batch_test[1]))
I hope this helps.
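If you want the accuracy over the full test set rather than a 5000-image sample, a sketch that walks through it in equal-sized chunks and averages the results (reusing compute_accuracy from the question) would be:
chunk = 1000  # small enough to fit in memory; the 10000 test images split into 10 equal chunks
accs = []
for start in range(0, len(mnist.test.images), chunk):
    accs.append(compute_accuracy(mnist.test.images[start:start + chunk],
                                 mnist.test.labels[start:start + chunk]))
print(sum(accs) / len(accs))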
