Create AlexNet for MNIST in TensorFlow - Python

I wrote AlexNet in TensorFlow to run on the MNIST dataset. I get a ValueError saying: Negative dimension size caused by subtracting 2 from 1 for 'pool5' (op: 'MaxPool') with input shapes: [?,1,1,1024]. How do I solve it? Here is my code:
from __future__ import print_function
import tensorflow as tf
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
import os
import random
import matplotlib.pyplot as plt
import numpy as np
# Parameters
learning_rate = 0.001
training_iters = 100000
batch_size = 1000
display = True
display_step_console = 5
learn_from_scratch = False
train = False
# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
dropout = 0.80 # Dropout, probability to keep units
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
# Create AlexNet model
def conv1st(name, l_input, w, b):
cov = tf.nn.conv2d(l_input, w, strides=[1, 4, 4, 1], padding='VALID')
return tf.nn.relu(tf.nn.bias_add(cov,b), name=name)
def conv2d(name, l_input, w, b):
cov = tf.nn.conv2d(l_input, w, strides=[1, 1, 1, 1], padding='SAME')
return tf.nn.relu(tf.nn.bias_add(cov,b), name=name)
def max_pool(name, l_input, k, s):
return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, s, s, 1],
padding='VALID', name=name)
def norm(name, l_input, lsize=4):
return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name=name)
def alex_net(_X, _weights, _biases, _dropout):
# Reshape input picture
_X = tf.reshape(_X, shape=[-1, 28, 28, 1])
# Convolution Layer
conv1 = conv1st('conv1', _X, _weights['wc1'], _biases['bc1'])
# Max Pooling (down-sampling)
pool1 = max_pool('pool1', conv1, k=2, s=2)
# Apply Normalization
norm1 = norm('norm1', pool1, lsize=4)
# Apply Dropout
norm1 = tf.nn.dropout(norm1, _dropout)
# Convolution Layer
conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'])
# Max Pooling (down-sampling)
pool2 = max_pool('pool2', conv2, k=2, s=2)
# Apply Normalization
norm2 = norm('norm2', pool2, lsize=4)
# Apply Dropout
norm2 = tf.nn.dropout(norm2, _dropout)
# Convolution Layer
conv3 = conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'])
conv4 = conv2d('conv4', conv3, _weights['wc4'], _biases['bc4'])
conv5 = conv2d('conv5', conv4, _weights['wc5'], _biases['bc5'])
# Max Pooling (down-sampling)
pool5 = max_pool('pool5', conv5, k=2, s=2)
# Apply Normalization
norm5 = norm('norm5', pool5, lsize=4)
# Apply Dropout
norm5 = tf.nn.dropout(norm5, _dropout)
# Fully connected layer
dense1 = tf.reshape(norm5, [-1, _weights['wd1'].get_shape().as_list()
[0]]) # Reshape conv3 output to fit dense layer input
dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'],
name='fc1') # Relu activation
dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'],
name='fc2') # Relu activation
# Output, class prediction
out = tf.matmul(dense2, _weights['out']) + _biases['out']
return out
# Store layers weight & bias
_weights = {
'wc1': tf.Variable(tf.random_normal([11, 11, 1, 96])),
'wc2': tf.Variable(tf.random_normal([5, 5, 96, 256])),
'wc3': tf.Variable(tf.random_normal([3, 3, 256, 512])),
'wc4': tf.Variable(tf.random_normal([3, 3, 512, 1024])),
'wc5': tf.Variable(tf.random_normal([3, 3, 1024, 1024])),
#'wd1': tf.Variable(tf.random_normal([8*8*256, 1024])),
'wd1': tf.Variable(tf.random_normal([6*6*256, 3072])),
'wd2': tf.Variable(tf.random_normal([3072, 4096])),
'out': tf.Variable(tf.random_normal([4096, n_classes]))
}
_biases = {
'bc1': tf.Variable(tf.random_normal([96])),
'bc2': tf.Variable(tf.random_normal([256])),
'bc3': tf.Variable(tf.random_normal([512])),
'bc4': tf.Variable(tf.random_normal([1024])),
'bc5': tf.Variable(tf.random_normal([1024])),
'bd1': tf.Variable(tf.random_normal([3072])),
'bd2': tf.Variable(tf.random_normal([4096])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
############### NOT SO IMPORTANT ANYMORE ###################################
# Construct model
pred = alex_net(x, _weights, _biases, keep_prob)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred,
labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
#Create summary scalars and operation
n1 = tf.summary.scalar("cost", cost)
n2 = tf.summary.scalar("accuracy", accuracy)
train_summary_op = tf.summary.merge([n1,n2])
#Do writer
log_dir = "./alexnet-classification-model-checkpoints/summary"
train_writer = tf.summary.FileWriter(log_dir+'/train',
graph=tf.get_default_graph())
# Initializing the variables
init = tf.global_variables_initializer()
saver = tf.train.Saver(max_to_keep=1)
initial_step = 0
# Launch the graph
with tf.Session() as sess:
if learn_from_scratch == False:
if os.path.isfile('./alexnet-classification-model-checkpoints/step.txt'):
with open("alexnet-classification-model-checkpoints/step.txt", "r") as file:
step = file.read()
print(step)
initial_step = int(step)
if os.path.isfile('./alexnet-classification-model-checkpoints/checkpoint') and os.path.isfile('./alexnet-classification-model-checkpoints/my-model.ckpt.meta'):
saver = tf.train.import_meta_graph('alexnet-classification-model-checkpoints/my-model.ckpt.meta')
saver.restore(sess, 'alexnet-classification-model-checkpoints/my-model.ckpt')
print("Loaded model successfully!")
else:
print("A saved model wasn't found, starting from scratch")
sess.run(init)
else:
sess.run(init)
if train:
print("Starting training!")
step = 1
# Keep training until reach max iterations
while step * batch_size <= training_iters:
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Run optimization op (backprop)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
keep_prob: dropout})
if step % display_step_console == 0:
if display:
batch_x_eval, batch_y_eval = mnist.train.next_batch(500,
shuffle=True)
# Calculate batch loss and accuracy
loss, acc, summary = sess.run([cost, accuracy,
train_summary_op], feed_dict={x: batch_x_eval,
y: batch_y_eval,
keep_prob: 1.0})
train_writer.add_summary(summary, global_step=((step +
initial_step)*batch_size))
print("Iter " + str((step + initial_step)*batch_size) +
", Minibatch Loss= " + \
"{:.6f}".format(loss) + ", Training Accuracy= " +
\
"{:.5f}".format(acc))
else:
print("Still training...
{}%".format(round((step*batch_size / training_iters)*100), 2))
step += 1
print("Optimization Finished!")
savePath = saver.save(sess, 'alexnet-classification-model-checkpoints/my-model.ckpt')
with open("alexnet-classification-model-checkpoints/step.txt", "w") as file:
file.write(str(initial_step+step))
print("Saved!")
# Calculate accuracy on the MNIST test set
print("Testing Accuracy:", \
sess.run(accuracy, feed_dict={x: mnist.test.images,
y: mnist.test.labels,
keep_prob: 1.}))
num = random.randint(0, mnist.test.images.shape[0])
img = mnist.test.images[num]
cls = sess.run(tf.argmax(alex_net(img, _weights, _biases, dropout), 1))
cls2 = mnist.test.labels[num]
plt.imshow(img.reshape(28, 28), cmap=plt.cm.binary)
print ('NN predicted', cls, np.argmax(cls2))
plt.show()
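A quick way to see where the ValueError comes from is to trace the spatial size layer by layer. This is a plain-Python sketch using the kernel and stride values from the code above; it shows the 28x28 input already collapsed to 1x1 by the time pool5 runs:
# VALID arithmetic: out = floor((in - kernel) / stride) + 1
def out_size(in_size, kernel, stride):
    return (in_size - kernel) // stride + 1

size = 28
size = out_size(size, 11, 4)  # conv1: 11x11, stride 4, VALID -> 5
size = out_size(size, 2, 2)   # pool1: 2x2, stride 2, VALID  -> 2
size = out_size(size, 2, 2)   # pool2: 2x2, stride 2, VALID  -> 1
print(size)  # 1 -- pooling a 1x1 map with a 2x2 VALID window gives the negative dimension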

Related

Illegal argument error: logits and labels must be same size, batch size is different

I am using CNN to classifies MNIST dataset into 10 classes.
But the error shows the batch size of the pred is different.
InvalidArgumentError (see above for traceback): logits and labels must be same size: logits_size=[36,10] labels_size=[64,10]
[[Node: SoftmaxCrossEntropyWithLogits = SoftmaxCrossEntropyWithLogits[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/cpu:0"](Reshape_2, Reshape_3)]]
I can't find the reason why the batch size became 36 instead of 64.
Here is my code. The image size is 28*28*1.
import tensorflow as tf
# input data
from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets('/tmp/data/', one_hot=True)
mnist = input_data.read_data_sets('./MNIST_data', one_hot=True) # running on a server
learning_rate = 0.001
training_iters = 200000
batch_size = 64
display_step = 10
n_input = 784
n_classes = 10
dropout = 0.75
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32) # dropout
def conv2d(name, x, W, b, s=1):
return tf.nn.relu(tf.nn.conv2d(x, W, strides=[1, s, s, 1], padding='SAME'))
def maxpool2d(name, x, k=2, s=2):
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, s, s, 1],
padding='VALID', name=name)
def norm(name, l_input, lsize=4):
return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.001 / 9.0,
beta=0.75, name=name)
def alex_net(x, weights, biases, dropout):
# Reshape input picture
x = tf.reshape(x, shape=[-1, 28, 28, 1])
conv1 = conv2d('conv1', x, weights['wc1'], biases['bc1'], s=1)
pool1 = maxpool2d('pool1', conv1, k=2, s=2)
norm1 = norm('norm1', pool1)
conv2 = conv2d('conv2', norm1, weights['wc2'], biases['bc2'], s=1)
pool2 = maxpool2d('pool2', conv2, k=2, s=2)
norm2 = norm('norm2', pool2)
conv3 = conv2d('conv3', norm2, weights['wc3'], biases['bc3'], s=1)
pool3 = maxpool2d('pool3', conv3, k=2, s=2)
norm3 = norm('norm3', pool3)
fc1 = tf.reshape(norm3, [-1, weights['wd1'].get_shape().as_list()[0]])
fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
fc1 = tf.nn.relu(fc1)
fc2 = tf.add(tf.matmul(fc1, weights['wd2']), biases['bd2'])
fc2 = tf.nn.relu(fc2)
out = tf.matmul(fc2, weights['out']) + biases['out']
return out
weights = {
'wc1': tf.Variable(tf.random_normal([3, 3, 1, 64])),
'wc2': tf.Variable(tf.random_normal([3, 3, 64, 128])),
'wc3': tf.Variable(tf.random_normal([3, 3, 128, 256])),
'wd1': tf.Variable(tf.random_normal([4*4*256, 1024])),
'wd2': tf.Variable(tf.random_normal([1024, 1024])),
'out': tf.Variable(tf.random_normal([1024, n_classes]))
}
biases = {
'bc1': tf.Variable(tf.random_normal([64])),
'bc2': tf.Variable(tf.random_normal([128])),
'bc3': tf.Variable(tf.random_normal([256])),
'bd1': tf.Variable(tf.random_normal([1024])),
'bd2': tf.Variable(tf.random_normal([1024])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
pred = alex_net(x, weights, biases, keep_prob)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
step = 1
while step * batch_size < training_iters:
batch_x, batch_y = mnist.train.next_batch(batch_size)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
keep_prob: dropout})
if step % display_step == 0:
loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
y: batch_y,
keep_prob: 1.})
print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
"{:.6f}".format(loss) + ", Training Accuracy= " + \
"{:.5f}".format(acc))
step += 1
print("Optimization Finished!")
print("Testing Accuracy:", \
sess.run(accuracy, feed_dict={x: mnist.test.images[:256],
y: mnist.test.labels[:256],
keep_prob: 1.}))
It should be because the padding used for maxpool2d is 'VALID' instead of 'SAME'. With 'VALID' pooling the 28x28 input shrinks to 14x14, then 7x7, then 3x3, so norm3 holds 3*3*256 = 2304 values per sample rather than the 4*4*256 = 4096 that wd1 expects. The reshape fc1 = tf.reshape(norm3, [-1, weights['wd1'].get_shape().as_list()[0]]) then redistributes the 64 * 2304 elements into rows of 4096, which yields 36 rows -- hence the mismatched batch size.
If the above didn't correct the error, check the output shape of each layer by evaluating it, e.g. layer.eval(feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.}, session=sess).shape, and verify that it is the desired shape for that layer.
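The mismatch can also be confirmed statically, without running a session. Here is a minimal standalone sketch (TF 1.x assumed) contrasting the two paddings on the 7x7 map that reaches the third pooling stage above:
import tensorflow as tf

# A 7x7 feature map, like the one after the second pooling stage (28 -> 14 -> 7).
t = tf.placeholder(tf.float32, [None, 7, 7, 256])
valid = tf.nn.max_pool(t, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
same = tf.nn.max_pool(t, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
print(valid.get_shape())  # (?, 3, 3, 256) -> 3*3*256 = 2304 values per sample
print(same.get_shape())   # (?, 4, 4, 256) -> 4*4*256 = 4096, matching 'wd1'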

How to visualize loss and accuracy the best?

I wrote a convolutional neural network in TensorFlow to run on the MNIST dataset. Everything works just fine, but I want to visualize the model in TensorBoard. How am I going to do it? Here is my code:
from __future__ import print_function
import tensorflow as tf
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Parameters
learning_rate = 0.001
training_iters = 200000
batch_size = 128
display_step = 10
# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
dropout = 0.75 # Dropout, probability to keep units
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
# Conv2D wrapper, with bias and relu activation
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def maxpool2d(x, k=2):
# MaxPool2D wrapper
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
padding='SAME')
# Create model
def conv_net(x, weights, biases, dropout):
# Reshape input picture
x = tf.reshape(x, shape=[-1, 28, 28, 1])
# Convolution Layer
conv1 = conv2d(x, weights['wc1'], biases['bc1'])
# Max Pooling (down-sampling)
conv1 = maxpool2d(conv1, k=2)
# Convolution Layer
conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
# Max Pooling (down-sampling)
conv2 = maxpool2d(conv2, k=2)
# Fully connected layer
# Reshape conv2 output to fit fully connected layer input
fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
fc1 = tf.nn.relu(fc1)
# Apply Dropout
fc1 = tf.nn.dropout(fc1, dropout)
# Output, class prediction
out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
return out
# Store layers weight & bias
weights = {
# 5x5 conv, 1 input, 32 outputs
'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
# 5x5 conv, 32 inputs, 64 outputs
'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
# fully connected, 7*7*64 inputs, 1024 outputs
'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),
# 1024 inputs, 10 outputs (class prediction)
'out': tf.Variable(tf.random_normal([1024, n_classes]))
}
biases = {
'bc1': tf.Variable(tf.random_normal([32])),
'bc2': tf.Variable(tf.random_normal([64])),
'bd1': tf.Variable(tf.random_normal([1024])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
pred = conv_net(x, weights, biases, keep_prob)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred,
labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
step = 1
# Keep training until reach max iterations
while step * batch_size < training_iters:
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Run optimization op (backprop)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
keep_prob: dropout})
if step % display_step == 0:
# Calculate batch loss and accuracy
loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
y: batch_y,
keep_prob: 1.})
print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
"{:.6f}".format(loss) + ", Training Accuracy= " + \
"{:.5f}".format(acc))
step += 1
print("Optimization Finished!")
# Calculate accuracy for 256 mnist test images
print("Testing Accuracy:", \
sess.run(accuracy, feed_dict={x: mnist.test.images[:256],
y: mnist.test.labels[:256],
keep_prob: 1.}))
Create summary scalars and a merged summary op (use new names so you don't overwrite the cost and accuracy tensors themselves):
cost_summary = tf.summary.scalar("cost", cost)
accuracy_summary = tf.summary.scalar("accuracy", accuracy)
train_summary_op = tf.summary.merge([cost_summary, accuracy_summary])
Create the writer:
train_writer = tf.summary.FileWriter(log_dir+'/train',
graph=tf.get_default_graph())
Use it in the session run and write the summary:
loss, acc, summary = sess.run([cost, accuracy, train_summary_op], feed_dict={x: batch_x, y: batch_y, keep_prob: 1.})
train_writer.add_summary(summary, step)
Later, run TensorBoard (pointed at the folder you're writing summaries to) to visualize graphs of loss and accuracy.
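For example, assuming the log_dir used in the first question above, the dashboard is launched from a shell with:
tensorboard --logdir=./alexnet-classification-model-checkpoints/summary
and viewed in a browser at http://localhost:6006 (the default port).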
I hope this is helpful.

How to save trained model in tensorflow?

I wrote a convolutional neural network in TensorFlow to run on the MNIST dataset. Everything works just fine, but I want to save the model with tf.train.Saver(). How am I going to do it?
Here is my code:
from __future__ import print_function
import tensorflow as tf
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Parameters
learning_rate = 0.001
training_iters = 200000
batch_size = 128
display_step = 10
# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
dropout = 0.75 # Dropout, probability to keep units
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
# Conv2D wrapper, with bias and relu activation
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def maxpool2d(x, k=2):
# MaxPool2D wrapper
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
padding='SAME')
# Create model
def conv_net(x, weights, biases, dropout):
# Reshape input picture
x = tf.reshape(x, shape=[-1, 28, 28, 1])
# Convolution Layer
conv1 = conv2d(x, weights['wc1'], biases['bc1'])
# Max Pooling (down-sampling)
conv1 = maxpool2d(conv1, k=2)
# Convolution Layer
conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
# Max Pooling (down-sampling)
conv2 = maxpool2d(conv2, k=2)
# Fully connected layer
# Reshape conv2 output to fit fully connected layer input
fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
fc1 = tf.nn.relu(fc1)
# Apply Dropout
fc1 = tf.nn.dropout(fc1, dropout)
# Output, class prediction
out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
return out
# Store layers weight & bias
weights = {
# 5x5 conv, 1 input, 32 outputs
'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
# 5x5 conv, 32 inputs, 64 outputs
'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
# fully connected, 7*7*64 inputs, 1024 outputs
'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),
# 1024 inputs, 10 outputs (class prediction)
'out': tf.Variable(tf.random_normal([1024, n_classes]))
}
biases = {
'bc1': tf.Variable(tf.random_normal([32])),
'bc2': tf.Variable(tf.random_normal([64])),
'bd1': tf.Variable(tf.random_normal([1024])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
pred = conv_net(x, weights, biases, keep_prob)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred,
labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
step = 1
# Keep training until reach max iterations
while step * batch_size < training_iters:
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Run optimization op (backprop)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
keep_prob: dropout})
if step % display_step == 0:
# Calculate batch loss and accuracy
loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
y: batch_y,
keep_prob: 1.})
print("Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
"{:.6f}".format(loss) + ", Training Accuracy= " + \
"{:.5f}".format(acc))
step += 1
print("Optimization Finished!")
# Calculate accuracy for 256 mnist test images
print("Testing Accuracy:", \
sess.run(accuracy, feed_dict={x: mnist.test.images[:256],
y: mnist.test.labels[:256],
keep_prob: 1.}))
Simplest way to save and restore:
To save:
saver = tf.train.Saver(max_to_keep=1)
with tf.Session() as sess:
# train your model, then:
savePath = saver.save(sess, 'someDir/my_model.ckpt')
To restore:
with tf.Session() as sess:
saver = tf.train.import_meta_graph('someDir/my_model.ckpt.meta')
saver.restore(sess, 'someDir/my_model.ckpt')
# access a variable from the saved Graph, and so on:
someVar = sess.run('varName:0')
This should do it.
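If you keep more than one checkpoint, a common variant (a sketch, assuming the same someDir layout as above) lets TensorFlow locate the newest one via the checkpoint file it maintains:
ckpt = tf.train.latest_checkpoint('someDir')
with tf.Session() as sess:
    if ckpt is not None:
        saver = tf.train.import_meta_graph(ckpt + '.meta')
        saver.restore(sess, ckpt)
    else:
        sess.run(tf.global_variables_initializer())  # nothing saved yet, start fresh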
Consider saving results of Neural Network training to database.
The root of the idea is described here:
A neural network scoring engine in PL/SQL for recognizing handwritten digits
https://db-blog.web.cern.ch/blog/luca-canali/2016-07-neural-network-scoring-engine-plsql-recognizing-handwritten-digits
The code repository for this example is located here:
https://github.com/LucaCanali/Miscellaneous/tree/master/PLSQL_Neural_Network
By doing so, it's possible to train the network once and then run it from database procedures.

Tensorflow conv3d_transpose *** Error in 'python': free(): invalid pointer

EDIT: opened an issue: https://github.com/tensorflow/tensorflow/issues/3128
I'm trying to use the newly added conv3d_transpose created here
But I'm getting the error in the title: *** Error in 'python': free(): invalid pointer
I have the network working in 2D (using the same data), but it won't run in 3D.
I have tried on two different Linux machines, both running Ubuntu 14.04. I am currently trying to get it to run on my Mac, but I can't find a build with the conv3d_transpose included. I had to install the nightly build on my Linux machine, but the nightly build for OS X doesn't include it. Does anyone know where I could find it?
The way I input the 3D data may be the cause. My data comes in as 2D images, and I essentially concatenate them into a 3D matrix. For testing purposes, I'm keeping my data set small. My batch size is 1 and my depth is 5. I pull n_depth * batch_size images from my data class, and then reshape them into [batch_size, n_depth, x, y, n_classes]
The network itself builds, and it doesn't throw any dimension errors. The error occurs on the first training session.
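One thing worth ruling out is the 2D-to-3D packing itself. A minimal numpy sketch (assuming batch_size = 1, n_depth = 5 and 200x200 single-channel slices, as in the code below) checks the stacking and reshape in isolation:
import numpy as np

batch_size, n_depth, n_x, n_y = 1, 5, 200, 200
slices = [np.random.rand(n_x, n_y) for _ in range(n_depth)]  # five 2D images
volume = np.stack(slices).reshape(batch_size, n_depth, n_x, n_y)
assert volume.shape == (1, 5, 200, 200)  # what the x placeholder expects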
Full code is below:
import tensorflow as tf
import pdb
import numpy as np
from numpy import genfromtxt
from PIL import Image
from tensorflow.contrib.learn.python.learn.datasets.scroll import scroll_data
# Parameters
learning_rate = 0.001
training_iters = 1000000
batch_size = 1
display_step = 1
# Network Parameters
n_input_x = 200 # Input image x-dimension
n_input_y = 200 # Input image y-dimension
n_depth = 5
n_classes = 2 # Binary classification -- on a surface or not
dropout = 0.75 # Dropout, probability to keep units
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_depth, n_input_x, n_input_y])
y = tf.placeholder(tf.float32, [None, n_depth, n_input_x, n_input_y, n_classes], name="ground_truth")
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
# This function converts the ground truth data into a
# 2 channel classification -- n_input_x x n_input_y x 2
# one layer for 0's and the other for 1's
def convert_to_2_channel(x, size):
#assume input has dimension (batch_size,x,y)
#output will have dimension (batch_size,x,y,2)
output = np.empty((size, 200, 200, 2))
temp_temp_arr1 = np.empty((size, 200, 200))
temp_temp_arr2 = np.empty((size, 200, 200))
for i in xrange(size):
for j in xrange(n_input_x):
for k in xrange(n_input_y):
if x[i][j][k] == 1:
temp_temp_arr1[i][j][k] = 1
temp_temp_arr2[i][j][k] = 0
else:
temp_temp_arr1[i][j][k] = 0
temp_temp_arr2[i][j][k] = 1
for i in xrange(size):
for j in xrange(n_input_x):
for k in xrange(n_input_y):
for l in xrange(2):
try:
if l == 0:
output[i][j][k][l] = temp_temp_arr1[i][j][k]
else:
output[i][j][k][l] = temp_temp_arr2[i][j][k]
except IndexError:
print "Index error"
pdb.set_trace()
return output
# Create some wrappers for simplicity
def conv3d(x, W, b, strides=1):
# Conv3D wrapper, with bias and relu activation
x = tf.nn.conv3d(x, W, strides=[1, strides, strides, strides, 1], padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def maxpool3d(x, k=2):
# MaxPool3D wrapper
return tf.nn.max_pool3d(x, ksize=[1, k, k, k, 1], strides=[1, k, k, k, 1],
padding='SAME')
def deconv3d(prev_layer, w, b, output_shape, strides):
# Deconv layer
deconv = tf.nn.conv3d_transpose(prev_layer, w, output_shape=output_shape, strides=strides, padding="VALID")
deconv = tf.nn.bias_add(deconv, b)
deconv = tf.nn.relu(deconv)
return deconv
# Create model
def conv_net(x, weights, biases, dropout):
# Reshape input picture
x = tf.reshape(x, shape=[-1, 5, 200, 200, 1])
with tf.name_scope("conv1") as scope:
# Convolution Layer
conv1 = conv3d(x, weights['wc1'], biases['bc1'])
# Max Pooling (down-sampling)
#conv1 = tf.nn.local_response_normalization(conv1)
conv1 = maxpool3d(conv1, k=2)
# Convolution Layer
with tf.name_scope("conv2") as scope:
conv2 = conv3d(conv1, weights['wc2'], biases['bc2'])
# Max Pooling (down-sampling)
# conv2 = tf.nn.local_response_normalization(conv2)
conv2 = maxpool3d(conv2, k=2)
# Convolution Layer
with tf.name_scope("conv3") as scope:
conv3 = conv3d(conv2, weights['wc3'], biases['bc3'])
# Max Pooling (down-sampling)
# conv3 = tf.nn.local_response_normalization(conv3)
conv3 = maxpool3d(conv3, k=2)
pdb.set_trace()
temp_batch_size = tf.shape(x)[0] #batch_size shape
with tf.name_scope("deconv1") as scope:
output_shape = [temp_batch_size, 2, 50, 50, 64]
strides = [1,1,2,2,1]
conv4 = deconv3d(conv3, weights['wdc1'], biases['bdc1'], output_shape, strides)
# conv4 = tf.nn.local_response_normalization(conv4)
with tf.name_scope("deconv2") as scope:
output_shape = [temp_batch_size, 3, 100, 100, 32]
strides = [1,1,2,2,1]
conv5 = deconv3d(conv4, weights['wdc2'], biases['bdc2'], output_shape, strides)
# conv5 = tf.nn.local_response_normalization(conv5)
with tf.name_scope("deconv3") as scope:
output_shape = [temp_batch_size, 5, 200, 200, 2]
#this time don't use ReLu -- since output layer
conv6 = tf.nn.conv3d_transpose(conv5, weights['wdc3'], output_shape=output_shape, strides=[1,1,2,2,1], padding="VALID")
conv6 = tf.nn.bias_add(conv6, biases['bdc3'])
# conv6 = tf.nn.relu(conv6)
# Include dropout
#conv6 = tf.nn.dropout(conv6, dropout)
return conv6
weights = {
# 5x5x5 conv, 1 input, 32 outputs
'wc1' : tf.Variable(tf.random_normal([5, 5, 5, 1, 32])),
# 3x5x5 conv, 32 inputs, 64 outputs
'wc2' : tf.Variable(tf.random_normal([3, 5, 5, 32, 64])),
# 2x5x5 conv, 64 inputs, 128 outputs
'wc3' : tf.Variable(tf.random_normal([2, 5, 5, 64, 128])),
'wdc1' : tf.Variable(tf.random_normal([2, 2, 2, 64, 128])),
'wdc2' : tf.Variable(tf.random_normal([2, 2, 2, 32, 64])),
'wdc3' : tf.Variable(tf.random_normal([3, 2, 2, 2, 32])),
}
biases = {
'bc1': tf.Variable(tf.random_normal([32])),
'bc2': tf.Variable(tf.random_normal([64])),
'bc3': tf.Variable(tf.random_normal([128])),
'bdc1': tf.Variable(tf.random_normal([64])),
'bdc2': tf.Variable(tf.random_normal([32])),
'bdc3': tf.Variable(tf.random_normal([2])),
}
# Construct model
# with tf.name_scope("net") as scope:
pred = conv_net(x, weights, biases, keep_prob)
pdb.set_trace()
pred = tf.reshape(pred, [-1, n_input_x, n_input_y, n_depth, n_classes]) #Reshape to shape-Y
# Define loss and optimizer
# Reshape for cost function
temp_pred = tf.reshape(pred, [-1, 2])
temp_y = tf.reshape(y, [-1, 2])
with tf.name_scope("loss") as scope:
# cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(pred, y))
cost = (tf.nn.softmax_cross_entropy_with_logits(temp_pred, temp_y))
with tf.name_scope("opt") as scope:
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
with tf.name_scope("acc") as scope:
# accuracy is the difference between prediction and ground truth matrices
correct_pred = tf.equal(0,tf.cast(tf.sub(tf.nn.softmax(temp_pred),temp_y), tf.int32))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.initialize_all_variables()
saver = tf.train.Saver()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
summary = tf.train.SummaryWriter('/tmp/logdir/', sess.graph) #initialize graph for tensorboard
step = 1
# Import data
data = scroll_data.read_data('/home/kendall/Desktop/')
# Keep training until reach max iterations
while step * batch_size < training_iters:
batch_x, batch_y = data.train.next_batch(n_depth)
# Run optimization op (backprop)
batch_x = batch_x.reshape((batch_size, n_depth, n_input_x, n_input_y))
batch_y = batch_y.reshape((n_depth, n_input_x, n_input_y))
batch_y = convert_to_2_channel(batch_y, n_depth) # Converts the 200x200 ground truth to a 200x200x2 classification
batch_y = batch_y.reshape(batch_size * n_input_x * n_input_y * n_depth, 2)
sess.run(optimizer, feed_dict={x: batch_x, temp_y: batch_y,
keep_prob: dropout})
pdb.set_trace()
if step % display_step == 0:
batch_y = batch_y.reshape(batch_size, n_input_x, n_input_y, 2)
loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
y: batch_y,
keep_prob: 1.0})
print "Accuracy = " + str(acc)
#print "Loss = " + str(loss)
# Save network and make prediction
if acc > 0.7:
# Save network
save_path = "model.ckpt"
saver.save(sess, save_path)
# Make prediction
im = Image.open('/home/kendall/Desktop/HA900_frames/frame0001.tif')
batch_x = np.array(im)
batch_x = batch_x.reshape((1, n_input_x, n_input_y))
batch_x = batch_x.astype(float)
prediction = sess.run(pred, feed_dict={x: batch_x, keep_prob: 1.0})
prediction = prediction.reshape((40000,2))
prediction = tf.nn.softmax(prediction)
prediction = prediction.eval()
prediction = prediction.reshape((n_input_x, n_input_y, 2))
# Temp arrays are to splice the prediction n_input_x x n_input_y x 2
# into 2 matrices n_input_x x n_input_y
temp_arr1 = np.empty((n_input_x, n_input_y))
temp_arr2 = np.empty((n_input_x, n_input_y))
for i in xrange(n_input_x):
for j in xrange(n_input_x):
for k in xrange(2):
if k == 0:
temp_arr1[i][j] = prediction[i][j][k]
else:
temp_arr2[i][j] = prediction[i][j][k]
# np.savetxt("small_dataset_1.csv", temp_arr1, delimiter=",")
# np.savetxt("small_dataset_2.csv", temp_arr2, delimiter=",")
if acc > 0.70 and acc < 0.73:
np.savetxt("run_1_step_1-1.csv", temp_arr1, delimiter=",")
# np.savetxt("run_1_step_1-2.csv", temp_arr2, delimiter=",")
if acc > 0.73 and acc < 0.78:
np.savetxt("run_1_step_2-1.csv", temp_arr1, delimiter=",")
# np.savetxt("run_1_step_2-2.csv", temp_arr2, delimiter=",")
if acc > 0.78 and acc < 0.81:
np.savetxt("run_1_step_3-1.csv", temp_arr1, delimiter=",")
# np.savetxt("run_1_step_3-2.csv", temp_arr2, delimiter=",")
if acc > 0.81 and acc < 0.84:
np.savetxt("run_1_step_4-1.csv", temp_arr1, delimiter=",")
# np.savetxt("run_1_step_4-2.csv", temp_arr2, delimiter=",")
if acc > 0.84 and acc < 0.87:
np.savetxt("run_1_step_5-1.csv", temp_arr1, delimiter=",")
# np.savetxt("run_1_step_5-2.csv", temp_arr2, delimiter=",")
if acc > 0.87 and acc < 0.9:
np.savetxt("run_1_step_6-1.csv", temp_arr1, delimiter=",")
# np.savetxt("run_1_step_6-2.csv", temp_arr2, delimiter=",")
if acc > 0.9 and acc < 0.95:
np.savetxt("run_1_step_7-1.csv", temp_arr1, delimiter=",")
# np.savetxt("run_1_step_7-2.csv", temp_arr2, delimiter=",")
if acc > 0.95:
np.savetxt("run_1_step_8-1.csv", temp_arr1, delimiter=",")
# np.savetxt("run_1_step_8-2.csv", temp_arr2, delimiter=",")
if acc > 0.98:
break
step += 1
print "Optimization Finished!"
# Measure accuracy on test set
print "Testing Accuracy:",
test_img = data.test.labels[0:].reshape((5, n_input_x, n_input_y))
test_img = convert_to_2_channel(test_img, 5)
acc = sess.run(accuracy, feed_dict={x: data.test.images[0:].reshape((5, n_input_x, n_input_y)),
y: test_img,
keep_prob: 1.})
print acc
You may need libtcmalloc-minimal4 to be installed.
sudo apt-get install libtcmalloc-minimal4
And export its path:
export LD_PRELOAD="/usr/lib/libtcmalloc_minimal.so.4"

Tensorflow converging but bad predictions

I posted a similar question the other day here, but I have since made edits to bugs that I found, and the problem of bad predictions remains.
I have two networks -- one with 3 conv layers and another with 3 conv layers followed by 3 deconv layers. Both take a 200x200 input image. The output is the same resolution, 200x200, but it has two classifications (either a zero or a 1 -- it's a segmentation network), so the network's prediction dimensions are 200x200x2 (plus batch_size). Let's talk about the network with deconv layers.
Here's the weird thing... out of 10 training runs, maybe 3 of them will converge. The other 7 will diverge down to an accuracy of 0.0.
The conv and deconv layers are activated by a ReLU. The optimizer does something weird. When I print predictions after every training iteration, the magnitude of the values starts large -- which is correct considering they are all passed through ReLUs -- but after each iteration, the values get smaller until they are roughly between 0 and 2. I subsequently pass them through a sigmoid function (sigmoid_cross_entropy_with_logits) -- thus squashing the large negative values to 0 and the large positive values to 1. When I make predictions, I reactivate the outputs by passing them through the sigmoid function again.
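To see concretely what that squashing does, here is a tiny standalone numpy check (nothing model-specific assumed):
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

print(sigmoid(np.array([-10.0, 0.0, 2.0, 10.0])))
# [4.54e-05  0.5  0.881  0.99995] -- logits stuck between 0 and 2 can only
# produce probabilities in the narrow 0.5..0.88 band, the weird middle ground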
So after the first iteration, prediction values are reasonable...
Accuracy = 0.508033
[[[[ 1. 0.]
[ 0. 1.]
[ 0. 0.]
...,
[ 1. 0.]
[ 1. 1.]
[ 1. 0.]]
[[ 0. 1.]
[ 1. 1.]
[ 0. 0.]
...,
[ 1. 1.]
[ 1. 1.]
[ 0. 1.]]
but then after some iterations, and let's say it actually converges this time, the prediction values look like... (because the optimizer makes the outputs smaller, they are all in that weird middle ground of the sigmoid function)
[[ 0.51028508 0.63202268]
[ 0.24386917 0.52015287]
[ 0.62086064 0.6953823 ]
...,
[ 0.2593964 0.13163178]
[ 0.24617286 0.5210492 ]
[ 0.24692698 0.5876413 ]]]]
Accuracy = 0.999913
Do I have the wrong optimizer function?
Here's the entire code... jump to def conv_net to see the network creation... and after that function is the definition of the cost function, optimizer, and accuracy. You'll notice when I measure accuracy and make predictions I reactivate the output with tf.nn.sigmoid(pred) -- this is because the cost function sigmoid_cross_entropy_with_logits combines the activation and the loss in the same function. In other words, pred (the network) outputs a linear value.
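For reference, what sigmoid_cross_entropy_with_logits computes per element can be written out directly. This is a plain-numpy sketch of the numerically stable form given in the TensorFlow documentation:
import numpy as np

def sigmoid_ce_with_logits(x, z):
    # equals -z*log(sigmoid(x)) - (1-z)*log(1 - sigmoid(x))
    return np.maximum(x, 0) - x * z + np.log1p(np.exp(-np.abs(x)))

x = np.array([-2.0, 0.0, 3.0])  # raw (linear) network outputs, i.e. logits
z = np.array([0.0, 1.0, 1.0])   # 0/1 targets
print(sigmoid_ce_with_logits(x, z))  # [0.1269 0.6931 0.0486]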
import tensorflow as tf
import pdb
import numpy as np
from numpy import genfromtxt
from PIL import Image
# Parameters
learning_rate = 0.001
training_iters = 10000
batch_size = 10
display_step = 1
# Network Parameters
n_input = 200 # input images are 200x200
n_output = 40000
n_classes = 2 # binary segmentation -- on a surface or not
dropout = 0.75 # Dropout, probability to keep units
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input, n_input])
y = tf.placeholder(tf.float32, [None, n_input, n_input, n_classes])
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
def convert_to_2_channel(x, batch_size):
#assume input has dimension (batch_size,x,y)
#output will have dimension (batch_size,x,y,2)
output = np.empty((batch_size, 200, 200, 2))
temp_arr1 = np.empty((batch_size, 200, 200))
temp_arr2 = np.empty((batch_size, 200, 200))
for i in xrange(batch_size):
for j in xrange(3):
for k in xrange(3):
if x[i][j][k] == 1:
temp_arr1[i][j][k] = 1
temp_arr2[i][j][k] = 0
else:
temp_arr1[i][j][k] = 0
temp_arr2[i][j][k] = 1
for i in xrange(batch_size):
for j in xrange(200):
for k in xrange(200):
for l in xrange(2):
if l == 0:
output[i][j][k][l] = temp_arr1[i][j][k]
else:
output[i][j][k][l] = temp_arr2[i][j][k]
return output
# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
# Conv2D wrapper, with bias and relu activation
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def maxpool2d(x, k=2):
# MaxPool2D wrapper
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
padding='SAME')
# Create model
def conv_net(x, weights, biases, dropout):
# Reshape input picture
x = tf.reshape(x, shape=[-1, 200, 200, 1])
# Convolution Layer
conv1 = conv2d(x, weights['wc1'], biases['bc1'])
# Max Pooling (down-sampling)
#conv1 = tf.nn.local_response_normalization(conv1)
conv1 = maxpool2d(conv1, k=2)
# Convolution Layer
conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
# Max Pooling (down-sampling)
#conv2 = tf.nn.local_response_normalization(conv2)
conv2 = maxpool2d(conv2, k=2)
# Convolution Layer
conv3 = conv2d(conv2, weights['wc3'], biases['bc3'])
# # Max Pooling (down-sampling)
#conv3 = tf.nn.local_response_normalization(conv3)
conv3 = maxpool2d(conv3, k=2)
temp_batch_size = tf.shape(x)[0]
output_shape = [temp_batch_size, 50, 50, 64]
conv4 = tf.nn.conv2d_transpose(conv3, weights['wdc1'], output_shape=output_shape, strides=[1,2,2,1], padding="VALID")
conv4 = tf.nn.bias_add(conv4, biases['bdc1'])
conv4 = tf.nn.relu(conv4)
# conv4 = tf.nn.local_response_normalization(conv4)
# output_shape = tf.pack([temp_batch_size, 100, 100, 32])
output_shape = [temp_batch_size, 100, 100, 32]
conv5 = tf.nn.conv2d_transpose(conv4, weights['wdc2'], output_shape=output_shape, strides=[1,2,2,1], padding="VALID")
conv5 = tf.nn.bias_add(conv5, biases['bdc2'])
conv5 = tf.nn.relu(conv5)
# conv5 = tf.nn.local_response_normalization(conv5)
# output_shape = tf.pack([temp_batch_size, 200, 200, 1])
output_shape = [temp_batch_size, 200, 200, 2]
conv6 = tf.nn.conv2d_transpose(conv5, weights['wdc3'], output_shape=output_shape, strides=[1,2,2,1], padding="VALID")
conv6 = tf.nn.bias_add(conv6, biases['bdc3'])
conv6 = tf.nn.relu(conv6)
# pdb.set_trace()
# Fully connected layer
# Reshape conv2 output to fit fully connected layer input
fc1 = tf.reshape(conv6, [-1, weights['wd1'].get_shape().as_list()[0]])
fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
fc1 = tf.nn.relu(fc1)
# Apply Dropout
fc1 = tf.nn.dropout(fc1, dropout)
return (tf.add(tf.matmul(fc1, weights['out']), biases['out']))# Store layers weight & bias
weights = {
# 5x5 conv, 1 input, 32 outputs
'wc1' : tf.Variable(tf.random_normal([5, 5, 1, 32])),
# 5x5 conv, 32 inputs, 64 outputs
'wc2' : tf.Variable(tf.random_normal([5, 5, 32, 64])),
# 5x5 conv, 64 inputs, 128 outputs
'wc3' : tf.Variable(tf.random_normal([5, 5, 64, 128])),
'wdc1' : tf.Variable(tf.random_normal([2, 2, 64, 128])),
'wdc2' : tf.Variable(tf.random_normal([2, 2, 32, 64])),
'wdc3' : tf.Variable(tf.random_normal([2, 2, 2, 32])),
# fully connected, 200*200*2 = 80000 inputs, 1024 outputs
'wd1': tf.Variable(tf.random_normal([80000, 1024])),
# 1024 inputs, 80000 outputs (pixel-wise prediction)
'out': tf.Variable(tf.random_normal([1024, 80000]))
}
biases = {
'bc1': tf.Variable(tf.random_normal([32])),
'bc2': tf.Variable(tf.random_normal([64])),
'bc3': tf.Variable(tf.random_normal([128])),
'bdc1': tf.Variable(tf.random_normal([64])),
'bdc2': tf.Variable(tf.random_normal([32])),
'bdc3': tf.Variable(tf.random_normal([2])),
'bd1': tf.Variable(tf.random_normal([1024])),
'out': tf.Variable(tf.random_normal([80000]))
}
# Construct model
pred = conv_net(x, weights, biases, keep_prob)
pred = tf.reshape(pred, [-1,n_input,n_input,n_classes])
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(pred, y))
# cost = (tf.nn.sigmoid_cross_entropy_with_logits(pred, y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(0,tf.cast(tf.sub(tf.nn.sigmoid(pred),y), tf.int32))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.initialize_all_variables()
saver = tf.train.Saver()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
summary = tf.train.SummaryWriter('/tmp/logdir/', sess.graph)
step = 1
from tensorflow.contrib.learn.python.learn.datasets.scroll import scroll_data
data = scroll_data.read_data('/home/kendall/Desktop/')
# Keep training until reach max iterations
while step * batch_size < training_iters:
batch_x, batch_y = data.train.next_batch(batch_size)
# Run optimization op (backprop)
batch_x = batch_x.reshape((batch_size, n_input, n_input))
batch_y = batch_y.reshape((batch_size, n_input, n_input))
batch_y = convert_to_2_channel(batch_y, batch_size) #converts the 200x200 ground truth to a 200x200x2 classification
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
keep_prob: dropout})
#measure prediction
prediction = sess.run(tf.nn.sigmoid(pred), feed_dict={x: batch_x, keep_prob: 1.})
print prediction
if step % display_step == 0:
# Calculate batch loss and accuracy
save_path = "model.ckpt"
saver.save(sess, save_path)
loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
y: batch_y,
keep_prob: dropout})
print "Accuracy = " + str(acc)
if acc > 0.73:
break
step += 1
print "Optimization Finished!"
#make prediction
im = Image.open('/home/kendall/Desktop/HA900_frames/frame0035.tif')
batch_x = np.array(im)
# pdb.set_trace()
batch_x = batch_x.reshape((1, n_input, n_input))
batch_x = batch_x.astype(float)
pdb.set_trace()
prediction = sess.run(tf.nn.sigmoid(pred), feed_dict={x: batch_x, keep_prob: dropout})
print prediction
arr1 = np.empty((n_input,n_input))
arr2 = np.empty((n_input,n_input))
for i in xrange(n_input):
for j in xrange(n_input):
for k in xrange(2):
if k == 0:
arr1[i][j] = (prediction[0][i][j][k])
else:
arr2[i][j] = (prediction[0][i][j][k])
# prediction = np.asarray(prediction)
# prediction = np.reshape(prediction, (200,200))
# np.savetxt("prediction.csv", prediction, delimiter=",")
np.savetxt("prediction1.csv", arr1, delimiter=",")
np.savetxt("prediction2.csv", arr2, delimiter=",")
# np.savetxt("prediction2.csv", arr2, delimiter=",")
# Calculate accuracy for 256 mnist test images
print "Testing Accuracy:", \
sess.run(accuracy, feed_dict={x: data.test.images[:256],
y: data.test.labels[:256],
keep_prob: 1.})
The correct_pred variable (the variable that measures accuracy) is a simple subtraction operator between the predictions and the ground truth, and then compared to zero (if the two are equivalent, then the difference should be zero).
Also, I have graphed the network, and it just looks very off to me. Here is a picture, I had to crop for viewing.
EDIT: I found out why my graph looks terrible (thanks Olivier), and I also tried changing my loss function, but to no avail -- it still diverges in the same manner
with tf.name_scope("loss") as scope:
# cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(pred, y))
temp_pred = tf.reshape(pred, [-1, 2])
temp_y = tf.reshape(y, [-1, 2])
cost = (tf.nn.softmax_cross_entropy_with_logits(temp_pred, temp_y))
EDIT full code now looks like this (still diverging)
import tensorflow as tf
import pdb
import numpy as np
from numpy import genfromtxt
from PIL import Image
# Parameters
learning_rate = 0.001
training_iters = 10000
batch_size = 10
display_step = 1
# Network Parameters
n_input = 200 # input images are 200x200
n_output = 40000
n_classes = 2 # binary segmentation -- on a surface or not
dropout = 0.75 # Dropout, probability to keep units
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input, n_input])
y = tf.placeholder(tf.float32, [None, n_input, n_input, n_classes])
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
def convert_to_2_channel(x, batch_size):
#assume input has dimension (batch_size,x,y)
#output will have dimension (batch_size,x,y,2)
output = np.empty((batch_size, 200, 200, 2))
temp_arr1 = np.empty((batch_size, 200, 200))
temp_arr2 = np.empty((batch_size, 200, 200))
for i in xrange(batch_size):
for j in xrange(3):
for k in xrange(3):
if x[i][j][k] == 1:
temp_arr1[i][j][k] = 1
temp_arr2[i][j][k] = 0
else:
temp_arr1[i][j][k] = 0
temp_arr2[i][j][k] = 1
for i in xrange(batch_size):
for j in xrange(200):
for k in xrange(200):
for l in xrange(2):
if l == 0:
output[i][j][k][l] = temp_arr1[i][j][k]
else:
output[i][j][k][l] = temp_arr2[i][j][k]
return output
# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
# Conv2D wrapper, with bias and relu activation
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def maxpool2d(x, k=2):
# MaxPool2D wrapper
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
padding='SAME')
# Create model
def conv_net(x, weights, biases, dropout):
# Reshape input picture
x = tf.reshape(x, shape=[-1, 200, 200, 1])
with tf.name_scope("conv1") as scope:
# Convolution Layer
conv1 = conv2d(x, weights['wc1'], biases['bc1'])
# Max Pooling (down-sampling)
#conv1 = tf.nn.local_response_normalization(conv1)
conv1 = maxpool2d(conv1, k=2)
# Convolution Layer
with tf.name_scope("conv2") as scope:
conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
# Max Pooling (down-sampling)
#conv2 = tf.nn.local_response_normalization(conv2)
conv2 = maxpool2d(conv2, k=2)
# Convolution Layer
with tf.name_scope("conv3") as scope:
conv3 = conv2d(conv2, weights['wc3'], biases['bc3'])
# # Max Pooling (down-sampling)
#conv3 = tf.nn.local_response_normalization(conv3)
conv3 = maxpool2d(conv3, k=2)
temp_batch_size = tf.shape(x)[0]
with tf.name_scope("deconv1") as scope:
output_shape = [temp_batch_size, 50, 50, 64]
conv4 = tf.nn.conv2d_transpose(conv3, weights['wdc1'], output_shape=output_shape, strides=[1,2,2,1], padding="VALID")
conv4 = tf.nn.bias_add(conv4, biases['bdc1'])
conv4 = tf.nn.relu(conv4)
# conv4 = tf.nn.local_response_normalization(conv4)
with tf.name_scope("deconv2") as scope:
# output_shape = tf.pack([temp_batch_size, 100, 100, 32])
output_shape = [temp_batch_size, 100, 100, 32]
conv5 = tf.nn.conv2d_transpose(conv4, weights['wdc2'], output_shape=output_shape, strides=[1,2,2,1], padding="VALID")
conv5 = tf.nn.bias_add(conv5, biases['bdc2'])
conv5 = tf.nn.relu(conv5)
# conv5 = tf.nn.local_response_normalization(conv5)
with tf.name_scope("deconv3") as scope:
# output_shape = tf.pack([temp_batch_size, 200, 200, 1])
output_shape = [temp_batch_size, 200, 200, 2]
conv6 = tf.nn.conv2d_transpose(conv5, weights['wdc3'], output_shape=output_shape, strides=[1,2,2,1], padding="VALID")
conv6 = tf.nn.bias_add(conv6, biases['bdc3'])
# conv6 = tf.nn.relu(conv6)
# pdb.set_trace()
conv6 = tf.nn.dropout(conv6, dropout)
return conv6
# Fully connected layer
# Reshape conv2 output to fit fully connected layer input
# fc1 = tf.reshape(conv6, [-1, weights['wd1'].get_shape().as_list()[0]])
# fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
# fc1 = tf.nn.relu(fc1)
# # Apply Dropout
# fc1 = tf.nn.dropout(fc1, dropout)
#
# return (tf.add(tf.matmul(fc1, weights['out']), biases['out']))# Store layers weight & bias
weights = {
# 5x5 conv, 1 input, 32 outputs
'wc1' : tf.Variable(tf.random_normal([5, 5, 1, 32])),
# 5x5 conv, 32 inputs, 64 outputs
'wc2' : tf.Variable(tf.random_normal([5, 5, 32, 64])),
# 5x5 conv, 64 inputs, 128 outputs
'wc3' : tf.Variable(tf.random_normal([5, 5, 64, 128])),
'wdc1' : tf.Variable(tf.random_normal([2, 2, 64, 128])),
'wdc2' : tf.Variable(tf.random_normal([2, 2, 32, 64])),
'wdc3' : tf.Variable(tf.random_normal([2, 2, 2, 32])),
# fully connected, 200*200*2 = 80000 inputs, 1024 outputs
'wd1': tf.Variable(tf.random_normal([80000, 1024])),
# 1024 inputs, 80000 outputs (pixel-wise prediction)
'out': tf.Variable(tf.random_normal([1024, 80000]))
}
biases = {
'bc1': tf.Variable(tf.random_normal([32])),
'bc2': tf.Variable(tf.random_normal([64])),
'bc3': tf.Variable(tf.random_normal([128])),
'bdc1': tf.Variable(tf.random_normal([64])),
'bdc2': tf.Variable(tf.random_normal([32])),
'bdc3': tf.Variable(tf.random_normal([2])),
'bd1': tf.Variable(tf.random_normal([1024])),
'out': tf.Variable(tf.random_normal([80000]))
}
# Construct model
# with tf.name_scope("net") as scope:
pred = conv_net(x, weights, biases, keep_prob)
pred = tf.reshape(pred, [-1,n_input,n_input,n_classes])
# Define loss and optimizer
with tf.name_scope("loss") as scope:
# cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(pred, y))
temp_pred = tf.reshape(pred, [-1, 2])
temp_y = tf.reshape(y, [-1, 2])
cost = (tf.nn.softmax_cross_entropy_with_logits(temp_pred, temp_y))
with tf.name_scope("opt") as scope:
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
with tf.name_scope("acc") as scope:
correct_pred = tf.equal(0,tf.cast(tf.sub(tf.nn.softmax(temp_pred),y), tf.int32))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.initialize_all_variables()
saver = tf.train.Saver()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
summary = tf.train.SummaryWriter('/tmp/logdir/', sess.graph)
step = 1
from tensorflow.contrib.learn.python.learn.datasets.scroll import scroll_data
data = scroll_data.read_data('/home/kendall/Desktop/')
# Keep training until reach max iterations
while step * batch_size < training_iters:
batch_x, batch_y = data.train.next_batch(batch_size)
# Run optimization op (backprop)
batch_x = batch_x.reshape((batch_size, n_input, n_input))
batch_y = batch_y.reshape((batch_size, n_input, n_input))
batch_y = convert_to_2_channel(batch_y, batch_size) #converts the 200x200 ground truth to a 200x200x2 classification
batch_y = batch_y.reshape(batch_size * n_input * n_input, 2)
sess.run(optimizer, feed_dict={x: batch_x, temp_y: batch_y,
keep_prob: dropout})
#measure prediction
prediction = sess.run(tf.nn.softmax(temp_pred), feed_dict={x: batch_x, keep_prob: dropout})
print prediction
if step % display_step == 0:
# Calculate batch loss and accuracy
save_path = "model.ckpt"
saver.save(sess, save_path)
loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
y: batch_y,
keep_prob: dropout})
print "Accuracy = " + str(acc)
if acc > 0.73:
break
step += 1
print "Optimization Finished!"
#make prediction
im = Image.open('/home/kendall/Desktop/HA900_frames/frame0035.tif')
batch_x = np.array(im)
# pdb.set_trace()
batch_x = batch_x.reshape((1, n_input, n_input))
batch_x = batch_x.astype(float)
pdb.set_trace()
prediction = sess.run(tf.nn.sigmoid(pred), feed_dict={x: batch_x, keep_prob: dropout})
print prediction
arr1 = np.empty((n_input,n_input))
arr2 = np.empty((n_input,n_input))
for i in xrange(n_input):
for j in xrange(n_input):
for k in xrange(2):
if k == 0:
arr1[i][j] = (prediction[0][i][j][k])
else:
arr2[i][j] = (prediction[0][i][j][k])
# prediction = np.asarray(prediction)
# prediction = np.reshape(prediction, (200,200))
# np.savetxt("prediction.csv", prediction, delimiter=",")
np.savetxt("prediction1.csv", arr1, delimiter=",")
np.savetxt("prediction2.csv", arr2, delimiter=",")
# np.savetxt("prediction2.csv", arr2, delimiter=",")
# Calculate accuracy for 256 mnist test images
print "Testing Accuracy:", \
sess.run(accuracy, feed_dict={x: data.test.images[:256],
y: data.test.labels[:256],
keep_prob: 1.})
The concept of deconvolution is to output something of the same size as the input.
At the line:
conv6 = tf.nn.bias_add(conv6, biases['bdc3'])
You have this output of shape [batch_size, 200, 200, 2], so you don't need to add your fully connected layers. Just return conv6 (without the final ReLU).
If you use 2 categories in your prediction and the true labels y, you need to use tf.nn.softmax_cross_entropy_with_logits(), not the sigmoid cross entropy.
Make sure that y always has values like: y[i, j] = [0., 1.] or y[i, j] = [1., 0.]
pred = conv_net(x, weights, biases, keep_prob) # NEW prediction conv6
pred = tf.reshape(pred, [-1, n_classes])
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
And if you want your TensorBoard graph to look nice (or at least readable), make sure to use tf.name_scope()
EDIT:
Your accuracy is also wrong. You measure whether softmax(pred) and y are equal, but softmax(pred) can never be exactly 0. or 1., so you will end up with an accuracy of 0.
Here is what you should do:
with tf.name_scope("acc") as scope:
correct_pred = tf.equal(tf.argmax(temp_pred, 1), tf.argmax(temp_y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
EDIT 2:
The real error was a typo in convert_to_2_channel, in the loop
for j in xrange(3):
It should be 200 instead of 3.
Lesson: when debugging, print everything step by step with very simple examples and you will find that the buggy function returns bad output.
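For instance, a one-off check like the following (a sketch, assuming the convert_to_2_channel defined above) would have exposed the typo immediately, since with xrange(3) everything outside the 3x3 corner is uninitialized np.empty garbage:
import numpy as np

ground_truth = np.ones((1, 200, 200))
out = convert_to_2_channel(ground_truth, 1)
# after the fix, every pixel of an all-ones input must map to [1., 0.]
assert np.array_equal(out[0, 150, 150], np.array([1.0, 0.0]))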
