How do I get the TensorFlow model to persist after loading? - python

Now I want to write two functions:
1. one for loading the model I already trained,
2. one for using that model to classify an image.
Both functions need the same session, so I made the session a parameter in order to pass it to the second function. But I received an error.
Here is my code. The first function loads the model, the second one uses the model to make a prediction, but I have problems initializing the session.
def callmodel():
    with tf.Graph().as_default():
        # saver = tf.train.Saver()
        model_path = 'E:/MyProject/MachineLearning/callTFModel/model/'
        ckpt = tf.train.get_checkpoint_state(model_path)
        sess = tf.Session()
        saver = tf.train.import_meta_graph(ckpt.model_checkpoint_path + '.meta')
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(model_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print("load model successful!")
            return sess
        else:
            print("failed to load model!")

def test_one_image(sess, test_dir):
    global p, logits
    image = Image.open(test_dir)
    image = image.resize([32, 32])
    image_array = np.array(image)
    image = tf.cast(image_array, tf.float32)
    image = tf.reshape(image, [1, 32, 32, 3])  # reshape the image
    p = mmodel(image, 1)
    logits = tf.nn.softmax(p)
    x = tf.placeholder(tf.float32, shape=[32, 32, 3])
    prediction = sess.run(logits, feed_dict={x: image_array})
    max_index = np.argmax(prediction)
    if max_index == 0:
        print('probability of good: %.6f' % prediction[:, 0])
    else:
        print('probability of Lack of glue: %.6f' % prediction[:, 1])

####### test
sess = callmodel()
path = "c:/test/1001.jpg"
test_one_image(sess, path)
It produces this error:
File "E:/MyProject/python/C+pythonModel/test.py", line 175, in <module>
test_one_image(sess,path)
File "E:/MyProject/python/C+pythonModel/test.py", line 164, in test_one_image
prediction = sess.run(logits, feed_dict={x: image_array})
File "D:\study\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 895, in run
run_metadata_ptr)
File "D:\study\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1071, in _run
+ e.args[0])
TypeError: Cannot interpret feed_dict key as Tensor: Tensor Tensor("Placeholder:0", shape=(32, 32, 3), dtype=float32) is not an element of this graph.

The problem is not with using the session as a parameter; it's with how you recover the input and output nodes of your graph. When you write
p = mmodel(image, 1)
logits = tf.nn.softmax(p)
x = tf.placeholder(tf.float32, shape=[32, 32, 3])
you are not recovering the corresponding nodes of the graph held by the session, but creating new ones. You should instead use:
x = sess.graph.get_tensor_by_name("your_x_placeholder_name:0")
logits = sess.graph.get_tensor_by_name("your_logits_tensor_name:0")
and then prediction = sess.run(logits, feed_dict={x: image_array})
Additionally, you probably need to check that you have not mixed up image and image_array: right now you are reshaping image, not the array, which is useless if you feed image_array...
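For illustration, a minimal sketch of that pattern, assuming the input placeholder and output tensor were given the names "x" and "logits" when the model was built and saved (substitute whatever names your training code actually used):

import numpy as np
import tensorflow as tf
from PIL import Image

def callmodel(model_path='E:/MyProject/MachineLearning/callTFModel/model/'):
    sess = tf.Session()
    ckpt = tf.train.get_checkpoint_state(model_path)
    saver = tf.train.import_meta_graph(ckpt.model_checkpoint_path + '.meta')
    # restore() loads the saved variable values; no global_variables_initializer() needed
    saver.restore(sess, ckpt.model_checkpoint_path)
    return sess

def test_one_image(sess, test_dir):
    image_array = np.array(Image.open(test_dir).resize([32, 32]), dtype=np.float32)
    # Recover the nodes that already exist in the restored graph instead of creating new ones.
    x = sess.graph.get_tensor_by_name("x:0")            # hypothetical placeholder name
    logits = sess.graph.get_tensor_by_name("logits:0")  # hypothetical output tensor name
    return sess.run(logits, feed_dict={x: image_array.reshape(1, 32, 32, 3)})

The session returned by callmodel() keeps the restored graph alive, so it can be passed to as many classification calls as you like.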

Related

tf.train.import_meta_graph(): unable to load some variable values

I'm using tensorflow 1.10.0. I've been following the tutorial for saving and loading a simple trained MLP model. Saving works perfectly and creates the following files:
train.ckpt.data-00000-of-00001
train.ckpt.index
train.ckpt.meta
When I try to load the train_opt or accmetric variable using:
import tensorflow as tf

with tf.Session() as sess:
    load_mod = tf.train.import_meta_graph('/home/akshay/train.ckpt.meta')
    load_mod.restore(sess, tf.train.latest_checkpoint('/home/akshay/'))
    print (tf.get_default_graph().get_tensor_by_name('train_opt:0'))
I get the following error:
Traceback (most recent call last):
  File "recover_tftrain.py", line 6, in <module>
    print (tf.get_default_graph().get_tensor_by_name('accmetric:0'))
  File "/home/arpita/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3515, in get_tensor_by_name
    return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
  File "/home/arpita/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3339, in as_graph_element
    return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
  File "/home/arpita/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3381, in _as_graph_element_locked
    "graph." % (repr(name), repr(op_name)))
KeyError: "The name 'accmetric:0' refers to a Tensor which does not exist.
The operation, 'accmetric', does not exist in the graph."
However, the loss variable loads perfectly:
Tensor("loss:0", shape=(), dtype=float32)
Are there only some specific variables that can be loaded? Or is there any issue of scope?
Complete code:
from create_batches import Batch
import extractData
import tensorflow as tf

# prepare input data and output labels for neural network
datafile = '/home/akshay/Desktop/datafile.csv'
labelfile = '/home/akshay/Desktop/labelfile.csv'

num_input = 2000
num_hidden1 = 200
num_hidden2 = 200
num_hidden3 = 200
num_output = 25
batch_size = 200
epochs = 25

batch = Batch(extractData.create_data(datafile), extractData.create_labels(labelfile), batch_size)

# create tensorflow networks
vowel_inp = tf.placeholder(dtype = tf.float32, shape = [None, 40000], name = "text_inp")
label_oup = tf.placeholder(dtype = tf.int32, shape = [None], name = "label_oup")
vowel_flat = tf.contrib.layers.flatten(vowel_inp)

# fully connected layers
hidden_1 = tf.layers.dense(inputs = vowel_flat, units = num_hidden1, name = "hidden1", activation = tf.nn.sigmoid)
hidden_2 = tf.layers.dense(inputs = hidden_1, units = num_hidden2, name = "hidden2", activation = tf.nn.sigmoid)
hidden_3 = tf.layers.dense(inputs = hidden_2, units = num_hidden3, name = "hidden3", activation = tf.nn.sigmoid)
train_oup = tf.layers.dense(inputs = hidden_3, units = num_output, name = "output")

# define a cost function
xentropy = tf.losses.sparse_softmax_cross_entropy(labels = label_oup, logits = train_oup)

# define a loss function
loss = tf.reduce_mean(xentropy, name = "loss")

# define an optimizer
train_opt = tf.train.AdagradOptimizer(learning_rate = 0.001).minimize(loss, name="train_opt")

# define accuracy metric
acc, acc_metric_update = tf.metrics.accuracy(label_oup, tf.argmax(train_oup, 1), name="accmetric")

loss_val, acc_val = 0, 0
sess = tf.Session()
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()

for j in range(epochs):
    batch.reset()
    for i in range(int(2000/batch_size)):
        x, y = batch.getBatch()
        y = y.reshape(batch_size)
        feed_dict = {vowel_inp: x, label_oup: y}
        loss_val, _, acc_val = sess.run([loss, train_opt, acc_metric_update], feed_dict=feed_dict)
    if j%25==0:
        print ('Epoch:', j, 'Accuracy Val:', acc_val)

print ("Final score:", sess.run(acc))

# save the model
print ('Model saved in: ', saver.save(sess, '/home/akshay/train.ckpt'))
sess.close()
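A small diagnostic sketch (using the same checkpoint paths as above) that lists every operation name the restored graph contains; names listed here that produce an output tensor can then be fetched as '<name>:0' with get_tensor_by_name:

import tensorflow as tf

with tf.Session() as sess:
    load_mod = tf.train.import_meta_graph('/home/akshay/train.ckpt.meta')
    load_mod.restore(sess, tf.train.latest_checkpoint('/home/akshay/'))
    # Print every operation name in the restored graph, e.g. 'loss', the layer
    # names, and whatever was created under the 'accmetric' name.
    for op in tf.get_default_graph().get_operations():
        print(op.name)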

Error "no Variable to save" in tensorflow while storing session

I was trying to save the session in a model so that I can use it later on, but I am getting an error every time. My code is like this:
with tf.Session() as sess:
    sess.run(init)
    for j in range(3):
        for i in range(xtest.shape[0]):
            _, indices = sess.run(pred, feed_dict={x_train: xtrain, x_test: xtest[i,:]})
            pred_label = getMajorityPredictions(ytrain, indices)
            actual_val = get_char( int( (ytest[i]).argmax() ) )
            # print("test: ", i, "prediction: ", get_char(pred_label), " actual: ", actual_val)
            # print(pred_label, actual_val, type(pred_label), type(actual_val), sep=" --> ")
            if get_char(pred_label) == actual_val:
                accuracy += 1/len(xtest)
            # print((i / (xtest.shape[0])) * 100)
            # os.system("cls")
    print("accuracy: ", accuracy)

savedPath = saver.save(sess, "/tmp/model.ckpt")
print("Model saved at: ", savedPath)
and the error is like:
Traceback (most recent call last):
File "prac3.py", line 74, in <module>
saver = tf.train.Saver()
File "C:\Python36\lib\site-packages\tensorflow\python\training\saver.py", line 1239, in __init__
self.build()
File "C:\Python36\lib\site-packages\tensorflow\python\training\saver.py", line 1248, in build
self._build(self._filename, build_save=True, build_restore=True)
File "C:\Python36\lib\site-packages\tensorflow\python\training\saver.py", line 1272, in _build
raise ValueError("No variables to save")
ValueError: No variables to save
The code you provided does not give much information about the error. You might need to check your earlier code to see whether you actually have any variables to be saved. You can check tf.global_variables() and see if that list is empty.
In addition, you might want to indent savedPath = saver.save(sess, "/tmp/model.ckpt") so that it runs inside the with tf.Session() as sess: block; the session is closed once execution leaves that block, and you would then face the problem 'Attempting to use a closed session'.
x_train = tf.placeholder(tf.float32, shape=[None, 4096])
y_train = tf.placeholder(tf.float32, shape=[None, 62])
x_test = tf.placeholder(tf.float32, shape=[4096])
y_test = tf.placeholder(tf.float32, shape=[None, 62])

l1_distance = tf.abs(tf.subtract(x_train, x_test))
dis_l1 = tf.reduce_sum(l1_distance, axis=1)
pred = tf.nn.top_k(tf.negative(dis_l1), k=5)

xtrain, ytrain = TRAIN_SIZE(2852)
xtest, ytest = TEST_SIZE(557)
init = tf.global_variables_initializer()
accuracy = 0
saver = tf.train.Saver()

# --------------------- to create model
with tf.Session() as sess:
    sess.run(init)
    for j in range(3):
        for i in range(xtest.shape[0]):
            _, indices = sess.run(pred, feed_dict={x_train: xtrain, x_test: xtest[i,:]})
            pred_label = getMajorityPredictions(ytrain, indices)
            actual_val = get_char( int( (ytest[i]).argmax() ) )
            # print("test: ", i, "prediction: ", get_char(pred_label), " actual: ", actual_val)
            # print(pred_label, actual_val, type(pred_label), type(actual_val), sep=" --> ")
            if get_char(pred_label) == actual_val:
                accuracy += 1/len(xtest)
            # print((i / (xtest.shape[0])) * 100)
            # os.system("cls")
    print("accuracy: ", accuracy)

savedPath = saver.save(sess, "/tmp/model.ckpt")
print("Model saved at: ", savedPath)

With Tensorflow, 2 class classification using Neural Network

I'm trying 2-class classification of images with a neural network using TensorFlow.
I want to extract 1000 pixels randomly.
However, I am stuck on this error:
"logits = inference(images_placeholder, keep_prob)
File "train5.py", line 83, in inference
list = random.sample(x_image(IMAGE_PIXELS),SAMPLE_PIXELS)
TypeError: 'Tensor' object is not callable"
Please tell me what I should do.
I will attach the code below.
import sys
sys.path.append('/usr/local/opt/opencv3/lib/python3.5.4/site-packages')
import cv2
import numpy as np
import tensorflow as tf
import tensorflow.python.platform
import tensorboard as tb
import os
import math
import time
import random

start_time = time.time()

# TensorBoard output information directory
log_dir = '/tmp/data1'   # tensorboard --logdir=/tmp/data1
# delete and recreate the directory
if tf.gfile.Exists(log_dir):
    tf.gfile.DeleteRecursively(log_dir)
tf.gfile.MakeDirs(log_dir)

# Reserve memory
config = tf.ConfigProto(
    gpu_options=tf.GPUOptions(allow_growth=True))
sess = tf.Session(config=config)

NUM_CLASSES = 2
IMAGE_SIZE_x = 1024
IMAGE_SIZE_y = 768
IMAGE_CHANNELS = 1
IMAGE_PIXELS = IMAGE_SIZE_x*IMAGE_SIZE_y*IMAGE_CHANNELS
SAMPLE_PIXELS = 1000

flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('train', 'train.txt', 'File name of train data')
flags.DEFINE_string('test', 'test.txt', 'File name of train data')
flags.DEFINE_string('image_dir', 'trdata', 'Directory of images')
flags.DEFINE_string('train_dir', '/tmp/data', 'Directory to put the training data.')
flags.DEFINE_integer('max_steps', 20000, 'Number of steps to run trainer.')
flags.DEFINE_integer('batch_size', 10, 'Batch size'
                     'Must divide evenly into the dataset sizes.')
flags.DEFINE_float('learning_rate', 1e-5, 'Initial learning rate.')

def inference(images_placeholder, keep_prob):
    """ Function to create the predictive model
    arguments:
        images_placeholder: image placeholder
        keep_prob: dropout rate placeholder
    returns:
        y_out: output of the softmax layer
    """
    # Initialize weights from a normal distribution with stddev 0.1
    def weight_variable(shape):
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)

    # Initialize biases with the constant 0.1
    def bias_variable(shape):
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

    # Reshape input
    x_image = images_placeholder

    # randomly sample pixels
    list = random.sample(x_image(IMAGE_PIXELS), SAMPLE_PIXELS)
    x_list = [samples[i] for i in list]

    # Input
    with tf.name_scope('fc1') as scope:
        W_fc1 = weight_variable([x_list, 10])
        b_fc1 = bias_variable([10])
        h_fc1 = tf.nn.relu(tf.matmul(x_image, W_fc1) + b_fc1)
    # Affine1
    with tf.name_scope('fc2') as scope:
        W_fc2 = weight_variable([10, 10])
        b_fc2 = bias_variable([10])
        h_fc2 = tf.nn.relu(tf.matmul(h_fc1, W_fc2) + b_fc2)
    # Affine2
    with tf.name_scope('fc3') as scope:
        W_fc3 = weight_variable([10, 10])
        b_fc3 = bias_variable([10])
        h_fc3 = tf.nn.relu(tf.matmul(h_fc2, W_fc3) + b_fc3)
    # Affine3
    with tf.name_scope('fc4') as scope:
        W_fc4 = weight_variable([10, 10])
        b_fc4 = bias_variable([10])
        h_fc4 = tf.nn.relu(tf.matmul(h_fc3, W_fc4) + b_fc4)
    # Affine4
    with tf.name_scope('fc5') as scope:
        W_fc5 = weight_variable([10, 2])
        b_fc5 = bias_variable([2])
    # softmax regression
    with tf.name_scope('softmax') as scope:
        y_out = tf.nn.softmax(tf.matmul(h_fc4, W_fc5) + b_fc5)
    # return
    return y_out

def loss(logits, labels):
    """ loss function
    arguments:
        logits: logit tensor, float - [batch_size, NUM_CLASSES]
        labels: label tensor, int32 - [batch_size, NUM_CLASSES]
    returns:
        cross_entropy: tensor, float
    """
    # cross entropy
    cross_entropy = -tf.reduce_sum(labels*tf.log(tf.clip_by_value(logits, 1e-10, 1.0)))
    # TensorBoard
    tf.summary.scalar("cross_entropy", cross_entropy)
    return cross_entropy

def training(loss, learning_rate):
    # Adam
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    return train_step

def accuracy(logits, labels):
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    tf.summary.scalar("accuracy", accuracy)
    return accuracy

if __name__ == '__main__':
    f = open(FLAGS.train, 'r')
    # array data
    train_image = []
    train_label = []
    for line in f:
        # Split on spaces and remove newlines
        line = line.rstrip()
        l = line.split()
        # Load data and resize
        img = cv2.imread(FLAGS.image_dir + '/' + l[0])
        img = cv2.resize(img, (IMAGE_SIZE_x, IMAGE_SIZE_y))
        # convert to grayscale
        img_gry = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # flatten to one row of floats in [0, 1]
        train_image.append(img_gry.flatten().astype(np.float32)/255.0)
        # Prepare labels with the 1-of-k method
        tmp = np.zeros(NUM_CLASSES)
        tmp[int(l[1])] = 1
        train_label.append(tmp)
    # convert to numpy
    train_image = np.asarray(train_image)
    train_label = np.asarray(train_label)
    f.close()

    f = open(FLAGS.test, 'r')
    test_image = []
    test_label = []
    for line in f:
        line = line.rstrip()
        l = line.split()
        img = cv2.imread(FLAGS.image_dir + '/' + l[0])
        img = cv2.resize(img, (IMAGE_SIZE_x, IMAGE_SIZE_y))
        # convert to grayscale
        img_gry = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # flatten to one row of floats in [0, 1]
        test_image.append(img_gry.flatten().astype(np.float32)/255.0)
        tmp = np.zeros(NUM_CLASSES)
        tmp[int(l[1])] = 1
        test_label.append(tmp)
    test_image = np.asarray(test_image)
    test_label = np.asarray(test_label)
    f.close()

    with tf.Graph().as_default():
        # Placeholder for the images
        images_placeholder = tf.placeholder("float", shape=(None, IMAGE_PIXELS))
        # Placeholder for the labels
        labels_placeholder = tf.placeholder("float", shape=(None, NUM_CLASSES))
        # Placeholder for the dropout rate
        keep_prob = tf.placeholder("float")

        # Call inference() to build the model
        logits = inference(images_placeholder, keep_prob)
        # Call loss() to compute the loss
        loss_value = loss(logits, labels_placeholder)
        # Call training() to train
        train_op = training(loss_value, FLAGS.learning_rate)
        # calculate accuracy
        acc = accuracy(logits, labels_placeholder)
        # saver
        saver = tf.train.Saver()
        # Make the session
        sess = tf.Session()
        # Initialize variables
        sess.run(tf.global_variables_initializer())
        # TensorBoard
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

        # Start training
        for step in range(FLAGS.max_steps):
            for i in range(int(len(train_image)/FLAGS.batch_size)):
                batch = FLAGS.batch_size*i
                sess.run(train_op, feed_dict={
                    images_placeholder: train_image[batch:batch+FLAGS.batch_size],
                    labels_placeholder: train_label[batch:batch+FLAGS.batch_size],
                    keep_prob: 0.5})
            # Accuracy calculation at every step
            train_accuracy = sess.run(acc, feed_dict={
                images_placeholder: train_image,
                labels_placeholder: train_label,
                keep_prob: 1.0})
            print("step %d, training accuracy %g" % (step, train_accuracy))
            # Values to be displayed in TensorBoard, added every step
            summary_str = sess.run(summary_op, feed_dict={
                images_placeholder: train_image,
                labels_placeholder: train_label,
                keep_prob: 1.0})
            summary_writer.add_summary(summary_str, step)

        # Display accuracy on the test data after training
        print(" test accuracy %g" % sess.run(acc, feed_dict={
            images_placeholder: test_image,
            labels_placeholder: test_label,
            keep_prob: 1.0}))

        duration = time.time() - start_time
        print('%.3f sec' % duration)

        # Save the model
        save_path = saver.save(sess, os.getcwd() + "\\model.ckpt")
The error comes from these lines:
images_placeholder = tf.placeholder("float", shape=(None, IMAGE_PIXELS))
...
x_image = images_placeholder
list = random.sample(x_image(IMAGE_PIXELS),SAMPLE_PIXELS)
x_image, just like images_placeholder, is a tensor node, so x_image(...) doesn't make sense and naturally leads to "TypeError: 'Tensor' object is not callable".
I assume you're trying to sample SAMPLE_PIXELS from each image in a batch. Note that random.sample won't work here, because x_image is a symbolic tensor whose value is only known when the session runs. You have to use tf.boolean_mask with a random mask in order to select random pixels from the image, as sketched below.
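A minimal sketch of that approach, assuming the random pixel subset is chosen once with NumPy before the graph is built (tf.boolean_mask with an axis argument needs a reasonably recent TF 1.x release; tf.gather(images, indices, axis=1) is an equivalent alternative):

import numpy as np
import tensorflow as tf

IMAGE_PIXELS = 1024 * 768 * 1
SAMPLE_PIXELS = 1000

# Pick SAMPLE_PIXELS random pixel positions once; the same positions are then
# used for every image in every batch, so the network always sees the same features.
mask = np.zeros(IMAGE_PIXELS, dtype=bool)
mask[np.random.choice(IMAGE_PIXELS, SAMPLE_PIXELS, replace=False)] = True

images_placeholder = tf.placeholder(tf.float32, shape=(None, IMAGE_PIXELS))
# Keep only the masked columns of each row: result shape is [batch, SAMPLE_PIXELS].
x_sampled = tf.boolean_mask(images_placeholder, mask, axis=1)

The first fully connected layer would then take SAMPLE_PIXELS inputs, i.e. weight_variable([SAMPLE_PIXELS, 10]), instead of the x_list expression in the question.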

add new output for pre-trained model

I am confused about adding a new class to a pre-trained model. What I have done so far is restore the pre-trained checkpoint, create a matrix of size m * (C+1) and a vector of length C+1, then initialize the first C rows/elements of these from the existing weights, and freeze the previous layers by training just the FC layer in Optimizer.minimize(). However, when I run the code, I get this error:
Traceback (most recent call last):
File "/home/tensorflow/tensorflow/models/image/mnist/new_dataset/Nets.py", line 482, in <module>
new_op_w = optimizer_new.minimize(loss, var_list = resize_var_w)
File "/home/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/training/optimizer.py", line 279, in minimize
grad_loss=grad_loss)
File "/home/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/training/optimizer.py", line 337, in compute_gradients
processors = [_get_processor(v) for v in var_list]
File "/home/tensorflow/local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 502, in __iter__
raise TypeError("'Tensor' object is not iterable.")
TypeError: 'Tensor' object is not iterable.
and this is the code:
with tf.Session(graph=graph) as sess:
    if os.path.isfile(ckpt):
        saver.restore(sess, 'path_to_checkpoint.ckpt')
    w_b_new = {
        'weight_4': tf.Variable(tf.random_normal([num_hidden, 1], stddev=0.1), name = 'weight_4'),
        'bias_4' : tf.Variable(tf.constant(1.0, shape=[1]), name = 'bias_4'),}
    change_1 = tf.unstack(w_b_not['weight_4'])
    change_2 = tf.unstack(w_b_not['bias_4'])
    change_3 = tf.unstack(w_b_new['weight_4'])
    change_4 = tf.unstack(w_b_new['bias_4'])
    changestep1 = []
    for i in range(len(change_1)):
        changestep1.append(tf.unstack(change_1[i]))
    changestep3 = []
    for i in range(len(change_3)):
        changestep3.append(tf.unstack(change_3[i]))
        for j in range(len(changestep3[i])):
            changestep1[i].append(changestep3[i][j])
        changestep1[i] = tf.stack(changestep1[i])
    final1 = tf.stack(changestep1)
    resize_var_w = tf.assign(w_b_not['weight_4'], final1, validate_shape=False)
    final2 = tf.concat([w_b_not['bias_4'], w_b_new['bias_4']], axis=0)
    resize_var = tf.assign(w_b_not['bias_4'], final2, validate_shape=False)
    optimizer_new = tf.train.GradientDescentOptimizer(0.01)
    new_op_w = optimizer_new.minimize(loss, var_list = resize_var_w)
    new_op_b = optimizer_new.minimize(loss, var_list = resize_var)
    for step in range(num_steps, num_steps + num_train_steps):
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, keep_prob: 0.5}
        _, _, l, predictions = sess.run([new_op_w, new_op_b, loss, train_prediction], feed_dict=feed_dict)
        if (step % 50 == 0):
            print('%d\t%f\t%.1f%%\t%.1f%%' % (step, l, accuracy(predictions, batch_labels), accuracy(valid_prediction.eval(), valid_labels)))
    print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
    save_path_w_b = saver.save(sess, "path_checkpoint.ckpt")
    print("Model saved in file: %s" % save_path_w_b)
According to the TensorFlow docs for GradientDescentOptimizer's minimize method, var_list must be a list of Variable objects. In your code, resize_var_w is a single tensor.
EDIT
To be more specific:
If you give the optimizer a var_list, as the name implies, this must be a list of variables. During backprop the optimizer loops over var_list and only updates the variables in that list, as opposed to all trainable variables in the graph. A single variable is not iterable.
If you only want to update a single tensor, you can simply try:
resize_var_w = [tf.assign(w_b_not['weight_4'], final1, validate_shape=False)]
I did not test it, but it should work.
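For reference, a minimal sketch of what minimize() expects (the names below are illustrative): var_list holds tf.Variable objects, not the tensors returned by tf.assign.

import tensorflow as tf

w = tf.Variable(tf.random_normal([128, 3]), name='weight_4')   # illustrative variables
b = tf.Variable(tf.zeros([3]), name='bias_4')
x = tf.placeholder(tf.float32, [None, 128])
labels = tf.placeholder(tf.int32, [None])

logits = tf.matmul(x, w) + b
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))

optimizer = tf.train.GradientDescentOptimizer(0.01)
# var_list is a plain Python list of tf.Variable objects; only these get updated.
train_op = optimizer.minimize(loss, var_list=[w, b])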

Tensorflow and cifar 10, testing single images

I was trying to predict the class of single images with the CIFAR-10 model from TensorFlow.
I found this code, but it fails with this error:
Assign requires shapes of both tensors to match. lhs shape= [18,384] rhs shape= [2304,384]
I understand this is because of the batch size, which is only 1. (With expand_dims I create a fake batch.)
But I don't know how to fix this.
I searched everywhere but found no solution.
Thanks in advance!
from PIL import Image
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10

width = 24
height = 24
categories = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]

filename = "path/to/jpg"  # absolute path to input image
im = Image.open(filename)
im.save(filename, format='JPEG', subsampling=0, quality=100)

input_img = tf.image.decode_jpeg(tf.read_file(filename), channels=3)
tf_cast = tf.cast(input_img, tf.float32)
float_image = tf.image.resize_image_with_crop_or_pad(tf_cast, height, width)
images = tf.expand_dims(float_image, 0)

logits = cifar10.inference(images)
_, top_k_pred = tf.nn.top_k(logits, k=5)

init_op = tf.initialize_all_variables()
with tf.Session() as sess:
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state('/tmp/cifar10_train')
    if ckpt and ckpt.model_checkpoint_path:
        print("ckpt.model_checkpoint_path ", ckpt.model_checkpoint_path)
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        print('No checkpoint file found')
        exit(0)
    sess.run(init_op)
    _, top_indices = sess.run([_, top_k_pred])
    for key, value in enumerate(top_indices[0]):
        print(categories[value] + ", " + str(_[0][key]))
EDIT
I tried to use a placeholder with None as the first dimension of the shape, but I got this error:
Shape of a new variable (local3/weights) must be fully defined, but instead was (?, 384).
Now I'm really lost.
Here is the new code:
from PIL import Image
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10
import itertools

width = 24
height = 24
categories = ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]

filename = "toto.jpg"  # absolute path to input image
im = Image.open(filename)
im.save(filename, format='JPEG', subsampling=0, quality=100)

x = tf.placeholder(tf.float32, [None, 24, 24, 3])

init_op = tf.initialize_all_variables()
with tf.Session() as sess:
    # Restore variables from training checkpoint.
    input_img = tf.image.decode_jpeg(tf.read_file(filename), channels=3)
    tf_cast = tf.cast(input_img, tf.float32)
    float_image = tf.image.resize_image_with_crop_or_pad(tf_cast, height, width)
    images = tf.expand_dims(float_image, 0)
    i = images.eval()
    print(i)
    sess.run(init_op, feed_dict={x: i})
    logits = cifar10.inference(x)
    _, top_k_pred = tf.nn.top_k(logits, k=5)
    variable_averages = tf.train.ExponentialMovingAverage(
        cifar10.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    ckpt = tf.train.get_checkpoint_state('/tmp/cifar10_train')
    if ckpt and ckpt.model_checkpoint_path:
        print("ckpt.model_checkpoint_path ", ckpt.model_checkpoint_path)
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        print('No checkpoint file found')
        exit(0)
    _, top_indices = sess.run([_, top_k_pred])
    for key, value in enumerate(top_indices[0]):
        print(categories[value] + ", " + str(_[0][key]))
I think this is because variables created with tf.Variable or tf.get_variable must have a fully defined shape. You can check your code and give a fully defined shape.
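To illustrate what "fully defined" means here (a sketch only; it addresses the "(?, 384)" error, not necessarily the earlier shape mismatch against the checkpoint): give the placeholder an explicit batch size instead of None, so the shapes of the variables created inside cifar10.inference can be computed when the graph is built.

import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10

# Batch dimension fixed to 1 instead of None, so local3/weights gets a concrete shape.
x = tf.placeholder(tf.float32, shape=[1, 24, 24, 3])
logits = cifar10.inference(x)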
