How to run asynchronous predictions with the TensorFlow Estimator API?

I am using the tf.estimator API to predict punctuation. I trained it with pre-processed data using TFRecords and tf.train.shuffle_batch. Now I want to make predictions. I can do this fine by feeding static NumPy data into a tf.constant and returning that from the input_fn.
However, I am working with sequence data, so I need to feed one example at a time, and each input depends on the previous output. I also want to be able to process input arriving through HTTP requests.
Every time estimator.predict is called, it reloads the checkpoint and recreates the entire graph. This is slow and expensive, so I need a way to feed data into the input_fn dynamically.
My current attempt is roughly this:
feature_input = tf.placeholder(tf.int32, shape=[1, MAX_SUBSEQUENCE_LEN])
q = tf.FIFOQueue(1, tf.int32, shapes=[[1, MAX_SUBSEQUENCE_LEN]])
enqueue_op = q.enqueue(feature_input)
def input_fn():
    return q.dequeue()
estimator = tf.estimator.Estimator(model_fn, model_dir=model_file)
predictor = estimator.predict(input_fn=input_fn)
sess = tf.Session()
output = None
while True:
    x = get_numpy_data(output)
    if x is None:
        break
    sess.run(enqueue_op, {feature_input: x})
    output = predictor.next()
    save_to_file(output)
sess.close()
However, I am getting the following error:
ValueError: Input graph and Layer graph are not the same: Tensor("EmbedSequence/embedding_lookup:0", shape=(1, 200, 128), dtype=float32) is not from the passed-in graph.
How can I asynchronously plug data into my existing graph through an input_fn to get predictions one at a time?

It turns out the main problem is that all tensors need to be created inside the input_fn or they don't get added to the same graph. I needed to run an enqueue operation but it was impossible to access anything returned from the input function.
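To make the constraint concrete, here is a minimal sketch (using only names from the question) of an input_fn that does keep everything in one graph; note that the enqueue_op it creates is unreachable from the outside, which is exactly why the subclass below exposes the queue through a closure instead:

def input_fn():
    # Created inside input_fn, these ops join the graph that
    # Estimator.predict() builds internally...
    feature_input = tf.placeholder(tf.int32, shape=[1, MAX_SUBSEQUENCE_LEN])
    q = tf.FIFOQueue(1, tf.int32, shapes=[[1, MAX_SUBSEQUENCE_LEN]])
    enqueue_op = q.enqueue(feature_input)  # ...but nothing outside can run this op
    return q.dequeue()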
I ended up inheriting the Estimator class and creating a custom predict function which allows me to dynamically add data to the prediction queue and return the results:
# async_estimator.py
import six
import tensorflow as tf
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.estimator import _check_hooks_type
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.training import saver
from tensorflow.python.training import training


class AsyncEstimator(Estimator):

    def async_predictor(self,
                        dtype,
                        shape=None,
                        predict_keys=None,
                        hooks=None,
                        checkpoint_path=None):
        """Returns a tuple of functions: the first runs predictions on the model, the second cleans up.

        Args:
          dtype: the dtype of the input
          shape: the shape of the input placeholder (optional)
          predict_keys: list of `str`, name of the keys to predict. It is used if
            the `EstimatorSpec.predictions` is a `dict`. If `predict_keys` is used
            then the rest of the predictions will be filtered from the dictionary.
            If `None`, returns all.
          hooks: List of `SessionRunHook` subclass instances. Used for callbacks
            inside the prediction call.
          checkpoint_path: Path of a specific checkpoint to predict. If `None`,
            the latest checkpoint in `model_dir` is used.

        Returns:
          (predict, finish): tuple of functions
            predict: runs a single prediction and returns the results
              Args:
                x: NumPy array of input
              Returns:
                Evaluated value of the prediction
            finish: closes the session, allowing the program to exit

        Raises:
          ValueError: Could not find a trained model in model_dir.
          ValueError: if the batch length of predictions is not the same.
          ValueError: If there is a conflict between `predict_keys` and
            `predictions`. For example if `predict_keys` is not `None` but
            `EstimatorSpec.predictions` is not a `dict`.
        """
        hooks = _check_hooks_type(hooks)
        # Check that the model has been trained.
        if not checkpoint_path:
            checkpoint_path = saver.latest_checkpoint(self._model_dir)
        if not checkpoint_path:
            raise ValueError('Could not find trained model in model_dir: {}.'.format(
                self._model_dir))

        with ops.Graph().as_default() as g:
            random_seed.set_random_seed(self._config.tf_random_seed)
            training.create_global_step(g)
            # The placeholder, queue and model are all created inside the same
            # graph, which the plain input_fn approach could not guarantee.
            input_placeholder = tf.placeholder(dtype=dtype, shape=shape)
            queue = tf.FIFOQueue(1, dtype, shapes=shape)
            enqueue_op = queue.enqueue(input_placeholder)
            features = queue.dequeue()
            estimator_spec = self._call_model_fn(features, None,
                                                 model_fn_lib.ModeKeys.PREDICT)
            predictions = self._extract_keys(estimator_spec.predictions, predict_keys)
            mon_sess = training.MonitoredSession(
                session_creator=training.ChiefSessionCreator(
                    checkpoint_filename_with_path=checkpoint_path,
                    scaffold=estimator_spec.scaffold,
                    config=self._session_config),
                hooks=hooks)

            def predict(x):
                if mon_sess.should_stop():
                    raise StopIteration
                mon_sess.run(enqueue_op, {input_placeholder: x})
                preds_evaluated = mon_sess.run(predictions)
                if not isinstance(predictions, dict):
                    return preds_evaluated
                else:
                    preds = []
                    for i in range(self._extract_batch_length(preds_evaluated)):
                        preds.append({
                            key: value[i]
                            for key, value in six.iteritems(preds_evaluated)
                        })
                    return preds

            def finish():
                mon_sess.close()

            return predict, finish
And here is the rough code to use it:
import tensorflow as tf
from async_estimator import AsyncEstimator


def doPrediction(model_fn, model_dir, max_seq_length):
    estimator = AsyncEstimator(model_fn, model_dir=model_dir)
    predict, finish = estimator.async_predictor(dtype=tf.int32, shape=(1, max_seq_length))
    output = None
    while True:
        # my input is dependent on the previous output
        x = get_numpy_data(output)
        if x is None:
            break
        output = predict(x)
        save_to_disk(output)
    finish()
Note: this is a simple solution that works for my needs; it may need to be modified for other cases. It works on TensorFlow 1.2.1.
Hopefully TF will officially adopt something like this to make serving dynamic predictions with Estimator easier.

Related

How can I add sample weights to my Keras generator when I don't have a "y" output?

I'm trying to train a pre-trained model in Python 3.8 with Keras 2.3.1 and Tensorflow 2.2.3. Since my dataset is very large, I have to use a data generator. I want to assign sample weights to each sample, to make certain samples more important than others. I've already defined the weights that I want to assign to each sample, but I'm looking for a way to implement them in my training. Here's my custom data generator with the code that I've tried so far:
def __iter__(self):
    batch_token_ids, batch_segment_ids = [], []
    sample_weights = np.ones(batch_size)
    for file in dataset:
        for sample in file:
            # Read the input, output, and sample weight from the dataset.
            inputs, outputs, weight = get_data(sample)
            # Encode the input and output using a tokenizer.
            token_ids, segment_ids = tokenizer.encode(inputs, outputs, maxlen=maxlen)
            batch_token_ids.append(token_ids)
            batch_segment_ids.append(segment_ids)
            # Add the weight to a np array (sample_weights)
            sample_weights[len(batch_token_ids) - 1] = weight
            # Yield after getting batch_size samples.
            if len(batch_token_ids) == self.batch_size:
                """
                The input format that my model supports is the token and segment ids in x,
                with None as the value for y. Changing it raises an error.
                """
                yield [batch_token_ids, batch_segment_ids], None, sample_weights
                batch_token_ids, batch_segment_ids = [], []
                sample_weights = np.ones(batch_size)
According to the Keras documentation, I'm supposed to provide the sample weights as the third value that the generator returns. However, I noticed that nothing changed in the training after implementing this. After some debugging into what happens to the sample weights after yielding, I noticed that the weights never get used, because of this code in training.py of the Keras engine (line 655):
if y is not None:
    # Long code to process the inputs and sample weights.
    # Since I don't have a y input, this doesn't get run.
else:
    y = []
    sample_weights = []
Is there a way to implement sample weights for my code without changing the input format? Also, if I'm using a custom loss function, would I need to change that as well for the sample weights to take effect?

Fitting an RNN estimator in TensorFlow

I'm trying to train a TF estimator using the RNNEstimator() class, but I'm having trouble with defining the estimator. My goal is the following:
Create a tf.data.Dataset.
Feed it into the RNN estimator.
The first part seems to be working correctly. I define the parsing and input functions:
def _parse_func(record):
    # takes a tf record as input and returns the following tensors
    # numeric_tensor.shape = (5, 170) and y.shape = ()
    return {'numerical': numeric_tensor}, y


def input_fn(filenames=['data.tfrecord']):
    # Returns the parsed tf record, i.e. the tf.data.Dataset
    dataset = tf.data.TFRecordDataset(filenames=filenames)
    dataset = dataset.map(map_func=_parse_func)
    dataset = dataset.repeat()
    dataset = dataset.batch(batch_size=BATCH_SIZE)
    return dataset
Now let's move on to the meaty part.
Estimators take care of creating the session and graph. So I simply create the estimator in the following format:
# create the column
column = tf.contrib.feature_column.sequence_numeric_column('numerical')

# create the estimator
estimator = RNNEstimator(
    head=tf.contrib.estimator.regression_head(),
    sequence_feature_columns=[column],
    num_units=[32, 16],
    cell_type='lstm')

# train the estimator
estimator.train(input_fn=input_fn, steps=100)
However, this doesn't work. It gives me a variety of errors! In particular, at the moment I get:
TypeError: Input must be a SparseTensor.
Additionally, I seem to be unable to change the loss to log-loss. I tried setting it by passing it to the head parameter using:
head = tf.contrib.estimator.regression_head(loss_fn=tf.losses.log_loss)

Tensorflow feed multiple parameters through input_fn pipeline

I am writing a high-level TensorFlow application in exactly the same way this MNIST estimator is built, except that I am building a simple RNN that predicts sequences. I am new to TensorFlow, so I am trying to get my head around an issue that might actually be simple for people who have worked with the TensorFlow high-level API before.
Here is a snippet of my code to give an idea:
def main(argv=None):
    """Run the training experiment."""
    ....
    # Setup the Estimator
    model_estimator = build_estimator(config, params)
    # Setup and start training and validation
    train_spec = tf.estimator.TrainSpec(
        input_fn=lambda: get_train_inputs(128),
        max_steps=2000)
    ...
    tf.estimator.train_and_evaluate(model_estimator, train_spec, eval_spec)


def build_estimator(config, params):
    return tf.estimator.Estimator(
        model_fn=model_fn,
        config=config,
        params=params,
    )


def model_fn(features, mode, params):
    # Input data
    _inputs = tf.placeholder(tf.int32, shape=[batch_size, times_steps])
    _labels = tf.placeholder(tf.float32, shape=[batch_size, num_classes])
    # Sequence lengths for dynamic allocation
    _seqlens = tf.placeholder(tf.int32, shape=[batch_size])
    ...
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops
    )
Here is my input pipeline function:
# Get train inputs function
def get_train_inputs(batch_size):
    def train_inputs(batch_size):
        # Build dataset iterator
        x_batch, y_batch, seqlen_batch = sequence_generator.get_sentence_batch(
            batch_size, sequence_generator.train_x, sequence_generator.train_y,
            sequence_generator.train_seqlens)
        features = {'_inputs': x_batch, '_labels': y_batch, '_seqlens': seqlen_batch}
        return features
    return train_inputs(batch_size)
Due to the size of my code, I have only pasted relevant pieces of code here.
The problem here is that during:
train_spec = tf.estimator.TrainSpec(
    input_fn=lambda: get_train_inputs(128),
    max_steps=2000)
get_train_inputs(128) feeds the features dictionary into the _inputs placeholder of the model_fn, so _labels and _seqlens remain blank and throw an error during execution that no values were specified for these placeholders. The model_fn only accepts two data parameters: features and labels. How do I feed all three parameters _inputs, _labels and _seqlens into the model?
Any suggestions will be highly appreciated.
NOTE: The reason for inputting a third parameter, _seqlens, is that I am using tf.nn.dynamic_rnn in my model_fn, which requires sequence lengths, whereas the labels are used in tf.nn.softmax_cross_entropy_with_logits in my softmax function.
You're not supposed to use placeholders at all with tf.Estimator. You should look into the tf.data API (see here). Your input function should return the get_next op of a one-shot iterator. Apologies if you are already doing this, but it is not clear from your code what exactly your input function is returning.
Assuming you set this up to return a dict as in your example, you will then be able to simply use _inputs = features["_inputs"] etc. in your model function.
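As a rough sketch of that suggestion, using the feature names from the question (the generator wrapper is a hypothetical helper, and the dtypes are assumptions based on the placeholders above):

import tensorflow as tf

def input_fn(batch_size):
    # Hypothetical wrapper around the existing batch generator.
    def gen():
        while True:
            x, y, seqlens = sequence_generator.get_sentence_batch(
                batch_size, sequence_generator.train_x,
                sequence_generator.train_y, sequence_generator.train_seqlens)
            yield {'_inputs': x, '_labels': y, '_seqlens': seqlens}

    dataset = tf.data.Dataset.from_generator(
        gen, output_types={'_inputs': tf.int32, '_labels': tf.float32,
                           '_seqlens': tf.int32})
    # Return the get_next op of a one-shot iterator, as suggested above.
    return dataset.make_one_shot_iterator().get_next()

def model_fn(features, labels, mode, params):
    # No placeholders: unpack the tensors produced by the input_fn.
    _inputs = features['_inputs']
    _labels = features['_labels']
    _seqlens = features['_seqlens']
    # ...build the rest of the model, e.g.
    # tf.nn.dynamic_rnn(cell, _inputs, sequence_length=_seqlens)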
In addition to @xdurch0's answer, use FeatureColumns:
tf.feature_column describes the features of the dataset that are passed as inputs into the Estimator's model_fn for training and evaluation.
Within the model_fn, use the method tf.feature_column.input_layer() to return a dense Tensor as an input layer based on the specified FeatureColumns.
You can see examples of working with FeatureColumns here.
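A minimal sketch of that pattern, assuming the '_inputs' feature from the question holds fixed-length numeric sequences (TIME_STEPS is a hypothetical constant for illustration):

import tensorflow as tf

TIME_STEPS = 10  # hypothetical sequence length, for illustration only

# Describe the '_inputs' feature from the question as a FeatureColumn.
inputs_column = tf.feature_column.numeric_column('_inputs', shape=[TIME_STEPS])

def model_fn(features, labels, mode, params):
    # input_layer converts the features dict into a dense input Tensor
    # according to the declared FeatureColumns.
    net = tf.feature_column.input_layer(features, [inputs_column])
    # ...build the rest of the model on top of `net`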

TF: how to create a dataset from user input data

I've recently started to play with TensorFlow and, more specifically, with the new Dataset API.
I've successfully used a dataset to feed training data to my simple model by plugging the dataset's iterators into the nodes of my graph representing the input and label. Something like:
input = input_dataset.make_one_shot_iterator().get_next()
label = label_dataset.make_one_shot_iterator().get_next()
Now I'm wondering what to do when I have to do inference on a user input, that is, the user gives me one single input value and I have to make my prediction. If I had a placeholder I would just put the user input in a feed_dict, but with the dataset api I have very little idea how to do something similar. Shall I have a separate graph only for inference in which my input variable is a placeholder?
I've already tried to make a feedable iterator as described here, but that only works with a placeholder for strings, while my inputs are int32.
Thanks for any advice.
For that specific purpose, TensorFlow provides the tf.placeholder_with_default API:
# Create a Dataset
dataset = tf.data.Dataset.zip((input_dataset, label_dataset)).batch(32).repeat(...)

# Create an Iterator
input, label = dataset.make_one_shot_iterator().get_next()

# Create placeholders that default to the iterator output
x = tf.placeholder_with_default(input, shape=[...], name='input')
y = tf.placeholder_with_default(label, shape=[...], name='label')

def nn_model(features, labels):
    logits = ...
    loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)
    return optimizer, loss

# Create the model
train_op, loss_op = nn_model(x, y)

# Training: nothing is fed, so x and y fall back to the dataset iterator
sess.run(train_op)

# Inference: feeding x (and y) overrides the iterator output
sess.run(logits, feed_dict={x: ..., y: ...})

Tensorflow: restoring a graph and model then running evaluation on a single image

I think it would be immensely helpful to the Tensorflow community if there was a well-documented solution to the crucial task of testing a single new image against the model created by the convnet in the CIFAR-10 tutorial.
I may be wrong, but this critical step that makes the trained model usable in practice seems to be lacking. There is a "missing link" in that tutorial: a script that would directly load a single image (as an array or binary), compare it against the trained model, and return a classification.
Prior answers give partial solutions that explain the overall approach, but I have not been able to implement any of them successfully. Other bits and pieces can be found here and there, but unfortunately they haven't added up to a working solution. Kindly consider the research I've done before tagging this as a duplicate or already answered.
Tensorflow: how to save/restore a model?
Restoring TensorFlow model
Unable to restore models in tensorflow v0.8
https://gist.github.com/nikitakit/6ef3b72be67b86cb7868
The most popular answer is the first, in which @RyanSepassi and @YaroslavBulatov describe the problem and an approach: one needs to "manually construct a graph with identical node names, and use Saver to load the weights into it". Although both answers are helpful, it is not apparent how one would go about plugging this into the CIFAR-10 project.
A fully functional solution would be highly desirable so we could port it to other single image classification problems. There are several questions on SO in this regard that ask for this, but still no full answer (for example Load checkpoint and evaluate single image with tensorflow DNN).
I hope we can converge on a working script that everyone could use.
The below script is not yet functional, and I'd be happy to hear from you on how this can be improved to provide a solution for single-image classification using the CIFAR-10 TF tutorial trained model.
Assume all variables, file names etc. are untouched from the original tutorial.
New file: cifar10_eval_single.py
import cv2
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('eval_dir', './input/eval',
                           """Directory where to write event logs.""")
tf.app.flags.DEFINE_string('checkpoint_dir', './input/train',
                           """Directory where to read model checkpoints.""")


def get_single_img():
    file_path = './input/data/single/test_image.tif'
    pixels = cv2.imread(file_path, 0)
    return pixels


def eval_single_img():
    # below code adapted from @RyanSepassi, however not functional
    # among other errors, saver throws an error that there are no
    # variables to save
    with tf.Graph().as_default():
        # Get image.
        image = get_single_img()

        # Build a Graph.
        # TODO

        # Create dummy variables.
        x = tf.placeholder(tf.float32)
        w = tf.Variable(tf.zeros([1, 1], dtype=tf.float32))
        b = tf.Variable(tf.ones([1, 1], dtype=tf.float32))
        y_hat = tf.add(b, tf.matmul(x, w))

        saver = tf.train.Saver()

        with tf.Session() as sess:
            sess.run(tf.initialize_all_variables())
            ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)

            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Checkpoint found')
            else:
                print('No checkpoint found')

            # Run the model to get predictions
            predictions = sess.run(y_hat, feed_dict={x: image})
            print(predictions)


def main(argv=None):
    if tf.gfile.Exists(FLAGS.eval_dir):
        tf.gfile.DeleteRecursively(FLAGS.eval_dir)
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    eval_single_img()


if __name__ == '__main__':
    tf.app.run()
There are two methods to feed a single new image to the cifar10 model. The first method is a cleaner approach but requires modification of the main file, and hence will require retraining. The second method is applicable when a user does not want to modify the model files and instead wants to use the existing checkpoint/meta-graph files.
The code for the first approach is as follows:
import tensorflow as tf
import numpy as np
import cv2

sess = tf.Session('', tf.Graph())
with sess.graph.as_default():
    # Read the meta graph and checkpoint to restore the tf session.
    saver = tf.train.import_meta_graph("/tmp/cifar10_train/model.ckpt-200.meta")
    saver.restore(sess, "/tmp/cifar10_train/model.ckpt-200")

    # Read a single image from a file.
    img = cv2.imread('tmp.png')
    img = np.expand_dims(img, axis=0)

    # Start the queue runners. If they are not started the program will hang;
    # see e.g. https://www.tensorflow.org/programmers_guide/reading_data
    coord = tf.train.Coordinator()
    threads = []
    for qr in sess.graph.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
        threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                         start=True))

    # In the graph created above, feed the "is_training" and "imgs" placeholders.
    # Feeding them will disconnect the path from the queue runners to the graph
    # and enable a path from the placeholders instead. The "imgs" placeholder
    # will be fed with the image that was read above.
    logits = sess.run('softmax_linear/softmax_linear:0',
                      feed_dict={'is_training:0': False, 'imgs:0': img})

    # Print classification results.
    print(logits)
The script requires that a user creates two placeholders and a conditional execution statement for it to work.
The placeholders and conditional execution statement are added in cifar10_train.py as shown below:
def train():
    """Train CIFAR-10 for a number of steps."""
    with tf.Graph().as_default():
        global_step = tf.contrib.framework.get_or_create_global_step()
        with tf.device('/cpu:0'):
            images, labels = cifar10.distorted_inputs()
        is_training = tf.placeholder(dtype=bool, shape=(), name='is_training')
        imgs = tf.placeholder(tf.float32, (1, 32, 32, 3), name='imgs')
        images = tf.cond(is_training, lambda: images, lambda: imgs)
        logits = cifar10.inference(images)
The inputs in the cifar10 model are connected to a queue runner object, which is a multistage queue that can prefetch data from files in parallel. See a nice animation of queue runners here.
While queue runners are efficient at prefetching a large dataset for training, they are overkill for inference/testing where only a single file needs to be classified; they are also a bit more involved to modify/maintain.
For that reason, I have added a placeholder "is_training", which is set to True while training and False during inference, as shown below:
import numpy as np

tmp_img = np.ndarray(shape=(1, 32, 32, 3), dtype=float)

with tf.train.MonitoredTrainingSession(
        checkpoint_dir=FLAGS.train_dir,
        hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
               tf.train.NanTensorHook(loss),
               _LoggerHook()],
        config=tf.ConfigProto(
            log_device_placement=FLAGS.log_device_placement)) as mon_sess:
    while not mon_sess.should_stop():
        mon_sess.run(train_op, feed_dict={is_training: True, imgs: tmp_img})
Another placeholder "imgs" holds a tensor of shape (1,32,32,3) for the image that will be fed during inference -- the first dimension is the batch size which is one in this case. I have modified cifar model to accept 32x32 images instead of 24x24 as the original cifar10 images are 32x32.
Finally, the conditional statement feeds the placeholder or queue runner output to the graph. The "is_training" placeholder is set to False during inference and "img" placeholder is fed a numpy array -- the numpy array is reshaped from 3 to 4 dimensional vector to conform to the input tensor to inference function in the model.
That is all there is to it. Any model can be inferred with a single/user defined test data like shown in the script above. Essentially read the graph, feed data to the graph nodes and run the graph to get the final output.
Now the second method. The other approach is to hack cifar10.py and cifar10_eval.py to change the batch size to one and replace the data coming from the queue runner with data read from a file.
Set batch size to 1:
tf.app.flags.DEFINE_integer('batch_size', 1,
                            """Number of images to process in a batch.""")
Then call inference with an image read from a file:
def evaluate():
    with tf.Graph().as_default() as g:
        # Get images and labels for CIFAR-10.
        eval_data = FLAGS.eval_data == 'test'
        images, labels = cifar10.inputs(eval_data=eval_data)

        import cv2
        img = cv2.imread('tmp.png')
        img = np.expand_dims(img, axis=0)
        img = tf.cast(img, tf.float32)

        logits = cifar10.inference(img)
Then pass logits to eval_once and modify eval_once to evaluate logits:
def eval_once(saver, summary_writer, top_k_op, logits, summary_op):
    ...
    while step < num_iter and not coord.should_stop():
        predictions = sess.run([top_k_op])
        print(sess.run(logits))
There is no separate script to run this method of inference; just run cifar10_eval.py, which will now read a file from the user-defined location with a batch size of one.
Here's how I ran a single image at a time. I'll admit it seems a bit hacky with the reuse of the variable scope.
This is a helper function
def restore_vars(saver, sess, chkpt_dir):
    """ Restore saved net, global score and step, and epsilons OR
    create checkpoint directory for later storage. """
    sess.run(tf.initialize_all_variables())

    checkpoint_dir = chkpt_dir
    if not os.path.exists(checkpoint_dir):
        try:
            os.makedirs(checkpoint_dir)
        except OSError:
            pass

    path = tf.train.get_checkpoint_state(checkpoint_dir)
    #print("path1 = ", path)
    #path = tf.train.latest_checkpoint(checkpoint_dir)
    print(checkpoint_dir, "path = ", path)
    if path is None:
        return False
    else:
        saver.restore(sess, path.model_checkpoint_path)
        return True
Here is the main part of the code that runs a single image at a time within the for loop.
to_restore = True
with tf.Session() as sess:
    for i in test_img_idx_set:
        # Get the image
        images = get_image(i)
        images = np.asarray(images, dtype=np.float32)
        images = tf.convert_to_tensor(images / 255.0)
        # resize image to whatever your model takes in
        images = tf.image.resize_images(images, 256, 256)
        images = tf.reshape(images, (1, 256, 256, 3))
        images = tf.cast(images, tf.float32)

        saver = tf.train.Saver(max_to_keep=5, keep_checkpoint_every_n_hours=1)
        #print("infer")
        with tf.variable_scope(tf.get_variable_scope()) as scope:
            if to_restore:
                logits = inference(images)
            else:
                scope.reuse_variables()
                logits = inference(images)

        if to_restore:
            restored = restore_vars(saver, sess, FLAGS.train_dir)
            print("restored ", restored)
            to_restore = False

        logit_val = sess.run(logits)
        print(logit_val)
Here is an alternative implementation of the above using placeholders. It's a bit cleaner in my opinion, but I'll leave the above example for historical reasons.
imgs_place = tf.placeholder(tf.float32, shape=[my_img_shape_put_here])
images = tf.reshape(imgs_place, (1, 256, 256, 3))
saver = tf.train.Saver(max_to_keep=5, keep_checkpoint_every_n_hours=1)
#print("infer")
logits = inference(images)

with tf.Session() as sess:
    # restore inside the session, since restore_vars needs a live session
    restored = restore_vars(saver, sess, FLAGS.train_dir)
    print("restored ", restored)
    for i in test_img_idx_set:
        logit_val = sess.run(logits, feed_dict={imgs_place: i})
        print(logit_val)
Got it working with this:
softmax = gn.inference(image)
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
with tf.Session() as sess:
    saver.restore(sess, ckpt.model_checkpoint_path)
    softmaxval = sess.run(softmax)
    print(softmaxval)
Output:
[[ 6.73550041e-03 4.44930716e-04 9.92570221e-01 1.00681427e-06
3.05406687e-08 2.38927707e-04 1.89839399e-12 9.36238484e-06
1.51646684e-09 3.38977535e-09]]
I don't have working code for you I'm afraid, but here's how we often tackle this problem in production:
1. Save out the GraphDef to disk, using something like write_graph.
2. Use freeze_graph to load the GraphDef and checkpoints, and save out a GraphDef with the Variables converted into Constants.
3. Load the GraphDef in something like label_image or classify_image.
For your example this is overkill, but I would at least suggest serializing the graph in the original example as a GraphDef, and then loading it in your script (so you don't have to duplicate the code generating the graph). With the same graph created, you should be able to populate it from a SaverDef, and the freeze_graph script may help as an example.
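As a rough sketch of steps 1-2 in Python: the checkpoint paths and the output node name below are taken from the earlier answer in this thread, and both are assumptions you would replace for your own model.

import tensorflow as tf
from tensorflow.python.framework import graph_util

with tf.Graph().as_default():
    # Recreate the graph from the meta file and restore the weights.
    saver = tf.train.import_meta_graph('/tmp/cifar10_train/model.ckpt-200.meta')
    with tf.Session() as sess:
        saver.restore(sess, '/tmp/cifar10_train/model.ckpt-200')
        # Bake the Variables into Constants so the GraphDef is self-contained.
        frozen_graph_def = graph_util.convert_variables_to_constants(
            sess, sess.graph.as_graph_def(), ['softmax_linear/softmax_linear'])
        # The frozen GraphDef can then be loaded by a label_image-style script.
        with tf.gfile.GFile('/tmp/cifar10_frozen.pb', 'wb') as f:
            f.write(frozen_graph_def.SerializeToString())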
