I have created an Estimator from a TF Slim ResNet V2 checkpoint and tested it to make predictions. What I did is essentially a normal Estimator combined with assign_from_checkpoint_fn:
def model_fn(features, labels, mode, params):
    ...
    # assign_from_checkpoint_fn returns an init function that restores the
    # Slim model variables from the checkpoint
    slim.assign_from_checkpoint_fn(
        os.path.join(checkpoint_dir, 'resnet_v2_50.ckpt'),
        slim.get_model_variables('resnet_v2'))
    ...
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'class_ids': predicted_classes[:, tf.newaxis],
            'probabilities': tf.nn.softmax(logits),
            'logits': logits,
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)
To export the estimator as a SavedModel, I made a serving_input_fn as follows:
def image_preprocess(image_buffer):
    image = tf.image.decode_jpeg(image_buffer, channels=3)
    image_preprocessing_fn = preprocessing_factory.get_preprocessing('inception', is_training=False)
    image = image_preprocessing_fn(image, FLAGS.image_size, FLAGS.image_size)
    return image
def serving_input_fn():
    input_ph = tf.placeholder(tf.string, shape=[None], name='image_binary')
    image_tensors = image_preprocess(input_ph)
    return tf.estimator.export.ServingInputReceiver(image_tensors, input_ph)
In the main function, I use export_saved_model to try to export the Estimator to the SavedModel format:
def main():
    ...
    classifier = tf.estimator.Estimator(model_fn=model_fn)
    classifier.export_saved_model(dir_path, serving_input_fn)
However, when I run the code, it says "Couldn't find trained model at /tmp/tmpn3spty2z". From what I understand, export_saved_model looks for a trained Estimator model to export to a SavedModel. However, I would like to know whether there is a way to restore the pretrained checkpoint into an Estimator and export it to a SavedModel without any further training?
I have solved my problem. To export a TF Slim ResNet checkpoint to a SavedModel with TF 1.14, warm start can be used together with export_savedmodel as follows:
config = tf.estimator.RunConfig(save_summary_steps=None, save_checkpoints_secs=None)
warm_start = tf.estimator.WarmStartSettings(checkpoint_dir, checkpoint_name)
classifier = tf.estimator.Estimator(model_fn=model_fn, warm_start_from=warm_start, config=config)
classifier.export_savedmodel(export_dir_base=FLAGS.output_dir, serving_input_receiver_fn=serving_input_fn)
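To sanity-check the export, the resulting SavedModel can be loaded back with tf.contrib.predictor. The following is a minimal sketch under some assumptions: exported_path stands for whatever export_savedmodel returned, test.jpg is a hypothetical image, and the exact input key depends on how the receiver named its tensor, so it is printed first:

predict_fn = tf.contrib.predictor.from_saved_model(exported_path)
print(predict_fn.feed_tensors)  # shows the input key(s) the serving signature expects
with tf.gfile.GFile('test.jpg', 'rb') as f:  # hypothetical test image
    image_bytes = f.read()
outputs = predict_fn({'input': [image_bytes]})  # use the key printed above
print(outputs['class_ids'], outputs['probabilities'])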
I have trained a model with frozen feature extraction layers, which was initialised as follows:
model = models.densenet161(pretrained=True)
for param in model.parameters():
    param.requires_grad = False
num_ftrs = model.classifier.in_features
model.classifier = torch.nn.Linear(num_ftrs, 2)
However, at inference time, I am unsure how to load the model correctly. In a separate script I do the following:
model = models.densenet161(pretrained=True)
for param in model.parameters():
    param.requires_grad = False
num_ftrs = model.classifier.in_features
model.classifier = torch.nn.Linear(num_ftrs, 2)
model.to(device)

# load the best model
bestmodel = get_best_model(best)
bestmodel = torch.load(bestmodel)
model.load_state_dict(bestmodel['classifier'])

# set model to evaluation mode
model.eval()
with torch.no_grad():
    ...
Does this look correct? Or do I set pretrained=False when initialising the model in my inference script?
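For reference, the load above implicitly assumes that the checkpoint dictionary stores the full model state under the 'classifier' key. A hypothetical save-side sketch that would match it:

# hypothetical save side: the complete state_dict stored under the 'classifier'
# key, which is what model.load_state_dict(bestmodel['classifier']) expects
torch.save({'classifier': model.state_dict()}, 'best_model.pt')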
I am trying to feed serialized data to my saved model.
I suspect either the model is not exported correctly, or serialization of the data is not done correctly. Any hints or suggestions would be appreciated.
import tensorflow as tf
import numpy as np

train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': np.array([[1, 2, 3, 4, 3], [1, 3, 4, 2, 4], [10, 2, 4, 1.3, 4], [1, 3, 5.2, 9, 0.3]]).astype(np.float32)},
    y=np.array([0, 0, 1, 1]).astype(np.float32),
    batch_size=2,
    shuffle=True
)

eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': np.array([[1, 2, 3, 1, 4], [1, 23, 4, 1, 90]]).astype(np.float32)},
    y=np.array([0, 1]).astype(np.float32),
    batch_size=2,
    num_epochs=1,
    shuffle=False
)
def my_model(features, labels, mode, params):
    net = features['x']
    net = tf.layers.dense(net, 32, activation=tf.nn.relu)
    logits = tf.layers.dense(net, 1)

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=logits,
            export_outputs={'logits': tf.estimator.export.PredictOutput(logits)}
        )

    loss = tf.reduce_sum(tf.square(labels - logits))

    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer(learning_rate=0.02)
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss)
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=5)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)

my_estimator = tf.estimator.Estimator(model_fn=my_model)
tf.estimator.train_and_evaluate(my_estimator, train_spec, eval_spec)

saved_model = my_estimator.export_savedmodel(
    'foo',
    tf.estimator.export.build_parsing_serving_input_receiver_fn({'x': tf.FixedLenFeature([5], tf.float32)}))

predict_fn = tf.contrib.predictor.from_saved_model(saved_model)

features = tf.train.Features(feature={'x': tf.train.Feature(float_list=tf.train.FloatList(value=[1, 2, 3, 4, 1]))})
ex = tf.train.Example(features=features)
prediction = predict_fn({'examples': ex.SerializeToString()})
Here is the error message:
ValueError: Cannot feed value of shape () for Tensor 'input_example_tensor:0', which has shape '(?,)'
It turned out that a list of serialized tf.train.Example protos had to be provided.
In the last line, prediction = predict_fn({'examples': ex.SerializeToString()}) is changed to prediction = predict_fn({'examples': [ex.SerializeToString()]}).
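Since the placeholder has shape (?,), the receiver accepts a whole batch of serialized Example protos, so several records can be scored in one call. A small sketch using the same feature spec as the export above (make_example is an illustrative helper):

def make_example(values):
    # wrap a float vector in a serialized tf.train.Example, matching the
    # {'x': tf.FixedLenFeature([5], tf.float32)} spec used at export time
    feats = tf.train.Features(
        feature={'x': tf.train.Feature(float_list=tf.train.FloatList(value=values))})
    return tf.train.Example(features=feats).SerializeToString()

prediction = predict_fn({'examples': [make_example([1, 2, 3, 4, 1]),
                                      make_example([5, 4, 3, 2, 1])]})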
I figured this out thanks to this nice tutorial:
http://shzhangji.com/blog/2018/05/14/serve-tensorflow-estimator-with-savedmodel/
I'm following a tutorial from codelabs. They use this script to optimize the model:
python -m tensorflow.python.tools.optimize_for_inference \
--input=tf_files/retrained_graph.pb \
--output=tf_files/optimized_graph.pb \
--input_names="input" \
--output_names="final_result"
They verify the optimized_graph.pb using this script:
python -m scripts.label_image \
--graph=tf_files/optimized_graph.pb \
--image=tf_files/flower_photos/daisy/3475870145_685a19116d.jpg
The problem is that I'm trying to apply optimize_for_inference to my own model, which is not an image classifier.
Previously, before optimizing, I used this script to verify my model by testing it on sample data:
import tensorflow as tf
from tensorflow.contrib import predictor
from tensorflow.python.platform import gfile
import numpy as np

def load_graph(frozen_graph_filename):
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name="prefix")
    input_name = graph.get_operations()[0].name + ':0'
    output_name = graph.get_operations()[-1].name + ':0'
    return graph, input_name, output_name

def predict(model_path, input_data):
    # load tf graph
    tf_model, tf_input, tf_output = load_graph(model_path)
    x = tf_model.get_tensor_by_name(tf_input)
    y = tf_model.get_tensor_by_name(tf_output)

    model_input = tf.train.Example(
        features=tf.train.Features(feature={
            "thisisinput": tf.train.Feature(float_list=tf.train.FloatList(value=input_data)),
        }))
    model_input = model_input.SerializeToString()

    num_outputs = 3
    predictions = np.zeros(num_outputs)
    with tf.Session(graph=tf_model) as sess:
        y_out = sess.run(y, feed_dict={x: [model_input]})
        predictions = y_out
    return predictions

if __name__ == "__main__":
    input_data = [4.7, 3.2, 1.6, 0.2]  # my model receives 4 inputs
    print(np.argmax(predict("not_optimized_model.pb", input_data)))
But after optimizing the model, my testing script doesn't work. It raises an error:
ValueError: Input 0 of node import/ParseExample/ParseExample was passed float from import/inputtensors:0 incompatible with expected string.
So my question is: how do I verify my model after optimizing it? I can't use the --image command like the tutorial does.
I've solved the error by changing the placeholder's type to tf.float32 when exporting the model:
def my_serving_input_fn():
    input_data = {
        "featurename": tf.placeholder(tf.float32, [None, 4], name='inputtensors')
    }
    return tf.estimator.export.ServingInputReceiver(input_data, input_data)
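The export call itself stays the same; a sketch, with the estimator and output directory names assumed:

# names here are illustrative; export with the float-typed receiver above
estimator.export_savedmodel(export_dir_base='export_dir',
                            serving_input_receiver_fn=my_serving_input_fn)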
and then change the prediction function above to:
def predict(model_path, input_data):
    # load tf graph
    tf_model, tf_input, tf_output = load_graph(model_path)
    x = tf_model.get_tensor_by_name(tf_input)
    y = tf_model.get_tensor_by_name(tf_output)

    num_outputs = 3
    predictions = np.zeros(num_outputs)
    with tf.Session(graph=tf_model) as sess:
        # feed the raw float vector directly; no Example serialization needed
        y_out = sess.run(y, feed_dict={x: [input_data]})
        predictions = y_out
    return predictions
After freezing the model, the prediction code above will work. Unfortunately, it raises another error when trying to load the pb directly after exporting the model.
I am using the Tensorflow Estimator API but haven't figured out how to use the L-BFGS optimizer available as tf.contrib.opt.ScipyOptimizerInterface.
It seems the estimator API expects some optimizer from the tf.train module but no BFGS implementation is available there. The only one defined in contrib does not follow the same interface.
To be more specific, the official tutorial on defining custom estimators shows how to use the AdagradOptimizer:
optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
However, the API of the ScipyOptimizerInterface is as follows:
optimizer = ScipyOptimizerInterface(loss, options={'maxiter': 100})
with tf.Session() as session:
    optimizer.minimize(session)
Taking a full example:
from sklearn import datasets
import numpy as np
import tensorflow as tf

def _custom_model_fn(features, labels, mode, feature_columns):
    predictions = tf.feature_column.linear_model(features, feature_columns)
    predictions = tf.reshape(predictions, [-1])

    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {'predictions': predictions}
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions,
                                        reduction=tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS)

    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss=loss)

    # Create training op.
    assert mode == tf.estimator.ModeKeys.TRAIN
    # train_op = tf.contrib.opt.ScipyOptimizerInterface(loss, options={'maxiter': 10})
    optimizer = tf.train.FtrlOptimizer(learning_rate=1.)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode,
                                      predictions=predictions,
                                      loss=loss,
                                      train_op=train_op)

class MyRegressor(tf.estimator.Estimator):
    def __init__(self, feature_columns, model_dir=None, config=None):
        def _model_fn(features, labels, mode, config):
            return _custom_model_fn(features, labels, mode, feature_columns)
        super(MyRegressor, self).__init__(model_fn=_model_fn, model_dir=model_dir, config=config)

# Load the diabetes dataset
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_y = diabetes.target

# Create the custom estimator and train it
feature_columns = [tf.feature_column.numeric_column('x')]
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': np.array(diabetes.data[:, 2])},
    y=np.array(diabetes.target),
    num_epochs=None,
    shuffle=True)
myregressor = MyRegressor(feature_columns)
myregressor.train(train_input_fn, steps=10000)
If I un-comment the line to use the ScipyOptimizerInterface instead, I obviously get an error as follows:
TypeError: train_op must be Operation or Tensor, given: <tensorflow.contrib.opt.python.training.external_optimizer.ScipyOptimizerInterface object
Is there an easy way to use the Scipy optimizer?
Thanks in advance.
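For reference, this is how ScipyOptimizerInterface is normally driven outside the Estimator API; a minimal self-contained sketch (a toy regression, not an Estimator integration) that illustrates the interface mismatch described above:

import numpy as np
import tensorflow as tf

# toy data and a scalar linear model
x = tf.constant(np.random.rand(100, 1), dtype=tf.float32)
y = 3.0 * x + 1.0
w = tf.Variable(0.0)
b = tf.Variable(0.0)
loss = tf.reduce_mean(tf.square(y - (w * x + b)))

# ScipyOptimizerInterface drives the session itself instead of producing a
# train_op that the Estimator could run step by step
optimizer = tf.contrib.opt.ScipyOptimizerInterface(loss, options={'maxiter': 100})
with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    optimizer.minimize(session)  # runs L-BFGS to convergence in a single call
    print(session.run([w, b]))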
I have this snippet for my model:
import pandas as pd
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python import SKCompat

# Assume my dataset uses X['train'] as input and y['train'] as output

regressor = SKCompat(learn.Estimator(model_fn=lstm_model(TIMESTEPS, RNN_LAYERS, DENSE_LAYERS), model_dir=LOG_DIR))
validation_monitor = learn.monitors.ValidationMonitor(X['val'], y['val'], every_n_steps=PRINT_STEPS, early_stopping_rounds=1000)
regressor.fit(X['train'], y['train'],
              monitors=[validation_monitor],
              batch_size=BATCH_SIZE,
              steps=TRAINING_STEPS)

# After training this model I want to save it in a folder, so I can use the
# trained model in my algorithm to predict the output.
# What is the correct format to use here to save my model in a folder called 'saved_model'?
regressor.export_savedmodel('/saved_model/')

# I want to import it later in some other code. How can I import it?
# Is there any function like import model from file?
How can I save this estimator? I tried to find some examples for tf.contrib.learn.Estimator.export_savedmodel but did not have any success. Help appreciated.
The function export_savedmodel requires the argument serving_input_receiver_fn, a function taking no arguments that defines the inputs for the model and for the predictor. Therefore, you must create your own serving_input_receiver_fn, where the model input type matches the model input in the training script, and the predictor input type matches the predictor input in the testing script.
On the other hand, if you create a custom model, you must define the export_outputs with the function tf.estimator.export.PredictOutput, whose input is a dictionary whose keys have to match the names of the predictor outputs in the testing script.
For example:
TRAINING SCRIPT
def serving_input_receiver_fn():
    serialized_tf_example = tf.placeholder(dtype=tf.string, shape=[None], name='input_tensors')
    receiver_tensors = {"predictor_inputs": serialized_tf_example}
    feature_spec = {"words": tf.FixedLenFeature([25], tf.int64)}
    features = tf.parse_example(serialized_tf_example, feature_spec)
    return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
def estimator_spec_for_softmax_classification(logits, labels, mode):
    predicted_classes = tf.argmax(logits, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        export_outputs = {'predict_output': tf.estimator.export.PredictOutput({"pred_output_classes": predicted_classes, 'probabilities': tf.nn.softmax(logits)})}
        return tf.estimator.EstimatorSpec(mode=mode, predictions={'class': predicted_classes, 'prob': tf.nn.softmax(logits)}, export_outputs=export_outputs)  # IMPORTANT!!!

    onehot_labels = tf.one_hot(labels, 31, 1, 0)
    loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
        train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

    eval_metric_ops = {'accuracy': tf.metrics.accuracy(labels=labels, predictions=predicted_classes)}
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def model_custom(features, labels, mode):
    bow_column = tf.feature_column.categorical_column_with_identity("words", num_buckets=1000)
    bow_embedding_column = tf.feature_column.embedding_column(bow_column, dimension=50)
    bow = tf.feature_column.input_layer(features, feature_columns=[bow_embedding_column])
    logits = tf.layers.dense(bow, 31, activation=None)
    return estimator_spec_for_softmax_classification(logits=logits, labels=labels, mode=mode)
def main():
    # ...
    # preprocess -> features_train_set and labels_train_set
    # ...
    classifier = tf.estimator.Estimator(model_fn=model_custom)
    train_input_fn = tf.estimator.inputs.numpy_input_fn(x={"words": features_train_set}, y=labels_train_set, batch_size=batch_size_param, num_epochs=None, shuffle=True)
    classifier.train(input_fn=train_input_fn, steps=100)
    full_model_dir = classifier.export_savedmodel(export_dir_base="C:/models/directory_base", serving_input_receiver_fn=serving_input_receiver_fn)
TESTING SCRIPT
def main():
    # ...
    # preprocess -> features_test_set
    # ...
    with tf.Session() as sess:
        tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], full_model_dir)
        predictor = tf.contrib.predictor.from_saved_model(full_model_dir)
        model_input = tf.train.Example(features=tf.train.Features(feature={"words": tf.train.Feature(int64_list=tf.train.Int64List(value=features_test_set))}))
        model_input = model_input.SerializeToString()
        output_dict = predictor({"predictor_inputs": [model_input]})
        y_predicted = output_dict["pred_output_classes"][0]
(Code tested in Python 3.6.3, Tensorflow 1.4.0)