Add new layers to TensorFlow freeze_graph?

These discussions (1, 2) talked about adding new layers to a TensorFlow graph and retraining the model.
The following code shows how to add a new layer to a restored trainable model.
import tensorflow as tf

sess = tf.Session()

# First, load the meta graph and restore the weights.
saver = tf.train.import_meta_graph('my_test_model-1000.meta')
saver.restore(sess, tf.train.latest_checkpoint('./'))

# Now access the placeholders and build a feed dict for the new data.
graph = tf.get_default_graph()
w1 = graph.get_tensor_by_name("w1:0")
w2 = graph.get_tensor_by_name("w2:0")
feed_dict = {w1: 13.0, w2: 17.0}

# Access the op that you want to run.
op_to_restore = graph.get_tensor_by_name("op_to_restore:0")

# Add more ops to the current graph.
add_on_op = tf.multiply(op_to_restore, 2)
print(sess.run(add_on_op, feed_dict))
# This will print 120.
But I would like to add layers to a restored frozen graph.
I have only a frozen model for this application; I would like to add layers to it and freeze it again.
Those layers are for post-processing and do not need to be trained, so they are not in the trained model.
The reason is that I am converting the frozen graph to TensorRT, and I would like to include those layers in the Int8 engine.

I hope the following helps you. I had a custom op that needed to be added to an existing graph loaded from a .pb file (a frozen model file).
With this approach I was able to append new nodes to the existing graph.
Source code below:
import tensorflow as tf
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader

# Utility functions for loading and freezing graphs
def load_graph(frozen_graph_filename):
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name="")
    return graph

def freeze_graph(sess, output_graph):
    output_node_names = [
        "custom_op_zero",
        "custom_op_zero_1",
    ]
    output_graph_def = tf.graph_util.convert_variables_to_constants(
        sess,
        sess.graph.as_graph_def(),  # use the session's graph so the appended nodes are found
        output_node_names,
    )
    with tf.gfile.GFile(output_graph, "wb") as f:
        f.write(output_graph_def.SerializeToString())
    print("{} ops written to {}.".format(len(output_graph_def.node), output_graph))

# Load the custom op's shared object file.
zero_out_ops = load_library.load_op_library(
    resource_loader.get_path_to_datafile('my-op/tensorflow_zero_out/python/ops/_zero_out_ops.so'))
zero_out = zero_out_ops.zero_out

frozen_graph = load_graph("frozen_model.pb")
all_tensors = [tensor for op in frozen_graph.get_operations() for tensor in op.values()]

# The input to each new node is the output of the last node.
zero_out_custom = zero_out(all_tensors[-1], name="custom_op_zero")
zero_out_custom1 = zero_out(all_tensors[-1], name="custom_op_zero_1")

# Save the new frozen model file.
with tf.Session(graph=frozen_graph) as persisted_sess:
    for op in persisted_sess.graph.get_operations():
        print(op)
    freeze_graph(persisted_sess, "new_model.pb")
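If you do not have a custom op, the same pattern works with TensorFlow's built-in ops. Here is a minimal sketch (the file and tensor names are hypothetical) that appends a standard op to a frozen graph and serializes the result; since the imported graph contains only constants, there is nothing to re-freeze:
import tensorflow as tf

# Deserialize the frozen GraphDef (hypothetical file name).
with tf.gfile.GFile("frozen_model.pb", "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name="")
    # Append a built-in post-processing op to a tensor of the imported
    # graph (the tensor name is hypothetical).
    last = graph.get_tensor_by_name("op_to_extend:0")
    scaled = tf.multiply(last, 2.0, name="post_process/scale")

# All nodes are constants or stateless ops, so the extended GraphDef can
# be written out directly.
with tf.gfile.GFile("extended_model.pb", "wb") as f:
    f.write(graph.as_graph_def().SerializeToString())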

Related

Accuracy loss after transferring a Keras (.h5) model to TensorFlow frozen graph (.pb) format

I'm using tensorflow 1.12.0 and keras 2.1.4 (I used 2.2.4 at the beginning; the problem is the same).
I am working on a model-transfer task, and one of the steps is to convert a Keras model (.h5) to the TensorFlow frozen graph format (.pb). The model's accuracy was 98% when loading the .h5 and testing in Keras, but only 10% when loading the frozen graph (.pb) and testing in TensorFlow.
I convert the .h5 to a frozen graph with the following code.
import tensorflow.keras as keras
import tensorflow as tf
import os

def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
    """
    Freezes the state of a session into a pruned computation graph.

    Creates a new computation graph where variable nodes are replaced by
    constants taking their current value in the session. The new graph will
    be pruned so that subgraphs that are not necessary to compute the
    requested outputs are removed.

    @param session       The TensorFlow session to be frozen.
    @param keep_var_names A list of variable names that should not be frozen,
                          or None to freeze all the variables in the graph.
    @param output_names  Names of the relevant graph outputs.
    @param clear_devices Remove the device directives from the graph for better portability.
    @return The frozen graph definition.
    """
    session.run(tf.initialize_all_variables())
    graph = session.graph
    with graph.as_default():
        freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
        output_names = output_names or []
        output_names += [v.op.name for v in tf.global_variables()]
        input_graph_def = graph.as_graph_def()
        if clear_devices:
            for node in input_graph_def.node:
                node.device = ''
        frozen_graph = tf.graph_util.convert_variables_to_constants(
            session, input_graph_def, output_names, freeze_var_names)
        return frozen_graph

# Keras h5 file
input_path = './'
input_file = './CNN_Mnist.h5'
weight_file_path = os.path.join(input_path, input_file)
output_graph_name = 'frozen_graph.pb'

# Load the model
keras.backend.set_learning_phase(0)
h5_model = keras.models.load_model(weight_file_path)
frozen_graph = freeze_session(keras.backend.get_session(),
                              output_names=[out.op.name for out in h5_model.outputs])
tf.train.write_graph(frozen_graph, input_path, output_graph_name, as_text=False)
print('finish!')
Then I tried to load the frozen graph in TensorFlow and evaluate it again, but I got much worse results. Here is my evaluation code.
import os
import argparse
import shutil
import tensorflow as tf
import numpy as np
from tensorflow.python.platform import gfile
import tensorflow.contrib.decent_q

def graph_eval(input_graph_def, input_node, output_node):
    # MNIST dataset
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    x_test = (x_test / 255.0).astype(np.float32)
    x_test = np.reshape(x_test, [-1, 28, 28, 1])
    y_test = tf.keras.utils.to_categorical(y_test, num_classes=10)

    tf.import_graph_def(input_graph_def, name='')

    # Get the input placeholder & tensors
    images_in = tf.get_default_graph().get_tensor_by_name(input_node + ':0')
    labels = tf.placeholder(tf.int32, shape=[None, 10])

    # Get the output tensor
    logits = tf.get_default_graph().get_tensor_by_name(output_node + ':0')

    # Top-5 and top-1 accuracy
    in_top5 = tf.nn.in_top_k(predictions=logits, targets=tf.argmax(labels, 1), k=5)
    in_top1 = tf.nn.in_top_k(predictions=logits, targets=tf.argmax(labels, 1), k=1)
    top5_acc = tf.reduce_mean(tf.cast(in_top5, tf.float32))
    top1_acc = tf.reduce_mean(tf.cast(in_top1, tf.float32))

    # Run the computational graph
    with tf.Session() as sess:
        sess.run(tf.initializers.global_variables())
        feed_dict = {images_in: x_test, labels: y_test}
        t5_acc, t1_acc = sess.run([top5_acc, top1_acc], feed_dict)
        print(' Top 1 accuracy with validation set: {:1.4f}'.format(t1_acc))
        print(' Top 5 accuracy with validation set: {:1.4f}'.format(t5_acc))
    print('FINISHED!')

# Define arguments
graph = './frozen_graph_opt.pb'  # output_graph #'./frozen_graph.pb'
input_node = 'conv2d_1_input_5'
output_node = 'dense_2_3/Softmax'

input_graph_def = tf.Graph().as_graph_def()
input_graph_def.ParseFromString(tf.gfile.FastGFile(graph, "rb").read())
graph_eval(input_graph_def, input_node, output_node)
The data pre-processing in training and evaluation both use the same function.
Could someone please point out what I should check, or what needs attention, when converting a .h5 to a .pb file?
Or is there a problem in my evaluation code?
Here is my model viewed in Netron:
[Keras .h5 model screenshot]
[TensorFlow .pb model screenshot]
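Update: for reference, here is a sanity check I could run (a sketch, reusing the file and node names from my evaluation code): feed the same batch to the .h5 model and to the frozen graph and compare the raw outputs directly. If they already diverge here, the weights changed during freezing rather than in my evaluation code.
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras

(_, _), (x_test, _) = keras.datasets.mnist.load_data()
batch = np.reshape((x_test[:16] / 255.0).astype(np.float32), [-1, 28, 28, 1])

# Reference predictions from the .h5 model
h5_model = keras.models.load_model('./CNN_Mnist.h5')
ref = h5_model.predict(batch)

# Predictions from the frozen graph
graph_def = tf.GraphDef()
with tf.gfile.GFile('./frozen_graph.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())
with tf.Graph().as_default():
    tf.import_graph_def(graph_def, name='')
    with tf.Session() as sess:
        out = sess.run('dense_2_3/Softmax:0',
                       feed_dict={'conv2d_1_input_5:0': batch})

print('max abs diff:', np.abs(ref - out).max())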

Something goes wrong when converting a Keras .h5 model to a TensorFlow .pb model

I want to convert a Keras .h5 file to a TensorFlow .pb file. It seems something is wrong with the .pb file. My code is shown below:
import os
import tensorflow as tf
from keras import backend as K
from tensorflow.python.framework import graph_util, graph_io

# `model`, `params` and `args` come from my project setup.
network_eval = model.vggvox_resnet2d_icassp(input_dim=params['dim'],
                                            num_class=params['n_classes'],
                                            mode='eval', args=args)
path = 'XXX'
name = 'XXX.pb'
network_eval.load_weights(os.path.join(args.resume), by_name=True)  # load model

# I use parts of the keras_to_tensorflow util, see https://github.com/amir-abdi/keras_to_tensorflow
orig_output_node_names = [node.op.name for node in network_eval.outputs]
# I do not change output_nodes_prefix, so
converted_output_node_names = orig_output_node_names

sess = K.get_session()
constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(),
                                                           converted_output_node_names)
graph_io.write_graph(constant_graph, path, name, as_text=False)
The .pb file is generated successfully, but the predicted outputs for the test file using the .pb model are different from those using the original .h5 Keras model. The test code is shown below:
import numpy as np
import tensorflow as tf
from keras import backend as K
from tensorflow.python.platform import gfile

# Using the .h5 model
for spec in specs:  # specs is the sliced magnitude spectrum of a .wav file for prediction
    spec = np.expand_dims(np.expand_dims(spec, 0), -1)
    v_1 = network_eval.predict(spec)
    print(v_1)

# Using the .pb model
sess = tf.Session()
with gfile.FastGFile('XXX.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    sess.graph.as_default()
    tf.import_graph_def(graph_def, name='')

sess.run(tf.global_variables_initializer())

# 'lambda_1/l2_normalize' is the name of the last layer of the network
# (I got this name by printing network_eval.output.name)
op = sess.graph.get_tensor_by_name('lambda_1/l2_normalize:0')
x = sess.graph.get_tensor_by_name('input:0')
for spec in specs:
    spec = np.expand_dims(np.expand_dims(spec, 0), -1)
    v_2 = sess.run(op, feed_dict={x: spec, K.learning_phase(): 0})
    print(v_2)
As I said above, the printed results v_1 and v_2 are quite different, but they have the same shape, which confuses me, and I don't know which step went wrong. Can anyone help me? I would be very grateful.

How to save TensorRT graph generated from frozen inference graph?

I am using the script below to convert my frozen_inference_graph into a TensorRT-optimized one:
import tensorflow as tf
from tensorflow.python.compiler.tensorrt import trt_convert as trt

with tf.Session() as sess:
    # First deserialize your frozen graph:
    with tf.gfile.GFile('frozen_inference_graph.pb', 'rb') as f:
        frozen_graph = tf.GraphDef()
        frozen_graph.ParseFromString(f.read())
    # Now you can create a TensorRT inference graph from your frozen graph:
    converter = trt.TrtGraphConverter(
        input_graph_def=frozen_graph,
        nodes_blacklist=['outputs/Softmax'])  # output nodes
    trt_graph = converter.convert()
    # Import the TensorRT graph into a new graph and run:
    output_node = tf.import_graph_def(
        trt_graph,
        return_elements=['outputs/Softmax'])
    sess.run(output_node)
My question is: how can I save this optimized graph to disk so I can use it to run inference?

Yes, you can; just add these two lines:
saved_model_dir_trt = "./tensorrt_model.trt"
converter.save(saved_model_dir_trt)
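Alternatively, since the trt_graph returned by converter.convert() is an ordinary GraphDef, here is a minimal sketch (the file name is hypothetical) that writes it out as a frozen .pb and reloads it for inference:
# Serialize the optimized GraphDef to disk.
with tf.gfile.GFile('./trt_graph.pb', 'wb') as f:
    f.write(trt_graph.SerializeToString())

# Later, deserialize and import it like any other frozen graph.
with tf.gfile.GFile('./trt_graph.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
with tf.Graph().as_default():
    output_node = tf.import_graph_def(graph_def,
                                      return_elements=['outputs/Softmax'])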

How can I visualize an Estimator-trained model in TensorFlow?

I have created a model with 3 hidden layers and trained it on a specific data set.
How can I visualize the model, with the neuron connections and weights, at each iteration?
Here is a snippet of the Python code:
# <ALL IMPORT STATEMENTS>

MODEL_DIR = <model_name>

def make_estimator(model_dir):
    config = run_config.RunConfig(model_dir=model_dir)
    feat_cols = [tf.feature_column.numeric_column("x", shape=<number_of_feat_cols>)]
    return estimator.DNNClassifier(config=config, hidden_units=[<>, <>, <>],
                                   feature_columns=feat_cols, n_classes=2,
                                   optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.001))

data = pd.read_csv(<csv_file>)
feat_data = data.drop('Type', axis=1)
feat_data_matrix = feat_data.as_matrix()
labels = data['Type']
labels_matrix = labels.as_matrix()

deep_model = make_estimator(MODEL_DIR)
input_fn = estimator.inputs.numpy_input_fn(x={'x': feat_data_matrix}, y=labels_matrix,
                                           shuffle=True, batch_size=10, num_epochs=1000)
tr_steps = <step_size>
deep_model.train(input_fn=input_fn, steps=tr_steps)
print("Training Done")
In the code above I have not created any TensorFlow session; without one, where can I use the TensorBoard APIs to visualize the model?
Using the Python API, simply call tf.summary.FileWriter.
Then, if you load the file written by the FileWriter into TensorBoard, the graph is shown.
You write the graph like this:
# Launch the graph.
current_session = tf.Session()
current_session.run(init)
# Create a summary writer, add the 'graph' to the event file.
writer = tf.summary.FileWriter(<some-directory>, current_session.graph)
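Note also that tf.estimator writes an events file (which includes the graph) to model_dir during train(), so for the Estimator code in the question it should be enough to run tensorboard --logdir pointed at MODEL_DIR after training; no explicit Session or FileWriter is needed.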

Single Image Inference in Tensorflow [Python]

I have already converted a pre-trained .ckpt file to a .pb file, freezing the model and saving the weights as well. What I am trying to do now is run a simple inference with that .pb file and extract and save the output image. The model is a fully convolutional network for semantic segmentation, downloaded from here: https://github.com/MarvinTeichmann/KittiSeg. So far I have managed to load the image, set the default TF graph, import the model's graph into it, read the input and output tensors, and run the session (the error occurs there).
import tensorflow as tf
import os
import numpy as np
from tensorflow.python.platform import gfile
from PIL import Image

# Read the image & get statistics
img = Image.open('/path-to-image/demoImage.png')
img.show()
width, height = img.size
print(width)
print(height)

with tf.Graph().as_default() as graph:
    with tf.Session() as sess:
        # Load the graph into graph_def
        print("load graph")
        # Load the protobuf file from disk and parse it to retrieve the
        # unserialized graph_def
        with gfile.FastGFile("/path-to-FCN-model/FCN8.pb", 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            # Import graph_def into the current default graph
            tf.import_graph_def(graph_def, name='')

            # Print the names of the operations in the session
            #for op in sess.graph.get_operations():
            #    print("Operation Name :", op.name)        # Operation name
            #    print("Tensor Stats :", str(op.values())) # Tensor name

            # INFERENCE here
            l_input = graph.get_tensor_by_name('Placeholder:0')
            l_output = graph.get_tensor_by_name('save/Assign_38:0')
            print("l_input", l_input)
            print("l_output", l_output)

            # Acceptable feed values include Python scalars, strings, lists,
            # numpy ndarrays, or TensorHandles.
            result = sess.run(l_output, feed_dict={l_input: img})
            print(result)
            print("Inference done")

# Info
# First tensor name: Placeholder:0
# Last tensor name:  save/Assign_38:0
Could the error come from the format of the image (e.g., should I convert the .png to another format)? Or is it another, more fundamental, error?
I managed to fix the error. Below is the working script for running inference on a single image with a fully convolutional network (for whoever is interested in an alternative segmentation algorithm to SegNet). This model uses bilinear interpolation for upscaling rather than an un-pooling layer. Anyway, because the model is only available for download in .ckpt format, you must first freeze it and save it as a .pb file. Then you must pass the network through the TF optimizer to set the dropout probabilities to 1. Afterwards, set the correct input and output tensor names in this script, and the inference works correctly, extracting the segmented image.
import tensorflow as tf  # The default graph is initialized when the library is imported
import os
from tensorflow.python.platform import gfile
from PIL import Image
import numpy as np
import scipy
from scipy import misc
import matplotlib.pyplot as plt
import cv2

with tf.Graph().as_default() as graph:  # Set default graph as graph
    with tf.Session() as sess:
        # Load the graph into graph_def
        print("load graph")
        # Load the protobuf file from disk and parse it to retrieve the
        # unserialized graph_def
        with gfile.FastGFile("/path-to-protobuf/FCN8_Freezed.pb", 'rb') as f:
            print("Load Image...")
            # Read the image & get statistics
            image = scipy.misc.imread('/Path-To-Image/uu_000010.png')
            image = image.astype(float)
            Input_image_shape = image.shape
            height, width, channels = Input_image_shape

            # Set the FCN graph as the default graph
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            sess.graph.as_default()
            # Import graph_def into the current default graph
            # (in this case, the weights are (typically) embedded in the graph)
            tf.import_graph_def(
                graph_def,
                input_map=None,
                return_elements=None,
                name="",
                op_dict=None,
                producer_op_list=None
            )

            # Print the names of the operations in the session
            for op in graph.get_operations():
                print("Operation Name :", op.name)          # Operation name
                print("Tensor Stats :", str(op.values()))   # Tensor name

            # INFERENCE here
            l_input = graph.get_tensor_by_name('Inputs/fifo_queue_Dequeue:0')    # Input tensor
            l_output = graph.get_tensor_by_name('upscore32/conv2d_transpose:0')  # Output tensor
            print("Shape of input : ", tf.shape(l_input))

            # Run the KittiSeg model on a single image
            Session_out = sess.run(l_output, feed_dict={l_input: image})
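Continuing from the script above, here is a minimal post-processing sketch for saving the segmented image. The output layout (batch, height, width, num_classes) and the class-to-pixel mapping are assumptions; for the two-class KittiSeg road model this should map road pixels to white:
# Collapse the (assumed) class dimension and rescale for a quick visual check.
logits = np.squeeze(Session_out)
segmentation = np.argmax(logits, axis=-1).astype(np.uint8)
# Hypothetical output path; scipy.misc.imsave needs scipy < 1.2.
scipy.misc.imsave('/Path-To-Image/segmentation_out.png', segmentation * 255)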
Have you already looked at demo.py? Line 141 there shows how they modify the input of the graph:
# Create a placeholder for the input
image_pl = tf.placeholder(tf.float32)
image = tf.expand_dims(image_pl, 0)

# Build the TensorFlow graph using the model from logdir
prediction = core.build_inference_graph(hypes, modules,
                                        image=image)
And line 164 shows how the image is opened:
image = scp.misc.imread(input_image)
which is fed directly to image_pl. The only catch is that core.build_inference_graph is a TensorVision call.
Note: it would be helpful to provide the exact error message as input as well.
