Tensorflow data saving with two separate models - python

I have used TF to build two separate models. During training I saved each one separately. Now I want to use them both. I can load the first one, but when I try to load the second one I get this message (in part):
I tensorflow/core/common_runtime/gpu/gpu_device.cc:1041] Creating TensorFlow device (/gpu:0) -> (device: 0, name: GeForce GTX 850M, pci bus id: 0000:0a:00.0)
W tensorflow/core/framework/op_kernel.cc:968] Invalid argument: Assign requires shapes of both tensors to match. lhs shape= [5,5,32,64] rhs shape= [1024,2]
[[Node: save_1/Assign_16 = Assign[T=DT_FLOAT, _class=["loc:@Variable_6"], use_locking=true, validate_shape=true, _device="/job:localhost/replica:0/task:0/gpu:0"](Variable_6, save_1/restore_slice_16/_47)]]
There was also a message indicating that the error occurred in the 'restore' part of the code. Here's a snippet of that code:
def save(self):
    filename = self.save_name
    folder = self.ckpt_folder + os.sep + "ckpt"
    if not os.path.exists(folder):
        os.makedirs(folder)
    saver = tf.train.Saver()
    save_path = saver.save(self.sess, folder + os.sep + self.ckpt_name + "." + filename)
    print("saved?", filename)

def load(self):
    filename = self.save_name
    file = self.ckpt_folder + os.sep + "ckpt" + os.sep + self.ckpt_name + "." + filename
    if os.path.isfile(file):
        saver = tf.train.Saver()
        saver.restore(self.sess, file)
        print("load?", filename)
The functions above, specifically load(), are called by the model after the session object is initialized. How can I run both TF models together from the data I have already saved?

Depending on what you want to do with them, you should either:
Use name scoping to make the variable names unique, or
Load them into separate graphs (see the sketch below).
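For the second option, here is a minimal sketch (assuming two checkpoints at the hypothetical paths model1.ckpt and model2.ckpt): each model gets its own graph and session, so their variables can never collide.

import tensorflow as tf

# Load each model into its own graph, each with its own session
graph1 = tf.Graph()
with graph1.as_default():
    saver1 = tf.train.import_meta_graph('model1.ckpt.meta')  # hypothetical path
sess1 = tf.Session(graph=graph1)
saver1.restore(sess1, 'model1.ckpt')

graph2 = tf.Graph()
with graph2.as_default():
    saver2 = tf.train.import_meta_graph('model2.ckpt.meta')  # hypothetical path
sess2 = tf.Session(graph=graph2)
saver2.restore(sess2, 'model2.ckpt')

# Each sess.run call now only sees the tensors of its own graph

Because the graphs are isolated, the shape-mismatch Assign error above cannot occur: the second Saver never tries to write its values into the first model's variables.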

Related

How can I use os.path.join on a Tensorflow Tensor?

I'm trying to create a custom TensorFlow dataset using the tf.data API. However, my original data consists of many smaller images, known as tiles, which must be concatenated to form a larger image. The tiles also undergo image augmentation. For this I use os.path.join to build the tile paths; however, os.path.join doesn't work with TensorFlow tensors. Error message:
main_image_path = os.path.join(INDIVIDUAL_TILE_PATH, image_id)
File "C:\ProgramData\Anaconda3\envs\3.9\lib\ntpath.py", line 117, in join *
genericpath._check_arg_types('join', path, *paths)
File "C:\ProgramData\Anaconda3\envs\3.9\lib\genericpath.py", line 152, in _check_arg_types *
raise TypeError(f'{funcname}() argument must be str, bytes, or '
TypeError: join() argument must be str, bytes, or os.PathLike object, not 'Tensor'
Process finished with exit code 1
The obvious solution is to convert the tensor to a string, but str(image_id) doesn't appear to work. Here is my code:
def createDynamicDatasetFromIDsLabels(ID, labels, mode="train"):
    dataset = (
        tf.data.Dataset
        .from_tensor_slices((ID, labels))
        .map(decodeImages, num_parallel_calls=AUTO)
        # .repeat()
        # .shuffle(BATCH_SIZE * 5)
        # .batch(BATCH_SIZE)
        # .prefetch(AUTO)
    )
    return dataset

def decodeImages(image_id, label):
    main_image_path = os.path.join(INDIVIDUAL_TILE_PATH, image_id)
    tiles_list_paths = glob.glob(main_image_path + "*")
    augmentedTiles = map(DataAugmentation.data_augment, tiles_list_paths)  # DATA AUGMENT READS TILES AND AUGMENTS
    tile_list_images = list(augmentedTiles)
    concat_image = glue_to_one(tile_list_images)
    plt.imshow(concat_image)
    plt.show()
    return concat_image, label

def glue_to_one(imgs_seq):
    first_row = tf.concat((imgs_seq[0], imgs_seq[1], imgs_seq[2], imgs_seq[3]), 0)
    second_row = tf.concat((imgs_seq[4], imgs_seq[5], imgs_seq[6], imgs_seq[7]), 0)
    third_row = tf.concat((imgs_seq[8], imgs_seq[9], imgs_seq[10], imgs_seq[11]), 0)
    fourth_row = tf.concat((imgs_seq[12], imgs_seq[13], imgs_seq[14], imgs_seq[15]), 0)
    img_glue = tf.stack((first_row, second_row, third_row, fourth_row), axis=1)
    img_glue = tf.reshape(img_glue, [512, 512, 3])
    return img_glue
Instead of os.path.join you could use tf.strings.join with os.path.sep as the separator. Here's a small working example that recovers the folder path from a file path with tf.strings (note that collapsing the split pieces back into a single string needs tf.strings.reduce_join):
filepath = tf.convert_to_tensor(r'C:\ProgramData\Anaconda3\envs\3.9\lib\ntpath.py')
parts = tf.strings.split(filepath, os.path.sep)
folderpath = tf.strings.reduce_join(parts[0:-1], separator=os.path.sep)
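Applied to decodeImages from the question, a hedged sketch of the same idea (assuming INDIVIDUAL_TILE_PATH is a plain Python string):

main_image_path = tf.strings.join([INDIVIDUAL_TILE_PATH, image_id], separator=os.path.sep)
# glob.glob cannot consume a tensor either; tf.io.matching_files is the
# graph-friendly wildcard lookup.
tiles_list_paths = tf.io.matching_files(tf.strings.join([main_image_path, '*']))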

Deploying Mask RCNN object detection model to GCP AI Platform that accepts base64 encoded images

Please help! I have been stuck for weeks trying to get any kind of prediction response from my Mask R-CNN object detection model on GCP AI Platform.
So far, I have trained a simple model on about 200 images using the Matterport repo, which outputs a weights file in h5 format. In a new Python script, I load those weights like this:
# LOAD MODEL
from config import mask_config
from model import MaskRCNN
config = get_config()
model = MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
model.load_weights(H5_WEIGHT_PATH, by_name=True)
Then I created a frozen-graph .pb file using the following code:
def freeze_model(model, name):
    frozen_graph = freeze_session(
        sess,
        output_names=[out.op.name for out in model.outputs][:4])
    directory = PATH_TO_SAVE_FROZEN_PB
    # directory = './'
    tf.train.write_graph(frozen_graph, directory, name, as_text=False)
    print("*" * 80)
    print("Finish converting keras model to Frozen PB")
    print('PATH: ', PATH_TO_SAVE_FROZEN_PB)
    # print('PATH: ', './')
    print("*" * 80)

freeze_model(model.keras_model, FROZEN_NAME)
So far so good! I then continue to make my model TensorFlow Serving ready as follows:
def make_serving_ready(model_path, save_serve_path, version_number):
    import tensorflow as tf
    from tensorflow.python.saved_model import signature_constants, tag_constants

    export_dir = os.path.join(save_serve_path, str(version_number))
    graph_pb = model_path
    builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
    with tf.gfile.GFile(graph_pb, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    sigs = {}
    # tf.import_graph_def(graph_model_def, name='', input_map={"input_image": img_uint8})
    with tf.Session(graph=tf.Graph()) as sess:
        # name="" is important to ensure we don't get spurious prefixing
        tf.import_graph_def(graph_def, name="")
        g = tf.get_default_graph()
        input_image = g.get_tensor_by_name("input_image:0")
        input_image_meta = g.get_tensor_by_name("input_image_meta:0")
        input_anchors = g.get_tensor_by_name("input_anchors:0")
        output_detection = g.get_tensor_by_name("mrcnn_detection/Reshape_1:0")
        output_mask = g.get_tensor_by_name("mrcnn_mask/Reshape_1:0")
        sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
            tf.saved_model.signature_def_utils.predict_signature_def(
                {"input_image": input_image,
                 "input_image_meta": input_image_meta,
                 "input_anchors": input_anchors},
                # {"image_bytes": img_uint8, 'input_image_meta': input_image_meta, 'input_anchors': input_anchors},
                {"mrcnn_detection/Reshape_1": output_detection,
                 "mrcnn_mask/Reshape_1": output_mask})
        builder.add_meta_graph_and_variables(sess,
                                             [tag_constants.SERVING],
                                             signature_def_map=sigs)
    builder.save()
    print("*" * 80)
    print("FINISH CONVERTING FROZEN PB TO SERVING READY")
    print("PATH:", PATH_TO_SAVE_TENSORFLOW_SERVING_MODEL)
    print("*" * 80)

# Now convert the frozen graph to a TensorFlow Serving ready SavedModel
make_serving_ready(os.path.join(PATH_TO_SAVE_FROZEN_PB, FROZEN_NAME),
                   PATH_TO_SAVE_TENSORFLOW_SERVING_MODEL,
                   VERSION_NUMBER)
print("COMPLETED")
I then deploy the output of the above code (saved_model.pb) to AI Platform models.
The thing I'm trying to figure out is how to modify the above code to accept base64-encoded images. I have deployed the model successfully onto GCP AI Platform, but when I send a sample input to test the predictions, I need to use the format GCP expects:
{"instances":[
{"image_bytes":{"b64":abcdefg},{"key":"1"}
]}
So when I convert the image to a base64-encoded string and send it in the above format, I get this error:
{
"error": "{ \"error\": \"Failed to process element: 0 key: image_bytes of \\'instances\\' list. Error: Invalid argument: JSON object: does not have named input: image_bytes\" }"
}
So then I went back to my code and tried to change my input_image variable to accept the decoded image format:
# concatenate decoder graph and original graph
image = tf.map_fn(decode_and_resize, image_str_tensor, back_prop=False, dtype=tf.uint8)
tf.import_graph_def(graph_def, name="", input_map={'input_image:0':image})
But then I get this error:
ValueError: Input 0 of node zero_padding2d_1/Pad_1 was passed uint8 from decoder/map/TensorArrayStack/TensorArrayGatherV3:0 incompatible with expected float.
So I'm officially, completely clueless about how to get this thing running. Is there anyone out there who can fix this?
According to https://cloud.google.com/ai-platform/prediction/docs/online-predict#for---json-request
The encoded string must be formatted as a JSON object with a single key named b64.
{
    "instances": [
        {
            "image_bytes": {"b64": "ASa8asdf"}
        },
        {
            "image_bytes": {"b64": "JLK7ljk3"}
        }
    ]
}
And in your TensorFlow model code, you must name the aliases for your binary input and output tensors so that they end with '_bytes'.
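Putting the two requirements together, here is a hedged sketch of how the conversion script above might change (the decode function and placeholder are illustrative, not confirmed code): decode the incoming image bytes, cast them to float32 to avoid the uint8/float mismatch from the earlier error, map the result onto input_image, and expose the string input under an alias that ends in '_bytes'.

# A string placeholder receives the image bytes; AI Platform strips the
# {"b64": ...} wrapper before the data reaches the graph.
image_str_tensor = tf.placeholder(tf.string, shape=[None], name="image_bytes")

def decode_and_cast(image_bytes):
    img = tf.image.decode_jpeg(image_bytes, channels=3)  # uint8
    return tf.cast(img, tf.float32)                      # the graph expects float

images = tf.map_fn(decode_and_cast, image_str_tensor, dtype=tf.float32)
tf.import_graph_def(graph_def, name="", input_map={"input_image:0": images})

# The input alias must end with '_bytes' so AI Platform base64-decodes it
sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
    tf.saved_model.signature_def_utils.predict_signature_def(
        {"image_bytes": image_str_tensor,
         "input_image_meta": input_image_meta,
         "input_anchors": input_anchors},
        {"mrcnn_detection/Reshape_1": output_detection,
         "mrcnn_mask/Reshape_1": output_mask})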

Pyradiomics: Feature class glcm is not recognized. How to fix it?

I am making a project with a GUI for liver ultrasound diagnostics.
I use PyQt5 (5.12.1) for the GUI and sklearn (0.21.2) for the statistical models. The main texture features come from pyradiomics (2.2.0).
When I run my project from PyCharm 2019.1, everything works completely fine.
But when I try to build the project as an .exe file with PyInstaller, I get some errors. I have solved most of them (missing libraries), but this one remains:
Feature class firstorder is not recognized
Feature class glcm is not recognized
Feature class glrlm is not recognized
Feature class ngtdm is not recognized
Feature class glszm is not recognized
My model also gives an error when I feed new data to the models that were previously saved in .sav files from sklearn:
ValueError: operands could not be broadcast together with shapes (1,3) (96,) (1,3)
1) I tried to change from:
extractor.enableFeatureClassByName('glcm')
to:
extractor.enableFeatureClassByName(str('glcm'))
It did not help.
2) I also tried building the project against different pyradiomics versions: 2.1.1 and 2.2.0 both give the same error.
import pandas as pd
import numpy as np
import pickle
import sklearn
import os
from radiomics import featureextractor
...
folderName = "tmp"
sl = "/"
image_path_to = os.getcwd() + "/data/nrrd/" + folderName + sl + name_image
label_path_to = os.getcwd() + "/data/nrrd/" + folderName + sl + name_label

# Instantiate the extractor
extractor = featureextractor.RadiomicsFeatureExtractor()

# Switch on only the needed feature classes
extractor.disableAllFeatures()
extractor.enableFeatureClassByName('firstorder')  # <<< There is a problem
extractor.enableFeatureClassByName('glcm')
extractor.enableFeatureClassByName('glrlm')
extractor.enableFeatureClassByName('ngtdm')
extractor.enableFeatureClassByName('gldm')
extractor.enableFeatureClassByName('glszm')

# result -> ordered dict
result = extractor.execute(image_path_to, label_path_to)
df = pd.DataFrame(result, index=[0])
...
# Load the models from disk
model_name = 'Multi-layer Perceptron'
poolParam = ["diagnosis_code", "isnorm"]
models = [0, 5]
for param in poolParam:
    filename = 'data/result/model/' + model_name + ' ' + param + '.sav'
    file = open(filename, 'rb')
    loaded = pickle.load(file)
    print("Model <" + model_name + " " + param + "> was loaded")
    # Test the classifier
    y_pred = int(loaded.predict(data))  # <<< There is a problem

Tensorflow prediction loop

I have trained multiple TensorFlow models on the same set of data, each with a slightly different configuration.
Now I want to run prediction on a given input file with each model and store the predictions in a CSV.
It seems I am unable to get TensorFlow to completely unload/reset before loading a new model.
Here is my code. It works fine for the first model, then it generates an error. I have tried changing the order of the models; it always runs the first model without any issue, no matter which model comes first.
import tensorflow as tf
import os
import numpy as np

predictionoutputfile = 'data\\prediction.csv'
predictioninputfile = 'data\\today.csv'
modelslist = 'data\\models.csv'

def predict(dirname, testfield, testper, threshold, prediction_OutFile):
    with tf.Session() as sess:
        print(dirname)
        exported_path = 'imp\\exported\\' + dirname
        tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], exported_path)
        # get the predictor, refer to tf.contrib.predictor
        predictor = tf.contrib.predictor.from_saved_model(exported_path)
        with open(predictioninputfile) as inf:
            # Skip header
            # next(inf)
            for line in inf:
                # Read data, using python, into our features
                var1, var2, var3, var4, var5 = line.strip().split(",")
                # Create a feature_dict for train.example - Get Feature Columns using
                feature_dict = {
                    'var1': _bytes_feature(value=var1.encode()),
                    'var2': _bytes_feature(value=var2.encode()),
                    'var3': _bytes_feature(value=var3.encode()),
                    'var4': _float_feature(value=int(var4)),
                    'var5': _float_feature(value=int(var5)),
                }
                # Prepare model input
                model_input = tf.train.Example(features=tf.train.Features(feature=feature_dict))
                model_input = model_input.SerializeToString()
                output_dict = predictor({"inputs": [model_input]})
                # Positive label = 1
                if float(output_dict['scores'][0][1]) >= float(threshold):
                    prediction_OutFile.write(str(var1) + "," + str(var2) + "," + str(var3) + "," + str(var4) + "," + str(var5) + ",")
                    label_index = tf.argmax(output_dict['scores'])
                    prediction_OutFile.write(str(output_dict['classes'][0][1]))
                    prediction_OutFile.write(',')
                    prediction_OutFile.write(str(output_dict['scores'][0][1]))
                    prediction_OutFile.write('\n')

def main():
    prediction_OutFile = open(predictionoutputfile, 'w')
    prediction_OutFile.write("model,SYMBOL,RECORDDATE,TESTFIELD,TESTPER,prediction,probability")
    prediction_OutFile.write('\n')
    with open(modelslist) as modlist:
        # Skip header
        next(modlist)
        for mline in modlist:
            try:
                dirname = ''
                modelname, datafield, dataper, testfield, testper, threshold, truepositive, falsepositive, truenegative, falsenegative, correct, incorrect, accuracy, precision = mline.strip().split(",")
                # load the current model
                predict(modelname, testfield, testper, threshold, prediction_OutFile)
                # Read file and create feature_dict for each record
            except:
                print('error' + modelname)
    prediction_OutFile.close()

def _float_feature(value):
    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))

def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

if __name__ == "__main__":
    main()
You can; just call tf.reset_default_graph between sessions:
# some stuff
with tf.Session() as sess:
    # more stuff

tf.reset_default_graph()

# some other stuff (again)
with tf.Session() as sess:
    # more other stuff
The elephant in the room: why not use flags and call the Python script once per model?
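A hedged sketch of wiring the reset into the question's own loop: clear the default graph at the top of predict() so each SavedModel loads into a fresh graph.

def predict(dirname, testfield, testper, threshold, prediction_OutFile):
    tf.reset_default_graph()  # drop the previous model's graph state
    with tf.Session() as sess:
        exported_path = 'imp\\exported\\' + dirname
        tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], exported_path)
        predictor = tf.contrib.predictor.from_saved_model(exported_path)
        # ... rest of the prediction loop unchanged ...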

Tensorflow: define placeholders/operation name in image pipeline

I would like to save my trained TensorFlow model so that it can be deployed by restoring the model file (I'm following this example, which seems to make sense). To do this, however, I need to have named tensors, so that I can reload the variables with something like:
graph = tf.get_default_graph()
w1 = graph.get_tensor_by_name("my_tensor:0")
I am queuing images from a list of filenames using string_input_producer (code below), but how do I name the tensors so that I can reload them at a later stage?
import tensorflow as tf

flags = tf.app.flags
conf = flags.FLAGS

class ImageDataSet(object):
    def __init__(self, img_list_path, num_epoch, batch_size):
        # Build the record list queue
        input_file = open(img_list_path, 'r')
        self.record_list = []
        for line in input_file:
            line = line.strip()
            self.record_list.append(line)
        filename_queue = tf.train.string_input_producer(self.record_list, num_epochs=num_epoch)
        image_reader = tf.WholeFileReader()
        _, image_file = image_reader.read(filename_queue)
        image = tf.image.decode_jpeg(image_file, conf.img_colour_channels)
        # preprocess
        # ...
        min_after_dequeue = 1000
        capacity = min_after_dequeue + 400 * batch_size
        self.images = tf.train.shuffle_batch(image, batch_size=batch_size, capacity=capacity,
                                             min_after_dequeue=min_after_dequeue)
I assume that you want to restore the graph for testing or deployment.
For these purposes, you can edit your graph by inserting a placeholder as an entry point for the test data.
To edit the graph, you can use tf's graph editor, or build a new graph with a placeholder and save it.
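For the naming part of the question, a minimal sketch (assuming the pipeline above): wrap the tensor you want to recover in tf.identity to give it a stable name, then fetch it by that name after restoring the graph.

# Inside __init__, give the batch tensor an explicit, stable name
self.images = tf.identity(self.images, name='input_images')

# Later, after restoring the saved graph:
graph = tf.get_default_graph()
images = graph.get_tensor_by_name('input_images:0')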
