I am writing Python code to create a TFRecord file that stores A and C as features, but I am having trouble printing the A and C values back out of the TFRecord file. Can anyone take a look at this?
# Write the TFRecord file
import tensorflow as tf

writer = tf.python_io.TFRecordWriter('output.tfrecord')
A = [1, 3, 4]
C = [1.1, 2.1, 3.1]
feature_A = tf.train.Feature(int64_list=tf.train.Int64List(value=A))
feature_C = tf.train.Feature(float_list=tf.train.FloatList(value=C))
features = {'A': feature_A, 'C': feature_C}
example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(example.SerializeToString())
writer.close()
# Read the TFRecord file
import tensorflow as tf

reader = tf.TFRecordReader()
filename_queue = tf.train.string_input_producer(["output.tfrecord"])
_, serialized_example = reader.read(filename_queue)
feature_set = {'A': tf.FixedLenFeature([], tf.int64),
               'C': tf.FixedLenFeature([], tf.float32)}
features = tf.parse_single_example(serialized_example, features=feature_set)
A = features['A']
C = features['C']

with tf.Session() as sess:
    print(sess.run([A, C]))  # prints nothing
There are two issues:
FixedLenFeature has to have its size defined, so change it to:
feature_set = {'A': tf.FixedLenFeature([3], tf.int64),
               'C': tf.FixedLenFeature([3], tf.float32)}
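(If the length were not fixed in advance, tf.VarLenFeature would be an alternative; it parses the feature into a SparseTensor instead:)

feature_set = {'A': tf.VarLenFeature(tf.int64),
               'C': tf.VarLenFeature(tf.float32)}
# features['A'] is then a SparseTensor; tf.sparse_tensor_to_dense(features['A'])
# recovers an ordinary dense tensor.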
You need to start the queues that feed the reader, so your code should look like:
with tf.Session() as sess:
    # Initialize local variables for the queues (e.g. the epoch counter).
    init_op = tf.local_variables_initializer()
    sess.run(init_op)
    # Create a coordinator, launch the queue-runner threads.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for _ in range(1):
            print(sess.run([A, C]))
    except tf.errors.OutOfRangeError:
        # Raised when the queue has been exhausted.
        print('')
    finally:
        # When done, ask the threads to stop and wait for them to finish.
        coord.request_stop()
        coord.join(threads)
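If the goal is just to print what is in the file, the queue machinery can also be skipped entirely: tf.python_io.tf_record_iterator yields the serialized records, which tf.train.Example can parse eagerly. A minimal sketch:

import tensorflow as tf

for record in tf.python_io.tf_record_iterator('output.tfrecord'):
    example = tf.train.Example.FromString(record)
    print(example.features.feature['A'].int64_list.value)  # [1, 3, 4]
    print(example.features.feature['C'].float_list.value)  # ~[1.1, 2.1, 3.1] (float32)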
I have a very simple model which consists of one tf.Variable(), and here is the whole code:
import tensorflow as tf

save_path = "model1/model1.ckpt"
num_input = 2
n_nodes_hl1 = 2

with tf.variable_scope("model1"):
    hidden_1_layer = {
        'weights': tf.Variable(tf.random_normal([num_input, n_nodes_hl1]), name='Weight1')
    }

def train_model():
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        save_model(sess)

def save_model(sess):
    # The checkpoint path belongs in saver.save(), not in the Saver constructor.
    saver = tf.train.Saver(tf.global_variables())
    saver.save(sess, save_path)

def load_model(sess):
    saver = tf.train.Saver(tf.global_variables())
    saver.restore(sess, save_path)

def run_model():
    print("model1 running...")
    with tf.Session() as sess:
        load_model(sess)
        x = sess.run(hidden_1_layer)
        print(x)

#train_model()
The second model is exactly the same, but with every "model1" renamed to "model2". Both models are trained and saved, and each works perfectly on its own. So now I want to test them using the following script:
import model1 as m1
import model2 as m2
m1.run_model()
m2.run_model()
And here I got an error message:
NotFoundError (see above for traceback): Key model2/Weight2 not found in checkpoint
So it looks like importing both modules adds all the variables to a common graph (even though they are in separate variable scopes), and the saver for model1 then tries to restore model2's variables from model1's checkpoint, where they do not exist.
Can anyone solve my problem?
Is it possible in Tensorflow to run a few different models in one script?
EDIT - PROBLEM SOLVED
The solution is very easy: create a separate tf.Graph for each model. This means that all tensors you declare or compute must be created within that graph, and you must also pass it as an argument to the Session, like tf.Session(graph=self.graph).
Whole example below:
import tensorflow as tf

save_path = "model1/model1.ckpt"

class model1:
    num_input = 2
    n_nodes_hl1 = 2

    def __init__(self):
        self.graph = tf.Graph()
        with self.graph.as_default():
            with tf.variable_scope("model1"):
                self.hidden_1_layer = {
                    'weights': tf.Variable(tf.random_normal([self.num_input, self.n_nodes_hl1]),
                                           name='Weight1')
                }

    def train_model(self):
        # Ops must be created inside this model's own graph.
        with self.graph.as_default():
            init = tf.global_variables_initializer()
            with tf.Session(graph=self.graph) as sess:
                sess.run(init)
                self.save_model(sess)

    def save_model(self, sess):
        with self.graph.as_default():
            saver = tf.train.Saver(tf.global_variables())
            saver.save(sess, save_path)

    def load_model(self, sess):
        with self.graph.as_default():
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, save_path)

    def run_model(self):
        print("model1 running...")
        with tf.Session(graph=self.graph) as sess:
            self.load_model(sess)
            x = sess.run(self.hidden_1_layer)
            print(x)
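With that in place, both models can live in one script, because each session is bound to its own graph:

m1 = model1()
m1.train_model()   # creates model1/model1.ckpt
m1.run_model()     # restores and prints the weights

(model2 would be an identical class with "model2" in place of "model1" and its own save_path.)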
Oh, the common "I want to use several models" question! Just make sure that you reset the graph after each model:
tf.reset_default_graph()
Your code would look like:
import tensorflow as tf
import model1 as m1
m1.run_model()
tf.reset_default_graph()
import model2 as m2
m2.run_model()
Why? The moment you create a variable in TensorFlow using tf.Variable, that variable is added to the default graph. If you import both models one after the other, you have just created all the variables in the default graph! This is by far the easiest solution. Consider the default graph as a blackboard: you can draw your fancy ML model on it, but you need to wipe it clean before reuse!
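You can see the blackboard effect directly, as a quick sanity check:

import tensorflow as tf

tf.Variable(0, name='w1')
print(len(tf.global_variables()))  # 1 -- the variable landed in the default graph
tf.Variable(0, name='w2')
print(len(tf.global_variables()))  # 2 -- and so does every later one
tf.reset_default_graph()
print(len(tf.global_variables()))  # 0 -- the blackboard is clean again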
NOTE: If you are wondering, the alternative is to create a separate graph for each of the models, but it is much more worrisome and I only recommend it for times when you must have both models at the same time.
EXTRA: Encapsulating your model in a TensorFlow class
A fancier way to do it while avoiding several graphs (seriously, it is horrible!) is to encapsulate the whole model in a class. Thus, your code would look like this:
import tensorflow as tf

class model():
    num_input = 2
    n_nodes_hl1 = 2

    def __init__(self, new_save_path):
        self.save_path = new_save_path
        # Wipe the default graph so each new instance starts clean.
        tf.reset_default_graph()
        with tf.variable_scope("model1"):
            self.hidden_1_layer = {
                'weights': tf.Variable(tf.random_normal([self.num_input,
                                                         self.n_nodes_hl1]), name='Weight1')
            }
        self.saver = tf.train.Saver(tf.global_variables())
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

    def save_model(self):
        self.saver.save(self.sess, self.save_path)

    def load_model(self):
        self.saver.restore(self.sess, self.save_path)

    def run_model(self):
        print("model1 running...")
        self.load_model()
        x = self.sess.run(self.hidden_1_layer)
        print(x)

    #train_model(self)
This way you could simply do:
from model import model
m1 = model('model1/model1.ckpt') # These two lines could be put into one
m1.run_model() # m1 = model('model1/model1.ckpt').run_model()
m2 = model('model2/model2.ckpt')
m2.run_model()
You still want it in a for loop?
from model import model

model_file_list = ['model1/model1.ckpt', 'model2/model2.ckpt']
for model_file in model_file_list:
    m = model(model_file).run_model()
    # Run tests, print stuff, save stuff here!
I'm using the following code to do real-time inference with MobileNet:
import os
import cv2
import tensorflow as tf

os.environ["CUDA_VISIBLE_DEVICES"] = '0'

camera = cv2.VideoCapture(0)
camera.set(3, 1280)   # frame width
camera.set(4, 1024)   # frame height

# Loads label file, strips off carriage return
label_lines = [line.rstrip() for line
               in tf.gfile.GFile('tf_model/output_labels.txt')]

gpu_options = tf.GPUOptions(allow_growth=True, per_process_gpu_memory_fraction=0.9)
sess_config = tf.ConfigProto(gpu_options=gpu_options,
                             log_device_placement=False,
                             allow_soft_placement=True)

def grabVideoFeed():
    grabbed, frame = camera.read()
    return frame if grabbed else None

def initialSetup():
    with tf.device('/gpu:0'):
        with tf.gfile.FastGFile('tf_model/output_graph.pb', 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')

initialSetup()

with tf.Session(config=sess_config) as sess:
    softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
    while True:
        frame = grabVideoFeed()
        # do the rest of the classification
Though I have tried to use the GPU, it is still not fully utilized; GPU usage is only 8%. How can I sort this out?
I can't get the following string_input_producer hello-world program to run:
import tensorflow as tf

filename = tf.placeholder(dtype=tf.string, name='filename')
f_q = tf.train.string_input_producer(filename, num_epochs=1, shuffle=False)
filename_tf = f_q.dequeue()

with tf.Session() as S:
    S.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    print(S.run(filename_tf, feed_dict={filename: "hello world"}))
    coord.request_stop()
    coord.join(threads)
Seems simple enough, but TF tells me in an error message that I need to pass a string value to the placeholder 'filename' (which I do). Does anyone see what I'm doing wrong here? Thanks.
Why does it say paper jam, when there is no paper jam!
This works:
import tensorflow as tf

filename = ['hello world']
f_q = tf.train.string_input_producer(filename, num_epochs=1, shuffle=False)
filename_tf = f_q.dequeue()

with tf.Session() as S:
    S.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    print(S.run(filename_tf))
    coord.request_stop()
    coord.join(threads)
This is because tf.train.string_input_producer builds a queue that must be filled with real strings: the enqueue op is run by the queue-runner threads, which do not see your feed_dict, so the placeholder is never fed at the moment the enqueue actually happens. The dequeue then hands the strings back in order.
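If you really want to supply the filenames at run time, you can drive a plain FIFOQueue yourself, since you run its enqueue op explicitly and can attach a feed_dict to that call. A minimal sketch:

import tensorflow as tf

filename = tf.placeholder(dtype=tf.string, shape=[None], name='filename')
q = tf.FIFOQueue(capacity=10, dtypes=[tf.string])
enqueue_op = q.enqueue_many([filename])
filename_tf = q.dequeue()

with tf.Session() as S:
    # The enqueue is run by us, not by queue-runner threads, so feeding works.
    S.run(enqueue_op, feed_dict={filename: ['hello world']})
    print(S.run(filename_tf))  # b'hello world'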
Say I have defined a function that loads one label/features pair from a TFRecords file as follows:
def read_one_image(tfrecords_path):
    queue = tf.train.string_input_producer([tfrecords_path])
    reader = tf.TFRecordReader()
    key, value = reader.read(queue)
    features = tf.parse_single_example(value,
                                       features={'label': tf.FixedLenFeature([], tf.int64),
                                                 'image': tf.FixedLenFeature([784], tf.int64)})
    label = features['label']
    image = features['image']
    return label, image
Fetching the images in a session works fine if I keep the session open:
tf.reset_default_graph()
label, image = read_one_image("mnist_train.tfrecords")

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
tf.train.start_queue_runners(sess=sess)
for i in range(10):
    one_label, one_image = sess.run([label, image])
    print(one_label, one_image.shape)
However, if I use a context manager like so
g = tf.Graph()
with g.as_default():
    label, image = read_one_image("mnist_train.tfrecords")
    with tf.Session(graph=g) as sess:
        sess.run(tf.global_variables_initializer())
        tf.train.start_queue_runners(sess=sess)
        for i in range(10):
            one_label, one_image = sess.run([label, image])
            print(one_label, one_image.shape)
I get an error, interleaved with the loop's print output (the stray 7 and (784,) come from the print call): ERROR:tensorflow:Exception in QueueRunner: Attempted to use a closed Session.
Maybe I am misunderstanding how the queue runner works, but since I only call sess.run ten times, it should just fetch a data pair ten times. Now, is there a way to quit/exit/close the session without exhausting the queue?
You need to use a tf.train.Coordinator, so the queue-runner threads can be stopped before the session closes:
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
tf.train.start_queue_runners(sess=sess, coord=coord)
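Putting it together with the context manager from the question, the key point is to stop the coordinator while the with block still holds the session open (a sketch reusing read_one_image from above):

g = tf.Graph()
with g.as_default():
    label, image = read_one_image("mnist_train.tfrecords")
    with tf.Session(graph=g) as sess:
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for i in range(10):
            one_label, one_image = sess.run([label, image])
            print(one_label, one_image.shape)
        # Stop the queue-runner threads before the session is closed.
        coord.request_stop()
        coord.join(threads)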
I am loading multiple JPEG images into a TensorFlow queue. The image files all have different dimensions, so I am using WholeFileReader() to read them. I want to resize and crop each image through a prep_image function but can't figure out how to do that.
Moreover, how can I get an input batch through the queue runner, process that whole batch, and run my classifier on it?
filename_queue = tf.train.string_input_producer(
    tf.train.match_filenames_once("path_to_image_files"))
image_reader = tf.WholeFileReader()
_, image_file = image_reader.read(filename_queue)
image = tf.image.decode_jpeg(image_file)
image = prep_image(image)

with tf.Session() as sess:
    tf.initialize_all_variables().run()
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    image_ = sess.run([image])
    print(type(image_))
    print(image_)
    image1 = prep_image(image_)
    coord.request_stop()
    coord.join(threads)
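For the resize/crop part, the usual pattern is to do it inside the graph so every image has a static shape before batching; tf.train.batch then assembles batches via the queue runners. A minimal sketch (this prep_image is a hypothetical stand-in that just resizes to 224x224):

import tensorflow as tf

def prep_image(image, height=224, width=224):
    # decode_jpeg output has unknown height/width; resize to a fixed size.
    image = tf.image.resize_images(image, [height, width])
    image.set_shape([height, width, 3])
    return image

filename_queue = tf.train.string_input_producer(
    tf.train.match_filenames_once("path_to_image_files"))
image_reader = tf.WholeFileReader()
_, image_file = image_reader.read(filename_queue)
image = prep_image(tf.image.decode_jpeg(image_file, channels=3))

# tf.train.batch needs static shapes, which set_shape above provides.
image_batch = tf.train.batch([image], batch_size=32)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())  # match_filenames_once uses a local variable
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    batch = sess.run(image_batch)   # feed this to the classifier
    print(batch.shape)              # (32, 224, 224, 3)
    coord.request_stop()
    coord.join(threads)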