Tensorflow Object Detection API multi-scale inference variable reuse - python

I was trying to build multi-scale object detection inference code based on the Tensorflow Object Detection API. However, I don't quite know how to work around the variable reuse issue when predicting boxes on different scales of the same image in one session. Here's what I did and where I got stuck:
In https://github.com/tensorflow/models/blob/master/research/object_detection/evaluator.py, I duplicated the line prediction_dict = model.predict(preprocessed_image, true_image_shapes), as you can see below.
def _extract_predictions_and_losses(model,
                                    create_input_dict_fn,
                                    ignore_groundtruth=False):
  """Constructs tensorflow detection graph and returns output tensors.

  Args:
    model: model to perform predictions with.
    create_input_dict_fn: function to create input tensor dictionaries.
    ignore_groundtruth: whether groundtruth should be ignored.

  Returns:
    prediction_groundtruth_dict: A dictionary with postprocessed tensors (keyed
      by standard_fields.DetectionResultsFields) and optional groundtruth
      tensors (keyed by standard_fields.InputDataFields).
    losses_dict: A dictionary containing detection losses. This is empty when
      ignore_groundtruth is true.
  """
  input_dict = create_input_dict_fn()
  prefetch_queue = prefetcher.prefetch(input_dict, capacity=500)
  input_dict = prefetch_queue.dequeue()
  original_image = tf.expand_dims(input_dict[fields.InputDataFields.image], 0)
  preprocessed_image, true_image_shapes = model.preprocess(
      tf.to_float(original_image))
  prediction_dict1 = model.predict(preprocessed_image, true_image_shapes)
  # **** some code to resize preprocessed_image ****
  prediction_dict2 = model.predict(preprocessed_image, true_image_shapes)
  detections = model.postprocess(prediction_dict1, true_image_shapes)
  groundtruth = None
  losses_dict = {}
  if not ignore_groundtruth:
    # ...
This gives me the following error:
Traceback (most recent call last):
File "object_detection/eval_fddb.py", line 167, in <module>
tf.app.run()
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/python/platform/app.py", line 48, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "object_detection/eval_fddb.py", line 163, in main
FLAGS.checkpoint_dir, FLAGS.eval_dir)
File "/local/mnt/workspace/chris/projects/models/object_detection/evaluator_fddb.py", line 261, in evaluate
create_input_dict_fn=create_input_dict_fn)
File "/local/mnt/workspace/chris/projects/models/object_detection/evaluator_fddb.py", line 187, in _extract_prediction_tensors
prediction_dict = model.predict(preprocessed_image)
File "/local/mnt/workspace/chris/projects/models/object_detection/meta_architectures/umd_meta_arch.py", line 362, in predict
image_shape) = self._extract_rpn_feature_maps(preprocessed_inputs)
File "/local/mnt/workspace/chris/projects/models/object_detection/meta_architectures/umd_meta_arch.py", line 278, in _extract_rpn_feature_maps
preprocessed_inputs, scope=self.first_stage_feature_extractor_scope)
File "/local/mnt/workspace/chris/projects/models/object_detection/meta_architectures/faster_rcnn_meta_arch.py", line 154, in extract_proposal_features_w_internal_layers
return self._extract_proposal_features_w_internal_layers(preprocessed_inputs, scope)
File "/local/mnt/workspace/chris/projects/models/object_detection/models/faster_rcnn_resnet_v1_feature_extractor.py", line 173, in _extract_proposal_features_w_internal_layers
scope=var_scope)
File "/local/mnt/workspace/chris/projects/models/slim/nets/resnet_v1.py", line 300, in resnet_v1_101
reuse=reuse, scope=scope)
File "/local/mnt/workspace/chris/projects/models/slim/nets/resnet_v1.py", line 214, in resnet_v1
net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
File "/local/mnt/workspace/chris/projects/models/slim/nets/resnet_utils.py", line 122, in conv2d_same
rate=rate, padding='VALID', scope=scope)
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/contrib/framework/python/ops/arg_scope.py", line 181, in func_with_args
return func(*args, **current_args)
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/contrib/layers/python/layers/layers.py", line 1027, in convolution
outputs = layer.apply(inputs)
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/python/layers/base.py", line 503, in apply
return self.__call__(inputs, *args, **kwargs)
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/python/layers/base.py", line 443, in __call__
self.build(input_shapes[0])
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/python/layers/convolutional.py", line 137, in build
dtype=self.dtype)
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/python/layers/base.py", line 383, in add_variable
trainable=trainable and self.trainable)
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/variable_scope.py", line 1065, in get_variable
use_resource=use_resource, custom_getter=custom_getter)
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/variable_scope.py", line 962, in get_variable
use_resource=use_resource, custom_getter=custom_getter)
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/variable_scope.py", line 360, in get_variable
validate_shape=validate_shape, use_resource=use_resource)
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/contrib/layers/python/layers/layers.py", line 1561, in layer_variable_getter
return _model_variable_getter(getter, *args, **kwargs)
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/contrib/layers/python/layers/layers.py", line 1553, in _model_variable_getter
custom_getter=getter, use_resource=use_resource)
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/contrib/framework/python/ops/arg_scope.py", line 181, in func_with_args
return func(*args, **current_args)
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/contrib/framework/python/ops/variables.py", line 261, in model_variable
use_resource=use_resource)
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/contrib/framework/python/ops/arg_scope.py", line 181, in func_with_args
return func(*args, **current_args)
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/contrib/framework/python/ops/variables.py", line 216, in variable
use_resource=use_resource)
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/variable_scope.py", line 352, in _true_getter
use_resource=use_resource)
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/variable_scope.py", line 664, in _get_single_variable
name, "".join(traceback.format_list(tb))))
ValueError: Variable FirstStageFeatureExtractor/resnet_v1_101/conv1/weights already exists, disallowed. Did you mean to set reuse=True in VarScope? Originally defined at:
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/contrib/framework/python/ops/variables.py", line 216, in variable
use_resource=use_resource)
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/contrib/framework/python/ops/arg_scope.py", line 181, in func_with_args
return func(*args, **current_args)
File "/local/mnt/workspace/chris/anaconda2/lib/python2.7/site-packages/tensorflow/contrib/framework/python/ops/variables.py", line 261, in model_variable
use_resource=use_resource)
My understanding is that each time I call model.predict(), it builds the model graph and creates all of its variables. The second call then fails because variables with the same names already exist. I tried wrapping the second model.predict() in a variable scope with reuse=True, but then the checkpoint variables wouldn't load at all because the names no longer match.
My question is:
How can I run the same model on two different scales of the same image in one sess.run()?
Thank you very much.

Have you tried using tf.variable_scope() before you build the model?
with tf.variable_scope('first_prediction'):
    prediction_dict1 = model.predict(preprocessed_image, true_image_shapes)
with tf.variable_scope('second_prediction'):
    prediction_dict2 = model.predict(preprocessed_image, true_image_shapes)
This way both models get a different prefix on their variable names, which prevents the reuse problem.
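If the prefixes break checkpoint restoring (the names stored in the checkpoint have no first_prediction/second_prediction prefix), a common TF 1.x alternative is to reuse the variables of the first predict() call for the second one, so all names stay exactly as in the checkpoint. A minimal sketch, assuming model.predict creates its variables through tf.get_variable (the slim-based feature extractors do) and using a hypothetical 300x300 second scale:

import tensorflow as tf

prediction_dict1 = model.predict(preprocessed_image, true_image_shapes)

# hypothetical second scale; any resize of the preprocessed image works here
resized_image = tf.image.resize_images(preprocessed_image, [300, 300])

# the second tower reuses the variables the first call just created,
# so no new variables are made and the original names are kept
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    prediction_dict2 = model.predict(resized_image, true_image_shapes)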

Related

Can't load the model from checkpoint

Using the object detection API and Mask R-CNN from the model zoo, when I start training with the default command:
python model_main_tf2.py --pipeline_config_path=pipeline.config --model_dir=model/v1/
I get this error
Two checkpoint references resolved to different objects (<tensorflow.python.keras.layers.convolutional.Conv2D object at 0x7f5e94075c10> and <tensorflow.python.keras.layers.convolutional.Conv2D object at 0x7f5e9403b710>).
Traceback (most recent call last):
File "model_main_tf2.py", line 115, in <module>
tf.compat.v1.app.run()
File "/home/.local/lib/python3.7/site-packages/tensorflow/python/platform/app.py", line 40, in run
_run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)
File "/opt/anaconda/envs/tensorflow2.3/lib/python3.7/site-packages/absl/app.py", line 300, in run
_run_main(main, args)
File "/opt/anaconda/envs/tensorflow2.3/lib/python3.7/site-packages/absl/app.py", line 251, in _run_main
sys.exit(main(argv))
File "/home/TF/workspace/Antenna/model_main_tf2.py", line 112, in main
record_summaries=FLAGS.record_summaries)
File "/home/TF/models/research/object_detection/model_lib_v2.py", line 603, in train_loop
train_input, unpad_groundtruth_tensors)
File "/home/TF/models/research/object_detection/model_lib_v2.py", line 401, in load_fine_tune_checkpoint
checkpoint_path).expect_partial().assert_existing_objects_matched()
File "/home/.local/lib/python3.7/site-packages/tensorflow/python/training/tracking/util.py", line 1721, in restore
status = self._saver.restore(save_path=save_path)
File "/home/.local/lib/python3.7/site-packages/tensorflow/python/training/tracking/util.py", line 1320, in restore
checkpoint=checkpoint, proto_id=0).restore(self._graph_view.root)
File "/home/.local/lib/python3.7/site-packages/tensorflow/python/training/tracking/base.py", line 209, in restore
restore_ops = trackable._restore_from_checkpoint_position(self) # pylint: disable=protected-access
File "/home/.local/lib/python3.7/site-packages/tensorflow/python/training/tracking/base.py", line 914, in _restore_from_checkpoint_position
tensor_saveables, python_saveables))
File "/home/.local/lib/python3.7/site-packages/tensorflow/python/training/tracking/util.py", line 297, in restore_saveables
validated_saveables).restore(self.save_path_tensor, self.options)
File "/home/.local/lib/python3.7/site-packages/tensorflow/python/training/saving/functional_saver.py", line 340, in restore
restore_ops = restore_fn()
File "/home/.local/lib/python3.7/site-packages/tensorflow/python/training/saving/functional_saver.py", line 316, in restore_fn
restore_ops.update(saver.restore(file_prefix, options))
File "/home/.local/lib/python3.7/site-packages/tensorflow/python/training/saving/functional_saver.py", line 111, in restore
restored_tensors, restored_shapes=None)
File "/home/.local/lib/python3.7/site-packages/tensorflow/python/distribute/values.py", line 890, in restore
for v in self._mirrored_variable.values))
File "/home/.local/lib/python3.7/site-packages/tensorflow/python/distribute/values.py", line 890, in <genexpr>
for v in self._mirrored_variable.values))
File "/home/.local/lib/python3.7/site-packages/tensorflow/python/distribute/values_util.py", line 195, in assign_on_device
return variable.assign(tensor)
File "/home/.local/lib/python3.7/site-packages/tensorflow/python/ops/resource_variable_ops.py", line 858, in assign
self._shape.assert_is_compatible_with(value_tensor.shape)
File "/home/.local/lib/python3.7/site-packages/tensorflow/python/framework/tensor_shape.py", line 1134, in assert_is_compatible_with
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
ValueError: Shapes (1, 1, 1088, 256) and (1, 1, 1024, 512) are incompatible
I use the default config from the tf-api models/research/object_detection/configs.
I have already tried to fix it by using the config file from the model zoo and by setting fine_tune_checkpoint_type: "detection".
The input shape in the pipeline.config file doesn't match the one from the model checkpoint: the incompatible shapes (1, 1, 1088, 256) and (1, 1, 1024, 512) in the error are the variable your config builds versus the tensor stored in the checkpoint, so the config and the checkpoint describe different architectures.
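A hedged sketch of the pipeline.config fields worth checking (paths and resizer values are placeholders); the checkpoint path, the checkpoint type, and the model/image_resizer settings should all come from the same model-zoo entry:

model {
  faster_rcnn {    # must be the same architecture the checkpoint was trained with
    image_resizer {
      keep_aspect_ratio_resizer {
        min_dimension: 800
        max_dimension: 1333
      }
    }
    # ...
  }
}
train_config {
  fine_tune_checkpoint: "path/to/model_zoo_model/checkpoint/ckpt-0"  # placeholder
  fine_tune_checkpoint_type: "detection"
  # ...
}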

How to solve the problem of PyTorch stack?

I want to run a Python program using PyTorch. How should I make each tensor in a batch the same size? The following problem appears:
Traceback (most recent call last):
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 311, in <module>
fire.Fire(demo)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\fire\core.py", line 138, in Fire
component_trace = _Fire(component, args, parsed_flag_args, context, name)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\fire\core.py", line 468, in _Fire
target=component.__name__)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\fire\core.py", line 672, in _CallAndUpdateTrace
component = fn(*varargs, **kwargs)
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 289, in demo
n_epochs=n_epochs, batch_size=batch_size, seed=seed)
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 168, in train
n_epochs=n_epochs,
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 42, in train_epoch
for batch_idx, (input, target) in enumerate(loader):
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\dataloader.py", line 346, in __next__
data = self._next_data()
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\dataloader.py", line 386, in _next_data
data = self._dataset_fetcher.fetch(index) # may raise StopIteration
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\fetch.py", line 47, in fetch
return self.collate_fn(data)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 87, in default_collate
return [default_collate(samples) for samples in transposed]
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 87, in <listcomp>
return [default_collate(samples) for samples in transposed]
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 72, in default_collate
return default_collate([torch.as_tensor(b) for b in batch])
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 63, in default_collate
return torch.stack(batch, 0, out=out)
RuntimeError: stack expects each tensor to be equal size, but got [650] at entry 0 and [108] at entry 1
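The default collate function calls torch.stack, which needs same-sized tensors; entries 0 and 1 here are 1-D tensors of length 650 and 108. One common fix is a custom collate_fn that pads each batch to its longest item. A minimal sketch, assuming each sample is a (features, label) pair with variable-length 1-D features (dataset is a placeholder for your dataset object):

import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader

def pad_collate(batch):
    # batch is a list of (features, label) pairs whose features differ in length
    features, labels = zip(*batch)
    # pad every features tensor to the longest one -> shape (batch, max_len)
    padded = pad_sequence([torch.as_tensor(f) for f in features],
                          batch_first=True)
    return padded, torch.as_tensor(labels)

# 'dataset' is a placeholder for your Dataset object
loader = DataLoader(dataset, batch_size=32, collate_fn=pad_collate)

Whether padding is appropriate depends on the model; the alternative is to crop or resize every sample to a fixed size in the dataset's __getitem__.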

<Tensorflow object detection>TypeError: resize_images() got an unexpected keyword argument 'preserve_aspect_ratio'

cuda 9.0
cudnn 7.5
python 3.5.2
tensorflow-gpu 1.8
I don't know where the error occurred; I also tried Python 3.6.3 and the same error occurs. Please help.
I am training with the model_main.py file, but I get the following error.
python model_main.py --model_dir=F:/cindy/cindybackup/tensorflow1/test/training -pipeline_config_path=F:/cindy/cindybackup/tensorflow1/test/data/faster_rcnn_inception_v2_pets.config --alsologtostderr --num_train_steps=1000 --num_eval_steps=10
It shows the following:
WARNING:tensorflow:Forced number of epochs for all eval validations to
be 1.
WARNING:tensorflow:Expected number of evaluation epochs is 1, but instead encountered eval_on_train_input_config.num_epochs = 0.
Overwriting num_epochs to 1.
WARNING:tensorflow:Using temporary folder as model directory: C:\Users\wyh\AppData\Local\Temp\tmplh3q4jn2
WARNING:tensorflow:Estimator's model_fn (.model_fn at 0x00000256FF7F1400>) includes
params argument, but params are not passed to Estimator.
WARNING:tensorflow:num_readers has been reduced to 1 to match input file shards.
Traceback (most recent call last):
File "model_main.py", line 109, in
tf.app.run()
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\platform\app.py",
line 126, in run
_sys.exit(main(argv))
File "model_main.py", line 105, in main
tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\estimator\training.py",
line 439, in train_and_evaluate
executor.run()
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\estimator\training.py",
line 518, in run
self.run_local()
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\estimator\training.py",
line 650, in run_local
hooks=train_hooks)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\estimator\estimator.py",
line 363, in train
loss = self._train_model(input_fn, hooks, saving_listeners)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\estimator\estimator.py",
line 843, in _train_model
return self._train_model_default(input_fn, hooks, saving_listeners)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\estimator\estimator.py",
line 853, in _train_model_default
input_fn, model_fn_lib.ModeKeys.TRAIN))
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\estimator\estimator.py",
line 691, in _get_features_and_labels_from_input_fn
result = self._call_input_fn(input_fn, mode)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\estimator\estimator.py",
line 798, in _call_input_fn
return input_fn(**kwargs)
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\inputs.py",
line 525, in _train_input_fn
batch_size=params['batch_size'] if params else train_config.batch_size)
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\builders\dataset_builder.py",
line 149, in build
dataset = data_map_fn(process_fn, num_parallel_calls=num_parallel_calls)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py",
line 853, in map
return ParallelMapDataset(self, map_func, num_parallel_calls)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py",
line 1870, in init
super(ParallelMapDataset, self).init(input_dataset, map_func)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py",
line 1839, in init
self._map_func.add_to_graph(ops.get_default_graph())
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\framework\function.py",
line 484, in add_to_graph
self._create_definition_if_needed()
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\framework\function.py",
line 319, in _create_definition_if_needed
self._create_definition_if_needed_impl()
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\framework\function.py",
line 336, in _create_definition_if_needed_impl
outputs = self._func(*inputs)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py",
line 1804, in tf_map_func
ret = map_func(nested_args)
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\builders\dataset_builder.py",
line 130, in process_fn
processed_tensors = transform_input_data_fn(processed_tensors)
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\inputs.py",
line 515, in transform_and_pad_input_data_fn
tensor_dict=transform_data_fn(tensor_dict),
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\inputs.py",
line 129, in transform_input_data
tf.expand_dims(tf.to_float(image), axis=0))
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\meta_architectures\faster_rcnn_meta_arch.py",
line 543, in preprocess
parallel_iterations=self._parallel_iterations)
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\utils\shape_utils.py",
line 237, in static_or_dynamic_map_fn
outputs = [fn(arg) for arg in tf.unstack(elems)]
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\utils\shape_utils.py",
line 237, in
outputs = [fn(arg) for arg in tf.unstack(elems)]
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\core\preprocessor.py",
line 2264, in resize_to_range
lambda: _resize_portrait_image(image))
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\util\deprecation.py",
line 432, in new_func
return func(*args, **kwargs)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\ops\control_flow_ops.py",
line 2063, in cond
orig_res_t, res_t = context_t.BuildCondBranch(true_fn)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\ops\control_flow_ops.py",
line 1913, in BuildCondBranch
original_result = fn()
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\core\preprocessor.py",
line 2263, in
lambda: _resize_landscape_image(image),
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\core\preprocessor.py",
line 2245, in _resize_landscape_image
align_corners=align_corners, preserve_aspect_ratio=True)
TypeError: resize_images() got an unexpected keyword argument 'preserve_aspect_ratio'
Thanks~
The problem is not resolved yet in tensorflow models (https://github.com/tensorflow/models/).
I just removed the preserve_aspect_ratio argument in object_detection/core/preprocessor.py, changing
align_corners=align_corners, preserve_aspect_ratio=True)
to
align_corners=align_corners)
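The error itself shows the cause: the installed TensorFlow (1.8 here) predates the preserve_aspect_ratio argument of tf.image.resize_images, while the models checkout assumes a newer TensorFlow. Besides deleting the argument or upgrading TensorFlow, a version-tolerant patch is possible. A hedged sketch (resize_compat is a hypothetical helper; the argument names mirror the call site in _resize_landscape_image):

import inspect
import tensorflow as tf

def resize_compat(image, new_size, method, align_corners):
    # Pass preserve_aspect_ratio only when this TF build supports it.
    kwargs = {'method': method, 'align_corners': align_corners}
    if 'preserve_aspect_ratio' in inspect.signature(
            tf.image.resize_images).parameters:
        kwargs['preserve_aspect_ratio'] = True
    return tf.image.resize_images(image, new_size, **kwargs)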

Tensorflow Dataset map: input seems to be a placeholder which causes an error in tf.read_file

I'm trying to read in data using the Tensorflow Dataset API. I have loaded filenames and label filenames into arrays which I load into a dataset. I then try to map these filenames to the actual image files, but get an error that seems to state that the mapping function receives placeholders rather than actual tensors.
class DatasetReader:
    def __init__(self, records_list, batch_size=1):
        self.batch_size = batch_size
        self.records = {}
        self.records["image"] = tf.convert_to_tensor(
            [record['image'] for record in records_list])
        self.records["filename"] = tf.convert_to_tensor(
            [record['filename'] for record in records_list])
        self.records["annotation"] = tf.convert_to_tensor(
            [record['annotation'] for record in records_list])
        self.dataset = Dataset.from_tensor_slices(self.records)
        self.dataset = self.dataset.map(self._input_parser)
        self.dataset = self.dataset.batch(batch_size)
        self.dataset = self.dataset.repeat()

    def _input_parser(self, record):
        filename = record['filename']
        image_name = record['image']
        annotation_file = record['annotation']
        image = tf.image.decode_image(tf.read_file(filename))
        annotation = tf.image.decode_image(tf.read_file(annotation_file))
        return self._augment_image(image, annotation)
The error I'm getting is in the line image = tf.image.decode_image(tf.read_file(filename)). The stack trace is below.
File "FCN.py", line 269, in <module>
tf.app.run()
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/tensorflow/python/platform/app.py", line 48, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "FCN.py", line 179, in main
train_records, valid_records, image_options_train, image_options_val, FLAGS.batch_size, FLAGS.batch_size)
File "/home/ubuntu/FCN.tensorflow/TFReader.py", line 89, in from_records
train_reader = DatasetReader(train_records, train_image_options, train_batch_size)
File "/home/ubuntu/FCN.tensorflow/TFReader.py", line 34, in __init__
self.dataset = self.dataset.map(self._input_parser)
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/tensorflow/contrib/data/python/ops/dataset_ops.py", line 964, in map
return MapDataset(self, map_func, num_threads, output_buffer_size)
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/tensorflow/contrib/data/python/ops/dataset_ops.py", line 1735, in __init__
self._map_func.add_to_graph(ops.get_default_graph())
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/function.py", line 449, in add_to_graph
self._create_definition_if_needed()
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/tensorflow/contrib/data/python/framework/function.py", line 168, in _create_definition_if_needed
outputs = self._func(*inputs)
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/tensorflow/contrib/data/python/ops/dataset_ops.py", line 1723, in tf_map_func
ret = map_func(nested_args)
File "/home/ubuntu/FCN.tensorflow/TFReader.py", line 42, in _input_parser
image = tf.image.decode_image(tf.read_file(filename))
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/gen_io_ops.py", line 223, in read_file
result = _op_def_lib.apply_op("ReadFile", filename=filename, name=name)
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 767, in apply_op
op_def=op_def)
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/tensorflow/contrib/data/python/framework/function.py", line 80, in create_op
data_types, **kwargs)
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/function.py", line 665, in create_op
**kwargs)
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2632, in create_op
set_shapes_for_outputs(ret)
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1911, in set_shapes_for_outputs
shapes = shape_func(op)
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1861, in call_with_requiring
return call_cpp_shape_fn(op, require_shape_fn=True)
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/common_shapes.py", line 595, in call_cpp_shape_fn
require_shape_fn)
File "/home/ubuntu/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/common_shapes.py", line 659, in _call_cpp_shape_fn_impl
raise ValueError(err.message)
ValueError: Shape must be rank 0 but is rank 1 for 'ReadFile' (op: 'ReadFile') with input shapes: [?].
You cannot pass in a rank-1 tensor to tf.read_file. Here are some examples:
import tensorflow as tf
# Correct: input can be a string.
tf.image.decode_image(tf.read_file("filename"))
# Correct: input can be a rank-0 tensor.
tf.image.decode_image(tf.read_file(tf.convert_to_tensor("filename")))
# Wrong: input cannot be a list.
tf.image.decode_image(tf.read_file(["filename"]))
# Wrong: input cannot be a rank-1 tensor
tf.image.decode_image(tf.read_file(tf.convert_to_tensor(["filename"])))
In your code, it seems like self.records["filename"] is a rank-1 tensor; you might have mistakenly passed it to tf.read_file in _input_parser.
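For reference, a minimal self-contained sketch of the intended behaviour on the tf.data API of later TF 1.x releases (file names are placeholders): from_tensor_slices slices off the leading dimension, so inside the map function each field is a rank-0 string tensor, which tf.read_file accepts.

import tensorflow as tf

records = {
    'filename': tf.constant(['img0.png', 'img1.png']),      # rank-1 overall
    'annotation': tf.constant(['ann0.png', 'ann1.png']),
}
dataset = tf.data.Dataset.from_tensor_slices(records)

def _parse(record):
    # record['filename'] is rank-0 here: one file name per element
    image = tf.image.decode_image(tf.read_file(record['filename']))
    annotation = tf.image.decode_image(tf.read_file(record['annotation']))
    return image, annotation

dataset = dataset.map(_parse).batch(1).repeat()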

Share weights in coupled gans - Tensorflow

I am using the TensorFlow implementation of CoGAN. I could control which layers share weights and which don't, except for the first layer of the discriminator. I am not sure what is going wrong here.
The offending part of the code is
h0 = prelu(self.d_bn0(conv2d(image, self.c_dim, name='d'+branch+'_h0_conv', reuse=share_params), reuse=share_params),
           name='d'+branch+'_h0_prelu', reuse=share_params)
where reuse=share_params should allow the weights to be shared between the two branches. Should this be handled in a different manner?
The Traceback looks like:
Traceback (most recent call last):
File "main.py", line 51, in <module>
tf.app.run()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 43, in run
sys.exit(main(sys.argv[:1] + flags_passthrough))
File "main.py", line 41, in main
dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)
File "/home/iiitd/Desktop/CoGAN-tensorflow-master/model.py", line 70, in __init__
self.build_model()
File "/home/iiitd/Desktop/CoGAN-tensorflow-master/model.py", line 101, in build_model
self.D2_logits, self.D2 = self.discriminator(self.images2, self.y, share_params=True, reuse=False, name='D2')
File "/home/iiitd/Desktop/CoGAN-tensorflow-master/model.py", line 287, in discriminator
h0 = prelu(self.d_bn0(conv2d(image, self.c_dim, name='d'+branch+'_h0_conv', reuse=share_params), reuse=share_params),
File "/home/iiitd/Desktop/CoGAN-tensorflow-master/ops.py", line 82, in conv2d
initializer=tf.truncated_normal_initializer(stddev=stddev))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/variable_scope.py", line 1024, in get_variable
custom_getter=custom_getter)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/variable_scope.py", line 850, in get_variable
custom_getter=custom_getter)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/variable_scope.py", line 346, in get_variable
validate_shape=validate_shape)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/variable_scope.py", line 331, in _true_getter
caching_device=caching_device, validate_shape=validate_shape)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/variable_scope.py", line 650, in _get_single_variable
"VarScope?" % name)
ValueError: Variable d2_h0_conv/w does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=None in VarScope?
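In TF 1.x, reuse=True tells tf.get_variable to look up an existing variable, so it fails for d2_h0_conv: that variable belongs only to branch 2 and has never been created (only d1_h0_conv exists). Branch-specific layers must therefore be built without reuse, and only the genuinely shared layers, which use a branch-independent name, should reuse on the second branch. A hedged sketch, with the repo's conv2d helper signature assumed and batch norm/prelu omitted for brevity:

def discriminator(self, image, branch, share_params=False, reuse=False):
    # First layer: branch-specific name ('d1_...' vs 'd2_...'), so it must be
    # created fresh for each branch; never pass reuse=True here.
    h0 = conv2d(image, self.c_dim,
                name='d' + branch + '_h0_conv', reuse=reuse)
    # Deeper shared layers: one branch-independent name, created by the first
    # branch and reused when the second branch is built.
    h1 = conv2d(h0, self.c_dim * 2,
                name='d_shared_h1_conv',
                reuse=share_params and branch == '2')
    # ... remaining layers and logits unchanged ...
    return h1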
