TensorFlow model training - Python

I am having a problem creating my own model for recognizing pets in images. I'm following the Oxford pet recognizer walkthrough, but I'm stuck on this error.
python object_detection/dataset_tools/create_pet_tf_record.py --data_dir=/home/cyber/Downloads/broji/annotations --output_dir=/home/cyber/Downloads/broji/output
Traceback (most recent call last):
File "object_detection/dataset_tools/create_pet_tf_record.py", line 318, in <module>
tf.app.run()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 124, in run
_sys.exit(main(argv))
File "object_detection/dataset_tools/create_pet_tf_record.py", line 271, in main
label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/models/research/object_detection/utils/label_map_util.py", line 164, in get_label_map_dict
label_map = load_labelmap(label_map_path)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/models/research/object_detection/utils/label_map_util.py", line 133, in load_labelmap
label_map_string = fid.read()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/lib/io/file_io.py", line 119, in read
self._preread_check()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/lib/io/file_io.py", line 79, in _preread_check
compat.as_bytes(self.__name), 1024 * 512, status)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/errors_impl.py", line 473, in __exit__
c_api.TF_GetCode(self.status.status))
tensorflow.python.framework.errors_impl.NotFoundError: data/pet_label_map.pbtxt; No such file or directory
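The last line is the key: data/pet_label_map.pbtxt is the default value of the script's --label_map_path flag, and it is resolved relative to the directory you run the command from. A minimal sketch of the fix, assuming you run from models/research where the Object Detection API keeps its copy of the pet label map (adjust the path if yours lives elsewhere):
python object_detection/dataset_tools/create_pet_tf_record.py \
    --data_dir=/home/cyber/Downloads/broji/annotations \
    --output_dir=/home/cyber/Downloads/broji/output \
    --label_map_path=object_detection/data/pet_label_map.pbtxt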

Weird issue with running pretrained model with tensorflow

(tensorflow) C:\tensorflow\workspace\training_demo>python model_main_tf2.py --model_dir=C:/tensorflow/workspace/models/my_ssd_resnet50_v1_fpn --pipeline_config_path=C:/tensorflow/workspace/models/my_ssd_resnet50_v1_fpn/pipeline.config
From C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\object_detection\model_lib_v2.py:522: StrategyBase.experimental_distribute_datasets_from_function (from tensorflow.python.distribute.distribute_lib) is deprecated and will be removed in a future version.
Instructions for updating:
rename to distribute_datasets_from_function
Traceback (most recent call last):
File "model_main_tf2.py", line 113, in <module>
tf.compat.v1.app.run()
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\platform\app.py", line 40, in run
_run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\absl\app.py", line 300, in run
_run_main(main, args)
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\absl\app.py", line 251, in _run_main
sys.exit(main(argv))
File "model_main_tf2.py", line 104, in main
model_lib_v2.train_loop(
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\object_detection\model_lib_v2.py", line 522, in train_loop
train_input = strategy.experimental_distribute_datasets_from_function(
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\util\deprecation.py", line 340, in new_func
return func(*args, **kwargs)
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\distribute\distribute_lib.py", line 1143, in experimental_distribute_datasets_from_function
return self.distribute_datasets_from_function(dataset_fn, options)
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\distribute\distribute_lib.py", line 1134, in distribute_datasets_from_function
return self._extended._distribute_datasets_from_function( # pylint: disable=protected-access
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\distribute\mirrored_strategy.py", line 545, in _distribute_datasets_from_function
return input_lib.get_distributed_datasets_from_function(
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\distribute\input_lib.py", line 161, in get_distributed_datasets_from_function
return DistributedDatasetsFromFunction(dataset_fn, input_workers,
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\distribute\input_lib.py", line 1272, in __init__
_create_datasets_from_function_with_input_context(
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\distribute\input_lib.py", line 1936, in _create_datasets_from_function_with_input_context
dataset = dataset_fn(ctx)
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\object_detection\model_lib_v2.py", line 513, in train_dataset_fn
train_input = inputs.train_input(
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\object_detection\inputs.py", line 870, in train_input
dataset = INPUT_BUILDER_UTIL_MAP['dataset_build'](
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\object_detection\builders\dataset_builder.py", line 195, in build
decoder = decoder_builder.build(input_reader_config)
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\object_detection\builders\decoder_builder.py", line 52, in build
decoder = tf_example_decoder.TfExampleDecoder(
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\object_detection\data_decoders\tf_example_decoder.py", line 391, in __init__
_ClassTensorHandler(
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\object_detection\data_decoders\tf_example_decoder.py", line 88, in __init__
name_to_id = label_map_util.get_label_map_dict(
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\object_detection\utils\label_map_util.py", line 201, in get_label_map_dict
label_map = load_labelmap(label_map_path_or_proto)
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\object_detection\utils\label_map_util.py", line 168, in load_labelmap
label_map_string = fid.read()
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\lib\io\file_io.py", line 117, in read
self._preread_check()
File "C:\Users\bigbootytx\anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\lib\io\file_io.py", line 79, in _preread_check
self._read_buf = _pywrap_file_io.BufferedInputStream(
tensorflow.python.framework.errors_impl.InvalidArgumentError: NewRandomAccessFile failed to Create/Open: C: ensorflow\workspacennotations\labelmap.pbtxt : The filename, directory name, or volume label syntax is incorrect.
; no protocol option
So I'm getting this weird issue where it's changing the path of the file it's looking for on its own.
C: ensorflow\workspacennotations\labelmap.pbtxt
should be
C:\tensorflow\workspace\annotations\labelmap.pbtxt
I've been stuck on this issue for quite a while now and have tried several things, including switching to forward slashes. Any idea what to do here? I followed this guide: https://tensorflow-object-detection-api-tutorial.readthedocs.io/en/latest/training.html
The \t in your path is automatically replaced by a tab character (and the \a in \annotations by a bell character, which is why both fragments vanish from the error message). To prevent this on Windows, use a double backslash (\\) as the path delimiter: C:\\tensorflow\\... Since the traceback shows the path being read out of the input reader configuration, the label_map_path entry in your pipeline.config is the place to fix.
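For illustration, the corrected entry could be written either way (a sketch; in the standard pipeline.config this field appears under both train_input_reader and eval_input_reader):
label_map_path: "C:\\tensorflow\\workspace\\annotations\\labelmap.pbtxt"
# or, equivalently, with forward slashes:
label_map_path: "C:/tensorflow/workspace/annotations/labelmap.pbtxt"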

Module 'cryptography.utils' has no attribute 'bit_length'

I am currently trying to figure out how to use netmiko to automate some of my routine work, such as taking configuration backups and creating VLANs. I've managed to use it on Aruba and Huawei switches without a problem, but on an Alcatel switch I'm facing this issue:
Unknown exception: module 'cryptography.utils' has no attribute 'bit_length'
Traceback (most recent call last):
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\paramiko\transport.py", line 2075, in run
self.kex_engine.parse_next(ptype, m)
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\paramiko\kex_gex.py", line 101, in parse_next
return self._parse_kexdh_gex_reply(m)
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\paramiko\kex_gex.py", line 281, in _parse_kexdh_gex_reply
self.transport._verify_key(host_key, sig)
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\paramiko\transport.py", line 1886, in _verify_key
if not key.verify_ssh_sig(self.H, Message(sig)):
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\paramiko\dsskey.py", line 148, in verify_ssh_sig
key = dsa.DSAPublicNumbers(
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\cryptography\hazmat\primitives\asymmetric\dsa.py", line 212, in public_key
return backend.load_dsa_public_numbers(self)
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\cryptography\hazmat\backends\openssl\backend.py", line 871, in load_dsa_public_numbers
dsa._check_dsa_parameters(numbers.parameter_numbers)
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\netmiko\tplink\tplink_jetstream.py", line 145, in _override_check_dsa_parameters
if crypto_utils.bit_length(parameters.q) not in [160, 256]:
AttributeError: module 'cryptography.utils' has no attribute 'bit_length'
Traceback (most recent call last):
File "C:\Users\melih.celik\Desktop\New_Backup\Yedek\Coding\Rand stuff\ssh_deneme(toplu).py", line 75, in
config_backup(cihaz_secim,ip_address,username,password)
File "C:\Users\melih.celik\Desktop\New_Backup\Yedek\Coding\Rand stuff\ssh_deneme(toplu).py", line 12, in config_backup
net_connect=ConnectHandler(**switch) #Baglanti kuruldu.
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\netmiko\ssh_dispatcher.py", line 312, in ConnectHandler
return ConnectionClass(*args, **kwargs)
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\netmiko\base_connection.py", line 346, in init
self._open()
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\netmiko\base_connection.py", line 351, in _open
self.establish_connection()
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\netmiko\base_connection.py", line 920, in establish_connection
self.remote_conn_pre.connect(**ssh_connect_params)
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\paramiko\client.py", line 406, in connect
t.start_client(timeout=timeout)
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\paramiko\transport.py", line 660, in start_client
raise e
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\paramiko\transport.py", line 2075, in run
self.kex_engine.parse_next(ptype, m)
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\paramiko\kex_gex.py", line 101, in parse_next
return self._parse_kexdh_gex_reply(m)
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\paramiko\kex_gex.py", line 281, in _parse_kexdh_gex_reply
self.transport._verify_key(host_key, sig)
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\paramiko\transport.py", line 1886, in _verify_key
if not key.verify_ssh_sig(self.H, Message(sig)):
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\paramiko\dsskey.py", line 148, in verify_ssh_sig
key = dsa.DSAPublicNumbers(
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\cryptography\hazmat\primitives\asymmetric\dsa.py", line 212, in public_key
return backend.load_dsa_public_numbers(self)
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\cryptography\hazmat\backends\openssl\backend.py", line 871, in load_dsa_public_numbers
dsa._check_dsa_parameters(numbers.parameter_numbers)
File "C:\Users\melih.celik\AppData\Local\Programs\Python\Python39\lib\site-packages\netmiko\tplink\tplink_jetstream.py", line 145, in _override_check_dsa_parameters
if crypto_utils.bit_length(parameters.q) not in [160, 256]:
AttributeError: module 'cryptography.utils' has no attribute 'bit_length'
Thanks for your help in advance.
It looks like tplink_jetstream.py assumes that a (now removed) helper function is available to it. The simplest fix here would be to go into that file and modify the line containing crypto_utils.bit_length(parameters.q) to read parameters.q.bit_length() instead.
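For illustration, the one-line patch in the installed netmiko/tplink/tplink_jetstream.py (line 145 per the traceback) would look like this; Python integers have had a built-in bit_length() method since 2.7, so no import changes are needed:
# before: relies on cryptography.utils.bit_length, removed in newer releases
if crypto_utils.bit_length(parameters.q) not in [160, 256]:
# after: call the method on the int itself
if parameters.q.bit_length() not in [160, 256]: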

Facing a coding error when trying to train the model in Google Colab

The code I used was:
!python3 object_detection/model_main.py \
--pipeline_config_path=/gdrive/My\ Drive/object_detection/models/research/object_detection/samples/configs/ssd_mobilenet_v2_coco.config \
--model_dir=training/
and the error I faced was:
Traceback (most recent call last):
File "object_detection/model_main.py", line 109, in <module>
tf.app.run()
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/platform/app.py", line 40, in run
_run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)
File "/usr/local/lib/python3.6/dist-packages/absl/app.py", line 299, in run
_run_main(main, args)
File "/usr/local/lib/python3.6/dist-packages/absl/app.py", line 250, in _run_main
sys.exit(main(argv))
File "object_detection/model_main.py", line 71, in main
FLAGS.sample_1_of_n_eval_on_train_examples))
File "/content/drive/My Drive/object_detection/models/research/object_detection/model_lib.py", line 617, in create_estimator_and_inputs
pipeline_config_path, config_override=config_override)
File "/content/drive/My Drive/object_detection/models/research/object_detection/utils/config_util.py", line 103, in get_configs_from_pipeline_file
proto_str = f.read()
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/lib/io/file_io.py", line 122, in read
self._preread_check()
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/lib/io/file_io.py", line 84, in _preread_check
compat.as_bytes(self.__name), 1024 * 512)
tensorflow.python.framework.errors_impl.NotFoundError: /gdrive/My Drive/object_detection/models/research/object_detection/samples/configs/ssd_mobilenet_v2_coco.config; No such file or directory
I have checked many times that the path is correct and the drive is connected, but I don't know why this error still occurs. Please help!
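One detail the traceback itself suggests (an observation, not part of the original post): model_lib.py is being imported from /content/drive/My Drive/..., so the Drive mount appears to live under /content/drive rather than /gdrive. A sketch of the same command with the config path rewritten under that assumption:
!python3 object_detection/model_main.py \
  --pipeline_config_path="/content/drive/My Drive/object_detection/models/research/object_detection/samples/configs/ssd_mobilenet_v2_coco.config" \
  --model_dir=training/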

Does tf_debug truly support slim?

I have written the following code:
tf.contrib.slim.learning.train(
    ...
    ...
    session_wrapper=tensorflow.python.debug.LocalCLIDebugWrapperSession,
    ...)
While running the code, it reported:
......
......
2018-02-14 01:03:25.229477: I tensorflow/core/debug/debug_graph_utils.cc:229] For debugging, tfdbg is changing the parallel_iterations attribute of the Enter/RefEnter node "lstm/lstm_1/while/Enter_2" on device "/job:localhost/replica:0/task:0/device:CPU:0" from 32 to 1. (This does not affect subsequent non-debug runs.)
Traceback (most recent call last):
File "train_getimageid_ngch.py", line 147, in <module>
tf.app.run()
File "/home/ngaimanchow/tensorflow_virtualenv/local/lib/python2.7/site-packages/tensorflow/python/platform/app.py", line 48, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "train_getimageid_ngch.py", line 143, in main
saver=saver)
File "/home/ngaimanchow/tensorflow_virtualenv/local/lib/python2.7/site-packages/tensorflow/contrib/slim/python/slim/learning.py", line 775, in train
sv.stop(threads, close_summary_writer=True)
File "/usr/lib/python2.7/contextlib.py", line 35, in __exit__
self.gen.throw(type, value, traceback)
File "/home/ngaimanchow/tensorflow_virtualenv/local/lib/python2.7/site-packages/tensorflow/python/training/supervisor.py", line 964, in managed_session
self.stop(close_summary_writer=close_summary_writer)
File "/home/ngaimanchow/tensorflow_virtualenv/local/lib/python2.7/site-packages/tensorflow/python/training/supervisor.py", line 792, in stop
stop_grace_period_secs=self._stop_grace_secs)
File "/home/ngaimanchow/tensorflow_virtualenv/local/lib/python2.7/site-packages/tensorflow/python/training/coordinator.py", line 389, in join
six.reraise(*self._exc_info_to_raise)
File "/home/ngaimanchow/tensorflow_virtualenv/local/lib/python2.7/site-packages/tensorflow/python/training/coordinator.py", line 296, in stop_on_exception
yield
File "/home/ngaimanchow/tensorflow_virtualenv/local/lib/python2.7/site-packages/tensorflow/python/training/coordinator.py", line 494, in run
self.run_loop()
File "/home/ngaimanchow/tensorflow_virtualenv/local/lib/python2.7/site-packages/tensorflow/python/training/supervisor.py", line 1068, in run_loop
global_step=self._sv.global_step)
File "/home/ngaimanchow/tensorflow_virtualenv/local/lib/python2.7/site-packages/tensorflow/python/training/saver.py", line 1549, in save
global_step = training_util.global_step(sess, global_step)
File "/home/ngaimanchow/tensorflow_virtualenv/local/lib/python2.7/site-packages/tensorflow/python/training/training_util.py", line 67, in global_step
return int(sess.run(global_step_tensor))
File "/home/ngaimanchow/tensorflow_virtualenv/local/lib/python2.7/site-packages/tensorflow/python/debug/wrappers/framework.py", line 543, in run
run_end_resp = self.on_run_end(run_end_req)
File "/home/ngaimanchow/tensorflow_virtualenv/local/lib/python2.7/site-packages/tensorflow/python/debug/wrappers/local_cli_wrapper.py", line 321, in on_run_end
self._dump_root, partition_graphs=partition_graphs)
File "/home/ngaimanchow/tensorflow_virtualenv/local/lib/python2.7/site-packages/tensorflow/python/debug/lib/debug_data.py", line 495, in __init__
self._load_all_device_dumps(partition_graphs, validate)
File "/home/ngaimanchow/tensorflow_virtualenv/local/lib/python2.7/site-packages/tensorflow/python/debug/lib/debug_data.py", line 517, in _load_all_device_dumps
self._load_partition_graphs(partition_graphs, validate)
File "/home/ngaimanchow/tensorflow_virtualenv/local/lib/python2.7/site-packages/tensorflow/python/debug/lib/debug_data.py", line 798, in _load_partition_graphs
self._validate_dump_with_graphs(debug_graph.device_name)
File "/home/ngaimanchow/tensorflow_virtualenv/local/lib/python2.7/site-packages/tensorflow/python/debug/lib/debug_data.py", line 843, in _validate_dump_with_graphs
"device %s." % (datum.node_name, device_name))
ValueError: Node name 'TFRecordReaderV2' is not found in partition graphs of device /job:localhost/replica:0/task:0/device:CPU:0.
Even though it can start the tf debugger, when I tried to print the value of a tensor with the pt command, it reported this error:
Error occured during handling of command: "print_tensor"
exceptions.KeyError /device:CPU:0
Does tf_debug truly support slim? How can this be worked around? If not, is there any other way to debug or print the values of tensors when using tf.contrib.slim.learning.train?
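As a fallback when tfdbg and slim refuse to cooperate (a sketch under TF 1.x, not from the original post): tf.Print is an identity op that logs the listed tensors every time the wrapped tensor is evaluated, which gives a crude way to watch values inside slim's training loop without a session wrapper:
import tensorflow as tf

slim = tf.contrib.slim

def main(_):
    # Toy model: fit y = 2x with a single weight, just to show the pattern.
    x = tf.constant([[1.0], [2.0], [3.0]])
    y = tf.constant([[2.0], [4.0], [6.0]])
    w = tf.get_variable('w', shape=[1, 1])
    loss = tf.losses.mean_squared_error(y, tf.matmul(x, w))
    # Identity op that prints the loss each step -- a poor man's "pt".
    loss = tf.Print(loss, [loss], message='loss = ')
    train_op = slim.learning.create_train_op(
        loss, tf.train.GradientDescentOptimizer(0.1))
    slim.learning.train(train_op, logdir='/tmp/slim_print_demo',
                        number_of_steps=10)

if __name__ == '__main__':
    tf.app.run(main)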

Error with loadmat in scipy: could not read bytes

I have a problem in a project where I'm trying to use the loadmat function but get the following error:
File "project.py", line 107, in load_data
return loadmat(filename, appendmat=False)['data'].squeeze()
File "/home/xx/.virtualenvs/cv/local/lib/python3.5/site-packages/scipy/io/matlab/mio.py", line 136, in loadmat
matfile_dict = MR.get_variables(variable_names)
File "/home/xx/.virtualenvs/cv/local/lib/python3.5/site-packages/scipy/io/matlab/mio5.py", line 292, in get_variables
res = self.read_var_array(hdr, process)
File "/home/xx/.virtualenvs/cv/local/lib/python3.5/site-packages/scipy/io/matlab/mio5.py", line 252, in read_var_array
return self._matrix_reader.array_from_header(header, process)
File "scipy/io/matlab/mio5_utils.pyx", line 673, in scipy.io.matlab.mio5_utils.VarReader5.array_from_header (scipy/io/matlab/mio5_utils.c:7119)
File "scipy/io/matlab/mio5_utils.pyx", line 703, in scipy.io.matlab.mio5_utils.VarReader5.array_from_header (scipy/io/matlab/mio5_utils.c:6244)
File "scipy/io/matlab/mio5_utils.pyx", line 776, in scipy.io.matlab.mio5_utils.VarReader5.read_real_complex (scipy/io/matlab/mio5_utils.c:7572)
File "scipy/io/matlab/mio5_utils.pyx", line 448, in scipy.io.matlab.mio5_utils.VarReader5.read_numeric (scipy/io/matlab/mio5_utils.c:4323)
File "scipy/io/matlab/mio5_utils.pyx", line 353, in scipy.io.matlab.mio5_utils.VarReader5.read_element (scipy/io/matlab/mio5_utils.c:3913)
File "scipy/io/matlab/streams.pyx", line 92, in scipy.io.matlab.streams.GenericStream.read_string (scipy/io/matlab/streams.c:2182)
File "scipy/io/matlab/streams.pyx", line 79, in scipy.io.matlab.streams.GenericStream.read_into (scipy/io/matlab/streams.c:1977)
OSError: could not read bytes
The import I used was:
from scipy.io import loadmat
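Not part of the original post, but "could not read bytes" usually means the reader hit end-of-file early, i.e. the .mat file is truncated or corrupted (a partial download is a common cause). A quick sanity check, assuming a hypothetical filename:
import os

filename = 'data.mat'  # hypothetical path; substitute your own file
print('size on disk:', os.path.getsize(filename), 'bytes')
with open(filename, 'rb') as f:
    # A valid MATLAB v5 file begins with a 128-byte header whose first bytes
    # are human-readable text such as b'MATLAB 5.0 MAT-file'.
    print(f.read(20))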
