I want to run a Python program using PyTorch. How can I make each tensor in a batch the same size? The following error appears:
Traceback (most recent call last):
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 311, in <module>
fire.Fire(demo)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\fire\core.py", line 138, in Fire
component_trace = _Fire(component, args, parsed_flag_args, context, name)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\fire\core.py", line 468, in _Fire
target=component.__name__)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\fire\core.py", line 672, in _CallAndUpdateTrace
component = fn(*varargs, **kwargs)
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 289, in demo
n_epochs=n_epochs, batch_size=batch_size, seed=seed)
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 168, in train
n_epochs=n_epochs,
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 42, in train_epoch
for batch_idx, (input, target) in enumerate(loader):
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\dataloader.py", line 346, in __next__
data = self._next_data()
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\dataloader.py", line 386, in _next_data
data = self._dataset_fetcher.fetch(index) # may raise StopIteration
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\fetch.py", line 47, in fetch
return self.collate_fn(data)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 87, in default_collate
return [default_collate(samples) for samples in transposed]
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 87, in <listcomp>
return [default_collate(samples) for samples in transposed]
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 72, in default_collate
return default_collate([torch.as_tensor(b) for b in batch])
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 63, in default_collate
return torch.stack(batch, 0, out=out)
RuntimeError: stack expects each tensor to be equal size, but got [650] at entry 0 and [108] at entry 1
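The default collate function can only torch.stack samples that already have identical shapes. One common workaround (not from the original post; a minimal sketch assuming each sample is a 1-D feature tensor plus a label, and that train_set and batch_size stand in for the dataset and batch size used above) is a custom collate_fn that pads every sample to the longest length in the batch:

import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader

def pad_collate(batch):
    # each item is an (input, target) pair; the inputs are 1-D tensors of different lengths
    inputs, targets = zip(*batch)
    # zero-pad every input up to the longest one in this batch
    padded = pad_sequence(list(inputs), batch_first=True)
    return padded, torch.tensor(targets)

loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, collate_fn=pad_collate)

Whether padding is appropriate depends on the model; if every sample is supposed to have the same size in the first place, the real fix is in the dataset's preprocessing.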
Related
I have an exception occurring in a for statement:
for _, data in enumerate(dataloader, 0):
The exception is raised in the for statement itself, not in the loop body. How do I catch it and continue?
Here is the entire error trace:
Traceback (most recent call last):
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/reprex/run_DL.py", line 67, in <module>
ut.generate_validation_model(cfg)
File "/panfs/roc/groups/4/miran045/reine097/projects/AlexNet_Abrol2021/reprex/utils.py", line 227, in generate_validation_model
loss = train(trainloader, net, optimizer, criterion, cfg.cuda_avl)
File "/panfs/roc/groups/4/miran045/reine097/projects/AlexNet_Abrol2021/reprex/utils.py", line 96, in train
for _, data in enumerate(dataloader, 0):
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 521, in __next__
data = self._next_data()
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 1203, in _next_data
return self._process_data(data)
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 1229, in _process_data
data.reraise()
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/_utils.py", line 434, in reraise
raise exception
RuntimeError: Caught RuntimeError in DataLoader worker process 0.
Original Traceback (most recent call last):
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/_utils/worker.py", line 287, in _worker_loop
data = fetcher.fetch(index)
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/_utils/fetch.py", line 52, in fetch
return self.collate_fn(data)
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/_utils/collate.py", line 84, in default_collate
return [default_collate(samples) for samples in transposed]
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/_utils/collate.py", line 84, in <listcomp>
return [default_collate(samples) for samples in transposed]
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/_utils/collate.py", line 64, in default_collate
return default_collate([torch.as_tensor(b) for b in batch])
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/_utils/collate.py", line 56, in default_collate
return torch.stack(batch, 0, out=out)
RuntimeError: stack expects each tensor to be equal size, but got [1, 208, 300, 320] at entry 0 and [1, 320, 300, 208] at entry 13
The error occurs on this line:
File "/panfs/roc/groups/4/miran045/reine097/projects/AlexNet_Abrol2021/reprex/utils.py", line 96, in train
for _, data in enumerate(dataloader, 0):
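One way to catch the error and keep going (not from the original post; a minimal sketch assuming it is acceptable to silently skip the offending batches) is to drive the DataLoader with an explicit iterator, so the exception raised by next() can be caught:

it = iter(dataloader)
batch_idx = 0
while True:
    try:
        data = next(it)
    except StopIteration:
        break                                        # end of the epoch
    except RuntimeError as err:
        print(f"skipping batch {batch_idx}: {err}")  # this batch failed to collate
        batch_idx += 1
        continue
    # ... run the usual training step on data here ...
    batch_idx += 1

The cleaner fix is usually to make the dataset return consistently shaped tensors; the mismatch between [1, 208, 300, 320] and [1, 320, 300, 208] above suggests some volumes are simply oriented differently.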
I am getting this error while running the training code of a model.
Traceback (most recent call last):
File "train.py", line 273, in <module>
train_loss[epoch - 1] = process_epoch(
File "train.py", line 240, in process_epoch
loss = loss_fn(model, batch)
File "train.py", line 221, in <lambda>
loss_fn = lambda model, batch: weak_loss(model, batch, normalization="softmax")
File "train.py", line 171, in weak_loss
corr4d = model(batch).to("cuda")
File "/home/srtf/anaconda3/envs/ncnet/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/srtf/ncnet/lib/model.py", line 263, in forward
feature_A = self.FeatureExtraction(tnf_batch['source_image'])
File "/home/srtf/anaconda3/envs/ncnet/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/srtf/ncnet/lib/model.py", line 84, in forward
features = self.model(image_batch)
File "/home/srtf/anaconda3/envs/ncnet/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/srtf/anaconda3/envs/ncnet/lib/python3.8/site-packages/torch/nn/modules/container.py", line 100, in forward
input = module(input)
File "/home/srtf/anaconda3/envs/ncnet/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/srtf/anaconda3/envs/ncnet/lib/python3.8/site-packages/torch/nn/modules/conv.py", line 353, in forward
return self._conv_forward(input, self.weight)
File "/home/srtf/anaconda3/envs/ncnet/lib/python3.8/site-packages/torch/nn/modules/conv.py", line 349, in _conv_forward
return F.conv2d(input, weight, self.bias, self.stride,
RuntimeError: Input type (torch.FloatTensor) and weight type (torch.cuda.FloatTensor) should be the same
CUDA is available on the system. Where do I need to make changes in the code?
Your input needs to be sent to the correct device:
>>> corr4d = model(batch.cuda())
This copies the batch to the GPU device ('cuda:0' by default).
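A slightly more general pattern (a sketch, assuming batch is a tensor; if it is a dict of tensors, move each value instead) is to pick the device once and move both the model and every batch to it, so the code also runs on machines without a GPU:

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)    # move the weights once
batch = batch.to(device)    # move each input batch before the forward pass
# if batch is a dict of tensors: batch = {k: v.to(device) for k, v in batch.items()}
corr4d = model(batch)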
There is only one categorical column that I want to encode. It works fine in my notebook, but when it is uploaded to the AIcrowd platform it causes this trouble.
There are three categorical features in total: one is the target, one is the column of row IDs, and after excluding those two for training I am left with one feature.
df[['intersection_pos_rel_centre']]
from sklearn.preprocessing import LabelEncoder
le=LabelEncoder()
df[['intersection_pos_rel_centre']]=le.fit_transform(df[['intersection_pos_rel_centre']])
df[['intersection_pos_rel_centre']]
My error is
Selecting runtime language: python
[NbConvertApp] Converting notebook predict.ipynb to notebook
[NbConvertApp] Executing notebook with kernel: python
Traceback (most recent call last):
File "/opt/conda/bin/jupyter-nbconvert", line 11, in <module>
sys.exit(main())
File "/opt/conda/lib/python3.8/site-packages/jupyter_core/application.py", line 254, in launch_instance
return super(JupyterApp, cls).launch_instance(argv=argv, **kwargs)
File "/opt/conda/lib/python3.8/site-packages/traitlets/config/application.py", line 845, in launch_instance
app.start()
File "/opt/conda/lib/python3.8/site-packages/nbconvert/nbconvertapp.py", line 350, in start
self.convert_notebooks()
File "/opt/conda/lib/python3.8/site-packages/nbconvert/nbconvertapp.py", line 524, in convert_notebooks
self.convert_single_notebook(notebook_filename)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/nbconvertapp.py", line 489, in convert_single_notebook
output, resources = self.export_single_notebook(notebook_filename, resources, input_buffer=input_buffer)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/nbconvertapp.py", line 418, in export_single_notebook
output, resources = self.exporter.from_filename(notebook_filename, resources=resources)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/exporters/exporter.py", line 181, in from_filename
return self.from_file(f, resources=resources, **kw)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/exporters/exporter.py", line 199, in from_file
return self.from_notebook_node(nbformat.read(file_stream, as_version=4), resources=resources, **kw)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/exporters/notebook.py", line 32, in from_notebook_node
nb_copy, resources = super().from_notebook_node(nb, resources, **kw)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/exporters/exporter.py", line 143, in from_notebook_node
nb_copy, resources = self._preprocess(nb_copy, resources)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/exporters/exporter.py", line 318, in _preprocess
nbc, resc = preprocessor(nbc, resc)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/preprocessors/base.py", line 47, in __call__
return self.preprocess(nb, resources)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/preprocessors/execute.py", line 79, in preprocess
self.execute()
File "/opt/conda/lib/python3.8/site-packages/nbclient/util.py", line 74, in wrapped
return just_run(coro(*args, **kwargs))
File "/opt/conda/lib/python3.8/site-packages/nbclient/util.py", line 53, in just_run
return loop.run_until_complete(coro)
File "/opt/conda/lib/python3.8/asyncio/base_events.py", line 616, in run_until_complete
return future.result()
File "/opt/conda/lib/python3.8/site-packages/nbclient/client.py", line 553, in async_execute
await self.async_execute_cell(
File "/opt/conda/lib/python3.8/site-packages/nbconvert/preprocessors/execute.py", line 123, in async_execute_cell
cell, resources = self.preprocess_cell(cell, self.resources, cell_index)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/preprocessors/execute.py", line 146, in preprocess_cell
cell = run_sync(NotebookClient.async_execute_cell)(self, cell, index, store_history=self.store_history)
File "/opt/conda/lib/python3.8/site-packages/nbclient/util.py", line 74, in wrapped
return just_run(coro(*args, **kwargs))
File "/opt/conda/lib/python3.8/site-packages/nbclient/util.py", line 53, in just_run
return loop.run_until_complete(coro)
File "/opt/conda/lib/python3.8/site-packages/nest_asyncio.py", line 98, in run_until_complete
return f.result()
File "/opt/conda/lib/python3.8/asyncio/futures.py", line 178, in result
raise self._exception
File "/opt/conda/lib/python3.8/asyncio/tasks.py", line 280, in __step
result = coro.send(None)
File "/opt/conda/lib/python3.8/site-packages/nbclient/client.py", line 852, in async_execute_cell
self._check_raise_for_error(cell, exec_reply)
File "/opt/conda/lib/python3.8/site-packages/nbclient/client.py", line 760, in _check_raise_for_error
raise CellExecutionError.from_cell_and_msg(cell, exec_reply_content)
nbclient.exceptions.CellExecutionError: An error occurred while executing the following cell:
------------------
df[['intersection_pos_rel_centre']]
from sklearn.preprocessing import LabelEncoder
le=LabelEncoder()
df[['intersection_pos_rel_centre']]=le.fit_transform(df[['intersection_pos_rel_centre']])
df[['intersection_pos_rel_centre']]
------------------
TypeError: argument must be a string or number
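A common cause of this TypeError (an assumption here, since the platform's copy of the data isn't shown) is missing values: LabelEncoder cannot sort NaN (a float) together with strings. A minimal sketch that handles that case and passes a 1-D Series, which is what LabelEncoder expects:

from sklearn.preprocessing import LabelEncoder

# 'missing' is a hypothetical placeholder label for NaN values
col = df['intersection_pos_rel_centre'].fillna('missing').astype(str)
le = LabelEncoder()
df['intersection_pos_rel_centre'] = le.fit_transform(col)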
I have created a custom environment for reinforcement learning with tf-agents (not needed to answer this question). It works fine if I instantiate one thread by setting num_parallel_environments to 1, but it throws infrequent and seemingly random errors, such as an IndexError inside random.shuffle(), when I increase num_parallel_environments to 50. Here's the code:
# inside train.py
tf_env = tf_py_environment.TFPyEnvironment(
    batched_py_environment.BatchedPyEnvironment(
        [environment.CardGameEnv()] * num_parallel_environments))

# inside my environment, this is run in threads
self.cardStack = getFullDeck()
random.shuffle(self.cardStack)

# this is a normal function, imported in every thread class
def getFullDeck():
    deck = []
    for rank in Ranks:
        for suit in Suits:
            deck.append(Card(rank, suit))
    return deck
And here's one of the possible errors:
Traceback (most recent call last):
File "e:\Users\tmp\.vscode\extensions\ms-python.python-2019.1.0\pythonFiles\ptvsd_launcher.py", line 45, in <module>
main(ptvsdArgs)
File "e:\Users\tmp\.vscode\extensions\ms-python.python-2019.1.0\pythonFiles\lib\python\ptvsd\__main__.py", line 348, in main
run()
File "e:\Users\tmp\.vscode\extensions\ms-python.python-2019.1.0\pythonFiles\lib\python\ptvsd\__main__.py", line 253, in run_file
runpy.run_path(target, run_name='__main__')
File "C:\Python37\lib\runpy.py", line 263, in run_path
pkg_name=pkg_name, script_name=fname)
File "C:\Python37\lib\runpy.py", line 96, in _run_module_code
mod_name, mod_spec, pkg_name, script_name)
File "C:\Python37\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "e:\Users\tmp\Documents\Programming\Neural Nets\Poker_AI\train_v2.py", line 320, in <module>
app.run(main)
File "C:\Python37\lib\site-packages\absl\app.py", line 300, in run
_run_main(main, args)
File "C:\Python37\lib\site-packages\absl\app.py", line 251, in _run_main
sys.exit(main(argv))
File "e:\Users\tmp\Documents\Programming\Neural Nets\Poker_AI\train_v2.py", line 315, in main
num_eval_episodes=FLAGS.num_eval_episodes)
File "E:\Users\tmp\AppData\Roaming\Python\Python37\site-packages\gin\config.py", line 1032, in wrapper
utils.augment_exception_message_and_reraise(e, err_str)
File "E:\Users\tmp\AppData\Roaming\Python\Python37\site-packages\gin\utils.py", line 49, in augment_exception_message_and_reraise
six.raise_from(proxy.with_traceback(exception.__traceback__), None)
File "<string>", line 3, in raise_from
File "E:\Users\tmp\AppData\Roaming\Python\Python37\site-packages\gin\config.py", line 1009, in wrapper
return fn(*new_args, **new_kwargs)
File "e:\Users\tmp\Documents\Programming\Neural Nets\Poker_AI\train_v2.py", line 251, in train_eval
collect_driver.run()
File "C:\Python37\lib\site-packages\tf_agents\drivers\dynamic_episode_driver.py", line 149, in run
maximum_iterations=maximum_iterations)
File "C:\Python37\lib\site-packages\tf_agents\utils\common.py", line 111, in with_check_resource_vars
return fn(*fn_args, **fn_kwargs)
File "C:\Python37\lib\site-packages\tf_agents\drivers\dynamic_episode_driver.py", line 180, in _run
name='driver_loop'
File "C:\Python37\lib\site-packages\tensorflow\python\ops\control_flow_ops.py", line 2457, in while_loop_v2
return_same_structure=True)
File "C:\Python37\lib\site-packages\tensorflow\python\ops\control_flow_ops.py", line 2689, in while_loop
loop_vars = body(*loop_vars)
File "C:\Python37\lib\site-packages\tf_agents\drivers\dynamic_episode_driver.py", line 103, in loop_body
next_time_step = self.env.step(action_step.action)
File "C:\Python37\lib\site-packages\tf_agents\environments\tf_environment.py", line 232, in step
return self._step(action)
File "C:\Python37\lib\site-packages\tensorflow\python\autograph\impl\api.py", line 232, in graph_wrapper
return func(*args, **kwargs)
File "C:\Python37\lib\site-packages\tf_agents\environments\tf_py_environment.py", line 218, in _step
_step_py, flat_actions, self._time_step_dtypes, name='step_py_func')
File "C:\Python37\lib\site-packages\tensorflow\python\ops\script_ops.py", line 488, in numpy_function
return py_func_common(func, inp, Tout, stateful=True, name=name)
File "C:\Python37\lib\site-packages\tensorflow\python\ops\script_ops.py", line 452, in py_func_common
result = func(*[x.numpy() for x in inp])
File "C:\Python37\lib\site-packages\tf_agents\environments\tf_py_environment.py", line 203, in _step_py
self._time_step = self._env.step(packed)
File "C:\Python37\lib\site-packages\tf_agents\environments\py_environment.py", line 174, in step
self._current_time_step = self._step(action)
File "C:\Python37\lib\site-packages\tf_agents\environments\batched_py_environment.py", line 140, in _step
zip(self._envs, unstacked_actions))
File "C:\Python37\lib\multiprocessing\pool.py", line 268, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "C:\Python37\lib\multiprocessing\pool.py", line 657, in get
raise self._value
File "C:\Python37\lib\multiprocessing\pool.py", line 121, in worker
result = (True, func(*args, **kwds))
File "C:\Python37\lib\multiprocessing\pool.py", line 44, in mapstar
return list(map(*args))
File "C:\Python37\lib\site-packages\tf_agents\environments\batched_py_environment.py", line 139, in <lambda>
lambda env_action: env_action[0].step(env_action[1]),
File "C:\Python37\lib\site-packages\tf_agents\environments\py_environment.py", line 174, in step
self._current_time_step = self._step(action)
File "e:\Users\tmp\Documents\Programming\Neural Nets\Poker_AI\environment.py", line 116, in _step
canRoundContinue = self._table.runUntilChoice(action)
File "e:\Users\tmp\Documents\Programming\Neural Nets\Poker_AI\table.py", line 326, in runUntilChoice
random.shuffle(self.cardStack)
File "C:\Python37\lib\random.py", line 278, in shuffle
x[i], x[j] = x[j], x[i]
IndexError: list index out of range
In call to configurable 'train_eval' (<function train_eval at 0x000002722713A158>)
I suspect this error occurs because the threads are modifying the list simultaneously, but I do not see why that would happen:
Everything happens inside a class instance, and the list that getFullDeck() returns is recreated every time the function is called, so there should be no way for multiple threads to hold a reference to the same list, right?
tf_env = tf_py_environment.TFPyEnvironment(
    batched_py_environment.BatchedPyEnvironment(
        [environment.CardGameEnv()] * num_parallel_environments))
You are reusing the same environment for each of the parallel instances rather than creating a new environment for each one. You might want to try something like
tf_env = tf_py_environment.TFPyEnvironment(
    batched_py_environment.BatchedPyEnvironment(
        [environment.CardGameEnv() for _ in range(num_parallel_environments)]))
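For illustration (not part of the original answer), list multiplication repeats the same object reference, so all 50 batched environments end up sharing one CardGameEnv and therefore one cardStack, while the list comprehension builds distinct instances:

envs = [environment.CardGameEnv()] * 3
print(envs[0] is envs[1])   # True: every entry refers to the same environment object

envs = [environment.CardGameEnv() for _ in range(3)]
print(envs[0] is envs[1])   # False: each entry is a separate environment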
cuda 9.0
cudnn 7.5
python 3.5.2
tensorflow-gpu 1.8
I don't know where the error comes from; I also tried Python 3.6.3 and the same error occurs. Please help.
I am training with the model_main.py file, but I get the following error.
python model_main.py --model_dir=F:/cindy/cindybackup/tensorflow1/test/training -pipeline_config_path=F:/cindy/cindybackup/tensorflow1/test/data/faster_rcnn_inception_v2_pets.config --alsologtostderr --num_train_steps=1000 --num_eval_steps=10
It shows the following:
WARNING:tensorflow:Forced number of epochs for all eval validations to be 1.
WARNING:tensorflow:Expected number of evaluation epochs is 1, but instead encountered eval_on_train_input_config.num_epochs = 0. Overwriting num_epochs to 1.
WARNING:tensorflow:Using temporary folder as model directory: C:\Users\wyh\AppData\Local\Temp\tmplh3q4jn2
WARNING:tensorflow:Estimator's model_fn (.model_fn at 0x00000256FF7F1400>) includes params argument, but params are not passed to Estimator.
WARNING:tensorflow:num_readers has been reduced to 1 to match input file shards.
Traceback (most recent call last):
File "model_main.py", line 109, in
tf.app.run()
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\platform\app.py",
line 126, in run
_sys.exit(main(argv))
File "model_main.py", line 105, in main
tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\estimator\training.py",
line 439, in train_and_evaluate
executor.run()
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\estimator\training.py",
line 518, in run
self.run_local()
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\estimator\training.py",
line 650, in run_local
hooks=train_hooks)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\estimator\estimator.py",
line 363, in train
loss = self._train_model(input_fn, hooks, saving_listeners)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\estimator\estimator.py",
line 843, in _train_model
return self._train_model_default(input_fn, hooks, saving_listeners)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\estimator\estimator.py",
line 853, in _train_model_default
input_fn, model_fn_lib.ModeKeys.TRAIN))
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\estimator\estimator.py",
line 691, in _get_features_and_labels_from_input_fn
result = self._call_input_fn(input_fn, mode)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\estimator\estimator.py",
line 798, in _call_input_fn
return input_fn(**kwargs)
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\inputs.py",
line 525, in _train_input_fn
batch_size=params['batch_size'] if params else train_config.batch_size)
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\builders\dataset_builder.py",
line 149, in build
dataset = data_map_fn(process_fn, num_parallel_calls=num_parallel_calls)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py",
line 853, in map
return ParallelMapDataset(self, map_func, num_parallel_calls)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py",
line 1870, in init
super(ParallelMapDataset, self).init(input_dataset, map_func)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py",
line 1839, in init
self._map_func.add_to_graph(ops.get_default_graph())
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\framework\function.py",
line 484, in add_to_graph
self._create_definition_if_needed()
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\framework\function.py",
line 319, in _create_definition_if_needed
self._create_definition_if_needed_impl()
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\framework\function.py",
line 336, in _create_definition_if_needed_impl
outputs = self._func(*inputs)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\data\ops\dataset_ops.py",
line 1804, in tf_map_func
ret = map_func(nested_args)
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\builders\dataset_builder.py",
line 130, in process_fn
processed_tensors = transform_input_data_fn(processed_tensors)
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\inputs.py",
line 515, in transform_and_pad_input_data_fn
tensor_dict=transform_data_fn(tensor_dict),
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\inputs.py",
line 129, in transform_input_data
tf.expand_dims(tf.to_float(image), axis=0))
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\meta_architectures\faster_rcnn_meta_arch.py",
line 543, in preprocess
parallel_iterations=self._parallel_iterations)
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\utils\shape_utils.py",
line 237, in static_or_dynamic_map_fn
outputs = [fn(arg) for arg in tf.unstack(elems)]
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\utils\shape_utils.py",
line 237, in
outputs = [fn(arg) for arg in tf.unstack(elems)]
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\core\preprocessor.py",
line 2264, in resize_to_range
lambda: _resize_portrait_image(image))
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\util\deprecation.py",
line 432, in new_func
return func(*args, **kwargs)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\ops\control_flow_ops.py",
line 2063, in cond
orig_res_t, res_t = context_t.BuildCondBranch(true_fn)
File "C:\Users\wyh\AppData\Local\conda\conda\envs\py352\lib\site-packages\tensorflow\python\ops\control_flow_ops.py",
line 1913, in BuildCondBranch
original_result = fn()
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\core\preprocessor.py",
line 2263, in
lambda: _resize_landscape_image(image),
File "F:\cindy\cindybackup\tensorflow1\models\research\object_detection\core\preprocessor.py",
line 2245, in _resize_landscape_image
align_corners=align_corners, preserve_aspect_ratio=True)
TypeError: resize_images() got an unexpected keyword argument 'preserve_aspect_ratio'
Thanks~
The problem is not yet resolved in the tensorflow/models repository (https://github.com/tensorflow/models/).
I just removed preserve_aspect_ratio in object_detection/core/preprocessor.py, changing
align_corners=align_corners, preserve_aspect_ratio=True)
to
align_corners=align_corners)
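An alternative to editing preprocessor.py (a sketch, not from the original answer; image, new_size, method and align_corners are stand-ins for whatever names the surrounding code actually uses) is to pass the keyword only when the installed tf.image.resize_images accepts it, or simply to upgrade tensorflow-gpu to a release whose resize_images supports preserve_aspect_ratio:

try:
    new_image = tf.image.resize_images(
        image, new_size, method=method,
        align_corners=align_corners, preserve_aspect_ratio=True)
except TypeError:
    # older TensorFlow builds (such as the 1.8 used here) predate preserve_aspect_ratio
    new_image = tf.image.resize_images(
        image, new_size, method=method, align_corners=align_corners)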