Exception occurs in the for statement itself - Python

I have an exception occurring in a for statement:
for _, data in enumerate(dataloader, 0):
The exception is raised not in the body of the loop, but by the for statement itself. How do I catch it and continue?
Here is the entire error trace:
Traceback (most recent call last):
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/reprex/run_DL.py", line 67, in <module>
ut.generate_validation_model(cfg)
File "/panfs/roc/groups/4/miran045/reine097/projects/AlexNet_Abrol2021/reprex/utils.py", line 227, in generate_validation_model
loss = train(trainloader, net, optimizer, criterion, cfg.cuda_avl)
File "/panfs/roc/groups/4/miran045/reine097/projects/AlexNet_Abrol2021/reprex/utils.py", line 96, in train
for _, data in enumerate(dataloader, 0):
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 521, in __next__
data = self._next_data()
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 1203, in _next_data
return self._process_data(data)
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 1229, in _process_data
data.reraise()
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/_utils.py", line 434, in reraise
raise exception
RuntimeError: Caught RuntimeError in DataLoader worker process 0.
Original Traceback (most recent call last):
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/_utils/worker.py", line 287, in _worker_loop
data = fetcher.fetch(index)
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/_utils/fetch.py", line 52, in fetch
return self.collate_fn(data)
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/_utils/collate.py", line 84, in default_collate
return [default_collate(samples) for samples in transposed]
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/_utils/collate.py", line 84, in <listcomp>
return [default_collate(samples) for samples in transposed]
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/_utils/collate.py", line 64, in default_collate
return default_collate([torch.as_tensor(b) for b in batch])
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/_utils/collate.py", line 56, in default_collate
return torch.stack(batch, 0, out=out)
RuntimeError: stack expects each tensor to be equal size, but got [1, 208, 300, 320] at entry 0 and [1, 320, 300, 208] at entry 13
The error occurs on this line:
File "/panfs/roc/groups/4/miran045/reine097/projects/AlexNet_Abrol2021/reprex/utils.py", line 96, in train
for _, data in enumerate(dataloader, 0):
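
Since the exception is raised while fetching the next batch rather than in the loop body, one way to catch it is to drive the iterator manually and wrap next() in try/except. Below is a minimal sketch assuming the names from the question; note that the shapes in the error ([1, 208, 300, 320] vs. [1, 320, 300, 208]) look like permuted dimensions of the same volume, so the cleaner fix is to make the dataset return consistently oriented tensors.

it = iter(dataloader)
while True:
    try:
        data = next(it)            # the failing fetch happens here, not in the body
    except StopIteration:
        break                      # normal end of the epoch
    except RuntimeError as e:      # e.g. the collate "equal size" error
        print(f"Skipping bad batch: {e}")
        continue
    # ... the original loop body using `data` goes here ...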

Related

KeyError: Caught KeyError in DataLoader worker process 0

I am facing this error, which is related to the data loader's dictionary (for the MSVD dataset), while running the training file for video captioning from https://github.com/nasib-ullah/video-captioning-models-in-Pytorch.
Code of the __getitem__ function:
def __getitem__(self, idx):
    anno = random.choice(self.annotation_dict[self.v_name_list[idx]])
    anno_index = []
    for word in anno.split(' '):
        try:
            anno_index.append(self.voc.word2index[word])
        except:
            pass
    if self.opt_truncate_caption:
        if len(anno_index) > self.max_caption_length:
            anno_index = anno_index[:self.max_caption_length]
    anno_index = anno_index + [self.voc.cfg.EOS_token]
    appearance_tensor = torch.tensor(self.appearance_feature_dict[self.v_name_list[idx]]).float()
    if self.motion_feature_dict == None:
        motion_tensor = torch.zeros_like(appearance_tensor)
    else:
        motion_tensor = torch.tensor(self.motion_feature_dict[self.v_name_list[idx]]).float()
    if self.object_feature_dict == None:
        object_tensor = torch.zeros_like(appearance_tensor)
    else:
        object_tensor = torch.tensor(self.object_feature_dict[self.v_name_list[idx]]).float()
    return appearance_tensor, anno_index, self.v_name_list[idx], motion_tensor, object_tensor
Traceback (most recent call last):
File "/home/adel/Downloads/video-captioning-models-in-Pytorch-main/untitled0.py", line 84, in <module>
loss_train,ac_loss = model.train_epoch(train_loader,utils)
File "/home/adel/Downloads/video-captioning-models-in-Pytorch-main/models/MARN/model.py", line 321, in train_epoch
for data in dataloader:
File "/home/adel/anaconda3/envs/nasib/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 435, in __next__
data = self._next_data()
File "/home/adel/anaconda3/envs/nasib/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 1085, in _next_data
return self._process_data(data)
File "/home/adel/anaconda3/envs/nasib/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 1111, in _process_data
data.reraise()
File "/home/adel/anaconda3/envs/nasib/lib/python3.8/site-packages/torch/_utils.py", line 428, in reraise
raise self.exc_type(msg)
KeyError: Caught KeyError in DataLoader worker process 0.
Original Traceback (most recent call last):
File "/home/adel/anaconda3/envs/nasib/lib/python3.8/site-packages/torch/utils/data/_utils/worker.py", line 198, in _worker_loop
data = fetcher.fetch(index)
File "/home/adel/anaconda3/envs/nasib/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py", line 44, in fetch
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/home/adel/anaconda3/envs/nasib/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py", line 44, in <listcomp>
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/home/adel/Downloads/video-captioning-models-in-Pytorch-main/data.py", line 88, in __getitem__
appearance_tensor = torch.tensor(self.appearance_feature_dict[self.v_name_list[idx]]).float()
KeyError: 'vid1'
Keys of the dictionary:
Keys: <KeysViewHDF5 ['vid1', 'vid10', 'vid100', 'vid1000', 'vid1001', 'vid1002', 'vid1003', 'vid1004', 'vid1005', 'vid1006', ...
<HDF5 dataset "vid1": shape (28, 1536), type "<f4">
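
Since 'vid1' clearly appears among the HDF5 keys printed above, a hedged guess is that the dictionary the worker process actually sees is not the one whose keys were printed (for example, a feature file for a different split, or an h5py handle that is no longer valid inside the worker). A minimal defensive sketch, reusing the attribute names from the __getitem__ above, is to verify and filter the name list when the dataset is built:

missing = [v for v in self.v_name_list if v not in self.appearance_feature_dict]
print(len(missing), "video names missing from appearance features, e.g.", missing[:5])
# Optionally drop them so __getitem__ can no longer raise a KeyError:
self.v_name_list = [v for v in self.v_name_list if v in self.appearance_feature_dict]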

File contains data in an unknown format. (m4a load from librosa)

I am currently working on a DNN that takes in m4a files. I have ffmpeg installed; training creates a few batches and then dies with this error:
Traceback (most recent call last):
File "/users/work/s163838/./main.py", line 126, in <module>
File "/users/work/s163838/./main.py", line 96, in main
print("e")
File "/apl/tryton/python/3.9.5/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 521, in __next__
data = self._next_data()
File "/apl/tryton/python/3.9.5/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 1203, in _next_data
return self._process_data(data)
File "/apl/tryton/python/3.9.5/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 1229, in _process_data
data.reraise()
File "/apl/tryton/python/3.9.5/lib/python3.9/site-packages/torch/_utils.py", line 425, in reraise
raise self.exc_type(msg)
EOFError: Caught EOFError in DataLoader worker process 0.
Original Traceback (most recent call last):
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/librosa/core/audio.py", line 164, in load
y, sr_native = __soundfile_load(path, offset, duration, dtype)
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/librosa/core/audio.py", line 195, in __soundfile_load
context = sf.SoundFile(path)
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/soundfile.py", line 629, in __init__
self._file = self._open(file, mode_int, closefd)
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/soundfile.py", line 1183, in _open
_error_check(_snd.sf_error(file_ptr),
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/soundfile.py", line 1357, in _error_check
raise RuntimeError(prefix + _ffi.string(err_str).decode('utf-8', 'replace'))
RuntimeError: Error opening 'vox2/dev/aac/id08194/QnBYPze-x9A/00079.m4a': File contains data in an unknown format.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/apl/tryton/python/3.9.5/lib/python3.9/site-packages/torch/utils/data/_utils/worker.py", line 287, in _worker_loop
data = fetcher.fetch(index)
File "/apl/tryton/python/3.9.5/lib/python3.9/site-packages/torch/utils/data/_utils/fetch.py", line 44, in fetch
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/apl/tryton/python/3.9.5/lib/python3.9/site-packages/torch/utils/data/_utils/fetch.py", line 44, in <listcomp>
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/users/work/s163838/vox_celeb_loader.py", line 53, in __getitem__
load(speaker2utt1, self.num_samples)
File "/users/work/s163838/vox_celeb_loader.py", line 13, in load
wav, sr = librosa.load(path, sr=16000)
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/librosa/util/decorators.py", line 88, in inner_f
return f(*args, **kwargs)
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/librosa/core/audio.py", line 170, in load
y, sr_native = __audioread_load(path, offset, duration, dtype)
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/librosa/core/audio.py", line 226, in __audioread_load
reader = audioread.audio_open(path)
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/audioread/__init__.py", line 111, in audio_open
return BackendClass(path)
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/audioread/rawread.py", line 65, in __init__
self._file = aifc.open(self._fh)
File "/apl/tryton/python/3.9.5/lib/python3.9/aifc.py", line 917, in open
return Aifc_read(f)
File "/apl/tryton/python/3.9.5/lib/python3.9/aifc.py", line 358, in __init__
self.initfp(f)
File "/apl/tryton/python/3.9.5/lib/python3.9/aifc.py", line 314, in initfp
chunk = Chunk(file)
File "/apl/tryton/python/3.9.5/lib/python3.9/chunk.py", line 63, in __init__
raise EOFError
EOFError
I am loading the file with this call:
wav, sr = librosa.load(path, sr=16000)
Is it just a broken file? If so, how do I skip such files? Or is it something about loading an m4a file even with ffmpeg, given that the same call produces the desired output when tested on a single m4a file?
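
Two things stand out in the traceback: soundfile cannot decode m4a (it has no AAC support), and librosa then falls back to audioread's raw AIFF reader rather than an ffmpeg-backed decoder, which suggests ffmpeg may not be visible on PATH in the compute environment. If some files are simply broken and you want to skip them, a minimal sketch for a map-style dataset (attribute names are assumptions) is:

import random
import librosa

def _safe_load(path):
    try:
        wav, sr = librosa.load(path, sr=16000)
        return wav
    except Exception:              # decoding failed: unknown format, EOF, etc.
        return None

def __getitem__(self, idx):
    wav = _safe_load(self.paths[idx])
    while wav is None:             # skip the broken file and resample another index
        idx = random.randrange(len(self.paths))
        wav = _safe_load(self.paths[idx])
    # ... build and return the training sample from `wav` ...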

Issue: TypeError: argument must be a string or number

There is only one categorical column, and I want to encode it. It works fine in my notebook, but when the notebook is uploaded to the AIcrowd platform it causes this trouble. There are three categorical features in total: one is the target feature, one is the column of IDs, and after excluding those from training I am left with one feature.
df[['intersection_pos_rel_centre']]
from sklearn.preprocessing import LabelEncoder
le=LabelEncoder()
df[['intersection_pos_rel_centre']]=le.fit_transform(df[['intersection_pos_rel_centre']])
df[['intersection_pos_rel_centre']]
My error is:
Selecting runtime language: python
[NbConvertApp] Converting notebook predict.ipynb to notebook
[NbConvertApp] Executing notebook with kernel: python
Traceback (most recent call last):
File "/opt/conda/bin/jupyter-nbconvert", line 11, in <module>
sys.exit(main())
File "/opt/conda/lib/python3.8/site-packages/jupyter_core/application.py", line 254, in launch_instance
return super(JupyterApp, cls).launch_instance(argv=argv, **kwargs)
File "/opt/conda/lib/python3.8/site-packages/traitlets/config/application.py", line 845, in launch_instance
app.start()
File "/opt/conda/lib/python3.8/site-packages/nbconvert/nbconvertapp.py", line 350, in start
self.convert_notebooks()
File "/opt/conda/lib/python3.8/site-packages/nbconvert/nbconvertapp.py", line 524, in convert_notebooks
self.convert_single_notebook(notebook_filename)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/nbconvertapp.py", line 489, in convert_single_notebook
output, resources = self.export_single_notebook(notebook_filename, resources, input_buffer=input_buffer)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/nbconvertapp.py", line 418, in export_single_notebook
output, resources = self.exporter.from_filename(notebook_filename, resources=resources)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/exporters/exporter.py", line 181, in from_filename
return self.from_file(f, resources=resources, **kw)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/exporters/exporter.py", line 199, in from_file
return self.from_notebook_node(nbformat.read(file_stream, as_version=4), resources=resources, **kw)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/exporters/notebook.py", line 32, in from_notebook_node
nb_copy, resources = super().from_notebook_node(nb, resources, **kw)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/exporters/exporter.py", line 143, in from_notebook_node
nb_copy, resources = self._preprocess(nb_copy, resources)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/exporters/exporter.py", line 318, in _preprocess
nbc, resc = preprocessor(nbc, resc)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/preprocessors/base.py", line 47, in __call__
return self.preprocess(nb, resources)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/preprocessors/execute.py", line 79, in preprocess
self.execute()
File "/opt/conda/lib/python3.8/site-packages/nbclient/util.py", line 74, in wrapped
return just_run(coro(*args, **kwargs))
File "/opt/conda/lib/python3.8/site-packages/nbclient/util.py", line 53, in just_run
return loop.run_until_complete(coro)
File "/opt/conda/lib/python3.8/asyncio/base_events.py", line 616, in run_until_complete
return future.result()
File "/opt/conda/lib/python3.8/site-packages/nbclient/client.py", line 553, in async_execute
await self.async_execute_cell(
File "/opt/conda/lib/python3.8/site-packages/nbconvert/preprocessors/execute.py", line 123, in async_execute_cell
cell, resources = self.preprocess_cell(cell, self.resources, cell_index)
File "/opt/conda/lib/python3.8/site-packages/nbconvert/preprocessors/execute.py", line 146, in preprocess_cell
cell = run_sync(NotebookClient.async_execute_cell)(self, cell, index, store_history=self.store_history)
File "/opt/conda/lib/python3.8/site-packages/nbclient/util.py", line 74, in wrapped
return just_run(coro(*args, **kwargs))
File "/opt/conda/lib/python3.8/site-packages/nbclient/util.py", line 53, in just_run
return loop.run_until_complete(coro)
File "/opt/conda/lib/python3.8/site-packages/nest_asyncio.py", line 98, in run_until_complete
return f.result()
File "/opt/conda/lib/python3.8/asyncio/futures.py", line 178, in result
raise self._exception
File "/opt/conda/lib/python3.8/asyncio/tasks.py", line 280, in __step
result = coro.send(None)
File "/opt/conda/lib/python3.8/site-packages/nbclient/client.py", line 852, in async_execute_cell
self._check_raise_for_error(cell, exec_reply)
File "/opt/conda/lib/python3.8/site-packages/nbclient/client.py", line 760, in _check_raise_for_error
raise CellExecutionError.from_cell_and_msg(cell, exec_reply_content)
nbclient.exceptions.CellExecutionError: An error occurred while executing the following cell:
------------------
df[['intersection_pos_rel_centre']]
from sklearn.preprocessing import LabelEncoder
le=LabelEncoder()
df[['intersection_pos_rel_centre']]=le.fit_transform(df[['intersection_pos_rel_centre']])
df[['intersection_pos_rel_centre']]
------------------
TypeError: argument must be a string or number
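
This TypeError usually comes from NaN values in the column: NaN is a float, and LabelEncoder cannot order a mix of floats and strings. The notebook's data may be clean while the platform's copy is not. A minimal sketch of the usual fix (the 'missing' placeholder value is an assumption):

from sklearn.preprocessing import LabelEncoder

col = 'intersection_pos_rel_centre'
df[col] = df[col].fillna('missing')   # NaN (a float) breaks encoding of strings
le = LabelEncoder()
df[col] = le.fit_transform(df[col])   # pass the 1-D Series, not df[[col]]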

How to solve the problem of PyTorch stack?

I want to run a Python program using PyTorch. How should I make each tensor in a batch equal in size? The following problem appears:
Traceback (most recent call last):
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 311, in <module>
fire.Fire(demo)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\fire\core.py", line 138, in Fire
component_trace = _Fire(component, args, parsed_flag_args, context, name)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\fire\core.py", line 468, in _Fire
target=component.__name__)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\fire\core.py", line 672, in _CallAndUpdateTrace
component = fn(*varargs, **kwargs)
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 289, in demo
n_epochs=n_epochs, batch_size=batch_size, seed=seed)
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 168, in train
n_epochs=n_epochs,
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 42, in train_epoch
for batch_idx, (input, target) in enumerate(loader):
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\dataloader.py", line 346, in __next__
data = self._next_data()
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\dataloader.py", line 386, in _next_data
data = self._dataset_fetcher.fetch(index) # may raise StopIteration
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\fetch.py", line 47, in fetch
return self.collate_fn(data)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 87, in default_collate
return [default_collate(samples) for samples in transposed]
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 87, in <listcomp>
return [default_collate(samples) for samples in transposed]
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 72, in default_collate
return default_collate([torch.as_tensor(b) for b in batch])
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 63, in default_collate
return torch.stack(batch, 0, out=out)
RuntimeError: stack expects each tensor to be equal size, but got [650] at entry 0 and [108] at entry 1
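
default_collate stacks the samples of a batch with torch.stack, which requires identical shapes. For variable-length 1-D tensors like the [650] and [108] in the error, one common workaround is a custom collate_fn that pads each batch to its longest sample; the (input, target) structure below is an assumption based on the traceback:

import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader

def pad_collate(batch):
    inputs, targets = zip(*batch)
    # pad every input to the length of the longest one in this batch
    inputs = pad_sequence([torch.as_tensor(x) for x in inputs], batch_first=True)
    return inputs, torch.as_tensor(targets)

loader = DataLoader(dataset, batch_size=32, collate_fn=pad_collate)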

Pylint resulting in RuntimeError: generator raised StopIteration with latest package versions

Here are my package versions:
$ pylint --version
pylint 2.3.1
astroid 2.2.5
Python 3.7.4 (default, Aug 14 2019, 12:09:51)
[GCC 8.3.0]
When I run
pylint {package_name}
I get a RuntimeError as shown below:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/astroid/protocols.py", line 492, in _infer_context_manager
enter = next(inferred.igetattr("__enter__", context=context))
StopIteration
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/bin/pylint", line 10, in <module>
sys.exit(run_pylint())
File "/usr/local/lib/python3.7/site-packages/pylint/__init__.py", line 20, in run_pylint
Run(sys.argv[1:])
File "/usr/local/lib/python3.7/site-packages/pylint/lint.py", line 1628, in __init__
linter.check(args)
File "/usr/local/lib/python3.7/site-packages/pylint/lint.py", line 943, in check
self._do_check(files_or_modules)
File "/usr/local/lib/python3.7/site-packages/pylint/lint.py", line 1075, in _do_check
self.check_astroid_module(ast_node, walker, rawcheckers, tokencheckers)
File "/usr/local/lib/python3.7/site-packages/pylint/lint.py", line 1158, in check_astroid_module
walker.walk(ast_node)
File "/usr/local/lib/python3.7/site-packages/pylint/utils.py", line 1303, in walk
self.walk(child)
File "/usr/local/lib/python3.7/site-packages/pylint/utils.py", line 1300, in walk
cb(astroid)
File "/usr/local/lib/python3.7/site-packages/pylint/checkers/variables.py", line 1590, in visit_import
module = next(_infer_name_module(node, parts[0]))
File "/usr/local/lib/python3.7/site-packages/astroid/util.py", line 160, in limit_inference
yield from islice(iterator, size)
File "/usr/local/lib/python3.7/site-packages/astroid/context.py", line 113, in cache_generator
for result in generator:
File "/usr/local/lib/python3.7/site-packages/astroid/decorators.py", line 131, in raise_if_nothing_inferred
yield next(generator)
File "/usr/local/lib/python3.7/site-packages/astroid/decorators.py", line 95, in wrapped
res = next(generator)
File "/usr/local/lib/python3.7/site-packages/astroid/inference.py", line 240, in infer_import
yield self.do_import_module(name)
File "/usr/local/lib/python3.7/site-packages/astroid/mixins.py", line 100, in do_import_module
modname, level=level, relative_only=level and level >= 1
File "/usr/local/lib/python3.7/site-packages/astroid/scoped_nodes.py", line 619, in import_module
return MANAGER.ast_from_module_name(absmodname)
File "/usr/local/lib/python3.7/site-packages/astroid/manager.py", line 171, in ast_from_module_name
return self.ast_from_file(found_spec.location, modname, fallback=False)
File "/usr/local/lib/python3.7/site-packages/astroid/manager.py", line 91, in ast_from_file
return AstroidBuilder(self).file_build(filepath, modname)
File "/usr/local/lib/python3.7/site-packages/astroid/builder.py", line 136, in file_build
return self._post_build(module, encoding)
File "/usr/local/lib/python3.7/site-packages/astroid/builder.py", line 153, in _post_build
self.add_from_names_to_locals(from_node)
File "/usr/local/lib/python3.7/site-packages/astroid/builder.py", line 206, in add_from_names_to_locals
imported = node.do_import_module()
File "/usr/local/lib/python3.7/site-packages/astroid/mixins.py", line 100, in do_import_module
modname, level=level, relative_only=level and level >= 1
File "/usr/local/lib/python3.7/site-packages/astroid/scoped_nodes.py", line 619, in import_module
return MANAGER.ast_from_module_name(absmodname)
File "/usr/local/lib/python3.7/site-packages/astroid/manager.py", line 171, in ast_from_module_name
return self.ast_from_file(found_spec.location, modname, fallback=False)
File "/usr/local/lib/python3.7/site-packages/astroid/manager.py", line 91, in ast_from_file
return AstroidBuilder(self).file_build(filepath, modname)
File "/usr/local/lib/python3.7/site-packages/astroid/builder.py", line 136, in file_build
return self._post_build(module, encoding)
File "/usr/local/lib/python3.7/site-packages/astroid/builder.py", line 156, in _post_build
self.delayed_assattr(delayed)
File "/usr/local/lib/python3.7/site-packages/astroid/builder.py", line 223, in delayed_assattr
for inferred in node.expr.infer():
File "/usr/local/lib/python3.7/site-packages/astroid/decorators.py", line 141, in raise_if_nothing_inferred
yield from generator
File "/usr/local/lib/python3.7/site-packages/astroid/decorators.py", line 95, in wrapped
res = next(generator)
File "/usr/local/lib/python3.7/site-packages/astroid/inference.py", line 279, in infer_attribute
for owner in self.expr.infer(context):
File "/usr/local/lib/python3.7/site-packages/astroid/util.py", line 160, in limit_inference
yield from islice(iterator, size)
File "/usr/local/lib/python3.7/site-packages/astroid/context.py", line 113, in cache_generator
for result in generator:
File "/usr/local/lib/python3.7/site-packages/astroid/decorators.py", line 141, in raise_if_nothing_inferred
yield from generator
File "/usr/local/lib/python3.7/site-packages/astroid/decorators.py", line 95, in wrapped
res = next(generator)
File "/usr/local/lib/python3.7/site-packages/astroid/bases.py", line 137, in _infer_stmts
for inferred in stmt.infer(context=context):
File "/usr/local/lib/python3.7/site-packages/astroid/util.py", line 160, in limit_inference
yield from islice(iterator, size)
File "/usr/local/lib/python3.7/site-packages/astroid/context.py", line 113, in cache_generator
for result in generator:
File "/usr/local/lib/python3.7/site-packages/astroid/decorators.py", line 131, in raise_if_nothing_inferred
yield next(generator)
File "/usr/local/lib/python3.7/site-packages/astroid/decorators.py", line 92, in wrapped
generator = _func(node, context, **kwargs)
File "/usr/local/lib/python3.7/site-packages/astroid/inference.py", line 832, in infer_assign
stmts = list(self.assigned_stmts(context=context))
File "/usr/local/lib/python3.7/site-packages/astroid/decorators.py", line 131, in raise_if_nothing_inferred
yield next(generator)
File "/usr/local/lib/python3.7/site-packages/astroid/protocols.py", line 537, in with_assigned_stmts
yield from _infer_context_manager(self, mgr, context)
RuntimeError: generator raised StopIteration
From some searching, it seems this error should have been fixed in Pylint 2.x; however, I still get the error with the latest versions on Python 3.7. Any fixes?
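
This looks like PEP 479 fallout: on Python 3.7, a StopIteration escaping a generator (here, astroid's _infer_context_manager) is converted into a RuntimeError. The fix landed in later astroid releases, so the usual remedy is upgrading both packages past the versions shown above (which minimum versions suffice is an assumption; recent releases include the fix):

$ pip install --upgrade pylint astroid
$ pylint --version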
