I am facing an error related to the data loader's dictionary for the MSVD dataset while running the training file for video captioning from https://github.com/nasib-ullah/video-captioning-models-in-Pytorch
//////////////////////////////////////////////////////
Code of the __getitem__ function:
def __getitem__(self, idx):
    # Pick one ground-truth caption at random for this video.
    anno = random.choice(self.annotation_dict[self.v_name_list[idx]])
    anno_index = []
    for word in anno.split(' '):
        try:
            anno_index.append(self.voc.word2index[word])
        except KeyError:
            # Skip words that are not in the vocabulary.
            pass
    if self.opt_truncate_caption:
        if len(anno_index) > self.max_caption_length:
            anno_index = anno_index[:self.max_caption_length]
    anno_index = anno_index + [self.voc.cfg.EOS_token]
    appearance_tensor = torch.tensor(self.appearance_feature_dict[self.v_name_list[idx]]).float()
    if self.motion_feature_dict is None:
        motion_tensor = torch.zeros_like(appearance_tensor)
    else:
        motion_tensor = torch.tensor(self.motion_feature_dict[self.v_name_list[idx]]).float()
    if self.object_feature_dict is None:
        object_tensor = torch.zeros_like(appearance_tensor)
    else:
        object_tensor = torch.tensor(self.object_feature_dict[self.v_name_list[idx]]).float()
    return appearance_tensor, anno_index, self.v_name_list[idx], motion_tensor, object_tensor
Traceback (most recent call last):
File "/home/adel/Downloads/video-captioning-models-in-Pytorch-main/untitled0.py", line 84, in <module>
loss_train,ac_loss = model.train_epoch(train_loader,utils)
File "/home/adel/Downloads/video-captioning-models-in-Pytorch-main/models/MARN/model.py", line 321, in train_epoch
for data in dataloader:
File "/home/adel/anaconda3/envs/nasib/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 435, in __next__
data = self._next_data()
File "/home/adel/anaconda3/envs/nasib/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 1085, in _next_data
return self._process_data(data)
File "/home/adel/anaconda3/envs/nasib/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 1111, in _process_data
data.reraise()
File "/home/adel/anaconda3/envs/nasib/lib/python3.8/site-packages/torch/_utils.py", line 428, in reraise
raise self.exc_type(msg)
KeyError: Caught KeyError in DataLoader worker process 0.
Original Traceback (most recent call last):
File "/home/adel/anaconda3/envs/nasib/lib/python3.8/site-packages/torch/utils/data/_utils/worker.py", line 198, in _worker_loop
data = fetcher.fetch(index)
File "/home/adel/anaconda3/envs/nasib/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py", line 44, in fetch
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/home/adel/anaconda3/envs/nasib/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py", line 44, in <listcomp>
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/home/adel/Downloads/video-captioning-models-in-Pytorch-main/data.py", line 88, in __getitem__
appearance_tensor = torch.tensor(self.appearance_feature_dict[self.v_name_list[idx]]).float()
KeyError: 'vid1'
////////////////////////////////////////////////////////
Keys of the dictionary:
Keys: <KeysViewHDF5 ['vid1', 'vid10', 'vid100', 'vid1000', 'vid1001', 'vid1002', 'vid1003', 'vid1004', 'vid1005', 'vid1006', ...
<HDF5 dataset "vid1": shape (28, 1536), type "<f4">
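Since 'vid1' clearly exists in the HDF5 file inspected above, the dict the Dataset actually indexes may not be that file (e.g., a different split's features), or an h5py handle opened in the parent process may not survive being forked into DataLoader workers. A minimal diagnostic sketch, assuming train_dataset is the Dataset instance whose __getitem__ is shown above (the variable name is hypothetical):

# Hypothetical check, run in the main process before training:
missing = [name for name in train_dataset.v_name_list
           if name not in train_dataset.appearance_feature_dict]
print(len(missing), 'video names missing from the feature dict')
print(missing[:10])
# Also try DataLoader(..., num_workers=0) so the KeyError surfaces
# directly instead of being re-raised from a worker process.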
I have 375,000 items in my table.
I am looping to obtain the ids of all items, with the API limit set to 20,000 items per call.
After 200,000 I always start to get httpx.ReadTimeout: The read operation timed out; sometimes it reaches 240,000, but it never goes further.
I have tried different wait times after each loop iteration.
I have also tried lowering the API limit to 10,000 and raising it to 30,000 or 50,000 to make fewer calls, but in all cases it gets stuck at around 150,000 or 200,000.
existing_search_result = supabase.table('vehicles').select('ref_id', count='exact').order('id', desc=False).execute()
existing_items = []
range_start = 0  # starting offset for pagination
range_step = len(existing_search_result.data)
total_existing_items = existing_search_result.count
print(total_existing_items)
while len(existing_items) < total_existing_items:
    try:
        existing_items += (
            supabase.table(
                'vehicles'
            ).select('ref_id')
            .order('id', desc=False)
            .range(range_start, range_start + range_step)
            .execute()
        ).data
        range_start += range_step
    except Exception as e:
        logging.exception(e)
    print(range_start, len(existing_items))
    time.sleep(0.30)
Error log:
2022-10-23 21:04:14,168:ERROR - The read operation timed out
Traceback (most recent call last):
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpcore/_exceptions.py", line 8, in map_exceptions
yield
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpcore/backends/sync.py", line 26, in read
return self._sock.recv(max_bytes)
File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/ssl.py", line 1226, in recv
return self.read(buflen)
File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/ssl.py", line 1101, in read
return self._sslobj.read(len)
socket.timeout: The read operation timed out
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpx/_transports/default.py", line 60, in map_httpcore_exceptions
yield
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpx/_transports/default.py", line 204, in handle_request
resp = self._pool.handle_request(req)
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpcore/_sync/connection_pool.py", line 253, in handle_request
raise exc
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpcore/_sync/connection_pool.py", line 237, in handle_request
response = connection.handle_request(request)
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpcore/_sync/connection.py", line 90, in handle_request
return self._connection.handle_request(request)
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpcore/_sync/http11.py", line 102, in handle_request
raise exc
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpcore/_sync/http11.py", line 81, in handle_request
) = self._receive_response_headers(**kwargs)
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpcore/_sync/http11.py", line 143, in _receive_response_headers
event = self._receive_event(timeout=timeout)
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpcore/_sync/http11.py", line 172, in _receive_event
data = self._network_stream.read(
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpcore/backends/sync.py", line 26, in read
return self._sock.recv(max_bytes)
File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/contextlib.py", line 135, in __exit__
self.gen.throw(type, value, traceback)
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpcore/_exceptions.py", line 12, in map_exceptions
raise to_exc(exc)
httpcore.ReadTimeout: The read operation timed out
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/ak4zh/updater/main.py", line 278, in job
supabase.table(
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/postgrest/_sync/request_builder.py", line 53, in execute
r = self.session.request(
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpx/_client.py", line 802, in request
return self.send(request, auth=auth, follow_redirects=follow_redirects)
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpx/_client.py", line 889, in send
response = self._send_handling_auth(
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpx/_client.py", line 917, in _send_handling_auth
response = self._send_handling_redirects(
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpx/_client.py", line 954, in _send_handling_redirects
response = self._send_single_request(request)
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpx/_client.py", line 990, in _send_single_request
response = transport.handle_request(request)
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpx/_transports/default.py", line 204, in handle_request
resp = self._pool.handle_request(req)
File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/contextlib.py", line 135, in __exit__
self.gen.throw(type, value, traceback)
File "/Users/ak4zh/updater/venv/lib/python3.9/site-packages/httpx/_transports/default.py", line 77, in map_httpcore_exceptions
raise mapped_exc(message) from exc
httpx.ReadTimeout: The read operation timed out
I had the same problem, so I created a function that fetches 1,000 items at a time and merges the results.
This is my code:
def count_data(tb_name: str, field_name: str):
    return supabase.table(tb_name).select(field_name, count='exact').execute().count

def get_field_data(tb_name: str, src_field: str, len_record: int, id_field: str = 'id'):
    if len_record <= 1000:
        field_data = supabase.table(tb_name).select(id_field, src_field).order(
            column=id_field).execute().data
    else:
        rnk = len_record // 1000
        field_data = []
        for i in range(rnk):
            min_rg = i * 1000
            max_rg = (i + 1) * 1000 - 1
            # .range() bounds are inclusive, so each call returns 1000 rows.
            field_data = field_data + supabase.table(tb_name).select(id_field, src_field).order(
                column=id_field).range(min_rg, max_rg).execute().data
        if max_rg + 1 < len_record:
            # Fetch the remaining rows after the last full chunk.
            field_data = field_data + supabase.table(tb_name).select(id_field, src_field).order(
                column=id_field).range(max_rg + 1, len_record - 1).execute().data
    return field_data
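For reference, the helpers above could then be used like this (the table and column names are just the ones from the question):

total = count_data('vehicles', 'ref_id')
existing_items = get_field_data('vehicles', 'ref_id', total, id_field='id')
print(total, len(existing_items))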
I am currently working on a DNN that takes in m4a files. I have ffmpeg installed; training creates a few batches and then dies with this error:
Traceback (most recent call last):
File "/users/work/s163838/./main.py", line 126, in <module>
File "/users/work/s163838/./main.py", line 96, in main
print("e")
File "/apl/tryton/python/3.9.5/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 521, in __next__
data = self._next_data()
File "/apl/tryton/python/3.9.5/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 1203, in _next_data
return self._process_data(data)
File "/apl/tryton/python/3.9.5/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 1229, in _process_data
data.reraise()
File "/apl/tryton/python/3.9.5/lib/python3.9/site-packages/torch/_utils.py", line 425, in reraise
raise self.exc_type(msg)
EOFError: Caught EOFError in DataLoader worker process 0.
Original Traceback (most recent call last):
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/librosa/core/audio.py", line 164, in load
y, sr_native = __soundfile_load(path, offset, duration, dtype)
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/librosa/core/audio.py", line 195, in __soundfile_load
context = sf.SoundFile(path)
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/soundfile.py", line 629, in __init__
self._file = self._open(file, mode_int, closefd)
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/soundfile.py", line 1183, in _open
_error_check(_snd.sf_error(file_ptr),
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/soundfile.py", line 1357, in _error_check
raise RuntimeError(prefix + _ffi.string(err_str).decode('utf-8', 'replace'))
RuntimeError: Error opening 'vox2/dev/aac/id08194/QnBYPze-x9A/00079.m4a': File contains data in an unknown format.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/apl/tryton/python/3.9.5/lib/python3.9/site-packages/torch/utils/data/_utils/worker.py", line 287, in _worker_loop
data = fetcher.fetch(index)
File "/apl/tryton/python/3.9.5/lib/python3.9/site-packages/torch/utils/data/_utils/fetch.py", line 44, in fetch
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/apl/tryton/python/3.9.5/lib/python3.9/site-packages/torch/utils/data/_utils/fetch.py", line 44, in <listcomp>
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/users/work/s163838/vox_celeb_loader.py", line 53, in __getitem__
load(speaker2utt1, self.num_samples)
File "/users/work/s163838/vox_celeb_loader.py", line 13, in load
wav, sr = librosa.load(path, sr=16000)
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/librosa/util/decorators.py", line 88, in inner_f
return f(*args, **kwargs)
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/librosa/core/audio.py", line 170, in load
y, sr_native = __audioread_load(path, offset, duration, dtype)
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/librosa/core/audio.py", line 226, in __audioread_load
reader = audioread.audio_open(path)
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/audioread/__init__.py", line 111, in audio_open
return BackendClass(path)
File "/users/kdm/s163838/.local/lib/python3.9/site-packages/audioread/rawread.py", line 65, in __init__
self._file = aifc.open(self._fh)
File "/apl/tryton/python/3.9.5/lib/python3.9/aifc.py", line 917, in open
return Aifc_read(f)
File "/apl/tryton/python/3.9.5/lib/python3.9/aifc.py", line 358, in __init__
self.initfp(f)
File "/apl/tryton/python/3.9.5/lib/python3.9/aifc.py", line 314, in initfp
chunk = Chunk(file)
File "/apl/tryton/python/3.9.5/lib/python3.9/chunk.py", line 63, in __init__
raise EOFError
EOFError
I am loading the audio with this call:
wav, sr = librosa.load(path, sr=16000)
Is it just a broken file? If so, how do I skip such files? Or is it a problem with loading m4a files in general, even though ffmpeg is installed and a single m4a file loads with the desired output when tested on its own?
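One common workaround is to catch the failure inside the Dataset and fall back to a different sample, so a single unreadable file does not kill the whole worker. A minimal sketch with hypothetical class and attribute names (this is not the loader from the question):

import random
import librosa
import torch
from torch.utils.data import Dataset

class SkippingAudioDataset(Dataset):
    # `paths` is a list of audio file paths; unreadable files are skipped.
    def __init__(self, paths, num_samples):
        self.paths = paths
        self.num_samples = num_samples

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, idx):
        try:
            wav, sr = librosa.load(self.paths[idx], sr=16000)
        except (RuntimeError, EOFError):
            # Broken/unsupported file: retry with another random index.
            return self[random.randrange(len(self))]
        return torch.from_numpy(wav[:self.num_samples])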
I have an exception occurring in a for statement:
for _, data in enumerate(dataloader, 0):
Not in the body of the for statement, but in the for statement itself. How do I catch this and continue?
Here is the entire error trace:
Traceback (most recent call last):
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/reprex/run_DL.py", line 67, in <module>
ut.generate_validation_model(cfg)
File "/panfs/roc/groups/4/miran045/reine097/projects/AlexNet_Abrol2021/reprex/utils.py", line 227, in generate_validation_model
loss = train(trainloader, net, optimizer, criterion, cfg.cuda_avl)
File "/panfs/roc/groups/4/miran045/reine097/projects/AlexNet_Abrol2021/reprex/utils.py", line 96, in train
for _, data in enumerate(dataloader, 0):
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 521, in __next__
data = self._next_data()
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 1203, in _next_data
return self._process_data(data)
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/dataloader.py", line 1229, in _process_data
data.reraise()
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/_utils.py", line 434, in reraise
raise exception
RuntimeError: Caught RuntimeError in DataLoader worker process 0.
Original Traceback (most recent call last):
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/_utils/worker.py", line 287, in _worker_loop
data = fetcher.fetch(index)
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/_utils/fetch.py", line 52, in fetch
return self.collate_fn(data)
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/_utils/collate.py", line 84, in default_collate
return [default_collate(samples) for samples in transposed]
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/_utils/collate.py", line 84, in <listcomp>
return [default_collate(samples) for samples in transposed]
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/_utils/collate.py", line 64, in default_collate
return default_collate([torch.as_tensor(b) for b in batch])
File "/home/miran045/reine097/projects/AlexNet_Abrol2021/venv/lib/python3.9/site-packages/torch/utils/data/_utils/collate.py", line 56, in default_collate
return torch.stack(batch, 0, out=out)
RuntimeError: stack expects each tensor to be equal size, but got [1, 208, 300, 320] at entry 0 and [1, 320, 300, 208] at entry 13
The error occurs on this line:
File "/panfs/roc/groups/4/miran045/reine097/projects/AlexNet_Abrol2021/reprex/utils.py", line 96, in train
for _, data in enumerate(dataloader, 0):
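Because the exception is raised by the DataLoader's __next__ rather than by the loop body, one way to catch it and keep going is to drive the iterator manually instead of using a for loop. A sketch, assuming it is acceptable to skip the offending batch (the root cause, tensors of unequal size, still needs fixing in the Dataset or via a custom collate_fn):

it = iter(dataloader)
batch_idx = 0
while True:
    try:
        data = next(it)          # this is where the worker's error re-raises
    except StopIteration:
        break                    # normal end of the epoch
    except RuntimeError as exc:
        print(f'Skipping batch {batch_idx}: {exc}')
        batch_idx += 1
        continue
    # ... training step using `data` ...
    batch_idx += 1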
I want to run a Python program using PyTorch. How should I make each tensor in a batch equal in size? The following problem appears:
Traceback (most recent call last):
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 311, in <module>
fire.Fire(demo)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\fire\core.py", line 138, in Fire
component_trace = _Fire(component, args, parsed_flag_args, context, name)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\fire\core.py", line 468, in _Fire
target=component.__name__)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\fire\core.py", line 672, in _CallAndUpdateTrace
component = fn(*varargs, **kwargs)
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 289, in demo
n_epochs=n_epochs, batch_size=batch_size, seed=seed)
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 168, in train
n_epochs=n_epochs,
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demoEmotion.py", line 42, in train_epoch
for batch_idx, (input, target) in enumerate(loader):
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\dataloader.py", line 346, in __next__
data = self._next_data()
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\dataloader.py", line 386, in _next_data
data = self._dataset_fetcher.fetch(index) # may raise StopIteration
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\fetch.py", line 47, in fetch
return self.collate_fn(data)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 87, in default_collate
return [default_collate(samples) for samples in transposed]
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 87, in <listcomp>
return [default_collate(samples) for samples in transposed]
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 72, in default_collate
return default_collate([torch.as_tensor(b) for b in batch])
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\collate.py", line 63, in default_collate
return torch.stack(batch, 0, out=out)
RuntimeError: stack expects each tensor to be equal size, but got [650] at entry 0 and [108] at entry 1
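If the samples are variable-length 1-D tensors (as the sizes [650] and [108] suggest), default_collate cannot stack them; a custom collate_fn that pads each batch to its longest sequence is one option. A minimal sketch, assuming each sample is an (input, target) pair (the names are hypothetical):

import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader

def pad_collate(batch):
    # Pad variable-length 1-D inputs to the batch maximum instead of
    # stacking equal-size tensors as default_collate would.
    inputs, targets = zip(*batch)
    padded = pad_sequence([torch.as_tensor(x) for x in inputs],
                          batch_first=True)   # shape: (batch, max_len)
    return padded, torch.as_tensor(targets)

# loader = DataLoader(dataset, batch_size=32, collate_fn=pad_collate)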
I am trying to run source code from a Keras tutorial for image recognition. I'm getting this error:
Traceback (most recent call last):
File "ty.py", line 52, in <module>
X, Y = hf['imgs'][:], hf['labels'][:]
File "h5py\_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
File "h5py\_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
File "C:\Users\alams\Anaconda3\envs\tensorflow\lib\site-
packages\h5py\_hl\group.py", line 167, in __getitem__
oid = h5o.open(self.id, self._e(name), lapl=self._lapl)
File "h5py\_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
File "h5py\_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
File "h5py\h5o.pyx", line 190, in h5py.h5o.open
KeyError: "Unable to open object (object 'imgs' doesn't exist)"
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "ty.py", line 66, in <module>
label = get_class(img_path)
File "ty.py", line 48, in get_class
return int(img_path.split('/')[-2])
ValueError: invalid literal for int() with base 10: 'Final_Training'
This is my source code:
def get_class(img_path):
    return int(img_path.split('/')[-2])

try:
    with h5py.File('X.h5') as hf:
        X, Y = hf['imgs'][:], hf['labels'][:]
except (IOError, OSError, KeyError):
    root_dir = 'Data/Final_Training/Images/'
    imgs = []
    labels = []
    all_img_paths = glob.glob(os.path.join(root_dir, '*/*.ppm'))
    np.random.shuffle(all_img_paths)
    for img_path in all_img_paths:
        try:
            img = preprocess_img(io.imread(img_path))
            label = get_class(img_path)
            imgs.append(img)
            labels.append(label)
        except (IOError, OSError):
            print('missed', img_path)
            pass
    X = np.array(imgs, dtype='float32')
    Y = np.eye(NUM_CLASSES, dtype='uint8')[labels]
    with h5py.File('X.h5', 'w') as hf:
        hf.create_dataset('imgs', data=X)
        hf.create_dataset('labels', data=Y)
I tried running this code after removing the int conversion from the return of the first function, but it seems the values are not all written to X.h5.
You have defined imgs = [] inside the except block, so it is only created when that branch runs and is not available otherwise. Define it outside the block:
def get_class(img_path):
    return int(img_path.split('/')[-2])

imgs = []
labels = []
# Your code
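As a side note, the ValueError in the traceback (int('Final_Training')) suggests that img_path.split('/') is not splitting the backslash-separated components glob produces on Windows, so [-2] picks up 'Final_Training' instead of the class directory. A platform-independent variant of get_class could look like this (a sketch, not part of the original tutorial):

import os

def get_class(img_path):
    # Normalize separators, then take the parent directory name,
    # which works for both 'a/b/c.ppm' and 'a\\b\\c.ppm' paths.
    return int(os.path.normpath(img_path).split(os.sep)[-2])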