Split multi-page TIFF with Pillow package - Python

I searched for this topic and found the following code:
from PIL import Image, ImageSequence

im = Image.open("MySample.tiff")
for i, page in enumerate(ImageSequence.Iterator(im)):
    page.save("Page_%d.png" % i)
The code works for some TIFF files, but I encountered an error with one of them. The traceback is as follows:
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-2-12bbb3a559f8> in <module>
5 im = Image.open("MySample.tiff")
6 for i, page in enumerate(ImageSequence.Iterator(im)):
----> 7 page.save("Page_%d.png" % i)
C:\ProgramData\Anaconda3\lib\site-packages\PIL\Image.py in save(self, fp, format, **params)
2128
2129 # may mutate self!
-> 2130 self._ensure_mutable()
2131
2132 save_all = params.pop("save_all", False)
C:\ProgramData\Anaconda3\lib\site-packages\PIL\Image.py in _ensure_mutable(self)
617 def _ensure_mutable(self):
618 if self.readonly:
--> 619 self._copy()
620 else:
621 self.load()
C:\ProgramData\Anaconda3\lib\site-packages\PIL\Image.py in _copy(self)
610
611 def _copy(self):
--> 612 self.load()
613 self.im = self.im.copy()
614 self.pyaccess = None
C:\ProgramData\Anaconda3\lib\site-packages\PIL\TiffImagePlugin.py in load(self)
1086 def load(self):
1087 if self.tile and self.use_load_libtiff:
-> 1088 return self._load_libtiff()
1089 return super().load()
1090
C:\ProgramData\Anaconda3\lib\site-packages\PIL\TiffImagePlugin.py in _load_libtiff(self)
1190
1191 if err < 0:
-> 1192 raise OSError(err)
1193
1194 return Image.Image.load(self)
OSError: -9
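One workaround to try is to decode each frame explicitly instead of saving the lazy page objects the iterator yields. A minimal sketch, assuming the frames can be decoded to RGB; whether this sidesteps the libtiff error depends on the file, and if every decode path fails the TIFF itself may be malformed:
from PIL import Image

im = Image.open("MySample.tiff")
for i in range(getattr(im, "n_frames", 1)):
    im.seek(i)                  # move to frame i
    frame = im.convert("RGB")   # force a full decode into a new image
    frame.save("Page_%d.png" % i)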

NoBackendError while getting features of audio files using librosa

I'm using a Jupyter notebook to run this. The whole program is available at this link: https://github.com/MiteshPuthran/Speech-Emotion-Analyzer/blob/master/final_results_gender_test.ipynb
I tried using ffmpeg and tried another .wav file, but nothing seems to work. Please help.
This is the code:
df = pd.DataFrame(columns=['feature'])
bookmark = 0
for index, y in enumerate(mylist):
    if (mylist[index][6:-16] != '01' and mylist[index][6:-16] != '07'
            and mylist[index][6:-16] != '08' and mylist[index][:2] != 'su'
            and mylist[index][:1] != 'n' and mylist[index][:1] != 'd'):
        X, sample_rate = librosa.load(
            'C:/Users/Admin/Desktop/pw-4/Speech-Emotion-Analyzer-master/Speech-Emotion-Analyzer-master/' + y,
            res_type='kaiser_fast', duration=2.5, sr=22050*2, offset=0.5)
        sample_rate = np.array(sample_rate)
        mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=13), axis=0)
        feature = mfccs
        # [float(i) for i in feature]
        # feature1 = feature[:135]
        df.loc[bookmark] = [feature]
        bookmark = bookmark + 1
and this is the error I'm getting:
RuntimeError Traceback (most recent call last)
File ~\AppData\Roaming\Python\Python39\site-packages\librosa\core\audio.py:155, in load(path, sr, mono, offset, duration, dtype, res_type)
153 else:
154 # Otherwise, create the soundfile object
--> 155 context = sf.SoundFile(path)
157 with context as sf_desc:
File ~\AppData\Roaming\Python\Python39\site-packages\soundfile.py:629, in SoundFile.__init__(self, file, mode, samplerate, channels, subtype, endian, format, closefd)
627 self._info = _create_info_struct(file, mode, samplerate, channels,
628 format, subtype, endian)
--> 629 self._file = self._open(file, mode_int, closefd)
630 if set(mode).issuperset('r+') and self.seekable():
631 # Move write position to 0 (like in Python file objects)
File ~\AppData\Roaming\Python\Python39\site-packages\soundfile.py:1183, in SoundFile._open(self, file, mode_int, closefd)
1182 raise TypeError("Invalid file: {0!r}".format(self.name))
-> 1183 _error_check(_snd.sf_error(file_ptr),
1184 "Error opening {0!r}: ".format(self.name))
1185 if mode_int == _snd.SFM_WRITE:
1186 # Due to a bug in libsndfile version <= 1.0.25, frames != 0
1187 # when opening a named pipe in SFM_WRITE mode.
1188 # See http://github.com/erikd/libsndfile/issues/77.
File ~\AppData\Roaming\Python\Python39\site-packages\soundfile.py:1357, in _error_check(err, prefix)
1356 err_str = _snd.sf_error_number(err)
-> 1357 raise RuntimeError(prefix + _ffi.string(err_str).decode('utf-8', 'replace'))
RuntimeError: Error opening 'C:/Users/Admin/Desktop/pw-4/Speech-Emotion-Analyzer-master/Speech-Emotion-Analyzer-master/AudioRecorder.ipynb': File contains data in an unknown format.
During handling of the above exception, another exception occurred:
NoBackendError Traceback (most recent call last)
Input In [46], in <cell line: 3>()
3 for index,y in enumerate(mylist):
4 if mylist[index][6:-16]!='01' and mylist[index][6:-16]!='07' and mylist[index][6:-16]!='08' and mylist[index][:2]!='su' and mylist[index][:1]!='n' and mylist[index][:1]!='d':
----> 5 X, sample_rate = librosa.load('C:/Users/Admin/Desktop/pw-4/Speech-Emotion-Analyzer-master/Speech-Emotion-Analyzer-master/'+y, res_type='kaiser_fast',duration=2.5,sr=22050*2,offset=0.5)
6 sample_rate = np.array(sample_rate)
7 mfccs = np.mean(librosa.feature.mfcc(y=X,
8 sr=sample_rate,
9 n_mfcc=13),
10 axis=0)
File ~\AppData\Roaming\Python\Python39\site-packages\librosa\util\decorators.py:88, in deprecate_positional_args.<locals>._inner_deprecate_positional_args.<locals>.inner_f(*args, **kwargs)
86 extra_args = len(args) - len(all_args)
87 if extra_args <= 0:
---> 88 return f(*args, **kwargs)
90 # extra_args > 0
91 args_msg = [
92 "{}={}".format(name, arg)
93 for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:])
94 ]
File ~\AppData\Roaming\Python\Python39\site-packages\librosa\core\audio.py:174, in load(path, sr, mono, offset, duration, dtype, res_type)
172 if isinstance(path, (str, pathlib.PurePath)):
173 warnings.warn("PySoundFile failed. Trying audioread instead.", stacklevel=2)
--> 174 y, sr_native = __audioread_load(path, offset, duration, dtype)
175 else:
176 raise (exc)
File ~\AppData\Roaming\Python\Python39\site-packages\librosa\core\audio.py:198, in __audioread_load(path, offset, duration, dtype)
192 """Load an audio buffer using audioread.
193
194 This loads one block at a time, and then concatenates the results.
195 """
197 y = []
--> 198 with audioread.audio_open(path) as input_file:
199 sr_native = input_file.samplerate
200 n_channels = input_file.channels
File ~\AppData\Roaming\Python\Python39\site-packages\audioread\__init__.py:116, in audio_open(path, backends)
113 pass
115 # All backends failed!
--> 116 raise NoBackendError()
NoBackendError:
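Note that the failing path at the top of the traceback ends in AudioRecorder.ipynb: the filename filter in the loop lets a notebook file through, librosa's soundfile backend rejects it, and the audioread fallback then fails because no backend is installed. A minimal sketch of one way to guard against that (the directory path comes from the question; the extension filter itself is my assumption):
import os

audio_dir = 'C:/Users/Admin/Desktop/pw-4/Speech-Emotion-Analyzer-master/Speech-Emotion-Analyzer-master/'
# keep only .wav files so stray files such as notebooks never reach librosa.load
mylist = [f for f in os.listdir(audio_dir) if f.lower().endswith('.wav')]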

"Error while extracting" from tensorflow datasets

I want to train a TensorFlow image segmentation model on COCO, and thought I would leverage the dataset builder already included. The download seems to complete, but it crashes while extracting the zip files.
I'm running TF 2.0.0 in a Jupyter notebook under a conda environment, on a 64-bit Windows 10 machine. The Oxford Pet III dataset used in the official image segmentation tutorial works fine.
Below is the error message (my local user name is replaced with %user%).
---------------------------------------------------------------------------
OutOfRangeError Traceback (most recent call last)
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\extractor.py in _sync_extract(self, from_path, method, to_path)
88 try:
---> 89 for path, handle in iter_archive(from_path, method):
90 path = tf.compat.as_text(path)
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\extractor.py in iter_zip(arch_f)
176 with _open_or_pass(arch_f) as fobj:
--> 177 z = zipfile.ZipFile(fobj)
178 for member in z.infolist():
~\.conda\envs\tf-tutorial\lib\zipfile.py in __init__(self, file, mode, compression, allowZip64)
1130 if mode == 'r':
-> 1131 self._RealGetContents()
1132 elif mode in ('w', 'x'):
~\.conda\envs\tf-tutorial\lib\zipfile.py in _RealGetContents(self)
1193 try:
-> 1194 endrec = _EndRecData(fp)
1195 except OSError:
~\.conda\envs\tf-tutorial\lib\zipfile.py in _EndRecData(fpin)
263 # Determine file size
--> 264 fpin.seek(0, 2)
265 filesize = fpin.tell()
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\util\deprecation.py in new_func(*args, **kwargs)
506 instructions)
--> 507 return func(*args, **kwargs)
508
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in seek(self, offset, whence, position)
166 elif whence == 2:
--> 167 offset += self.size()
168 else:
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in size(self)
101 """Returns the size of the file."""
--> 102 return stat(self.__name).length
103
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in stat(filename)
726 """
--> 727 return stat_v2(filename)
728
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_core\python\lib\io\file_io.py in stat_v2(path)
743 file_statistics = pywrap_tensorflow.FileStatistics()
--> 744 pywrap_tensorflow.Stat(compat.as_bytes(path), file_statistics)
745 return file_statistics
OutOfRangeError: C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip; Unknown error
During handling of the above exception, another exception occurred:
ExtractError Traceback (most recent call last)
<ipython-input-27-887fa0198611> in <module>
1 cocoBuilder = tfds.builder('coco')
2 info = cocoBuilder.info
----> 3 cocoBuilder.download_and_prepare()
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\api_utils.py in disallow_positional_args_dec(fn, instance, args, kwargs)
50 _check_no_positional(fn, args, ismethod, allowed=allowed)
51 _check_required(fn, kwargs)
---> 52 return fn(*args, **kwargs)
53
54 return disallow_positional_args_dec(wrapped) # pylint: disable=no-value-for-parameter
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\dataset_builder.py in download_and_prepare(self, download_dir, download_config)
285 self._download_and_prepare(
286 dl_manager=dl_manager,
--> 287 download_config=download_config)
288
289 # NOTE: If modifying the lines below to put additional information in
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\dataset_builder.py in _download_and_prepare(self, dl_manager, download_config)
946 super(GeneratorBasedBuilder, self)._download_and_prepare(
947 dl_manager=dl_manager,
--> 948 max_examples_per_split=download_config.max_examples_per_split,
949 )
950
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\dataset_builder.py in _download_and_prepare(self, dl_manager, **prepare_split_kwargs)
802 # Generating data for all splits
803 split_dict = splits_lib.SplitDict()
--> 804 for split_generator in self._split_generators(dl_manager):
805 if splits_lib.Split.ALL == split_generator.split_info.name:
806 raise ValueError(
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\image\coco.py in _split_generators(self, dl_manager)
237 root_url = 'http://images.cocodataset.org/'
238 extracted_paths = dl_manager.download_and_extract({
--> 239 key: root_url + url for key, url in urls.items()
240 })
241
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\download_manager.py in download_and_extract(self, url_or_urls)
357 with self._downloader.tqdm():
358 with self._extractor.tqdm():
--> 359 return _map_promise(self._download_extract, url_or_urls)
360
361 #property
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\download_manager.py in _map_promise(map_fn, all_inputs)
393 """Map the function into each element and resolve the promise."""
394 all_promises = utils.map_nested(map_fn, all_inputs) # Apply the function
--> 395 res = utils.map_nested(_wait_on_promise, all_promises)
396 return res
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in map_nested(function, data_struct, dict_only, map_tuple)
127 return {
128 k: map_nested(function, v, dict_only, map_tuple)
--> 129 for k, v in data_struct.items()
130 }
131 elif not dict_only:
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in <dictcomp>(.0)
127 return {
128 k: map_nested(function, v, dict_only, map_tuple)
--> 129 for k, v in data_struct.items()
130 }
131 elif not dict_only:
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\utils\py_utils.py in map_nested(function, data_struct, dict_only, map_tuple)
141 return tuple(mapped)
142 # Singleton
--> 143 return function(data_struct)
144
145
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\download_manager.py in _wait_on_promise(p)
377
378 def _wait_on_promise(p):
--> 379 return p.get()
380
381 else:
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in get(self, timeout)
508 target = self._target()
509 self._wait(timeout or DEFAULT_TIMEOUT)
--> 510 return self._target_settled_value(_raise=True)
511
512 def _target_settled_value(self, _raise=False):
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in _target_settled_value(self, _raise)
512 def _target_settled_value(self, _raise=False):
513 # type: (bool) -> Any
--> 514 return self._target()._settled_value(_raise)
515
516 _value = _reason = _target_settled_value
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in _settled_value(self, _raise)
222 if _raise:
223 raise_val = self._fulfillment_handler0
--> 224 reraise(type(raise_val), raise_val, self._traceback)
225 return self._fulfillment_handler0
226
~\.conda\envs\tf-tutorial\lib\site-packages\six.py in reraise(tp, value, tb)
694 if value.__traceback__ is not tb:
695 raise value.with_traceback(tb)
--> 696 raise value
697 finally:
698 value = None
~\.conda\envs\tf-tutorial\lib\site-packages\promise\promise.py in handle_future_result(future)
840 # type: (Any) -> None
841 try:
--> 842 resolve(future.result())
843 except Exception as e:
844 tb = exc_info()[2]
~\.conda\envs\tf-tutorial\lib\concurrent\futures\_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~\.conda\envs\tf-tutorial\lib\concurrent\futures\_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~\.conda\envs\tf-tutorial\lib\concurrent\futures\thread.py in run(self)
54
55 try:
---> 56 result = self.fn(*self.args, **self.kwargs)
57 except BaseException as exc:
58 self.future.set_exception(exc)
~\.conda\envs\tf-tutorial\lib\site-packages\tensorflow_datasets\core\download\extractor.py in _sync_extract(self, from_path, method, to_path)
92 except BaseException as err:
93 msg = 'Error while extracting %s to %s : %s' % (from_path, to_path, err)
---> 94 raise ExtractError(msg)
95 # `tf.io.gfile.Rename(overwrite=True)` doesn't work for non empty
96 # directories, so delete destination first, if it already exists.
ExtractError: Error while extracting C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip to C:\Users\%user%\tensorflow_datasets\downloads\extracted\ZIP.images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip : C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip; Unknown error
The message seems cryptic to me. The folder it is trying to extract to does not exist when the notebook is started; it is created by TensorFlow, and only at that command. I obviously tried deleting it completely and running again, to no effect.
The code that leads to the error is (everything runs fine until the last line):
# __future__ imports must be the first statement in the module/cell
from __future__ import absolute_import, division, print_function, unicode_literals

import tensorflow as tf
from tensorflow_examples.models.pix2pix import pix2pix
import tensorflow_datasets as tfds
from IPython.display import clear_output
import matplotlib.pyplot as plt

dataset, info = tfds.load('coco', with_info=True)
I also tried breaking the last command down into assigning the tfds.builder object and then running download_and_prepare, and again got the same error.
There is enough disk space: after the download there are still 50+ GB available, while the dataset is supposed to be 37 GB in its largest (2014) version.
I had a similar problem with Windows 10 and COCO 2017. My solution is simple: extract the ZIP file manually into the folder path given in the error message.
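A minimal sketch of that manual extraction with the standard library, using the source and destination paths from the error message (%user% is the placeholder from the question; substitute your actual account name):
import zipfile

src = r'C:\Users\%user%\tensorflow_datasets\downloads\images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip'
dst = r'C:\Users\%user%\tensorflow_datasets\downloads\extracted\ZIP.images.cocodataset.org_zips_train20147eQIfmQL3bpVDgkOrnAQklNLVUtCsFrDPwMAuYSzF3U.zip'

with zipfile.ZipFile(src) as zf:
    zf.extractall(dst)  # replace %user% with your user name before running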

Word2Vec error when loading in GoogleNews data

I am following a tutorial here: https://towardsdatascience.com/multi-class-text-classification-model-comparison-and-selection-5eb066197568
I am at the part "Word2vec and Logistic Regression". I have downloaded the "GoogleNews-vectors-negative300.bin.gz" file and I am trying to apply it to my own text data. However, when I get to the following code:
%%time
import gensim

wv = gensim.models.KeyedVectors.load_word2vec_format(
    "/data/users/USERS/File_path/classifier/GoogleNews_Embedding/GoogleNews-vectors-negative300.bin.gz",
    binary=True)
wv.init_sims(replace=True)
I run into the following error:
/data/users/msmith/env/lib64/python3.6/site-packages/smart_open/smart_open_lib.py:398: UserWarning: This function is deprecated, use smart_open.open instead. See the migration notes for details: https://github.com/RaRe-Technologies/smart_open/blob/master/README.rst#migrating-to-the-new-open-function
'See the migration notes for details: %s' % _MIGRATION_NOTES_URL
---------------------------------------------------------------------------
EOFError Traceback (most recent call last)
<timed exec> in <module>
~/env/lib64/python3.6/site-packages/gensim/models/keyedvectors.py in load_word2vec_format(cls, fname, fvocab, binary, encoding, unicode_errors, limit, datatype)
1492 return _load_word2vec_format(
1493 cls, fname, fvocab=fvocab, binary=binary, encoding=encoding, unicode_errors=unicode_errors,
-> 1494 limit=limit, datatype=datatype)
1495
1496 def get_keras_embedding(self, train_embeddings=False):
~/env/lib64/python3.6/site-packages/gensim/models/utils_any2vec.py in _load_word2vec_format(cls, fname, fvocab, binary, encoding, unicode_errors, limit, datatype)
383 with utils.ignore_deprecation_warning():
384 # TODO use frombuffer or something similar
--> 385 weights = fromstring(fin.read(binary_len), dtype=REAL).astype(datatype)
386 add_word(word, weights)
387 else:
/usr/lib64/python3.6/gzip.py in read(self, size)
274 import errno
275 raise OSError(errno.EBADF, "read() on write-only GzipFile object")
--> 276 return self._buffer.read(size)
277
278 def read1(self, size=-1):
/usr/lib64/python3.6/_compression.py in readinto(self, b)
66 def readinto(self, b):
67 with memoryview(b) as view, view.cast("B") as byte_view:
---> 68 data = self.read(len(byte_view))
69 byte_view[:len(data)] = data
70 return len(data)
/usr/lib64/python3.6/gzip.py in read(self, size)
480 break
481 if buf == b"":
--> 482 raise EOFError("Compressed file ended before the "
483 "end-of-stream marker was reached")
484
EOFError: Compressed file ended before the end-of-stream marker was reached
Any idea what's gone wrong or how to overcome this issue?
Thanks in advance!
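That EOFError from gzip means the archive ended before its end-of-stream marker, which almost always indicates a truncated or corrupted download; re-downloading the file is the usual fix. A minimal sketch for checking the archive's integrity before handing it to gensim (the path is the one from the question):
import gzip

path = "/data/users/USERS/File_path/classifier/GoogleNews_Embedding/GoogleNews-vectors-negative300.bin.gz"
try:
    with gzip.open(path, 'rb') as f:
        while f.read(1 << 20):  # stream through the archive in 1 MiB chunks
            pass
    print("archive is complete")
except EOFError:
    print("archive is truncated; re-download it")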

Opening a FITS file with Astropy results in FileNotFoundError

When I use
fits_datasweep_gal = fits.open('Macintosh HD/Users/lingxuan/Downloads/datasweep-index-gal.fits')
to open a FITS file in a Jupyter notebook, it returns:
FileNotFoundError Traceback (most recent call last)
<ipython-input-13-e5886f60eba2> in <module>
----> 1 fits_datasweep_gal = fits.open('Macintosh HD/Users/lingxuan/Downloads/datasweep-index-gal.fits')
~/anaconda3/lib/python3.7/site-packages/astropy/io/fits/hdu/hdulist.py in fitsopen(name, mode, memmap, save_backup, cache, lazy_load_hdus, **kwargs)
149
150 return HDUList.fromfile(name, mode, memmap, save_backup, cache,
--> 151 lazy_load_hdus, **kwargs)
152
153
~/anaconda3/lib/python3.7/site-packages/astropy/io/fits/hdu/hdulist.py in fromfile(cls, fileobj, mode, memmap, save_backup, cache, lazy_load_hdus, **kwargs)
388 return cls._readfrom(fileobj=fileobj, mode=mode, memmap=memmap,
389 save_backup=save_backup, cache=cache,
--> 390 lazy_load_hdus=lazy_load_hdus, **kwargs)
391
392 #classmethod
~/anaconda3/lib/python3.7/site-packages/astropy/io/fits/hdu/hdulist.py in _readfrom(cls, fileobj, data, mode, memmap, save_backup, cache, lazy_load_hdus, **kwargs)
1037 if not isinstance(fileobj, _File):
1038 # instantiate a FITS file object (ffo)
-> 1039 fileobj = _File(fileobj, mode=mode, memmap=memmap, cache=cache)
1040 # The Astropy mode is determined by the _File initializer if the
1041 # supplied mode was None
~/anaconda3/lib/python3.7/site-packages/astropy/utils/decorators.py in wrapper(*args, **kwargs)
501 # one with the name of the new argument to the function
502 kwargs[new_name[i]] = value
--> 503 return function(*args, **kwargs)
504
505 return wrapper
~/anaconda3/lib/python3.7/site-packages/astropy/io/fits/file.py in __init__(self, fileobj, mode, memmap, overwrite, cache)
176 self._open_fileobj(fileobj, mode, overwrite)
177 elif isinstance(fileobj, str):
--> 178 self._open_filename(fileobj, mode, overwrite)
179 else:
180 self._open_filelike(fileobj, mode, overwrite)
~/anaconda3/lib/python3.7/site-packages/astropy/io/fits/file.py in _open_filename(self, filename, mode, overwrite)
553
554 if not self._try_read_compressed(self.name, magic, mode, ext=ext):
--> 555 self._file = fileobj_open(self.name, IO_FITS_MODES[mode])
556 self.close_on_error = True
557
~/anaconda3/lib/python3.7/site-packages/astropy/io/fits/util.py in fileobj_open(filename, mode)
386 """
387
--> 388 return open(filename, mode, buffering=0)
389
390
FileNotFoundError: [Errno 2] No such file or directory: 'Macintosh HD/Users/lingxuan/Downloads/datasweep-index-gal.fits'
What should I do?
Remove the Macintosh HD part from the path:
fits_datasweep_gal = fits.open('/Users/lingxuan/Downloads/datasweep-index-gal.fits')
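"Macintosh HD" is the volume name shown in Finder, not part of the POSIX path, which is why open() cannot find the file. To avoid hard-coding the home directory at all, the path can also be expanded; a small sketch, assuming the file sits in your Downloads folder:
import os
from astropy.io import fits

path = os.path.expanduser('~/Downloads/datasweep-index-gal.fits')
fits_datasweep_gal = fits.open(path)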

KeyError: "filename 'storages' not found"

I'm trying to load pre-trained model weights using this line:
state_dict = torch.load('models/seq_to_txt_state_7.tar')
and I'm getting:
KeyError Traceback (most recent call last)
<ipython-input-30-3f7b5be8fc72> in <module>()
----> 1 state_dict = torch.load('models/seq_to_txt_state_7.tar')
/home/arash/venvs/marzieh_env/local/lib/python2.7/site-packages/torch/serialization.pyc in load(f, map_location, pickle_module)
365 f = open(f, 'rb')
366 try:
--> 367 return _load(f, map_location, pickle_module)
368 finally:
369 if new_fd:
/home/arash/venvs/marzieh_env/local/lib/python2.7/site-packages/torch/serialization.pyc in _load(f, map_location, pickle_module)
521 # only if offset is zero we can attempt the legacy tar file loader
522 try:
--> 523 return legacy_load(f)
524 except tarfile.TarError:
525 # if not a tarfile, reset file offset and proceed
/home/arash/venvs/marzieh_env/local/lib/python2.7/site-packages/torch/serialization.pyc in legacy_load(f)
448 mkdtemp() as tmpdir:
449
--> 450 tar.extract('storages', path=tmpdir)
451 with open(os.path.join(tmpdir, 'storages'), 'rb', 0) as f:
452 num_storages = pickle_module.load(f)
/usr/lib/python2.7/tarfile.pyc in extract(self, member, path)
2107
2108 if isinstance(member, basestring):
-> 2109 tarinfo = self.getmember(member)
2110 else:
2111 tarinfo = member
/usr/lib/python2.7/tarfile.pyc in getmember(self, name)
1827 tarinfo = self._getmember(name)
1828 if tarinfo is None:
-> 1829 raise KeyError("filename %r not found" % name)
1830 return tarinfo
1831
KeyError: "filename 'storages' not found"
I'm using Python 2.7 on Ubuntu 18.
In addition, the model was saved with this function in the first place:
def save_state(enc, dec, enc_optim, dec_optim, dec_idx_to_word, dec_word_to_idx, epoch):
    state = {'enc': enc.state_dict(), 'dec': dec.state_dict(),
             'enc_optim': enc_optim.state_dict(), 'dec_optim': dec_optim.state_dict(),
             'dec_idx_to_word': dec_idx_to_word, 'dec_word_to_idx': dec_word_to_idx}
    torch.save(state, epoch_to_save_path(epoch))
@reportgunner is right. The model file was corrupted. End of the message!
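For anyone hitting the same KeyError: a quick way to sanity-check such a checkpoint before torch.load is to inspect it with the standard library. A sketch; older PyTorch wrote tar-based checkpoints (hence the loader looking for a 'storages' member), while newer versions write zip archives:
import tarfile
import zipfile

path = 'models/seq_to_txt_state_7.tar'
if tarfile.is_tarfile(path):
    with tarfile.open(path) as archive:
        print('tar members: %s' % archive.getnames())  # a legacy checkpoint should include 'storages'
elif zipfile.is_zipfile(path):
    with zipfile.ZipFile(path) as archive:
        print('zip members: %s' % archive.namelist())
else:
    print('neither tar nor zip; the file is likely corrupted')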
