I am facing the following error
FileNotFoundError: [WinError 3] The system cannot find the path specified: '/tmp/5pido2dr.h5'
when trying to run keras-vis. Here is the code block causing the error:
layer_idx = utils.find_layer_idx(model, 'dense_2')  # after naming the layers
model.layers[layer_idx].activation = activations.softmax
model = utils.apply_modifications(model)

for class_idx in np.arange(10):
    indices = np.where(y_train[:, class_idx] == 1.)[0]
    idx = indices[0]
    grads = visualize_saliency(model, layer_idx, filter_indices=class_idx,
                               seed_input=X_train[idx],
                               backprop_modifier='guided')
    f, ax = plt.subplots(1, 2)
    ax[0].imshow(X_train[idx][..., 0])
    ax[1].imshow(grads, cmap='jet')
    savename = "layer_activations" + str(class_idx) + ".png"
    f.savefig(savename)
and the complete error output:
OSError Traceback (most recent call last)
c:\python36\lib\site-packages\vis\utils\utils.py in apply_modifications(model)
122 try:
--> 123 model.save(model_path)
124 return load_model(model_path)
c:\python36\lib\site-packages\keras\engine\network.py in save(self, filepath, overwrite, include_optimizer)
1084 from ..models import save_model
-> 1085 save_model(self, filepath, overwrite, include_optimizer)
1086
c:\python36\lib\site-packages\keras\engine\saving.py in save_model(model, filepath, overwrite, include_optimizer)
104
--> 105 f = h5py.File(filepath, mode='w')
106 opened_new_file = True
c:\python36\lib\site-packages\h5py\_hl\files.py in __init__(self, name, mode, driver, libver, userblock_size, swmr, **kwds)
311 fapl = make_fapl(driver, libver, **kwds)
--> 312 fid = make_fid(name, mode, userblock_size, fapl, swmr=swmr)
313
c:\python36\lib\site-packages\h5py\_hl\files.py in make_fid(name, mode, userblock_size, fapl, fcpl, swmr)
147 elif mode == 'w':
--> 148 fid = h5f.create(name, h5f.ACC_TRUNC, fapl=fapl, fcpl=fcpl)
149 elif mode == 'a':
h5py\_objects.pyx in h5py._objects.with_phil.wrapper()
h5py\_objects.pyx in h5py._objects.with_phil.wrapper()
h5py\h5f.pyx in h5py.h5f.create()
OSError: Unable to create file (unable to open file: name = '/tmp/5pido2dr.h5', errno = 2, error message = 'No such file or directory', flags = 13, o_flags = 302)
During handling of the above exception, another exception occurred:
FileNotFoundError Traceback (most recent call last)
<ipython-input-20-df7d3b5036c3> in <module>()
1 layer_idx = utils.find_layer_idx(model, 'dense_2') #after naming the layers
2 model.layers[layer_idx].activation = activations.softmax
----> 3 model = utils.apply_modifications(model)
4
5 for class_idx in np.arange(10):
c:\python36\lib\site-packages\vis\utils\utils.py in apply_modifications(model)
124 return load_model(model_path)
125 finally:
--> 126 os.remove(model_path)
127
128
FileNotFoundError: [WinError 3] The system cannot find the path specified: '/tmp/5pido2dr.h5'
There is an OSError preceding it, which appears to be the primary cause, but I am not sure why it arises; the .h5 filename changes on every run. From my research so far, suggestions are that this could be an environment-variable issue requiring a reinstall, but I am not sure of the right way to debug it. I am using Windows 10, with Python 3.6.5 and Jupyter 4.4.0.
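In the meantime, here is a minimal sketch of the workaround I am considering, assuming the failure comes from keras-vis saving to a hard-coded '/tmp/' path that does not exist on Windows (the function name and temp filename below are my own placeholders, not part of keras-vis):

import os
import tempfile
from keras.models import load_model

def apply_modifications_portable(model):
    # Save to the platform's temp directory instead of '/tmp/', then
    # reload the model so the activation change takes effect.
    model_path = os.path.join(tempfile.gettempdir(), 'temp_keras_vis_model.h5')
    try:
        model.save(model_path)
        return load_model(model_path)
    finally:
        if os.path.exists(model_path):
            os.remove(model_path)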
I read some audio files, labeled them, and saved the path and emotion of each audio file in a CSV file. Now I want to read their paths from the file and open them, but I get this error:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
File ~\.conda\envs\nhashemi\lib\site-packages\librosa\core\audio.py:155, in load(path, sr, mono, offset, duration, dtype, res_type)
153 else:
154 # Otherwise, create the soundfile object
--> 155 context = sf.SoundFile(path)
157 with context as sf_desc:
File ~\.conda\envs\nhashemi\lib\site-packages\soundfile.py:629, in SoundFile.__init__(self, file, mode, samplerate, channels, subtype, endian, format, closefd)
627 self._info = _create_info_struct(file, mode, samplerate, channels,
628 format, subtype, endian)
--> 629 self._file = self._open(file, mode_int, closefd)
630 if set(mode).issuperset('r+') and self.seekable():
631 # Move write position to 0 (like in Python file objects)
File ~\.conda\envs\nhashemi\lib\site-packages\soundfile.py:1183, in SoundFile._open(self, file, mode_int, closefd)
1182 raise TypeError("Invalid file: {0!r}".format(self.name))
-> 1183 _error_check(_snd.sf_error(file_ptr),
1184 "Error opening {0!r}: ".format(self.name))
1185 if mode_int == _snd.SFM_WRITE:
1186 # Due to a bug in libsndfile version <= 1.0.25, frames != 0
1187 # when opening a named pipe in SFM_WRITE mode.
1188 # See http://github.com/erikd/libsndfile/issues/77.
File ~\.conda\envs\nhashemi\lib\site-packages\soundfile.py:1357, in _error_check(err, prefix)
1356 err_str = _snd.sf_error_number(err)
-> 1357 raise RuntimeError(prefix + _ffi.string(err_str).decode('utf-8', 'replace'))
RuntimeError: Error opening 'C:/Users/external_dipf/Documents/Dataset/CREMA/AudioWAV/1001_IEO_FEA_HI.wav': File contains data in an unknown format.
During handling of the above exception, another exception occurred:
NoBackendError Traceback (most recent call last)
Input In [553], in <cell line: 3>()
1 emotion='fear'
2 path = np.array(data_path.Path[data_path.Emotions==emotion])[1]
----> 3 data, sampling_rate = librosa.load(path)
4 create_waveplot(data, sampling_rate, emotion)
5 create_spectrogram(data, sampling_rate, emotion)
File ~\.conda\envs\nhashemi\lib\site-packages\librosa\util\decorators.py:88, in deprecate_positional_args.<locals>._inner_deprecate_positional_args.<locals>.inner_f(*args, **kwargs)
86 extra_args = len(args) - len(all_args)
87 if extra_args <= 0:
---> 88 return f(*args, **kwargs)
90 # extra_args > 0
91 args_msg = [
92 "{}={}".format(name, arg)
93 for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:])
94 ]
File ~\.conda\envs\nhashemi\lib\site-packages\librosa\core\audio.py:174, in load(path, sr, mono, offset, duration, dtype, res_type)
172 if isinstance(path, (str, pathlib.PurePath)):
173 warnings.warn("PySoundFile failed. Trying audioread instead.", stacklevel=2)
--> 174 y, sr_native = __audioread_load(path, offset, duration, dtype)
175 else:
176 raise (exc)
File ~\.conda\envs\nhashemi\lib\site-packages\librosa\core\audio.py:198, in __audioread_load(path, offset, duration, dtype)
192 """Load an audio buffer using audioread.
193
194 This loads one block at a time, and then concatenates the results.
195 """
197 y = []
--> 198 with audioread.audio_open(path) as input_file:
199 sr_native = input_file.samplerate
200 n_channels = input_file.channels
File ~\.conda\envs\nhashemi\lib\site-packages\audioread\__init__.py:116, in audio_open(path, backends)
113 pass
115 # All backends failed!
--> 116 raise NoBackendError()
NoBackendError:
Here is my code to label each file and record its emotion:
CREMA = "C:/Users/external_dipf/Documents/Dataset/CREMA/AudioWAV/"
crema_directory_list = os.listdir(CREMA)

file_emotion = []
file_path = []

for file in crema_directory_list:
    # storing file paths
    file_path.append(CREMA + file)
    # storing file emotions
    part = file.split('_')
    if part[2] == 'SAD':
        file_emotion.append('sad')
    elif part[2] == 'ANG':
        file_emotion.append('angry')
    elif part[2] == 'DIS':
        file_emotion.append('disgust')
    elif part[2] == 'FEA':
        file_emotion.append('fear')
    elif part[2] == 'HAP':
        file_emotion.append('happy')
    elif part[2] == 'NEU':
        file_emotion.append('neutral')
    else:
        file_emotion.append('Unknown')

# dataframe for emotion of files
emotion_df = pd.DataFrame(file_emotion, columns=['Emotions'])

# dataframe for path of files
path_df = pd.DataFrame(file_path, columns=['Path'])
CREMA_df = pd.concat([emotion_df, path_df], axis=1)
CREMA_df.head()
Here is where I save them in a CSV file:
data_path = pd.concat([CREMA_df, RAVDESS_df, TESS_df, SAVEE_df], axis = 0)
data_path.to_csv("data_path.csv",index=False)
data_path.head()
And here I am trying to read the file back; the error is related to the CREMA dataset:
emotion='fear'
path = np.array(data_path.Path[data_path.Emotions==emotion])[1]
data, sampling_rate = librosa.load(path)
create_waveplot(data, sampling_rate, emotion)
create_spectrogram(data, sampling_rate, emotion)
Audio(path)
I checked the file path and everything was correct; I can open other WAV files. My librosa version is 0.9.1.
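To narrow this down, here is a small diagnostic sketch (the path is the failing file from the traceback above): it reads the raw header bytes to check whether the file really is a RIFF/WAVE file.

# A valid WAV file has b'RIFF' at bytes 0-3 and b'WAVE' at bytes 8-11;
# anything else would explain the "unknown format" error.
path = "C:/Users/external_dipf/Documents/Dataset/CREMA/AudioWAV/1001_IEO_FEA_HI.wav"
with open(path, "rb") as f:
    header = f.read(12)
print(header[:4], header[8:12])  # expect b'RIFF' b'WAVE'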
On my Windows machine,
pathlib.Path('R:/人')
has no problem to give
WindowsPath('R:/人')
However,
pathlib.Path('R:/人').resolve()
gives
---------------------------------------------------------------------------
OSError                                   Traceback (most recent call last)
 in
----> 1 pathlib.Path('R:/人').resolve()

~\anaconda3\lib\pathlib.py in resolve(self, strict)
   1178         if self._closed:
   1179             self._raise_closed()
-> 1180         s = self._flavour.resolve(self, strict=strict)
   1181         if s is None:
   1182             # No symlink resolution => for consistency, raise an error if

~\anaconda3\lib\pathlib.py in resolve(self, path, strict)
    203             while True:
    204                 try:
--> 205                     s = self._ext_to_normal(_getfinalpathname(s))
    206                 except FileNotFoundError:
    207                     previous_s = s

OSError: [WinError 1] 函数不正确。: 'R:\人'
(The localized message 函数不正确 means "Incorrect function".)
What is the problem with resolve in pathlib?
On the other hand,
pathlib.Path('R:/人').absolute()
correctly gives
WindowsPath('R:/人')
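For now, a defensive fallback sketch, assuming I only need an absolute path rather than full symlink resolution:

import pathlib

p = pathlib.Path('R:/人')
try:
    # resolve() goes through _getfinalpathname (see the traceback above),
    # which some drives apparently do not support.
    resolved = p.resolve()
except OSError:
    # absolute() only prepends the current directory and never touches
    # the filesystem, so it avoids WinError 1 here.
    resolved = p.absolute()
print(resolved)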
I updated billiard, celery, kombu, and amqp: nothing worked. Please help me resolve this. I am trying to use https://wntr.readthedocs.io/
OSError Traceback (most recent call last)
<ipython-input-9-6ccee6a8a438> in <module>
1 # Simulate hydraulics
2 sim = wntr.sim.EpanetSimulator(wn)
----> 3 results = sim.run_sim()
/opt/anaconda3/lib/python3.8/site-packages/wntr/sim/epanet.py in run_sim(self, file_prefix, save_hyd, use_hyd, hydfile, version)
94 inpfile = file_prefix + '.inp'
95 self._wn.write_inpfile(inpfile, units=self._wn.options.hydraulic.inpfile_units, version=version)
---> 96 enData = wntr.epanet.toolkit.ENepanet(version=version)
97 rptfile = file_prefix + '.rpt'
98 outfile = file_prefix + '.bin'
/opt/anaconda3/lib/python3.8/site-packages/wntr/epanet/toolkit.py in __init__(self, inpfile, rptfile, binfile, version)
155 except Exception as E1:
156 if lib == libnames[-1]:
--> 157 raise E1
158 pass
159 return
/opt/anaconda3/lib/python3.8/site-packages/wntr/epanet/toolkit.py in __init__(self, inpfile, rptfile, binfile, version)
148 elif sys.platform in ['darwin']:
149 libepanet = resource_filename(epanet_toolkit,'Darwin/lib%s.dylib' % lib)
--> 150 self.ENlib = ctypes.cdll.LoadLibrary(libepanet)
151 else:
152 libepanet = resource_filename(epanet_toolkit,'Linux/lib%s.so' % lib)
/opt/anaconda3/lib/python3.8/ctypes/__init__.py in LoadLibrary(self, name)
457
458 def LoadLibrary(self, name):
--> 459 return self._dlltype(name)
460
461 cdll = LibraryLoader(CDLL)
/opt/anaconda3/lib/python3.8/ctypes/__init__.py in __init__(self, name, mode, handle, use_errno, use_last_error, winmode)
379
380 if handle is None:
--> 381 self._handle = _dlopen(self._name, mode)
382 else:
383 self._handle = handle
OSError: dlopen(/opt/anaconda3/lib/python3.8/site-packages/wntr/epanet/Darwin/libepanet22_win32.dylib, 6): image not found
Everything worked earlier. I am using macOS High Sierra 10.13.6.
I came across the same issue with both the OWA-epanet and epanet-toolkit Python libraries.
I solved this by putting the EPANET .dylib file in ~/lib/.
This way macOS is able to find it without messing with the standard dylib paths.
The other option is to place it within your virtual environment folder.
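Here is a sketch of that fix in Python; the source directory is an assumption based on the traceback above, so adjust it to your install:

import glob
import os
import shutil

# Copy the EPANET dylibs bundled with wntr into ~/lib, where the
# macOS loader can find them.
src_dir = "/opt/anaconda3/lib/python3.8/site-packages/wntr/epanet/Darwin"
dst_dir = os.path.expanduser("~/lib")
os.makedirs(dst_dir, exist_ok=True)
for lib in glob.glob(os.path.join(src_dir, "*.dylib")):
    shutil.copy(lib, dst_dir)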
I tried to fine-tune VGG16 on my dataset, but I am stuck on an error when opening the h5py file of the VGG16 weights. I don't understand what this error means:
OSError: Unable to open file (Truncated file: eof = 221184, sblock->base_addr = 0, stored_eoa = 58889256)
Does anyone know how to fix it? Thanks.
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-3-6059faca8ed7> in <module>()
9 K.set_session(sess)
10 input_tensor=Input(shape=(h,w,ch))
---> 11 base_model=VGG16(input_tensor=input_tensor, include_top=False)
12 x_img=base_model.output
13 x_img=AveragePooling2D((7,7))(x_img)
/Users/simin/anaconda/envs/IntroToTensorFlow/lib/python3.5/site-packages/keras/applications/vgg16.py in VGG16(include_top, weights, input_tensor)
144 TF_WEIGHTS_PATH_NO_TOP,
145 cache_subdir='models')
--> 146 model.load_weights(weights_path)
147 if K.backend() == 'theano':
148 convert_all_kernels_in_model(model)
/Users/simin/anaconda/envs/IntroToTensorFlow/lib/python3.5/site-packages/keras/engine/topology.py in load_weights(self, filepath, by_name)
2492 '''
2493 import h5py
-> 2494 f = h5py.File(filepath, mode='r')
2495 if 'layer_names' not in f.attrs and 'model_weights' in f:
2496 f = f['model_weights']
/Users/simin/anaconda/envs/IntroToTensorFlow/lib/python3.5/site-packages/h5py/_hl/files.py in __init__(self, name, mode, driver, libver, userblock_size, swmr, **kwds)
270
271 fapl = make_fapl(driver, libver, **kwds)
--> 272 fid = make_fid(name, mode, userblock_size, fapl, swmr=swmr)
273
274 if swmr_support:
/Users/simin/anaconda/envs/IntroToTensorFlow/lib/python3.5/site-packages/h5py/_hl/files.py in make_fid(name, mode, userblock_size, fapl, fcpl, swmr)
90 if swmr and swmr_support:
91 flags |= h5f.ACC_SWMR_READ
---> 92 fid = h5f.open(name, flags, fapl=fapl)
93 elif mode == 'r+':
94 fid = h5f.open(name, h5f.ACC_RDWR, fapl=fapl)
h5py/_objects.pyx in h5py._objects.with_phil.wrapper (/Users/ilan/minonda/conda-bld/work/h5py/_objects.c:2696)()
h5py/_objects.pyx in h5py._objects.with_phil.wrapper (/Users/ilan/minonda/conda-bld/work/h5py/_objects.c:2654)()
h5py/h5f.pyx in h5py.h5f.open (/Users/ilan/minonda/conda-bld/work/h5py/h5f.c:1942)()
OSError: Unable to open file (Truncated file: eof = 221184, sblock->base_addr = 0, stored_eoa = 58889256)
There is a possibility that the download of the file failed.
Replacing the file that failed to open with the corresponding file from the following page may resolve it:
https://github.com/fchollet/deep-learning-models/releases
In my case, the file was at the following path:
C:\Users\MyName\.keras\models\vgg16_weights_tf_dim_ordering_tf_kernels.h5
I replaced it, and that solved it.
This happens because the last file download failed, but the bad file remains at the file path, so you have to find the bad file and delete it.
On a Unix system you can locate it with find / -name 'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', then delete it and try again. Good luck!
It is probably because the file download failed or the file is corrupt.
I found those corrupted files in /tmp/.keras/models/vggface/.
My system is Ubuntu 20.04.
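A small cleanup sketch, assuming the standard Keras cache location; the expected full size, 58889256 bytes, is the stored_eoa value from the traceback above:

import os

# Remove the truncated weights file so Keras re-downloads it on the
# next VGG16(...) call. Adjust the path if your cache lives elsewhere.
cache = os.path.expanduser(
    "~/.keras/models/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5")
if os.path.exists(cache):
    print("cached size:", os.path.getsize(cache))  # full file should be 58889256 bytes
    os.remove(cache)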
import numpy as np
import h5py

with h5py.File("testfile.hdf5", "w-") as f:
    arr = np.ones((5,2))
    f["my dataset"] = arr
    dset = f["my dataset"]
This code runs correctly the first time, but when run a second time, returns the following error:
%run "C:\Users\James\Google Drive\Python Scripts\Python and HDF5\Chapter3.py"
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
C:\Users\James\Google Drive\Python Scripts\Python and HDF5\Chapter3.py in <module>()
      6 with h5py.File("testfile.hdf5") as f:
      7     arr = np.ones((5,2))
----> 8     f["my dataset"] = arr
      9     dset = f["my dataset"]
     10

h5py\_objects.pyx in h5py._objects.with_phil.wrapper (C:\pisi\tmp\h5py-2.6.0-2\work\h5py-2.6.0\h5py\_objects.c:2696)()

h5py\_objects.pyx in h5py._objects.with_phil.wrapper (C:\pisi\tmp\h5py-2.6.0-2\work\h5py-2.6.0\h5py\_objects.c:2654)()

C:\Users\James\AppData\Local\Enthought\Canopy\User\lib\site-packages\h5py\_hl\group.py in __setitem__(self, name, obj)
    291         else:
    292             ds = self.create_dataset(None, data=obj, dtype=base.guess_dtype(obj))
--> 293             h5o.link(ds.id, self.id, name, lcpl=lcpl)
    294
    295     #with_phil

h5py\_objects.pyx in h5py._objects.with_phil.wrapper (C:\pisi\tmp\h5py-2.6.0-2\work\h5py-2.6.0\h5py\_objects.c:2696)()

h5py\_objects.pyx in h5py._objects.with_phil.wrapper (C:\pisi\tmp\h5py-2.6.0-2\work\h5py-2.6.0\h5py\_objects.c:2654)()

h5py\h5o.pyx in h5py.h5o.link (C:\pisi\tmp\h5py-2.6.0-2\work\h5py-2.6.0\h5py\h5o.c:3610)()

RuntimeError: Unable to create link (Name already exists)
%run "C:\Users\James\Google Drive\Python Scripts\Python and HDF5\Chapter3.py"
---------------------------------------------------------------------------
IOError                                   Traceback (most recent call last)
C:\Users\James\Google Drive\Python Scripts\Python and HDF5\Chapter3.py in <module>()
      4 from timeit import timeit
      5
----> 6 with h5py.File("testfile.hdf5", "w-") as f:
      7     arr = np.ones((5,2))
      8     f["my dataset"] = arr

C:\Users\James\AppData\Local\Enthought\Canopy\User\lib\site-packages\h5py\_hl\files.py in __init__(self, name, mode, driver, libver, userblock_size, swmr, **kwds)
    270
    271         fapl = make_fapl(driver, libver, **kwds)
--> 272         fid = make_fid(name, mode, userblock_size, fapl, swmr=swmr)
    273
    274         if swmr_support:

C:\Users\James\AppData\Local\Enthought\Canopy\User\lib\site-packages\h5py\_hl\files.py in make_fid(name, mode, userblock_size, fapl, fcpl, swmr)
     94         fid = h5f.open(name, h5f.ACC_RDWR, fapl=fapl)
     95     elif mode in ['w-', 'x']:
---> 96         fid = h5f.create(name, h5f.ACC_EXCL, fapl=fapl, fcpl=fcpl)
     97     elif mode == 'w':
     98         fid = h5f.create(name, h5f.ACC_TRUNC, fapl=fapl, fcpl=fcpl)

h5py\_objects.pyx in h5py._objects.with_phil.wrapper (C:\pisi\tmp\h5py-2.6.0-2\work\h5py-2.6.0\h5py\_objects.c:2696)()

h5py\_objects.pyx in h5py._objects.with_phil.wrapper (C:\pisi\tmp\h5py-2.6.0-2\work\h5py-2.6.0\h5py\_objects.c:2654)()

h5py\h5f.pyx in h5py.h5f.create (C:\pisi\tmp\h5py-2.6.0-2\work\h5py-2.6.0\h5py\h5f.c:2109)()

IOError: Unable to create file (Unable to open file: name = 'testfile.hdf5', errno = 17, error message = 'file exists', flags = 15, o_flags = 502)
The code and error above are from Canopy with Python 3.5. I also ran it in Spyder and received the same result. I also tried using
with h5py.File("testfile.hdf5", "a") as f:
with no success.
I encountered the exact same error message when using the HDF5Matrix class in Keras (v2.2.2). However, I failed to find a mature solution that completely avoids this error when multiple training processes all need to access the same HDF5 data on disk. Only one process could successfully access the HDF5 data, while all the others reported the same error, even after I changed the reading mode from the default 'r+' to 'r'. I gave up and used the workable solution of keeping multiple copies of the HDF5 data, one copy per training process.
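A minimal sketch of that copy-per-process workaround (the file names and process count are placeholders):

import shutil

# Give each training process its own copy of the data so the processes
# never open the same HDF5 file concurrently.
n_processes = 4
for rank in range(n_processes):
    shutil.copy("train_data.h5", "train_data_rank%d.h5" % rank)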
As per http://docs.h5py.org/en/latest/high/file.html, the w- mode is designed to cause the open operation to fail if the file already exists.
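If the goal is a script that can be re-run cleanly, one option (a sketch, not the only fix) is append mode combined with require_dataset, which reuses an existing dataset instead of raising:

import numpy as np
import h5py

# 'a' creates the file if missing and opens it read/write otherwise.
with h5py.File("testfile.hdf5", "a") as f:
    arr = np.ones((5, 2))
    # require_dataset returns the existing dataset when the shape and
    # dtype match, instead of failing with "Name already exists".
    dset = f.require_dataset("my dataset", shape=arr.shape, dtype=arr.dtype)
    dset[...] = arr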