File not found: archive/constants.pkl (PyTorch, Python)

I'm getting an error while preparing the list of operators of my serialized TorchScript model. What is the issue here? Is it happening when the model is loaded?
Python code
# Dump the list of operators used by MobileNetV2:
import torch, yaml

root = '/content/drive/My Drive/Monitoring/'
model = torch.jit.load(root + 'model.pt')
ops = torch.jit.export_opnames(model)
with open('MobileNetV2.yaml', 'w') as output:
    yaml.dump(ops, output)
Stack Trace
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-39-8b61c35fb898> in <module>()
3
4 root = '/content/drive/My Drive/Monitoring/'
----> 5 model = torch.jit.load(root+'model.pt')
6 ops = torch.jit.export_opnames(model)
7 with open('MobileNetV2.yaml', 'w') as output:
/usr/local/lib/python3.6/dist-packages/torch/jit/_serialization.py in load(f, map_location, _extra_files)
159 cu = torch._C.CompilationUnit()
160 if isinstance(f, str) or isinstance(f, pathlib.Path):
--> 161 cpp_module = torch._C.import_ir_module(cu, f, map_location, _extra_files)
162 else:
163 cpp_module = torch._C.import_ir_module_from_buffer(
RuntimeError: [enforce fail at inline_container.cc:222] . file not found: archive/constants.pkl
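This error usually means the file being loaded is not a TorchScript archive at all, for example a plain state_dict saved with torch.save() rather than a scripted module saved with torch.jit.save(). A minimal sketch of producing an archive that torch.jit.load() accepts, assuming torchvision's MobileNetV2 stands in for the model (the model and file name here are illustrative, not taken from the question):
import torch
import torchvision

# Assumption: any nn.Module works the same way; MobileNetV2 is just an example.
model = torchvision.models.mobilenet_v2(pretrained=True).eval()

# Script (or trace) the model so it becomes a TorchScript module...
scripted = torch.jit.script(model)

# ...and save it with torch.jit.save, which writes the zip archive
# (including archive/constants.pkl) that torch.jit.load expects.
torch.jit.save(scripted, 'model.pt')

reloaded = torch.jit.load('model.pt')
print(torch.jit.export_opnames(reloaded))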

Related

PAYG tokens - error messages (on both Disco and Stable Diffusion)

I signed up for some pay-as-you-go credit, and to my dismay I now receive this error on both Disco AND Stable Diffusion:
FileNotFoundError Traceback (most recent call last)
<ipython-input-4-191981527364> in <module>
38 import py3d_tools as p3d
39
---> 40 from helpers import DepthModel, sampler_fn
41 from k_diffusion.external import CompVisDenoiser
42 from ldm.util import instantiate_from_config
4 frames
/content/MiDaS/midas/backbones/next_vit.py in <module>
6 from .utils import activations, forward_default, get_activation
7
----> 8 file = open("./externals/Next_ViT/classification/nextvit.py", "r")
9 source_code = file.read().replace(" utils", " externals.Next_ViT.classification.utils")
10 exec(source_code)
FileNotFoundError: [Errno 2] No such file or directory: './externals/Next_ViT/classification/nextvit.py'
I also receive this error on Stable:
NameError Traceback (most recent call last)
<ipython-input-5-d64464a7a6a5> in <module>
154 if load_on_run_all and ckpt_valid:
155 local_config = OmegaConf.load(f"{ckpt_config_path}")
--> 156 model = load_model_from_config(local_config, f"{ckpt_path}", half_precision=half_precision)
157 device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
158 model = model.to(device)
<ipython-input-5-d64464a7a6a5> in load_model_from_config(config, ckpt, verbose, device, half_precision)
136 print(f"Global Step: {pl_sd['global_step']}")
137 sd = pl_sd["state_dict"]
--> 138 model = instantiate_from_config(config.model)
139 m, u = model.load_state_dict(sd, strict=False)
140 if len(m) > 0 and verbose:
Despite clearing my Google Drive and reloading Disco and Stable Diffusion (including the .ckpt file, placed correctly in the models folder as before), the same errors occur.
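The first traceback fails on a relative path, so whether ./externals/Next_ViT/classification/nextvit.py resolves depends on the notebook's working directory and on whether the Next_ViT sources were actually downloaded into the MiDaS checkout. A small diagnostic sketch, assuming the Colab layout shown in the traceback (the /content/MiDaS path comes from the error message and is not otherwise verified):
import os

midas_root = '/content/MiDaS'  # assumption: MiDaS clone location from the traceback
needed = os.path.join(midas_root, 'externals/Next_ViT/classification/nextvit.py')

print('working directory:', os.getcwd())
print('expected file:', needed)
print('exists:', os.path.exists(needed))
If the file is missing, the Next_ViT external was never fetched, which points at the notebook's setup cells rather than at the .ckpt file or the Drive contents.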

How can I resolve this KeyError in this section of my program?

I have a problem with this section of my code, which deals with the diffusion analysis of materials:
from pymatgen.analysis.diffusion.analyzer import (
    DiffusionAnalyzer,
    fit_arrhenius,
    get_conversion_factor,
)
import json
from pymatgen.analysis.diffusion.aimd.van_hove import VanHoveAnalysis
%matplotlib inline

data = json.load(open(".../py.pro/mp-1138_LiF.json", "r"))
new_obj = DiffusionAnalyzer.from_dict(data)
vhfunc = VanHoveAnalysis(diffusion_analyzer=new_obj, avg_nsteps=5, ngrid=101, rmax=10.0,
                         step_skip=5, sigma=0.1, species=["Li", "F"])
vhfunc.get_3d_plot(mode="self")
vhfunc.get_3d_plot(mode="distinct")
and the KeyError traceback says this:
KeyError Traceback (most recent call last)
<ipython-input-4-17584fdb7b1f> in <module>
3 data = json.load(open("J:/py.pro/mp-1138_LiF.json", "r"))
4
----> 5 new_obj = DiffusionAnalyzer.from_dict(data)
6
7 vhfunc = VanHoveAnalysis(diffusion_analyzer=new_obj, avg_nsteps=5, ngrid=101, rmax=10.0,
~\Anaconda3\lib\site-packages\pymatgen\analysis\diffusion\analyzer.py in from_dict(cls, d)
766 return cls(
767 structure,
--> 768 np.array(d["displacements"]),
769 specie=d["specie"],
770 temperature=d["temperature"],
KeyError: 'displacements'
How can this be resolved? Is it related to my program?
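DiffusionAnalyzer.from_dict expects the dictionary produced by DiffusionAnalyzer.as_dict(), and the traceback shows it reaching for keys such as 'structure', 'displacements', 'specie', and 'temperature'. The KeyError therefore suggests the JSON file does not contain a serialized DiffusionAnalyzer. A quick check, as a minimal sketch reusing the file path from the question:
import json

# Assumption: same file as in the question; the elided path is illustrative.
with open(".../py.pro/mp-1138_LiF.json", "r") as f:
    data = json.load(f)

# See what the file actually contains before passing it to from_dict.
print(type(data))
if isinstance(data, dict):
    print(sorted(data.keys()))
If 'displacements' is not among the keys, the JSON was not written by DiffusionAnalyzer.as_dict() and the analyzer has to be rebuilt from the original molecular dynamics run.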

AttributeError: type object 'TFLiteConverterV2' has no attribute 'from_frozen_gragh'

I was trying to convert my PyTorch model into TensorFlow Lite for mobile. My model was a pre-trained DenseNet-169, so I did this:
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import onnx
from collections import OrderedDict
import tensorflow as tf
from torch.autograd import Variable
from onnx_tf.backend import prepare
dummy_input = Variable(torch.randn(32, 3, 224, 224))
torch.onnx.export(trained_model, dummy_input, "mymodel.onnx")
model = onnx.load("mymodel.onnx")
tf_rep = prepare(model)
print('inputs:', tf_rep.inputs)
# Output nodes from the model
print('outputs:', tf_rep.outputs)
# All nodes in the model
print('tensor_dict:')
print(tf_rep.tensor_dict)
tf_rep.export_graph("mymodel.pb")
converter = tf.lite.TFLiteConverter.from_frozen_gragh("mymodel.pb/saved_model.pb",
                                                      tf_rep.inputs, tf_rep.outputs)  # **ERROR HERE**
tflite_model = converter.convert()
open("mymodel.tflite", "wb").write(tflite_model)
Here is my error:
AttributeError Traceback (most recent call last)
<ipython-input-37-0abbde392f91> in <module>()
----> 1 converter = tf.lite.TFLiteConverter.from_frozen_gragh("flowers.pb/saved_model.pb", tf_rep.inputs, tf_rep.outputs)
2 tflite_model = converter.convert()
3 open("flowers.tflite", "wb").write(tflite_model)
AttributeError: type object 'TFLiteConverterV2' has no attribute 'from_frozen_gragh'
When I tried with compat.v1, I got the same error, but instead of TFLiteConverterV2 it said TFLiteConverter.
Thanks in advance.
EDIT
So I tried with compat.v1 and fixed the typo in 'from_frozen_gragh', and got this ugly error:
---------------------------------------------------------------------------
DecodeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/tensorflow/lite/python/lite.py in from_frozen_graph(cls, graph_def_file, input_arrays, output_arrays, input_shapes)
1804 graph_def = _graph_pb2.GraphDef()
-> 1805 graph_def.ParseFromString(file_content)
1806 except (_text_format.ParseError, DecodeError):
DecodeError: Error parsing message
During handling of the above exception, another exception occurred:
UnicodeDecodeError Traceback (most recent call last)
2 frames
<ipython-input-32-46dac4006b0d> in <module>()
----> 1 tflitconverter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph("flowers.pb/saved_model.pb", tf_rep.inputs, tf_rep.outputs)
2 e_model = converter.convert()
3 open("flowers.tflite", "wb").write(tflite_model)
/usr/local/lib/python3.6/dist-packages/tensorflow/lite/python/lite.py in from_frozen_graph(cls, graph_def_file, input_arrays, output_arrays, input_shapes)
1812 file_content = six.ensure_binary(file_content, "utf-8")
1813 else:
-> 1814 file_content = six.ensure_text(file_content, "utf-8")
1815 graph_def = _graph_pb2.GraphDef()
1816 _text_format.Merge(file_content, graph_def)
/usr/local/lib/python3.6/dist-packages/six.py in ensure_text(s, encoding, errors)
933 """
934 if isinstance(s, binary_type):
--> 935 return s.decode(encoding, errors)
936 elif isinstance(s, text_type):
937 return s
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xb7 in position 3: invalid start byte
PLEASE HELP
it is "from_frozen_graph" not "from_frozen_gragh"
You need to use compat.v1 since from_frozen_graph is not available in TF 2.x
I had the same problem as your utf-8 error.
you can define your converter like this:
converter = tf.compat.v1.lite.TFLiteConverter.from_saved_model("mymodel.pb/", signature_keys=['serving_default'])
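For completeness, here is a minimal sketch of finishing the conversion in TF 2.x without compat.v1, under the assumption that "mymodel.pb/" is the SavedModel directory written by tf_rep.export_graph("mymodel.pb") in the question's code:
import tensorflow as tf

# Assumption: "mymodel.pb/" is a SavedModel directory, not a frozen GraphDef file.
converter = tf.lite.TFLiteConverter.from_saved_model("mymodel.pb/")
tflite_model = converter.convert()

with open("mymodel.tflite", "wb") as f:
    f.write(tflite_model)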

OSError: Failed to interpret file '/modelvgg.npy' as a pickle

I used a pretrained ImageNet VGG16 model in Keras and added my own Dense layers on top.
I'm trying to save and load the weights of the model I have trained.
The code I'm using to save the model is:
import time

start = time.time()
history = model.fit_generator(generator=train_batches,
                              epochs=epochs,
                              steps_per_epoch=steps_train,
                              #callbacks=callbacks_list,
                              validation_data=valid_batches,
                              validation_steps=steps_valid,
                              shuffle=True)
end = time.time()

model.save("modelvgg.npy")
Let me know if this is an incorrect way to do it, or if there is a better way.
But when I try to load the weights, using this:
def __init__(self, vgg16_npy_path=None, trainable=True):
    if vgg16_npy_path is None:
        path = inspect.getfile(Vgg16)
        path = os.path.abspath(os.path.join(path, os.pardir))
        path = os.path.join(path, "modelvgg.npy")
        vgg16_npy_path = path
        print(path)

    self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
    self.trainable = trainable
    print("npy file loaded")
but I get this error:
UnpicklingError Traceback (most recent call last)
~/.local/lib/python3.6/site-packages/numpy/lib/npyio.py in load(file, mmap_mode, allow_pickle, fix_imports, encoding)
446 try:
--> 447 return pickle.load(fid, **pickle_kwargs)
448 except Exception:
UnpicklingError: invalid load key, 'H'.
During handling of the above exception, another exception occurred:
OSError Traceback (most recent call last)
<ipython-input-5-d099900e8f3b> in <module>
46 labels = tf.placeholder(tf.float32, [batch_size, 2])
47
---> 48 vgg = vgg16.Vgg16()
49 model.build(images)
50 cost = (-1) * tf.reduce_sum(tf.multiply(labels, tf.log(model.prob)), axis=1)
~/Bureau/Grad-CAM_final/model/vgg16.py in __init__(self, vgg16_npy_path, trainable)
18 print(path)
19
---> 20 self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
21 self.trainable = trainable
22 print("npy file loaded")
~/.local/lib/python3.6/site-packages/numpy/lib/npyio.py in load(file, mmap_mode, allow_pickle, fix_imports, encoding)
448 except Exception:
449 raise IOError(
--> 450 "Failed to interpret file %s as a pickle" % repr(file))
451 finally:
452 if own_fid:
OSError: Failed to interpret file '/home/omri/Bureau/Grad-CAM_final/model/modelvgg.npy' as a pickle
Any suggestions on what I may be doing wrong? Thank you in advance.
This is not the correct way to load a Keras model saved in HDF5 format (which is what you produced by calling model.save):
self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
The correct way is to use keras.models.load_model:
from keras.models import load_model
model = load_model('your_file.hdf5')
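As a follow-up, a minimal sketch of the matching save/load pair; using an .h5 extension instead of .npy makes the HDF5 format explicit (the file name is illustrative, and model is assumed to be the trained Keras model from the question):
from keras.models import load_model

# Save the full model (architecture + weights + optimizer state) in HDF5 format.
model.save("modelvgg.h5")

# Later, restore it with load_model rather than np.load.
model = load_model("modelvgg.h5")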

Can't get librosa to load a wav file

I have an audio dataset of many wav files and tried to use librosa to edit them, but I have trouble reading certain files with librosa.load. Could someone help me figure it out?
Here is my code:
import librosa
sound_clip = librosa.load('audio/fold1/180937-7-3-10.wav')
print(sound_clip)
Here is the error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-5-93fe2f032e98> in <module>()
----> 1 sound_clip = librosa.load('audio/fold1/180937-7-3-10.wav')
2 print(sound_clip)
/home/uri7910/anaconda2/envs/tensorflow011/lib/python2.7/site-packages/librosa/core/audio.pyc in load(path, sr, mono, offset, duration, dtype)
107
108 y = []
--> 109 with audioread.audio_open(os.path.realpath(path)) as input_file:
110 sr_native = input_file.samplerate
111 n_channels = input_file.channels
/home/uri7910/anaconda2/envs/tensorflow011/lib/python2.7/site-packages/audioread/__init__.pyc in audio_open(path)
100 from . import maddec
101 try:
--> 102 return maddec.MadAudioFile(path)
103 except DecodeError:
104 pass
/home/uri7910/anaconda2/envs/tensorflow011/lib/python2.7/site-packages/audioread/maddec.pyc in __init__(self, filename)
24 def __init__(self, filename):
25 self.fp = open(filename, 'rb')
---> 26 self.mf = mad.MadFile(self.fp)
27 if not self.mf.total_time(): # Indicates a failed open.
28 raise UnsupportedError()
AttributeError: 'module' object has no attribute 'MadFile'
The failing line is:
self.mf = mad.MadFile(self.fp)
AttributeError: 'module' object has no attribute 'MadFile'
This looks to be a problem with the pymad library. I would suggest upgrading or reinstalling that library. If that fails, you might want to raise a bug.
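As a workaround while the pymad backend is broken, the file can often be read directly with the soundfile package instead; this is a sketch under the assumption that the wav file itself is valid:
import numpy as np
import soundfile as sf

# Read the PCM samples without going through audioread's pymad backend.
data, sr = sf.read('audio/fold1/180937-7-3-10.wav')

# librosa.load returns mono audio by default; mimic that here.
if data.ndim > 1:
    data = np.mean(data, axis=1)

print(data.shape, sr)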
