I tried to load a BERT model from a local directory and it is showing an error.
I am using CUDA 10.0 and PyTorch 1.6.0.
Code to load the model:
import torch
from transformers import BertForTokenClassification, BertTokenizer
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
output_dir = './ner_model/'
model = BertForTokenClassification.from_pretrained(output_dir)
tokenizer = BertTokenizer.from_pretrained(output_dir)
model.to(device)
Any help would be appreciated. Here is the traceback:
ReadError: invalid header
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
~\anaconda3\envs\env\lib\site-packages\transformers\modeling_utils.py in from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
511 try:
--> 512 state_dict = torch.load(resolved_archive_file, map_location="cpu")
513 except Exception:
~\anaconda3\envs\env\lib\site-packages\torch\serialization.py in load(f, map_location, pickle_module, **pickle_load_args)
385 try:
--> 386 return _load(f, map_location, pickle_module, **pickle_load_args)
387 finally:
~\anaconda3\envs\env\lib\site-packages\torch\serialization.py in _load(f, map_location, pickle_module, **pickle_load_args)
558 # .zip is used for torch.jit.save and will throw an un-pickling error here
--> 559 raise RuntimeError("{} is a zip archive (did you mean to use torch.jit.load()?)".format(f.name))
560 # if not a tarfile, reset file offset and proceed
RuntimeError: ./ner_model/pytorch_model.bin is a zip archive (did you mean to use torch.jit.load()?)
During handling of the above exception, another exception occurred:
OSError Traceback (most recent call last)
<ipython-input-13-770da388c2c8> in <module>
23
24 output_dir = './ner_model/'
---> 25 model = BertForTokenClassification.from_pretrained(output_dir)
26 tokenizer = BertTokenizer.from_pretrained(output_dir)
27 model.to(device)
~\anaconda3\envs\env\lib\site-packages\transformers\modeling_utils.py in from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
513 except Exception:
514 raise OSError(
--> 515 "Unable to load weights from pytorch checkpoint file. "
516 "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
517 )
OSError: Unable to load weights from pytorch checkpoint file. If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True.
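For context, this error usually means the checkpoint was written with the zip-based serialization introduced in PyTorch 1.6, while the torch that is actually doing the loading is older and falls back to the legacy loader (the traceback goes through the legacy _load path in serialization.py), so it is worth double-checking torch.__version__ in that environment. A minimal sketch of one possible workaround, assuming you still have access to an environment with torch >= 1.6 that can open the file, is to re-save the weights in the legacy format:
import torch
# Hypothetical workaround: run this where torch >= 1.6 is installed, then
# copy the re-saved file back into ./ner_model/.
state_dict = torch.load('./ner_model/pytorch_model.bin', map_location='cpu')
torch.save(state_dict, './ner_model/pytorch_model.bin',
           _use_new_zipfile_serialization=False)
Alternatively, upgrading torch in the loading environment avoids the format mismatch altogether.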
Related
I have created an image classification model using the pre-trained InceptionV3 model. After training it on my dataset I saved the model using joblib. When trying to load the model I get the error "Unsuccessful TensorSliceReader constructor: Failed to find any matching files for ram://1ea4479d-6a25-4562-965a-428f7eb33342/variables/variables
You may be trying to load on a different device from the computational device. Consider setting the experimental_io_device option in tf.saved_model.LoadOptions to the io_device such as '/job:localhost'."
Any idea why this message appears, or is it because you can't use joblib to save a model built from a pre-trained model? Below are the code and the error.
import joblib
joblib.dump(inceptionv3_model, 'inceptV3_model.pkl')
model_inceptionv3 = joblib.load('inceptV3_model.pkl')
FileNotFoundError Traceback (most recent call last)
<ipython-input-14-8ed26b03fd7d> in <module>
1 # loading the model
----> 2 model_inceptionv3 = joblib.load('C:/Users/Indranil/inceptV3_model.pkl')
~\anaconda3\lib\site-packages\joblib\numpy_pickle.py in load(filename, mmap_mode)
583 return load_compatibility(fobj)
584
--> 585 obj = _unpickle(fobj, filename, mmap_mode)
586 return obj
~\anaconda3\lib\site-packages\joblib\numpy_pickle.py in _unpickle(fobj, filename, mmap_mode)
502 obj = None
503 try:
--> 504 obj = unpickler.load()
505 if unpickler.compat_mode:
506 warnings.warn("The file '%s' has been generated with a "
~\anaconda3\lib\pickle.py in load(self)
1208 raise EOFError
1209 assert isinstance(key, bytes_types)
-> 1210 dispatch[key[0]](self)
1211 except _Stop as stopinst:
1212 return stopinst.value
~\anaconda3\lib\pickle.py in load_reduce(self)
1585 args = stack.pop()
1586 func = stack[-1]
-> 1587 stack[-1] = func(*args)
1588 dispatch[REDUCE[0]] = load_reduce
1589
~\anaconda3\lib\site-packages\keras\saving\pickle_utils.py in deserialize_model_from_bytecode(serialized_model)
46 with tf.io.gfile.GFile(dest_path, "wb") as f:
47 f.write(archive.extractfile(name).read())
---> 48 model = save_module.load_model(temp_dir)
49 tf.io.gfile.rmtree(temp_dir)
50 return model
~\anaconda3\lib\site-packages\keras\utils\traceback_utils.py in error_handler(*args, **kwargs)
65 except Exception as e: # pylint: disable=broad-except
66 filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67 raise e.with_traceback(filtered_tb) from None
68 finally:
69 del filtered_tb
~\anaconda3\lib\site-packages\tensorflow\python\saved_model\load.py in load_internal(export_dir, tags, options, loader_cls, filters)
975 ckpt_options, options, filters)
976 except errors.NotFoundError as err:
--> 977 raise FileNotFoundError(
978 str(err) + "\n You may be trying to load on a different device "
979 "from the computational device. Consider setting the "
FileNotFoundError: Unsuccessful TensorSliceReader constructor: Failed to find any matching files for ram://1ea4479d-6a25-4562-965a-428f7eb33342/variables/variables
You may be trying to load on a different device from the computational device. Consider setting the `experimental_io_device` option in `tf.saved_model.LoadOptions` to the io_device such as '/job:localhost'.
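For reference, joblib/pickle is generally not a reliable way to persist a Keras model; Keras has its own serialization API. A minimal sketch of the usual approach, assuming the trained model object is the inceptionv3_model from the question:
from tensorflow import keras
# Save the full model (architecture, weights and optimizer state) with
# Keras' own format instead of joblib/pickle.
inceptionv3_model.save('inceptV3_model.h5')
# Restore it later.
model_inceptionv3 = keras.models.load_model('inceptV3_model.h5')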
I just want to load a .h5 file exported from PyTorch, back into PyTorch.
Here's my code:
import torch
loaded_model = torch.load('/Users/me/tmp_model.h5')
Which spits out the following error:
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
<ipython-input-6-bfb05a2f6d1e> in <module>
1 import torch
----> 2 loaded_model = torch.load('/Users/arielelkin/tmp_model.h5')
~/.pyenv/versions/3.8.6/lib/python3.8/site-packages/torch/serialization.py in load(f, map_location, pickle_module, **pickle_load_args)
605 opened_file.seek(orig_position)
606 return torch.jit.load(opened_file)
--> 607 return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args)
608 return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
609
~/.pyenv/versions/3.8.6/lib/python3.8/site-packages/torch/serialization.py in _load(zip_file, map_location, pickle_module, pickle_file, **pickle_load_args)
880 unpickler = UnpicklerWrapper(data_file, **pickle_load_args)
881 unpickler.persistent_load = persistent_load
--> 882 result = unpickler.load()
883
884 torch._utils._validate_loaded_sparse_tensors()
~/.pyenv/versions/3.8.6/lib/python3.8/site-packages/torch/serialization.py in find_class(self, mod_name, name)
873 def find_class(self, mod_name, name):
874 mod_name = load_module_mapping.get(mod_name, mod_name)
--> 875 return super().find_class(mod_name, name)
876
877 # Load the data (which may in turn use `persistent_load` to load tensors)
ModuleNotFoundError: No module named 'model'
I know that the model was exported thus:
ckpoint = 'version_131/epoch=171-step=1375.ckpt'
model = NSNetModel.load_from_checkpoint(Path('/Users/dev/Documents/models/'+ckpoint))
torch.save(model, 'tmp_model.h5')
What's the issue here? Am I missing an import?
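For context, torch.save(model, ...) pickles the whole Python object, so torch.load needs the module that defined the model class (here a module literally named model) to be importable on the loading side. A minimal sketch of the two usual options, assuming NSNetModel lives in a model.py you still have access to:
import sys
import torch
# Option 1: make the defining module importable under the same name it had
# at save time, e.g. by adding its directory to sys.path (hypothetical path).
sys.path.append('/Users/dev/Documents/models')
loaded_model = torch.load('/Users/me/tmp_model.h5')
# Option 2 (more portable): on the exporting side save only the weights,
#   torch.save(model.state_dict(), 'tmp_model_state.pth')
# then rebuild the model where you load it and call
#   loaded_model.load_state_dict(torch.load('tmp_model_state.pth'))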
When trying to load a PyTorch model, I get the following AttributeError:
model = torch.load('../input/melanoma-model/melanoma_model_0.pth')
model = model.to(device)
model.eval()
AttributeError Traceback (most recent call last)
<ipython-input> in <module>
1 arch = EfficientNet.from_pretrained('efficientnet-b2')
2 model = Net(arch=arch)
----> 3 torch.load('../input/melanoma-model/melanoma_model_0.pth')
4 model = model.to(device)
5 model.eval()
/opt/conda/lib/python3.7/site-packages/torch/serialization.py in load(f, map_location, pickle_module, **pickle_load_args)
591 return torch.jit.load(f)
592 return _load(opened_zipfile, map_location, pickle_module, **pickle_load_args)
--> 593 return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
594
595
/opt/conda/lib/python3.7/site-packages/torch/serialization.py in _legacy_load(f, map_location, pickle_module, **pickle_load_args)
771 unpickler = pickle_module.Unpickler(f, **pickle_load_args)
772 unpickler.persistent_load = persistent_load
--> 773 result = unpickler.load()
774
775 deserialized_storage_keys = pickle_module.load(f, **pickle_load_args)
AttributeError: Can't get attribute 'Identity' on <module 'efficientnet_pytorch.utils' from '/opt/conda/lib/python3.7/site-packages/efficientnet_pytorch/utils.py'>
First, you need a model class to load the parameters from the .pth file into, and you are missing one step:
model = Model() # the model class (yours has probably another name)
model.load_state_dict(torch.load('../input/melanoma-model/melanoma_model_0.pth'))
model = model.to(device)
model.eval()
There you go, I hope that solved your problem!
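As a side note, if the checkpoint was saved on a GPU machine (e.g. a Kaggle kernel) and you load it on a CPU-only machine, passing map_location explicitly avoids a separate device error; a small sketch using the same file path as above, assuming the file contains a state dict:
import torch
# Map GPU-saved tensors onto the CPU while loading.
state_dict = torch.load('../input/melanoma-model/melanoma_model_0.pth',
                        map_location=torch.device('cpu'))
model.load_state_dict(state_dict)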
I trained a model on a cluster, downloaded it (pkl format) and tried to load it locally. I know that sklearn's bundled version of joblib was used to save the model mymodel.pkl (but I don't know exactly which version...).
from sklearn.externals import joblib
print(joblib.__version__)
model = joblib.load("mymodel.pkl")
I use version 0.13.0 of sklearn's joblib locally.
This is the error that I got:
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-100-d0a3c42e5c53> in <module>
3 print(joblib.__version__)
4
----> 5 model = joblib.load("mymodel.pkl")
~\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\externals\joblib\numpy_pickle.py in load(filename, mmap_mode)
596 return load_compatibility(fobj)
597
--> 598 obj = _unpickle(fobj, filename, mmap_mode)
599
600 return obj
~\AppData\Local\Continuum\anaconda3\lib\site-packages\sklearn\externals\joblib\numpy_pickle.py in _unpickle(fobj, filename, mmap_mode)
524 obj = None
525 try:
--> 526 obj = unpickler.load()
527 if unpickler.compat_mode:
528 warnings.warn("The file '%s' has been generated with a "
~\AppData\Local\Continuum\anaconda3\lib\pickle.py in load(self)
1083 raise EOFError
1084 assert isinstance(key, bytes_types)
-> 1085 dispatch[key[0]](self)
1086 except _Stop as stopinst:
1087 return stopinst.value
KeyError: 239
Update:
I also tried the following, but got AttributeError: 'str' object has no attribute 'readable':
with io.BufferedReader("mymodel.pkl") as pickle_file:
    model = pickle.load(pickle_file)
You tried to dump it with joblib.dump('pipeline', 'mymodel.pkl'). That only dumps the string 'pipeline', not your actual pipeline object.
Dump it correctly with:
joblib.dump(pipeline,'mymodel.pkl')
...then read back with:
model = joblib.load('mymodel.pkl')
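Once the load succeeds, a quick sanity check (just a sketch) is to look at the type of the object that comes back, which immediately exposes the string-instead-of-pipeline mistake:
import joblib
obj = joblib.load('mymodel.pkl')
print(type(obj))  # str means only a string was pickled, not the fitted pipeline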
I used a pretrained ImageNet VGG16 model in Keras and added my own Dense layers on top.
I'm trying to save and load the weights of the model I have trained.
The code I'm using to save the model is:
import time
start = time.time()
history = model.fit_generator(generator=train_batches,
                              epochs=epochs,
                              steps_per_epoch=steps_train,
                              # callbacks=callbacks_list,
                              validation_data=valid_batches,
                              validation_steps=steps_valid,
                              shuffle=True)
end = time.time()
model.save("modelvgg.npy")
Let me know if this is an incorrect way to do it, or if there is a better way.
But when I try to load the weights, using this,
def __init__(self, vgg16_npy_path=None, trainable=True):
    if vgg16_npy_path is None:
        path = inspect.getfile(Vgg16)
        path = os.path.abspath(os.path.join(path, os.pardir))
        path = os.path.join(path, "modelvgg.npy")
        vgg16_npy_path = path
        print(path)
    self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
    self.trainable = trainable
    print("npy file loaded")
I get this error:
UnpicklingError Traceback (most recent call last)
~/.local/lib/python3.6/site-packages/numpy/lib/npyio.py in load(file, mmap_mode, allow_pickle, fix_imports, encoding)
446 try:
--> 447 return pickle.load(fid, **pickle_kwargs)
448 except Exception:
UnpicklingError: invalid load key, 'H'.
During handling of the above exception, another exception occurred:
OSError Traceback (most recent call last)
<ipython-input-5-d099900e8f3b> in <module>
46 labels = tf.placeholder(tf.float32, [batch_size, 2])
47
---> 48 vgg = vgg16.Vgg16()
49 model.build(images)
50 cost = (-1) * tf.reduce_sum(tf.multiply(labels, tf.log(model.prob)), axis=1)
~/Bureau/Grad-CAM_final/model/vgg16.py in __init__(self, vgg16_npy_path, trainable)
18 print(path)
19
---> 20 self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
21 self.trainable = trainable
22 print("npy file loaded")
~/.local/lib/python3.6/site-packages/numpy/lib/npyio.py in load(file, mmap_mode, allow_pickle, fix_imports, encoding)
448 except Exception:
449 raise IOError(
--> 450 "Failed to interpret file %s as a pickle" % repr(file))
451 finally:
452 if own_fid:
OSError: Failed to interpret file '/home/omri/Bureau/Grad-CAM_final/model/modelvgg.npy' as a pickle
Any suggestions on what I may be doing wrong? Thank you in advance.
This is not the correct way to load a Keras model saved as HDF5 (since you saved it with model.save):
self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
The correct way is to use keras.models.load_model:
from keras.models import load_model
model = load_model('your_file.hdf5')
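As a follow-up sketch, it also helps to save the model with an extension that reflects the HDF5 format, so the intent is clear when reloading (reusing the model from the question):
from keras.models import load_model
# Save with an explicit .h5 extension so the HDF5 format is obvious from the filename.
model.save("modelvgg.h5")
# Restore the full model (architecture, weights and optimizer state) later.
model = load_model("modelvgg.h5")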