I used a pretrained ImageNet VGG16 model in Keras and added my own Dense layers on top. I'm trying to save and load the weights from the model I have trained.
The code I'm using to save the model is:
import time

start = time.time()
history = model.fit_generator(generator=train_batches,
                              epochs=epochs,
                              steps_per_epoch=steps_train,
                              #callbacks=callbacks_list,
                              validation_data=valid_batches,
                              validation_steps=steps_valid,
                              shuffle=True)
end = time.time()

model.save("modelvgg.npy")
Let me know if this is an incorrect way to do it, or if there is a better way.
But when I try to load the weights using this:
def __init__(self, vgg16_npy_path=None, trainable=True):
    if vgg16_npy_path is None:
        path = inspect.getfile(Vgg16)
        path = os.path.abspath(os.path.join(path, os.pardir))
        path = os.path.join(path, "modelvgg.npy")
        vgg16_npy_path = path
        print(path)

    self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
    self.trainable = trainable
    print("npy file loaded")
I get this error:
UnpicklingError Traceback (most recent call last)
~/.local/lib/python3.6/site-packages/numpy/lib/npyio.py in load(file, mmap_mode, allow_pickle, fix_imports, encoding)
446 try:
--> 447 return pickle.load(fid, **pickle_kwargs)
448 except Exception:
UnpicklingError: invalid load key, 'H'.
During handling of the above exception, another exception occurred:
OSError Traceback (most recent call last)
<ipython-input-5-d099900e8f3b> in <module>
46 labels = tf.placeholder(tf.float32, [batch_size, 2])
47
---> 48 vgg = vgg16.Vgg16()
49 model.build(images)
50 cost = (-1) * tf.reduce_sum(tf.multiply(labels, tf.log(model.prob)), axis=1)
~/Bureau/Grad-CAM_final/model/vgg16.py in __init__(self, vgg16_npy_path, trainable)
18 print(path)
19
---> 20 self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
21 self.trainable = trainable
22 print("npy file loaded")
~/.local/lib/python3.6/site-packages/numpy/lib/npyio.py in load(file, mmap_mode, allow_pickle, fix_imports, encoding)
448 except Exception:
449 raise IOError(
--> 450 "Failed to interpret file %s as a pickle" % repr(file))
451 finally:
452 if own_fid:
OSError: Failed to interpret file '/home/omri/Bureau/Grad-CAM_final/model/modelvgg.npy' as a pickle
Any suggestions on what I may be doing wrong? Thank you in advance.
This is not the correct way to load a Keras model saved as HDF5 (which is what model.save produces, despite the .npy extension you gave the file):
self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
The correct way is to use keras.models.load_model:
from keras.models import load_model
model = load_model('your_file.hdf5')
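For reference, a minimal save/load round trip (the .h5 filename is only illustrative; model.save writes HDF5 regardless of the extension, which is why the .npy name above was misleading):

from keras.models import load_model

# model.save stores architecture, weights and optimizer state in one HDF5 file
model.save('modelvgg.h5')

# Restore everything in a single call; no need to rebuild the layers first
model = load_model('modelvgg.h5')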
I've created a model, trained it, and saved it as follows:
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
    os.makedirs(args.output_dir)

logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
    model.module if hasattr(model, "module") else model
)  # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)

# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
I later try to load the same model as follows:
dir_to_trained_model = args.output_dir
config_trained = BertConfig.from_pretrained(dir_to_trained_model)
tokenizer = BertTokenizer.from_pretrained(dir_to_trained_model)
model_trained = BertForSequenceClassification.from_pretrained(dir_to_trained_model, config = config_trained)
However, I experience the following error at the final model_trained = ... line:
============================================================
<class 'transformers.tokenization_bert.BertTokenizer'>
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-26-5519b2c94546> in <module>
6 tokenizer = BertTokenizer.from_pretrained(dir_to_trained_model)
7
----> 8 model_trained = BertForSequenceClassification.from_pretrained(dir_to_trained_model, config = config_trained)
/home/s1097572/DNABERT/src/transformers/modeling_utils.py in from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
737 raise RuntimeError(
738 "Error(s) in loading state_dict for {}:\n\t{}".format(
--> 739 model.__class__.__name__, "\n\t".join(error_msgs)
740 )
741 )
RuntimeError: Error(s) in loading state_dict for BertForSequenceClassification:
size mismatch for bert.embeddings.word_embeddings.weight: copying a param with shape torch.Size([4101, 768]) from checkpoint, the shape in current model is torch.Size([30522, 768]).
It seems the model and the config are incompatible, which doesn't make sense given that both files originated from the same model. I am using Transformers 2.5.0, if that makes a difference. Any help is greatly appreciated.
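One hedged way to narrow this down (assuming the default config.json and pytorch_model.bin filenames that save_pretrained produces) is to compare the vocab_size recorded in the saved config against the embedding shape actually stored in the checkpoint; if the two disagree, the config being passed in is not the one the weights were trained with:

import json, os
import torch

output_dir = './output_dir'  # hypothetical path to the saved checkpoint

# vocab_size as recorded in the saved configuration
with open(os.path.join(output_dir, 'config.json')) as f:
    config_vocab = json.load(f)['vocab_size']

# vocab size actually baked into the checkpoint's embedding matrix
state_dict = torch.load(os.path.join(output_dir, 'pytorch_model.bin'),
                        map_location='cpu')
ckpt_vocab = state_dict['bert.embeddings.word_embeddings.weight'].shape[0]

print(config_vocab, ckpt_vocab)  # these must match for from_pretrained to succeed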
I am not able to load the BERT model weights.
Code for training and saving the BERT model weights:
# Fetching the pre-trained version
from transformers import BertTokenizer, TFBertModel, BertConfig, TFBertForSequenceClassification

bert_tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-uncased')
bert_model = TFBertForSequenceClassification.from_pretrained('bert-base-multilingual-uncased', num_labels=3)

# Training and saving model
import tensorflow as tf

log_dir = 'tensorboard_data/tb_bert'
model_save_path = './models/bert_model.h5'

callbacks = [
    tf.keras.callbacks.ModelCheckpoint(filepath=model_save_path,
                                       save_weights_only=True,
                                       monitor='val_loss',
                                       mode='min',
                                       save_best_only=True),
    tf.keras.callbacks.TensorBoard(log_dir=log_dir),  # was keras.callbacks; keras itself is never imported here
]

loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
optimizer = tf.keras.optimizers.Adam(learning_rate=2e-5, epsilon=1e-08)

bert_model.compile(loss=loss, optimizer=optimizer, metrics=[metric])
h = bert_model.fit([train_inp, train_mask], train_label,
                   batch_size=8, epochs=2,
                   validation_data=([val_inp, val_mask], val_label),
                   callbacks=callbacks)
Code for loading the weights:
Since I saved weights only, I first rebuilt the architecture of the BERT model:
from transformers import BertTokenizer, TFBertModel, BertConfig, TFBertForSequenceClassification

bert_tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-uncased')
bert_model = TFBertForSequenceClassification.from_pretrained('bert-base-multilingual-uncased', num_labels=3)

model_save_path = '/root/data/bert_model.h5'

bert_model.compile(loss=loss, optimizer=optimizer, metrics=[metric])
bert_model.load_weights(model_save_path)  # error comes in this line
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-9-857931cc97b8> in <module>()
----> 1 bert_model.load_weights(model_save_path)
1 frames
/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
65 except Exception as e: # pylint: disable=broad-except
66 filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67 raise e.with_traceback(filtered_tb) from None
68 finally:
69 del filtered_tb
/usr/local/lib/python3.7/dist-packages/keras/saving/hdf5_format.py in load_weights_from_hdf5_group(f, model)
756 if len(weight_values) != len(symbolic_weights):
757 raise ValueError(
--> 758 f'Weight count mismatch for top-level weights when loading weights '
759 f'from file. '
760 f'Model expects {len(symbolic_weights)} top-level weight(s). '
ValueError: Weight count mismatch for top-level weights when loading weights from file. Model expects 0 top-level weight(s). Received 1 saved top-level weight(s)
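A hedged alternative that sidesteps HDF5 weight-layout mismatches like this one: use the Transformers-native save_pretrained/from_pretrained round trip instead of Keras .h5 weights (the directory name below is hypothetical):

from transformers import TFBertForSequenceClassification

# Save in the Transformers-native format after training
bert_model.save_pretrained('./models/bert_sentiment')

# Restore architecture and weights in one call; no manual rebuild needed
bert_model = TFBertForSequenceClassification.from_pretrained('./models/bert_sentiment')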
I was trying to convert my PyTorch model to TensorFlow Lite for mobile. My model was a pre-trained DenseNet 169, so I did this:
import sys
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import onnx
from collections import OrderedDict
import tensorflow as tf
from torch.autograd import Variable
from onnx_tf.backend import prepare

dummy_input = Variable(torch.randn(32, 3, 224, 224))
torch.onnx.export(trained_model, dummy_input, "mymodel.onnx")

model = onnx.load("mymodel.onnx")
tf_rep = prepare(model)

print('inputs:', tf_rep.inputs)
# Output nodes from the model
print('outputs:', tf_rep.outputs)
# All nodes in the model
print('tensor_dict:')
print(tf_rep.tensor_dict)

tf_rep.export_graph("mymodel.pb")

converter = tf.lite.TFLiteConverter.from_frozen_gragh("mymodel.pb/saved_model.pb",
                                                      tf_rep.inputs, tf_rep.outputs)  # **ERROR HERE**
tflite_model = converter.convert()
open("mymodel.tflite", "wb").write(tflite_model)
Here is my error:
AttributeError Traceback (most recent call last)
<ipython-input-37-0abbde392f91> in <module>()
----> 1 converter = tf.lite.TFLiteConverter.from_frozen_gragh("flowers.pb/saved_model.pb", tf_rep.inputs, tf_rep.outputs)
2 tflite_model = converter.convert()
3 open("flowers.tflite", "wb").write(tflite_model)
AttributeError: type object 'TFLiteConverterV2' has no attribute 'from_frozen_gragh'
When I tried with compat.v1 I got the same error, but with TFLiteConverter instead of TFLiteConverterV2.
Thanks in advance.
EDIT
So I tried with compat.v1 and fixed the typo in 'from_frozen_gragh', and got this ugly error:
---------------------------------------------------------------------------
DecodeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/tensorflow/lite/python/lite.py in from_frozen_graph(cls, graph_def_file, input_arrays, output_arrays, input_shapes)
1804 graph_def = _graph_pb2.GraphDef()
-> 1805 graph_def.ParseFromString(file_content)
1806 except (_text_format.ParseError, DecodeError):
DecodeError: Error parsing message
During handling of the above exception, another exception occurred:
UnicodeDecodeError Traceback (most recent call last)
2 frames
<ipython-input-32-46dac4006b0d> in <module>()
----> 1 tflitconverter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph("flowers.pb/saved_model.pb", tf_rep.inputs, tf_rep.outputs)
2 e_model = converter.convert()
3 open("flowers.tflite", "wb").write(tflite_model)
/usr/local/lib/python3.6/dist-packages/tensorflow/lite/python/lite.py in from_frozen_graph(cls, graph_def_file, input_arrays, output_arrays, input_shapes)
1812 file_content = six.ensure_binary(file_content, "utf-8")
1813 else:
-> 1814 file_content = six.ensure_text(file_content, "utf-8")
1815 graph_def = _graph_pb2.GraphDef()
1816 _text_format.Merge(file_content, graph_def)
/usr/local/lib/python3.6/dist-packages/six.py in ensure_text(s, encoding, errors)
933 """
934 if isinstance(s, binary_type):
--> 935 return s.decode(encoding, errors)
936 elif isinstance(s, text_type):
937 return s
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xb7 in position 3: invalid start byte
Please help.
it is "from_frozen_graph" not "from_frozen_gragh"
You need to use compat.v1 since from_frozen_graph is not available in TF 2.x
I had the same problem as your utf-8 error. You can define your converter like this:
converter = tf.compat.v1.lite.TFLiteConverter.from_saved_model("mymodel.pb/", signature_keys=['serving_default'])
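Whichever converter variant ends up working in your TF version, the remaining steps from the original snippet are unchanged:

tflite_model = converter.convert()
with open('mymodel.tflite', 'wb') as f:
    f.write(tflite_model)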
I'm getting an error while preparing the list of operators of my serialized TorchScript model. What is the issue here? Is it coming from loading the model?
Python code
# Dump list of operators used by MobileNetV2:
import torch, yaml

root = '/content/drive/My Drive/Monitoring/'
model = torch.jit.load(root + 'model.pt')
ops = torch.jit.export_opnames(model)
with open('MobileNetV2.yaml', 'w') as output:
    yaml.dump(ops, output)
Stack Trace
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-39-8b61c35fb898> in <module>()
3
4 root = '/content/drive/My Drive/Monitoring/'
----> 5 model = torch.jit.load(root+'model.pt')
6 ops = torch.jit.export_opnames(model)
7 with open('MobileNetV2.yaml', 'w') as output:
/usr/local/lib/python3.6/dist-packages/torch/jit/_serialization.py in load(f, map_location, _extra_files)
159 cu = torch._C.CompilationUnit()
160 if isinstance(f, str) or isinstance(f, pathlib.Path):
--> 161 cpp_module = torch._C.import_ir_module(cu, f, map_location, _extra_files)
162 else:
163 cpp_module = torch._C.import_ir_module_from_buffer(
RuntimeError: [enforce fail at inline_container.cc:222] . file not found: archive/constants.pkl
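For reference, torch.jit.load only reads archives produced by torch.jit.save; a checkpoint written with plain torch.save, or a partially downloaded file, will not contain archive/constants.pkl and fails exactly like this. A minimal round trip that does produce a loadable archive (using a stock torchvision MobileNetV2 purely as an illustration):

import torch
import torchvision

# Script and save a model in the TorchScript archive format torch.jit.load expects
model = torchvision.models.mobilenet_v2(pretrained=True).eval()
torch.jit.script(model).save('model.pt')

# This archive contains archive/constants.pkl and loads cleanly
loaded = torch.jit.load('model.pt')
ops = torch.jit.export_opnames(loaded)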
I tried to load a BERT model from a local directory and it showed an error.
I am using CUDA 10.0 and PyTorch 1.6.0.
Code to load the model:
output_dir = './ner_model/'
model = BertForTokenClassification.from_pretrained(output_dir)
tokenizer = BertTokenizer.from_pretrained(output_dir)
model.to(device)
Any help would be appreciated. Here is the error:
ReadError: invalid header
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
~\anaconda3\envs\env\lib\site-packages\transformers\modeling_utils.py in from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
511 try:
--> 512 state_dict = torch.load(resolved_archive_file, map_location="cpu")
513 except Exception:
~\anaconda3\envs\env\lib\site-packages\torch\serialization.py in load(f, map_location, pickle_module, **pickle_load_args)
385 try:
--> 386 return _load(f, map_location, pickle_module, **pickle_load_args)
387 finally:
~\anaconda3\envs\env\lib\site-packages\torch\serialization.py in _load(f, map_location, pickle_module, **pickle_load_args)
558 # .zip is used for torch.jit.save and will throw an un-pickling error here
--> 559 raise RuntimeError("{} is a zip archive (did you mean to use torch.jit.load()?)".format(f.name))
560 # if not a tarfile, reset file offset and proceed
RuntimeError: ./ner_model/pytorch_model.bin is a zip archive (did you mean to use torch.jit.load()?)
During handling of the above exception, another exception occurred:
OSError Traceback (most recent call last)
<ipython-input-13-770da388c2c8> in <module>
23
24 output_dir = './ner_model/'
---> 25 model = BertForTokenClassification.from_pretrained(output_dir)
26 tokenizer = BertTokenizer.from_pretrained(output_dir)
27 model.to(device)
~\anaconda3\envs\env\lib\site-packages\transformers\modeling_utils.py in from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
513 except Exception:
514 raise OSError(
--> 515 "Unable to load weights from pytorch checkpoint file. "
516 "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
517 )
OSError: Unable to load weights from pytorch checkpoint file. If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True.
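For reference (an inference from the traceback, not something the thread confirms): the "is a zip archive" RuntimeError is raised by the legacy loader in torch < 1.6 when it meets a checkpoint written in the zip-based format introduced in PyTorch 1.6, so the loading environment is likely running an older torch than the one that saved pytorch_model.bin. One hedged workaround is to re-export the weights in the legacy format from the environment that created them:

import torch

# Hypothetical re-export, run with the (newer) torch that wrote the checkpoint
state_dict = torch.load('./ner_model/pytorch_model.bin', map_location='cpu')
torch.save(state_dict,
           './ner_model/pytorch_model.bin',
           _use_new_zipfile_serialization=False)  # legacy format, readable by torch < 1.6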