I am not able to load the BERT model weights.
Code for training the BERT model and saving its weights:
# Fetching the pre-trained version
from transformers import BertTokenizer, TFBertModel, BertConfig, TFBertForSequenceClassification

bert_tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-uncased')
bert_model = TFBertForSequenceClassification.from_pretrained('bert-base-multilingual-uncased', num_labels=3)
# Training and saving model
import tensorflow as tf
log_dir='tensorboard_data/tb_bert'
model_save_path='./models/bert_model.h5'
callbacks = [
    tf.keras.callbacks.ModelCheckpoint(filepath=model_save_path, save_weights_only=True,
                                       monitor='val_loss', mode='min', save_best_only=True),
    tf.keras.callbacks.TensorBoard(log_dir=log_dir)
]
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
optimizer = tf.keras.optimizers.Adam(learning_rate=2e-5,epsilon=1e-08)
bert_model.compile(loss=loss,optimizer=optimizer,metrics=[metric])
h=bert_model.fit([train_inp,train_mask],train_label,batch_size=8,epochs=2,validation_data=([val_inp,val_mask],val_label),callbacks=callbacks)
Code for loading the weights:
Since I saved only the weights, I first rebuild the BERT model architecture and then load the weights into it.
from transformers import BertTokenizer, TFBertModel, BertConfig, TFBertForSequenceClassification

bert_tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-uncased')
bert_model = TFBertForSequenceClassification.from_pretrained('bert-base-multilingual-uncased', num_labels=3)
model_save_path = '/root/data/bert_model.h5'

# loss, optimizer, and metric are defined exactly as in the training script above
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
optimizer = tf.keras.optimizers.Adam(learning_rate=2e-5, epsilon=1e-08)

bert_model.compile(loss=loss, optimizer=optimizer, metrics=[metric])
bert_model.load_weights(model_save_path) # error comes in this line
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-9-857931cc97b8> in <module>()
----> 1 bert_model.load_weights(model_save_path)
1 frames
/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
65 except Exception as e: # pylint: disable=broad-except
66 filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67 raise e.with_traceback(filtered_tb) from None
68 finally:
69 del filtered_tb
/usr/local/lib/python3.7/dist-packages/keras/saving/hdf5_format.py in load_weights_from_hdf5_group(f, model)
756 if len(weight_values) != len(symbolic_weights):
757 raise ValueError(
--> 758 f'Weight count mismatch for top-level weights when loading weights '
759 f'from file. '
760 f'Model expects {len(symbolic_weights)} top-level weight(s). '
ValueError: Weight count mismatch for top-level weights when loading weights from file. Model expects 0 top-level weight(s). Received 1 saved top-level weight(s)
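For context, one workaround commonly suggested for this kind of top-level weight mismatch (a sketch, not verified against this exact setup) is to build the model's variables with a dummy forward pass before calling load_weights, since a freshly constructed Keras model has no weights until it is built. The sequence length of 128 below is an assumption; use whatever length the training inputs had:
import numpy as np

# Hypothetical dummy batch, used only to build the model's variables (seq_len=128 is an assumption)
dummy_ids = np.zeros((1, 128), dtype=np.int32)
dummy_mask = np.ones((1, 128), dtype=np.int32)
bert_model([dummy_ids, dummy_mask])

bert_model.load_weights(model_save_path)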
Related
I've created a model, trained it, and saved it as follows:
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
I later try to load the same model as follows:
dir_to_trained_model = args.output_dir
config_trained = BertConfig.from_pretrained(dir_to_trained_model)
tokenizer = BertTokenizer.from_pretrained(dir_to_trained_model)
model_trained = BertForSequenceClassification.from_pretrained(dir_to_trained_model, config = config_trained)
However, I experience the following error at the final model_trained = ... line:
============================================================
<class 'transformers.tokenization_bert.BertTokenizer'>
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-26-5519b2c94546> in <module>
6 tokenizer = BertTokenizer.from_pretrained(dir_to_trained_model)
7
----> 8 model_trained = BertForSequenceClassification.from_pretrained(dir_to_trained_model, config = config_trained)
/home/s1097572/DNABERT/src/transformers/modeling_utils.py in from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
737 raise RuntimeError(
738 "Error(s) in loading state_dict for {}:\n\t{}".format(
--> 739 model.__class__.__name__, "\n\t".join(error_msgs)
740 )
741 )
RuntimeError: Error(s) in loading state_dict for BertForSequenceClassification:
size mismatch for bert.embeddings.word_embeddings.weight: copying a param with shape torch.Size([4101, 768]) from checkpoint, the shape in current model is torch.Size([30522, 768]).
It seems the model and the config are incompatible, which doesn't make sense given that both files originated from the same model. I am using transformers 2.5.0, if that makes a difference. Any help is greatly appreciated.
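One thing worth checking (a hedged diagnostic, not a confirmed fix for this setup) is whether the config saved in the output directory actually carries the custom vocabulary size the checkpoint was trained with; if config.vocab_size still reports the default 30522, from_pretrained builds an embedding matrix of the wrong shape. A minimal sketch, assuming the tokenizer saved in the same directory has the 4101-token vocabulary:
config_trained = BertConfig.from_pretrained(dir_to_trained_model)
tokenizer = BertTokenizer.from_pretrained(dir_to_trained_model)

# The checkpoint's embedding matrix is [4101, 768]; the config must agree with it
print(config_trained.vocab_size, len(tokenizer))

# Hypothetical override, only if the saved config still reports 30522:
config_trained.vocab_size = len(tokenizer)

model_trained = BertForSequenceClassification.from_pretrained(dir_to_trained_model, config=config_trained)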
Here is my training data:
import os
import librosa

train_audio_path = 'C:/Users/user/OneDrive/Bureau/input1/train/audio1/'

all_wave = []
all_label = []
# labels is the list of class sub-folder names under train_audio_path (defined elsewhere)
for label in labels:
    print(label)
    waves = [f for f in os.listdir(train_audio_path + '/' + label) if f.endswith('.wav')]
    for wav in waves:
        samples, sample_rate = librosa.load(train_audio_path + '/' + label + '/' + wav, sr=16000)
        if (len(samples) >= 16000 or len(samples) <= 16000):
            all_wave.append(samples)
            all_label.append(label)
Here is my model:
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
metric = 'val_accuracy'
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10, min_delta=0.0001)
mc = ModelCheckpoint('best_model.hdf5', monitor=metric, verbose=1, save_best_only=True, mode='max')
# Display model architecture summary
history=model.fit(x_tr, y_tr ,epochs=100, callbacks=[es,mc], batch_size=32, validation_data=(x_val,y_val))
Here is the error I'm getting:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_8428/2388281761.py in <module>
1 # Display model architecture summary
----> 2 history=model.fit(x_tr, y_tr ,epochs=100, callbacks=[es,mc], batch_size=32, validation_data=(x_val,y_val))
~\anaconda3\lib\site-packages\keras\utils\traceback_utils.py in error_handler(*args, **kwargs)
65 except Exception as e: # pylint: disable=broad-except
66 filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67 raise e.with_traceback(filtered_tb) from None
68 finally:
69 del filtered_tb
~\anaconda3\lib\site-packages\tensorflow\python\framework\constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
104 dtype = dtypes.as_dtype(dtype).as_datatype_enum
105 ctx.ensure_initialized()
--> 106 return ops.EagerTensor(value, ctx.device_name, dtype)
107
108
ValueError: Failed to convert a NumPy array to a Tensor (Unsupported object type numpy.ndarray).
I've tried googling the error, but I didn't find a suitable solution to the problem.
++++ Any help would be greatly appreciated ++++
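This error usually appears when the list passed to fit contains arrays of unequal length, so NumPy produces an object array that TensorFlow cannot convert. A minimal sketch of one way to check for and fix that here (variable names follow the question; padding every clip to exactly 16000 samples is an assumption about the intended input shape):
import numpy as np

# If this prints more than one length, the clips are ragged and np.array(all_wave) becomes an object array
print(set(len(w) for w in all_wave))

# Pad or truncate every clip to exactly 16000 samples so the batch is rectangular
fixed = [np.pad(w, (0, max(0, 16000 - len(w))))[:16000] for w in all_wave]
x_all = np.stack(fixed)            # shape (num_clips, 16000)
x_all = x_all[..., np.newaxis]     # add a channel axis if the model expects one (assumption)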
I was trying to run
text_field = Field(tokenize='spacy', lower=True, include_lengths=True, batch_first=True)
However, the error shows:
OSError Traceback (most recent call last)
<ipython-input-72-1ac550316aec> in <module>
2
3 label_field = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
----> 4 text_field = Field(tokenize='spacy', lower=True, include_lengths=True, batch_first=True)
5 fields = [('label', label_field), ('title', text_field), ('text', text_field), ('titletext', text_field)]
6
~/opt/anaconda3/lib/python3.8/site-packages/torchtext/data/field.py in __init__(self, sequential, use_vocab, init_token, eos_token, fix_length, dtype, preprocessing, postprocessing, lower, tokenize, tokenizer_language, include_lengths, batch_first, pad_token, unk_token, pad_first, truncate_first, stop_words, is_target)
161 # in case the tokenizer isn't picklable (e.g. spacy)
162 self.tokenizer_args = (tokenize, tokenizer_language)
--> 163 self.tokenize = get_tokenizer(tokenize, tokenizer_language)
164 self.include_lengths = include_lengths
165 self.batch_first = batch_first
~/opt/anaconda3/lib/python3.8/site-packages/torchtext/data/utils.py in get_tokenizer(tokenizer, language)
112 try:
113 import spacy
--> 114 spacy = spacy.load(language)
115 return partial(_spacy_tokenize, spacy=spacy)
116 except ImportError:
~/opt/anaconda3/lib/python3.8/site-packages/spacy/__init__.py in load(name, vocab, disable, exclude, config)
49 RETURNS (Language): The loaded nlp object.
50 """
---> 51 return util.load_model(
52 name, vocab=vocab, disable=disable, exclude=exclude, config=config
53 )
~/opt/anaconda3/lib/python3.8/site-packages/spacy/util.py in load_model(name, vocab, disable, exclude, config)
424 return load_model_from_path(name, **kwargs) # type: ignore[arg-type]
425 if name in OLD_MODEL_SHORTCUTS:
--> 426 raise IOError(Errors.E941.format(name=name, full=OLD_MODEL_SHORTCUTS[name])) # type: ignore[index]
427 raise IOError(Errors.E050.format(name=name))
428
OSError: [E941] Can't find model 'en'. It looks like you're trying to load a model from a shortcut, which is obsolete as of spaCy v3.0. To load the model, use its full name instead:
nlp = spacy.load("en_core_web_sm")
For more details on the available models, see the models directory: https://spacy.io/models. If you want to create a blank model, use spacy.blank: nlp = spacy.blank("en")
I even tried running python -m spacy download en, but it still doesn't run in my Jupyter notebook. Does anyone know how to fix this problem?
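A sketch of the usual fix under spaCy v3 (following the code path shown in the traceback, where torchtext passes tokenizer_language straight to spacy.load): download the model by its full name and tell the Field which spaCy model to use. The model name en_core_web_sm is taken from the error message's own suggestion:
# In a terminal (or a notebook cell prefixed with !):
#   python -m spacy download en_core_web_sm

import torch
from torchtext.data import Field

label_field = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
text_field = Field(tokenize='spacy', tokenizer_language='en_core_web_sm',
                   lower=True, include_lengths=True, batch_first=True)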
I used a pretrained ImageNet VGG16 model in Keras and added my own Dense layers on top.
I'm trying to save and load the weights of the model I have trained.
The code I'm using to save the model is:
import time

start = time.time()
history = model.fit_generator(generator=train_batches,
                              epochs=epochs,
                              steps_per_epoch=steps_train,
                              # callbacks=callbacks_list,
                              validation_data=valid_batches,
                              validation_steps=steps_valid,
                              shuffle=True)
end = time.time()

model.save("modelvgg.npy")
Let me know if this is an incorrect way to do it, or if there is a better way.
But when I try to load the weights using this:
def __init__(self, vgg16_npy_path=None, trainable=True):
    if vgg16_npy_path is None:
        path = inspect.getfile(Vgg16)
        path = os.path.abspath(os.path.join(path, os.pardir))
        path = os.path.join(path, "modelvgg.npy")
        vgg16_npy_path = path
        print(path)

    self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
    self.trainable = trainable
    print("npy file loaded")
I get this error:
UnpicklingError Traceback (most recent call last)
~/.local/lib/python3.6/site-packages/numpy/lib/npyio.py in load(file, mmap_mode, allow_pickle, fix_imports, encoding)
446 try:
--> 447 return pickle.load(fid, **pickle_kwargs)
448 except Exception:
UnpicklingError: invalid load key, 'H'.
During handling of the above exception, another exception occurred:
OSError Traceback (most recent call last)
<ipython-input-5-d099900e8f3b> in <module>
46 labels = tf.placeholder(tf.float32, [batch_size, 2])
47
---> 48 vgg = vgg16.Vgg16()
49 model.build(images)
50 cost = (-1) * tf.reduce_sum(tf.multiply(labels, tf.log(model.prob)), axis=1)
~/Bureau/Grad-CAM_final/model/vgg16.py in __init__(self, vgg16_npy_path, trainable)
18 print(path)
19
---> 20 self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
21 self.trainable = trainable
22 print("npy file loaded")
~/.local/lib/python3.6/site-packages/numpy/lib/npyio.py in load(file, mmap_mode, allow_pickle, fix_imports, encoding)
448 except Exception:
449 raise IOError(
--> 450 "Failed to interpret file %s as a pickle" % repr(file))
451 finally:
452 if own_fid:
OSError: Failed to interpret file '/home/omri/Bureau/Grad-CAM_final/model/modelvgg.npy' as a pickle
Any suggestions on what I may be doing wrong? Thank you in advance.
This is not the correct way to load a Keras model saved as HDF5 (since you saved it with model.save, the file is in HDF5 format even though it has a .npy extension):
self.data_dict = np.load(vgg16_npy_path, encoding='latin1').item()
The correct way is to use keras.models.load_model:
from keras.models import load_model
model = load_model('your_file.hdf5')
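Applied to the file saved above, a sketch (load_model opens the file as HDF5 regardless of the .npy extension, though renaming it to .h5 avoids confusion):
from keras.models import load_model

# The file written by model.save("modelvgg.npy") is an HDF5 file despite its extension
model = load_model("modelvgg.npy")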
This is my first post on Stack Overflow because I can't find any clue to solve the message "'PipelinedRDD' object has no attribute '_jdf'" that appears when I call trainer.fit on my training dataset to create a neural network model under Spark in Python.
Here is my code:
from pyspark import SparkContext
from pyspark.ml.classification import MultilayerPerceptronClassifier, MultilayerPerceptronClassificationModel
from pyspark.mllib.feature import StandardScaler
from pyspark.mllib.regression import LabeledPoint
from pyspark.sql import SQLContext
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
### Import data in Spark ###
RDD_RAWfileWH= sc.textFile("c:/Anaconda2/Cognet/Data_For_Cognet_ready.csv")
header = RDD_RAWfileWH.first()
# Delete header from RAWData
RDD_RAWfile1 = RDD_RAWfileWH.filter(lambda x: x != header)
# Split each line of the RDD
RDD_RAWfile = RDD_RAWfile1.map(lambda line:[float(x) for x in line.split(',')])
FinalData = RDD_RAWfile.map(lambda row: LabeledPoint(row[0],[row[1:]]))
(trainingData, testData) = FinalData.randomSplit([0.7, 0.3])
layers = [15, 2, 3]
# create the trainer and set its parameters
trainer = MultilayerPerceptronClassifier(maxIter=100, layers=layers, blockSize=128,seed=1234)
# train the model
model = trainer.fit(trainingData)
And here is the traceback:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-28-123dce2b085a> in <module>()
46 trainer = MultilayerPerceptronClassifier(maxIter=100, layers=layers, blockSize=128,seed=1234)
47 # train the model
---> 48 model = trainer.fit(trainingData)
49 # compute accuracy on the test set
50 # result = model.transform(test)
C:\Users\piod7321\spark-1.6.1-bin-hadoop2.6\python\pyspark\ml\pipeline.pyc in fit(self, dataset, params)
67 return self.copy(params)._fit(dataset)
68 else:
---> 69 return self._fit(dataset)
70 else:
71 raise ValueError("Params must be either a param map or a list/tuple of param maps, "
C:\Users\piod7321\spark-1.6.1-bin-hadoop2.6\python\pyspark\ml\wrapper.pyc in _fit(self, dataset)
131
132 def _fit(self, dataset):
--> 133 java_model = self._fit_java(dataset)
134 return self._create_model(java_model)
135
C:\Users\piod7321\spark-1.6.1-bin-hadoop2.6\python\pyspark\ml\wrapper.pyc in _fit_java(self, dataset)
128 """
129 self._transfer_params_to_java()
--> 130 return self._java_obj.fit(dataset._jdf)
131
132 def _fit(self, dataset):
AttributeError: 'PipelinedRDD' object has no attribute '_jdf'
I'm not an expert on Spark, so if anyone knows what this _jdf attribute is and how to solve this issue, it would be very helpful to me.
Thanks a lot.
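For what it's worth, the _jdf attribute the estimator reaches for only exists on DataFrames: pyspark.ml estimators such as MultilayerPerceptronClassifier expect a DataFrame with label and features columns, while randomSplit here produces RDDs of mllib LabeledPoints. A minimal sketch of the conversion, assuming Spark 1.6 as in the traceback (it has not been run against this exact dataset):
from pyspark.sql import SQLContext
from pyspark.mllib.linalg import Vectors  # on Spark 2.x use pyspark.ml.linalg instead

sqlContext = SQLContext(sc)  # creating the SQLContext enables rdd.toDF()

# Build a DataFrame with the "label" and "features" columns the ml API expects,
# instead of an RDD of LabeledPoint
FinalData = RDD_RAWfile.map(lambda row: (float(row[0]), Vectors.dense(row[1:]))).toDF(["label", "features"])

(trainingData, testData) = FinalData.randomSplit([0.7, 0.3])

trainer = MultilayerPerceptronClassifier(maxIter=100, layers=layers, blockSize=128, seed=1234)
model = trainer.fit(trainingData)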