str object has no attribute 'decode' - python

BACKBONE = 'vgg16'
preprocess_input = sm.get_preprocessing(BACKBONE)
# preprocess input
x_train_new = preprocess_input(x_train)
x_test_new = preprocess_input(x_test)
# define model
model_resnet_backbone = sm.Unet(BACKBONE,encoder_weights='imagenet', classes=n_classes, activation='softmax')
metrics=['accuracy', jacard]
# compile the keras model with the defined optimizer, loss and metrics
#model_resnet_backbone.compile(optimizer='adam', loss=focal_loss, metrics=metrics)
model_resnet_backbone.compile(optimizer='adam', loss='categorical_crossentropy', metrics=metrics)
print(model_resnet_backbone.summary())
history_tf = model_resnet_backbone.fit(x_train_new,
                                       y_train,
                                       batch_size=16,
                                       epochs=1,
                                       verbose=1,
                                       validation_data=(x_test_new, y_test))
I want to train using resnet34, but I am getting the error below. What should I do?
AttributeError                            Traceback (most recent call last)
<ipython-input> in <module>()
      8 
      9 # define model
---> 10 model_resnet_backbone = sm.Unet(BACKBONE, encoder_weights='imagenet', classes=n_classes, activation='softmax')

9 frames
/tensorflow-1.15.2/python3.7/keras/engine/saving.py in load_weights_from_hdf5_group(f, layers, reshape)
   1181     """
   1182     if 'keras_version' in f.attrs:
-> 1183         original_keras_version = f.attrs['keras_version'].decode('utf8')
   1184     else:
   1185         original_keras_version = '1'

AttributeError: 'str' object has no attribute 'decode'
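The traceback points into Keras' HDF5 weight loading: with h5py 3.x, f.attrs['keras_version'] is already a str, so calling .decode() on it raises this error. A minimal sketch of the commonly used workaround, assuming a notebook environment like the one in the traceback, is to pin h5py below 3.0 and restart the runtime before rebuilding the model:

!pip install 'h5py<3.0.0'   # e.g. h5py==2.10.0; restart the runtime afterwards

# Rebuilding the model should then load the pretrained weights without the
# str/bytes mismatch:
model_resnet_backbone = sm.Unet(BACKBONE, encoder_weights='imagenet',
                                classes=n_classes, activation='softmax')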

Related

Cannot add CRF layer on top of BERT in keras for NER

I am facing an unknown issue while training my BERT-CRF model for NER. I am using keras.contrib for the CRF model.
Here are the imported libraries.
!pip install transformers
!pip install git+https://www.github.com/keras-team/keras-contrib.git
import pandas as pd
import numpy as np
from transformers import TFBertModel, BertTokenizer, BertConfig
import tensorflow as tf
from tensorflow import keras
from keras_contrib.layers import CRF
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
Code for the model creation.
input_ids = keras.layers.Input(shape=(MAX_LEN,), dtype=tf.int32)
token_type_ids = keras.layers.Input(shape=(MAX_LEN,), dtype=tf.int32)
attention_mask = keras.layers.Input(shape=(MAX_LEN,), dtype=tf.int32)

bert_output = bert(
    [input_ids,
     attention_mask,
     token_type_ids]
)[0]

bert_output = keras.layers.Dropout(0.3)(bert_output)
dense_layer_output = keras.layers.Dense(num_classes + 1, activation='softmax', name='output')(bert_output)

crf = CRF(num_classes)
outputs = crf(dense_layer_output)

model = keras.Model(
    inputs=[input_ids, token_type_ids, attention_mask],
    outputs=[outputs],
)

model.compile(
    loss=crf.loss_function,
    metrics=[crf.accuracy],
    optimizer=keras.optimizers.Adam(5e-5)
)

model.fit(
    x_train,
    y_train,
    epochs=1,
    verbose=1,
    batch_size=32,
    validation_data=(x_test, y_test)
)
While trying to train the model I am getting this error. I cannot understand where it is originating from or why.
WARNING:tensorflow:The parameters `output_attentions`, `output_hidden_states` and `use_cache` cannot be updated when calling a model.They have to be set to True/False in the config object (i.e.: `config=XConfig.from_pretrained('name', output_attentions=True)`).
WARNING:tensorflow:The parameter `return_dict` cannot be set in graph mode and will always be set to `True`.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-18-f369b38eb91d> in <module>()
5 verbose=1,
6 batch_size=32,
----> 7 validation_data=(x_test, y_test)
8 )
9 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
975 except Exception as e: # pylint:disable=broad-except
976 if hasattr(e, "ag_error_metadata"):
--> 977 raise e.ag_error_metadata.to_exception(e)
978 else:
979 raise
AttributeError: in user code:
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py:805 train_function *
return step_function(self, iterator)
/usr/local/lib/python3.7/dist-packages/keras_contrib/losses/crf_losses.py:54 crf_loss *
crf, idx = y_pred._keras_history[:2]
AttributeError: 'Tensor' object has no attribute '_keras_history'
I have read on the internet that keras-contrib is deprecated, but I don't know any other way to use a CRF layer on top of BERT. If there is a better way of doing it in Keras, then please suggest it.
I don't know whether this question makes sense or not, but any help would be appreciated.
Thanks in advance!
The easiest way is to use the CRF layer from TensorFlow Addons, then use its outputs to compute the loss.
import tensorflow_addons as tfa
crf = tfa.layers.CRF(len(num_labels)+1)
Further, you can wrap it in your own Model subclass that implements the custom training logic.
from tensorflow_addons.text.crf import crf_log_likelihood

def unpack_data(data):
    if len(data) == 2:
        return data[0], data[1], None
    elif len(data) == 3:
        return data
    else:
        raise TypeError("Expected data to be a tuple of size 2 or 3.")

class ModelWithCRFLoss(tf.keras.Model):
    """Wrapper around the base model for custom training logic."""

    def __init__(self, base_model):
        super().__init__()
        self.base_model = base_model

    def call(self, inputs):
        return self.base_model(inputs)

    def compute_loss(self, x, y, sample_weight, training=False):
        y_pred = self(x, training=training)
        _, potentials, sequence_length, chain_kernel = y_pred
        # we now add the CRF loss:
        crf_loss = -crf_log_likelihood(potentials, y, sequence_length, chain_kernel)[0]
        if sample_weight is not None:
            crf_loss = crf_loss * sample_weight
        return tf.reduce_mean(crf_loss), sum(self.losses)

    def train_step(self, data):
        x, y, sample_weight = unpack_data(data)
        with tf.GradientTape() as tape:
            crf_loss, internal_losses = self.compute_loss(
                x, y, sample_weight, training=True
            )
            total_loss = crf_loss + internal_losses
        gradients = tape.gradient(total_loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
        return {"crf_loss": crf_loss, "internal_losses": internal_losses}

    def test_step(self, data):
        x, y, sample_weight = unpack_data(data)
        crf_loss, internal_losses = self.compute_loss(x, y, sample_weight)
        return {"crf_loss_val": crf_loss, "internal_losses_val": internal_losses}
You can then wire everything together along these lines:
# The CRF layer returns (decoded_sequence, potentials, sequence_length, chain_kernel);
# pass the whole tuple through as the model outputs so compute_loss can unpack it.
crf_layer_outputs = crf(dense_layer_output, mask=attention_mask)

base_model = tf.keras.Model(
    inputs=[input_ids, attention_mask],
    outputs=crf_layer_outputs,
)

model = ModelWithCRFLoss(base_model)

model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=5e-3, epsilon=1e-08),
    metrics=tf.metrics.SparseCategoricalAccuracy(),
)
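As a usage sketch (train_input_ids, train_attention_mask and train_labels below are hypothetical placeholders for your tokenized inputs and integer tag ids), the wrapper can then be trained like any other Keras model:

# Hypothetical placeholders: train_input_ids, train_attention_mask and
# train_labels are your tokenized inputs and integer tag ids, padded to MAX_LEN.
model.fit(
    x=[train_input_ids, train_attention_mask],
    y=train_labels,
    batch_size=32,
    epochs=1,
)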

Using CalibratedClassifierCV returns error: TypeError: predict_proba() missing 1 required positional argument: 'x'

I am trying to train a model using Keras' Sequential model and then calibrate it with scikit-learn's CalibratedClassifierCV, using the KerasClassifier wrapper. Here is the code I use:
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                              patience=5, min_lr=0.000001)
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10)

def create_model():
    model_1 = Sequential()
    n_cols = X_train_1.shape[1]
    model_1.add(Dense(10, activation="selu", kernel_initializer="lecun_normal", input_shape=(n_cols,)))
    model_1.add(Dense(10, activation="selu", kernel_initializer="lecun_normal"))
    model_1.add(Dense(10, activation="selu", kernel_initializer="lecun_normal"))
    model_1.add(Dense(10, activation="selu", kernel_initializer="lecun_normal"))
    model_1.add(Dense(1, activation='sigmoid'))
    opt = keras.optimizers.Nadam(lr=0.0001)
    loss = tf.keras.losses.BinaryCrossentropy(reduction='sum')
    model_1.compile(optimizer=opt, loss=loss)
    return model_1

X_train_1, X_test_1, y_train_1, y_test_1, w_train_1, w_test_1 = train_test_split(scaled_X_1, y_1, w_1, test_size=0.35, random_state=42)

model_1 = KerasClassifier(build_fn=create_model, epochs=5, batch_size=5000, verbose=1)
history_1 = model_1.fit(X_train_1, y_train_1, callbacks=[reduce_lr, es], epochs=5, validation_split=0.35, batch_size=5000, sample_weight=w_train_1, verbose=1)

plt.plot(history_1.history['loss'])
plt.plot(history_1.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()

calibrator_1 = CalibratedClassifierCV(model_1, cv='prefit')
calibrator_1.fit(X_test_1, y_test_1, sample_weight=w_test_1)
As you can see, I clearly assign the CalibratedClassifierCV instance to calibrator_1 and then call fit() on it. Despite this I get this error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-21-1cab7be4f6d6> in <module>
46 calibrator_1 = CalibratedClassifierCV(model_1, cv='prefit')
47
---> 48 calibrator_1.fit(X_test_1, y_test_1, sample_weight = w_test_1)
~/.local/lib/python3.6/site-packages/sklearn/calibration.py in fit(self, X, y, sample_weight)
263 pred_method = _get_prediction_method(base_estimator)
264 n_classes = len(self.classes_)
--> 265 predictions = _compute_predictions(pred_method, X, n_classes)
266
267 calibrated_classifier = _fit_calibrator(
~/.local/lib/python3.6/site-packages/sklearn/calibration.py in _compute_predictions(pred_method, X, n_classes)
499 (X.shape[0], 1).
500 """
--> 501 predictions = pred_method(X=X)
502 if hasattr(pred_method, '__name__'):
503 method_name = pred_method.__name__
TypeError: predict_proba() missing 1 required positional argument: 'x'
Does anyone spot any errors here?
This seems like an issue with the newer version of scikit-learn (0.24). I installed the older 0.23 version and it runs fine with the exact same code.
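The traceback shows why: scikit-learn 0.24's calibration code calls pred_method(X=X) with an uppercase keyword, while the old keras.wrappers KerasClassifier defines predict_proba with a lowercase x parameter, so the keyword does not match. A minimal sketch of the downgrade workaround, assuming a pip-managed environment (the exact version pin is illustrative):

!pip install scikit-learn==0.23.2   # then restart the kernel and re-run the same code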

Get 'function' object has no attribute 'loss' when doing GridsearchCV

I want to try GridSearchCV on my model. My imports are:
from keras import models
from keras import layers
from keras import regularizers
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
My code is:
def build_model(X_train=X_train, neurons=4, optimizer='Adam'):
    model = models.Sequential()
    model.add(layers.Dense(X_train.shape[1], kernel_regularizer=regularizers.l2(0.001),
                           activation='relu', input_shape=(X_train.shape[1],)))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(neurons, kernel_regularizer=regularizers.l2(0.001), activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
    return build_model

model = KerasClassifier(build_fn=build_model, verbose=1)

# define the grid search parameters
batch_size = [16, 32, 64]
epochs = [50, 100]
param_grid = dict(batch_size=batch_size, epochs=epochs)

# search the grid
grid = GridSearchCV(estimator=model,
                    param_grid=param_grid,
                    cv=10,
                    verbose=2)

grid_result = grid.fit(X_train, y_train)
But I get an error as below:
AttributeError Traceback (most recent call last)
<ipython-input-93-2eb813d3aab7> in <module>
12 verbose=2) # include n_jobs=-1 if you are using CPU
13
---> 14 grid_result = grid.fit(X_train, y_train)
15
16 print(model)
/anaconda3/envs/lance/lib/python3.7/site-packages/keras/wrappers/scikit_learn.py in fit(self, x, y, sample_weight, **kwargs)
208 if sample_weight is not None:
209 kwargs['sample_weight'] = sample_weight
--> 210 return super(KerasClassifier, self).fit(x, y, **kwargs)
211
212 def predict(self, x, **kwargs):
/anaconda3/envs/lance/lib/python3.7/site-packages/keras/wrappers/scikit_learn.py in fit(self, x, y, **kwargs)
141 self.model = self.build_fn(**self.filter_sk_params(self.build_fn))
142
--> 143 loss_name = self.model.loss
144 if hasattr(loss_name, '__name__'):
145 loss_name = loss_name.__name__
AttributeError: 'function' object has no attribute 'loss'
I can't understand what the bug is, and I'm sure the data processing is correct because it works fine without grid search. Did I do something wrong?
At the end of the build_model function, you write return build_model. This returns a reference to the function itself, not to the model object you've been building so far. I'm pretty sure you want return model instead.
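For reference, a minimal sketch of the corrected function (identical to the code above except for the final line):

def build_model(X_train=X_train, neurons=4, optimizer='Adam'):
    model = models.Sequential()
    model.add(layers.Dense(X_train.shape[1], kernel_regularizer=regularizers.l2(0.001),
                           activation='relu', input_shape=(X_train.shape[1],)))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(neurons, kernel_regularizer=regularizers.l2(0.001), activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
    return model  # return the compiled Keras model, not the build function itself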

ValueError: Unknown loss function:focal_loss_fixed when loading model with my custom loss function

I designed my own loss function. However, when trying to revert to the best model encountered during training with
model = load_model("lc_model.h5")
I got the following error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-105-9d09ef163b0a> in <module>
23
24 # revert to the best model encountered during training
---> 25 model = load_model("lc_model.h5")
C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\saving.py in load_model(filepath, custom_objects, compile)
417 f = h5dict(filepath, 'r')
418 try:
--> 419 model = _deserialize_model(f, custom_objects, compile)
420 finally:
421 if opened_new_file:
C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\saving.py in _deserialize_model(f, custom_objects, compile)
310 metrics=metrics,
311 loss_weights=loss_weights,
--> 312 sample_weight_mode=sample_weight_mode)
313
314 # Set optimizer weights.
C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py in compile(self, optimizer, loss, metrics, loss_weights, sample_weight_mode, weighted_metrics, target_tensors, **kwargs)
137 loss_functions = [losses.get(l) for l in loss]
138 else:
--> 139 loss_function = losses.get(loss)
140 loss_functions = [loss_function for _ in range(len(self.outputs))]
141 self.loss_functions = loss_functions
C:\ProgramData\Anaconda3\lib\site-packages\keras\losses.py in get(identifier)
131 if isinstance(identifier, six.string_types):
132 identifier = str(identifier)
--> 133 return deserialize(identifier)
134 if isinstance(identifier, dict):
135 return deserialize(identifier)
C:\ProgramData\Anaconda3\lib\site-packages\keras\losses.py in deserialize(name, custom_objects)
112 module_objects=globals(),
113 custom_objects=custom_objects,
--> 114 printable_module_name='loss function')
115
116
C:\ProgramData\Anaconda3\lib\site-packages\keras\utils\generic_utils.py in deserialize_keras_object(identifier, module_objects, custom_objects, printable_module_name)
163 if fn is None:
164 raise ValueError('Unknown ' + printable_module_name +
--> 165 ':' + function_name)
166 return fn
167 else:
ValueError: Unknown loss function:focal_loss_fixed
Here is the neural network:
from keras.callbacks import ModelCheckpoint
from keras.models import load_model

model = create_model(x_train.shape[1], y_train.shape[1])

epochs = 35
batch_sz = 64
print("Beginning model training with batch size {} and {} epochs".format(batch_sz, epochs))

checkpoint = ModelCheckpoint("lc_model.h5", monitor='val_acc', verbose=0, save_best_only=True, mode='auto', period=1)

from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.constraints import maxnorm

def create_model(input_dim, output_dim):
    print(output_dim)
    # create model
    model = Sequential()
    # input layer
    model.add(Dense(100, input_dim=input_dim, activation='relu', kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    # hidden layer
    model.add(Dense(60, activation='relu', kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    # output layer
    model.add(Dense(output_dim, activation='softmax'))
    # Compile model
    # model.compile(loss='categorical_crossentropy', loss_weights=None, optimizer='adam', metrics=['accuracy'])
    model.compile(loss=focal_loss(alpha=1), loss_weights=None, optimizer='adam', metrics=['accuracy'])
    return model

# train the model
history = model.fit(x_train.as_matrix(),
                    y_train.as_matrix(),
                    validation_split=0.2,
                    epochs=epochs,
                    batch_size=batch_sz,  # Can I tweak the batch here to get evenly distributed data?
                    verbose=2,
                    class_weight=weights,  # class_weight tells the model to "pay more attention" to samples from an under-represented fraud class.
                    callbacks=[checkpoint])

# revert to the best model encountered during training
model = load_model("lc_model.h5")
And here is my loss function:
import tensorflow as tf

def focal_loss(gamma=2., alpha=4.):
    gamma = float(gamma)
    alpha = float(alpha)

    def focal_loss_fixed(y_true, y_pred):
        """Focal loss for multi-classification
        FL(p_t) = -alpha * (1 - p_t)^{gamma} * ln(p_t)
        Notice: y_pred is the probability after softmax.
        The gradient is d(FL)/d(p_t), not d(FL)/d(x) as described in the paper:
        d(FL)/d(p_t) * [p_t * (1 - p_t)] = d(FL)/d(x)
        Focal Loss for Dense Object Detection
        https://arxiv.org/abs/1708.02002

        Arguments:
            y_true {tensor} -- ground truth labels, shape of [batch_size, num_cls]
            y_pred {tensor} -- model's output, shape of [batch_size, num_cls]

        Keyword Arguments:
            gamma {float} -- (default: {2.0})
            alpha {float} -- (default: {4.0})

        Returns:
            [tensor] -- loss.
        """
        epsilon = 1.e-9
        y_true = tf.convert_to_tensor(y_true, tf.float32)
        y_pred = tf.convert_to_tensor(y_pred, tf.float32)
        model_out = tf.add(y_pred, epsilon)
        ce = tf.multiply(y_true, -tf.log(model_out))
        weight = tf.multiply(y_true, tf.pow(tf.subtract(1., model_out), gamma))
        fl = tf.multiply(alpha, tf.multiply(weight, ce))
        reduced_fl = tf.reduce_max(fl, axis=1)
        return tf.reduce_mean(reduced_fl)

    return focal_loss_fixed

# model.compile(loss=focal_loss(alpha=1), optimizer='nadam', metrics=['accuracy'])
# model.fit(X_train, y_train, epochs=3, batch_size=1000)
You have to pass focal_loss_fixed through custom_objects when loading the model, as shown below:
model = load_model("lc_model.h5", custom_objects={'focal_loss_fixed': focal_loss()})
However, if you just wish to perform inference with your model, without further optimization or training, you can simply ignore the loss function like this:
model = load_model("lc_model.h5", compile=False)
The answer by #Prasad is great, but I would like to add a little explanation and a little correction:
When listing your custom loss function in the custom_objects dictionary, you should not call it, as doing so can produce missing-parameter errors.
# Instead of this
model = load_model("lc_model.h5", custom_objects={'focal_loss_fixed': focal_loss()})
# try this instead, without calling your loss function
model = load_model("lc_model.h5", custom_objects={'focal_loss_fixed': focal_loss})
Also, note that you have to use the name of your custom loss function as the key and your function object as the value in custom_objects. I know this is very basic, but I hope it will be helpful to someone.

Transfer learning on keras model always gives same predictions

I'm trying to train an image classifier using the keras.applications module. When I run predictions on the validation set, all images are predicted as the same class. It is not always the same class; it varies during training. I'm using MobileNetV2 with weights from ImageNet, but I also tried other models with the same result.
I've tried using a model from TensorFlow Hub as described in this tutorial: https://www.tensorflow.org/beta/tutorials/images/hub_with_keras and it worked fine, so it is not a dataset issue.
My code snippet:
image_size = 224
batch_size = 32

train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    preprocessing_function=tf.keras.applications.mobilenet_v2.preprocess_input)
validation_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    preprocessing_function=tf.keras.applications.mobilenet_v2.preprocess_input)

train_generator = train_datagen.flow_from_directory(training_data_dir,
                                                    target_size=(image_size, image_size),
                                                    batch_size=batch_size)
validation_generator = train_datagen.flow_from_directory(validation_data_dir,
                                                         target_size=(image_size, image_size),
                                                         batch_size=batch_size)

IMG_SHAPE = (image_size, image_size, 3)
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
                                               include_top=False,
                                               weights="imagenet")
base_model.trainable = False

model = tf.keras.Sequential([
    base_model,
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(train_generator.num_classes, activation='softmax')
])

model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=0.001),
              loss="categorical_crossentropy",
              metrics=["accuracy"])
model.summary()

batch_stats = CollectBatchStats()
epoch_stats = CollectEpochStats(model, validation_generator)
checkpoint = tf.keras.callbacks.ModelCheckpoint(...)

epochs = 10
steps_per_epoch = train_generator.n // train_generator.batch_size
validation_steps = validation_generator.n // validation_generator.batch_size

history = model.fit_generator(train_generator,
                              epochs=epochs,
                              steps_per_epoch=steps_per_epoch,
                              callbacks=[batch_stats, epoch_stats, checkpoint],
                              workers=4,
                              validation_data=validation_generator,
                              validation_steps=validation_steps)
Issue resolved: in my code I had the following lines after model compilation:
sess = keras_backend.get_session()
init = tf.compat.v1.global_variables_initializer()
sess.run(init)
After removing them everything works fine. Running the global variable initializer after the model was built apparently re-initialized the pretrained ImageNet weights, which is why the classifier kept collapsing to a single class.
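In other words, a trimmed sketch of the corrected tail of the snippet above keeps the compile/fit sequence and simply drops the manual session initialization:

model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=0.001),
              loss="categorical_crossentropy",
              metrics=["accuracy"])

# no get_session() / global_variables_initializer() here

history = model.fit_generator(train_generator,
                              epochs=epochs,
                              steps_per_epoch=steps_per_epoch,
                              validation_data=validation_generator,
                              validation_steps=validation_steps)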
