I am trying to create two custom metric functions, f1_metric and auc_metric, in Keras. The f1_metric works, but the auc_metric does not, and I receive different errors. Here is my code:
def f1_metric(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    f1_val = 2 * (precision * recall) / (precision + recall + K.epsilon())
    return f1_val
def auc_metric(y_true, yhat):
    yhat = yhat[:, 1]
    # calculate roc curves
    fpr, tpr, thresholds = roc_curve(y_true, yhat)
    auc = auc(tpr, fpr)
    return auc
Here is my compile and fit code:
opt = SGD(lr=0.01, momentum=0.9)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
ca = SnapshotEnsemble(n_epochs, n_cycles, 0.01)
# fit model
history = model.fit(trainX, trainy, validation_data=(testX, testy), epochs=n_epochs,
                    verbose='auto', callbacks=[ca], batch_size=32)
Any idea how to solve this? I want to define a function similar to the one I wrote for f1_metric.
From comments
Approximates the AUC (Area under the curve) of the ROC or PR curves.
tf.keras.metrics.AUC(
    num_thresholds=200, curve='ROC',
    summation_method='interpolation', name=None, dtype=None,
    thresholds=None, multi_label=False, num_labels=None, label_weights=None,
    from_logits=False
)
For more details you can refer here
(paraphrased from Captain Trojan)
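For example, the built-in metric can be passed straight into compile; a minimal sketch that assumes `import tensorflow as tf` and reuses the optimizer from the question:

opt = SGD(lr=0.01, momentum=0.9)
model.compile(loss='binary_crossentropy',
              optimizer=opt,
              # built-in streaming AUC; shows up as 'auc' in the training log
              metrics=['accuracy', tf.keras.metrics.AUC(name='auc')])

This avoids calling sklearn's roc_curve on symbolic tensors, which is what fails inside a custom metric function.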
Related
I wrote the custom metric below for y_true and y_pred of shape (img_h, img_w, num_classes) (the predictions and true masks are one-hot encoded; this shape excludes the batch dimension) for multiclass semantic segmentation. The metric is meant to return a mean F1 score (the average of the F1 scores over all classes in the predicted mask) from the y_true and y_pred values. But when fitting the model with TensorFlow's model.fit() method, the training log always shows nan as the value. I have written custom metrics before and there is usually some value shown. Please help me understand my mistake.
The code:
class MeanF1Score(tf.keras.metrics.Metric):
    def __init__(self):
        super(MeanF1Score, self).__init__(name='mean_f1_score', dtype=tf.float32)
        self.metric = self.add_weight(name='metric', initializer='zeros')
        self.sample = self.add_weight(name='sample', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        y_true = tf.argmax(y_true, axis=-1)
        y_true = tf.reshape(y_true, shape=[-1])
        y_pred = tf.argmax(y_pred, axis=-1)
        y_pred = tf.reshape(y_pred, shape=[-1])
        cm = tf.math.confusion_matrix(y_true, y_pred)
        tp = tf.linalg.diag_part(cm)
        tp_fp = tf.reduce_sum(cm, axis=1)
        tp_fn = tf.reduce_sum(cm, axis=0)
        precision = tp / tp_fp
        recall = tp / tp_fn
        f1 = (2.0 * precision * recall) / (precision + recall)
        f1 = tf.cast(f1, dtype=tf.float32)
        f1 = tf.reduce_sum(f1)
        self.metric.assign_add(f1)
        self.sample.assign_add(1.0)

    def result(self):
        return self.metric / self.sample

    def reset_state(self):
        self.metric = self.add_weight(name='metric', initializer='zeros')
        self.sample = self.add_weight(name='sample', initializer='zeros')
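Two likely sources of the nan (not confirmed by the original poster): the per-class divisions become 0/0 whenever a class is absent from a batch, and reset_state() creates new weights instead of zeroing the existing ones. A hedged sketch of how update_state and reset_state could guard against both, leaving the rest of the class unchanged:

    def update_state(self, y_true, y_pred, sample_weight=None):
        y_true = tf.reshape(tf.argmax(y_true, axis=-1), shape=[-1])
        y_pred = tf.reshape(tf.argmax(y_pred, axis=-1), shape=[-1])
        cm = tf.cast(tf.math.confusion_matrix(y_true, y_pred), tf.float32)
        tp = tf.linalg.diag_part(cm)
        tp_fp = tf.reduce_sum(cm, axis=1)
        tp_fn = tf.reduce_sum(cm, axis=0)
        eps = tf.keras.backend.epsilon()
        precision = tp / (tp_fp + eps)   # epsilon avoids 0/0 for absent classes
        recall = tp / (tp_fn + eps)
        f1 = (2.0 * precision * recall) / (precision + recall + eps)
        self.metric.assign_add(tf.reduce_mean(f1))   # mean over classes, not sum
        self.sample.assign_add(1.0)

    def reset_state(self):
        # reset the existing variables instead of creating new ones
        self.metric.assign(0.0)
        self.sample.assign(0.0)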
I have trained a model in Google Colab and am trying to load it on my local machine to use in a web app. But as soon as I call load_model() it shows this error:
Unable to restore custom object of type _tf_keras_metric. Please make sure that any custom layers are included in the `custom_objects` arg when calling `load_model()` and make sure that all layers implement `get_config` and `from_config`.
I have used CategoricalAccuracy as a metric in the model and I tried to pass it as a custom object but I get the same error.
Code used in Web App
def load_text_model():
    model = keras.models.load_model(
        "." + url_for('static', filename='models/text_model'),
        custom_objects={'CategoricalAccuracy': tf.keras.metrics.CategoricalAccuracy(name="accuracy")})
    return model
Extra information if needed: I am building a multi-class text classification model. I have used the BERT English uncased model from TF Hub as the preprocessor and encoder. Below are the metrics I have used. If more code or context is required, I will provide it.
Code used while training model
def balanced_recall(y_true, y_pred):
    """This function calculates the balanced recall metric
    recall = TP / (TP + FN)
    """
    recall_by_class = 0
    # iterate over each predicted class to get class-specific metric
    for i in range(y_pred.shape[1]):
        y_pred_class = y_pred[:, i]
        y_true_class = y_true[:, i]
        true_positives = K.sum(K.round(K.clip(y_true_class * y_pred_class, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true_class, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        recall_by_class = recall_by_class + recall
    return recall_by_class / y_pred.shape[1]

def balanced_precision(y_true, y_pred):
    """This function calculates the balanced precision metric
    precision = TP / (TP + FP)
    """
    precision_by_class = 0
    # iterate over each predicted class to get class-specific metric
    for i in range(y_pred.shape[1]):
        y_pred_class = y_pred[:, i]
        y_true_class = y_true[:, i]
        true_positives = K.sum(K.round(K.clip(y_true_class * y_pred_class, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred_class, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        precision_by_class = precision_by_class + precision
    # return average balanced metric for each class
    return precision_by_class / y_pred.shape[1]

def balanced_f1_score(y_true, y_pred):
    """This function calculates the F1 score metric"""
    precision = balanced_precision(y_true, y_pred)
    recall = balanced_recall(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
i = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
x = preprocessor(i)
x = encoder(x)
x = tf.keras.layers.Dropout(0.2, name="dropout")(x['default'])
x = tf.keras.layers.Dense(num_classes, activation='softmax', name="output")(x)
model = tf.keras.Model(i, x)
METRICS = [
    tf.keras.metrics.CategoricalAccuracy(name="accuracy"),
    balanced_recall,
    balanced_precision,
    balanced_f1_score
]
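The error here usually refers to the custom metric functions (balanced_recall and friends) rather than to the built-in CategoricalAccuracy, so a common fix is to list every custom metric in custom_objects, or to skip compilation entirely when the model is only used for prediction. A sketch, assuming the three functions are defined or imported in the web app as well:

def load_text_model():
    # register each custom metric under the name it had at compile time
    model = keras.models.load_model(
        "." + url_for('static', filename='models/text_model'),
        custom_objects={
            'balanced_recall': balanced_recall,
            'balanced_precision': balanced_precision,
            'balanced_f1_score': balanced_f1_score,
        })
    return model

# or, since the web app only needs predictions, drop the metrics altogether:
# model = keras.models.load_model("." + url_for('static', filename='models/text_model'), compile=False)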
I would like to add the custom metrics and save the model, based on the following code:
segmentation
I don't know how to write this code. I tried, but the following error occurred:
Unable to restore custom object of type _tf_keras_metric currently. Please make sure that the layer implements `get_config` and `from_config` when saving. In addition, please use the `custom_objects` arg when calling `load_model()`.
Code:
# metric part
def precision(y_true, y_pred):
    y_true = K.ones_like(y_true)
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision

# compile part
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy', precision])

# save part
model.save('mymodel')
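Saving itself usually succeeds; it is loading that needs the custom metric registered. A short sketch, assuming the same precision function is importable wherever the model is loaded:

# the key must match the metric's name at compile time
model = tf.keras.models.load_model('mymodel', custom_objects={'precision': precision})

# alternatively, if the metric is not needed after loading:
# model = tf.keras.models.load_model('mymodel', compile=False)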
I found this function here: How to calculate F1 Macro in Keras? But I am not sure how I can write specificity in the same way. I am using the TensorFlow backend for Keras.
def recall(y_true, y_pred):
    """Recall metric.

    Only computes a batch-wise average of recall.

    Computes the recall, a metric for multi-label classification of
    how many relevant items are selected.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
I tried this solution, but it gives an error:
def compute_binary_specificity(y_pred, y_true):
    """Compute the confusion matrix for a set of predictions.

    Returns
    -------
    out : the specificity
    """
    TN = np.logical_and(K.eval(y_true) == 0, K.eval(y_pred) == 0)
    FP = np.logical_and(K.eval(y_true) == 0, K.eval(y_pred) == 1)
    # as Keras Tensors
    TN = K.sum(K.variable(TN))
    FP = K.sum(K.variable(FP))
    specificity = TN / (TN + FP + K.epsilon())
    return specificity
Error: InvalidArgumentError: You must feed a value for placeholder tensor 'dense_95_input' with dtype float and shape [?,140]
[[Node: dense_95_input = Placeholder[dtype=DT_FLOAT, shape=[?,140], _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
and it points here:
---> TN = np.logical_and(K.eval(y_true) == 0, K.eval(y_pred) == 0)
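The K.eval calls fail because y_true and y_pred are symbolic tensors during training, not arrays. Specificity can instead be written batch-wise in the same Keras-backend style as the recall function above; a sketch for binary 0/1 labels (the function name is mine):

def specificity(y_true, y_pred):
    """Specificity = TN / (TN + FP), computed batch-wise on 0/1 labels."""
    y_true = K.round(K.clip(y_true, 0, 1))
    y_pred = K.round(K.clip(y_pred, 0, 1))
    true_negatives = K.sum((1 - y_true) * (1 - y_pred))
    false_positives = K.sum((1 - y_true) * y_pred)
    return true_negatives / (true_negatives + false_positives + K.epsilon())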
I found a version on Stack Overflow:
from keras import backend as K

def f1(y_true, y_pred):
    def recall(y_true, y_pred):
        """Recall metric.

        Only computes a batch-wise average of recall.

        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    def precision(y_true, y_pred):
        """Precision metric.

        Only computes a batch-wise average of precision.

        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision

    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))

model.compile(loss='binary_crossentropy',
              optimizer="adam",
              metrics=[f1])
But could I use sklearn's f1_score when creating custom metrics? I want to use the average of the macro and micro f1_score. Could anybody help me? Thanks.
I think you can use the code you showed above during training: it calculates the F1 score for each batch, and you can see the values printed in the log in your terminal:
1/13 [=>............................] - ETA: 4s - loss: 0.2646 - f1: 0.2927
2/13 [===>..........................] - ETA: 4s - loss: 0.2664 - f1: 0.1463
...
13/13 [==============================] - 7s 505ms/step - loss: 0.2615 - f1: 0.1008 - val_loss: 0.2887 - val_f1: 0.1464
If you use the fit method and want to calculate the F1 score for each epoch, you can try a callback like the one below.
class Metrics(Callback):
    '''
    Define your personal callback
    '''
    def on_train_begin(self, logs={}):
        self.val_f1s = []
        self.val_recalls = []
        self.val_precisions = []

    def on_epoch_end(self, epoch, logs={}):
        # val_predict = (np.asarray(self.model.predict(self.validation_data[0]))).round()
        val_predict = np.argmax(np.asarray(self.model.predict(self.validation_data[0])), axis=1)
        # val_targ = self.validation_data[1]
        val_targ = np.argmax(self.validation_data[1], axis=1)
        _val_f1 = f1_score(val_targ, val_predict, average='macro')
        # _val_recall = recall_score(val_targ, val_predict)
        # _val_precision = precision_score(val_targ, val_predict)
        self.val_f1s.append(_val_f1)
        # self.val_recalls.append(_val_recall)
        # self.val_precisions.append(_val_precision)
        # print('— val_f1: %f — val_precision: %f — val_recall %f' % (_val_f1, _val_precision, _val_recall))
        print(' — val_f1:', _val_f1)
        return
Then pass the callback to your fit method:
metrics = Metrics()
model.fit_generator(generator=generator_train,
                    steps_per_epoch=len(generator_train),
                    validation_data=generator_val,
                    validation_steps=len(generator_val),
                    epochs=epochs,
                    callbacks=[metrics])
A couple of tips to note: if you train with the fit_generator() method, you can only use the batch-wise code you showed; otherwise, with the fit() method, you can also try the callback approach. That's all!
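To get the average of the macro and micro scores the original question asked about, the on_epoch_end body could compute both and average them. A sketch, assuming the same validation_data layout as in the callback above:

import numpy as np
from sklearn.metrics import f1_score

    def on_epoch_end(self, epoch, logs={}):
        val_predict = np.argmax(np.asarray(self.model.predict(self.validation_data[0])), axis=1)
        val_targ = np.argmax(self.validation_data[1], axis=1)
        f1_macro = f1_score(val_targ, val_predict, average='macro')
        f1_micro = f1_score(val_targ, val_predict, average='micro')
        _val_f1 = (f1_macro + f1_micro) / 2.0   # average of macro and micro F1
        self.val_f1s.append(_val_f1)
        print(' — val_f1 (macro+micro)/2:', _val_f1)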