How to create a custom specificity metric for Keras neural nets - Python

I found this function in How to calculate F1 Macro in Keras?, but I am not sure how to write the same kind of function for specificity. I am using the TensorFlow backend for Keras.
def recall(y_true, y_pred):
    """Recall metric.

    Only computes a batch-wise average of recall.
    Computes the recall, a metric for multi-label classification of
    how many relevant items are selected.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
I tried this solution, but it gives an error:
def compute_binary_specificity(y_pred, y_true):
    """Compute the confusion matrix for a set of predictions.

    Returns
    -------
    out : the specificity
    """
    TN = np.logical_and(K.eval(y_true) == 0, K.eval(y_pred) == 0)
    FP = np.logical_and(K.eval(y_true) == 0, K.eval(y_pred) == 1)

    # as Keras Tensors
    TN = K.sum(K.variable(TN))
    FP = K.sum(K.variable(FP))

    specificity = TN / (TN + FP + K.epsilon())
    return specificity
Error: InvalidArgumentError: You must feed a value for placeholder tensor 'dense_95_input' with dtype float and shape [?,140]
[[Node: dense_95_input = Placeholderdtype=DT_FLOAT, shape=[?,140], _device="/job:localhost/replica:0/task:0/device:CPU:0"]]
and the traceback points here:
---> TN = np.logical_and(K.eval(y_true) == 0, K.eval(y_pred) == 0)
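For reference, a specificity metric can be written in the same batch-wise, symbolic style as the recall function above, without K.eval (which is what triggers the placeholder error, since the tensors have no values outside a training or evaluation step). A minimal sketch, assuming binary labels and predictions in [0, 1]:

from keras import backend as K  # or: from tensorflow.keras import backend as K

def specificity(y_true, y_pred):
    """Specificity metric: TN / (TN + FP), computed batch-wise."""
    # true negatives: both the label and the (rounded) prediction are 0
    true_negatives = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
    # all actual negatives: label is 0 (this equals TN + FP)
    possible_negatives = K.sum(K.round(K.clip(1 - y_true, 0, 1)))
    return true_negatives / (possible_negatives + K.epsilon())

The function can then be passed to model.compile(..., metrics=[specificity]) like any other metric taking (y_true, y_pred).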

Related

Custom keras metric returning nan as result

I wrote the custom metric below for y_true and y_pred of shape (img_h, img_w, num_classes) (the predictions and true masks are one-hot encoded, and this shape excludes the batch dimension) for multiclass semantic segmentation. The metric returns a mean F1 score (the average of the F1 scores for all classes in the predicted mask) computed from y_true and y_pred. But when fitting the model using TensorFlow's model.fit() method, the training log always shows nan as the value. I have written some custom metrics before, and there is usually some value shown. Please help me understand my mistake.
The code:
class MeanF1Score(tf.keras.metrics.Metric):
    def __init__(self):
        super(MeanF1Score, self).__init__(name='mean_f1_score', dtype=tf.float32)
        self.metric = self.add_weight(name='metric', initializer='zeros')
        self.sample = self.add_weight(name='sample', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        y_true = tf.argmax(y_true, axis=-1)
        y_true = tf.reshape(y_true, shape=[-1])
        y_pred = tf.argmax(y_pred, axis=-1)
        y_pred = tf.reshape(y_pred, shape=[-1])
        cm = tf.math.confusion_matrix(y_true, y_pred)
        tp = tf.linalg.diag_part(cm)
        tp_fp = tf.reduce_sum(cm, axis=1)
        tp_fn = tf.reduce_sum(cm, axis=0)
        precision = tp / tp_fp
        recall = tp / tp_fn
        f1 = (2.0 * precision * recall) / (precision + recall)
        f1 = tf.cast(f1, dtype=tf.float32)
        f1 = tf.reduce_sum(f1)
        self.metric.assign_add(f1)
        self.sample.assign_add(1.0)

    def result(self):
        return self.metric / self.sample

    def reset_state(self):
        self.metric = self.add_weight(name='metric', initializer='zeros')
        self.sample = self.add_weight(name='sample', initializer='zeros')
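One likely source of the nan (an assumption, not something confirmed by the log): whenever a class is absent from a batch, tp_fp or tp_fn is 0, precision and recall divide by zero, and the resulting nan poisons the running sum in self.metric. A sketch of guarded per-batch arithmetic for update_state, using tf.math.divide_no_nan:

# hypothetical rewrite of the per-batch F1 computation; the rest of the class is unchanged
tp = tf.cast(tf.linalg.diag_part(cm), tf.float32)
tp_fp = tf.cast(tf.reduce_sum(cm, axis=1), tf.float32)
tp_fn = tf.cast(tf.reduce_sum(cm, axis=0), tf.float32)
precision = tf.math.divide_no_nan(tp, tp_fp)   # 0 instead of nan for classes absent from the batch
recall = tf.math.divide_no_nan(tp, tp_fn)
f1 = tf.math.divide_no_nan(2.0 * precision * recall, precision + recall)
self.metric.assign_add(tf.reduce_mean(f1))     # mean over classes rather than sum
self.sample.assign_add(1.0)

Separately, reset_state should reset the existing variables (for example self.metric.assign(0.0)) instead of calling add_weight again, which creates new variables on every reset.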

TensorFlow: Unable to restore custom object of type _tf_keras_metric

I have trained a model in Google Colab and am trying to load it on my local machine to use in the web app. But as soon as I call load_model() it shows this error:
Unable to restore custom object of type _tf_keras_metric. Please make sure that any custom layers are included in the `custom_objects` arg when calling `load_model()` and make sure that all layers implement `get_config` and `from_config`.
I have used CategoricalAccuracy as a metric in the model, and I tried to pass it as a custom object, but I get the same error.
Code used in Web App
def load_text_model():
    model = keras.models.load_model(
        "." + url_for('static', filename='models/text_model'),
        custom_objects={'CategoricalAccuracy': tf.keras.metrics.CategoricalAccuracy(name="accuracy")}
    )
    return model
Extra information if needed: I am making a multi-class text classification model. I have used the BERT English uncased model from TF Hub as the preprocessor and encoder. Below are the metrics I have used. If more code or context is required, I will provide it.
Code used while training model
def balanced_recall(y_true, y_pred):
    """This function calculates the balanced recall metric
    recall = TP / (TP + FN)
    """
    recall_by_class = 0
    # iterate over each predicted class to get class-specific metric
    for i in range(y_pred.shape[1]):
        y_pred_class = y_pred[:, i]
        y_true_class = y_true[:, i]
        true_positives = K.sum(K.round(K.clip(y_true_class * y_pred_class, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true_class, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        recall_by_class = recall_by_class + recall
    return recall_by_class / y_pred.shape[1]

def balanced_precision(y_true, y_pred):
    """This function calculates the balanced precision metric
    precision = TP / (TP + FP)
    """
    precision_by_class = 0
    # iterate over each predicted class to get class-specific metric
    for i in range(y_pred.shape[1]):
        y_pred_class = y_pred[:, i]
        y_true_class = y_true[:, i]
        true_positives = K.sum(K.round(K.clip(y_true_class * y_pred_class, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred_class, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        precision_by_class = precision_by_class + precision
    # return average balanced metric for each class
    return precision_by_class / y_pred.shape[1]

def balanced_f1_score(y_true, y_pred):
    """This function calculates the F1 score metric"""
    precision = balanced_precision(y_true, y_pred)
    recall = balanced_recall(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))

i = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
x = preprocessor(i)
x = encoder(x)
x = tf.keras.layers.Dropout(0.2, name="dropout")(x['default'])
x = tf.keras.layers.Dense(num_classes, activation='softmax', name="output")(x)
model = tf.keras.Model(i, x)

METRICS = [
    tf.keras.metrics.CategoricalAccuracy(name="accuracy"),
    balanced_recall,
    balanced_precision,
    balanced_f1_score
]
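A possible explanation (hedged, since the traceback does not name the offending object): the objects that need to be restored are the custom metric functions balanced_recall, balanced_precision and balanced_f1_score, not the built-in CategoricalAccuracy. A sketch of the loading code under that assumption:

def load_text_model():
    # the custom metric functions must be importable in the web app
    model = keras.models.load_model(
        "." + url_for('static', filename='models/text_model'),
        custom_objects={
            'balanced_recall': balanced_recall,
            'balanced_precision': balanced_precision,
            'balanced_f1_score': balanced_f1_score,
        },
    )
    return model

If the metrics are not needed at inference time, load_model(..., compile=False) skips restoring the compiled metrics entirely.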

How to create an AUC custom function in Keras?

I am trying to create two custom functions, f1_metric and auc_metric, in Keras. The f1_metric works, but the auc_metric does not, and I receive various errors. Here is my code:
def f1_metric(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    f1_val = 2 * (precision * recall) / (precision + recall + K.epsilon())
    return f1_val

def auc_metric(y_true, yhat):
    yhat = yhat[:, 1]
    # calculate roc curves
    fpr, tpr, thresholds = roc_curve(y_true, yhat)
    auc = auc(tpr, fpr)
    return auc
Here is my compile and fit code:
opt = SGD(lr=0.01, momentum=0.9)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
ca = SnapshotEnsemble(n_epochs, n_cycles, 0.01)
# fit model
history = model.fit(trainX, trainy, validation_data=(testX, testy), epochs=n_epochs,
                    verbose='auto', callbacks=[ca], batch_size=32)
Any idea how to solve it? I want to define a function similar to the one I wrote for f1_metric.
From comments
Approximates the AUC (Area under the curve) of the ROC or PR curves.
tf.keras.metrics.AUC(
    num_thresholds=200, curve='ROC',
    summation_method='interpolation', name=None, dtype=None,
    thresholds=None, multi_label=False, num_labels=None, label_weights=None,
    from_logits=False
)
For more details you can refer here
(paraphrased from Captain Trojan)
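A minimal usage sketch of the built-in metric: pass it directly to compile() instead of a hand-written function, since sklearn's roc_curve cannot operate on the symbolic tensors Keras feeds to a metric during training (the metric name 'auc' below is chosen for illustration).

model.compile(
    loss='binary_crossentropy',
    optimizer=opt,
    metrics=['accuracy', tf.keras.metrics.AUC(name='auc', curve='ROC')]
)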

How to define a custom f1 metric in Keras

I'm defining a custom F1 metric in Keras for a multiclass classification problem (in particular, n_classes = 4, so the output layer has 4 neurons and a softmax activation function). The idea is to keep track of the true positives, false negatives and false positives so as to gradually update the F1 score batch after batch. The code is the following:
def compute_confusion_matrix(true, pred, K):
    result = tf.zeros((K, K), dtype=tf.int32)
    for i in range(len(true)):
        result = tf.tensor_scatter_nd_add(tensor=result,
                                          indices=tf.constant([[true[i], pred[i]]]),
                                          updates=tf.constant([1]))
    return result

def f1_function(y_true, y_pred):
    k = 4
    y_pred_lab = np.argmax(y_pred, axis=1)
    conf_mat = compute_confusion_matrix(y_true, y_pred_lab, K=k)
    tp = tf.linalg.tensor_diag_part(conf_mat)
    fp = tf.reduce_sum(conf_mat, axis=0) - tp
    fn = tf.reduce_sum(conf_mat, axis=1) - tp
    support = tf.reduce_sum(conf_mat, axis=1)
    return tp, fp, fn, support
The f1_function returns the true positives, false positives, false negatives and the support of each class exploiting the confusion matrix computed through the compute_confusion_matrix function. Even though these functions work when called separately, problems arise when called during the model fit.
The custom metric is defined by subclassing keras.metrics.Metric as follows:
class F1Metric(keras.metrics.Metric):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.f1_fn = f1_function
        self.tp_count = self.add_weight("tp_count", initializer="zeros", shape=(4,))
        self.fp_count = self.add_weight("fp_count", initializer="zeros", shape=(4,))
        self.fn_count = self.add_weight("fn_count", initializer="zeros", shape=(4,))
        self.support_total = self.add_weight("support_total", initializer="zeros", shape=(4,))

    def update_state(self, y_true, y_pred, sample_weight=None):
        tp, fp, fn, support = self.f1_fn(y_true, y_pred)
        print(tp)
        self.tp_count.assign_add(tf.cast(tp, dtype=tf.float32))
        self.fp_count.assign_add(tf.cast(fp, dtype=tf.float32))
        self.fn_count.assign_add(tf.cast(fn, dtype=tf.float32))
        self.support_total.assign_add(tf.cast(support, dtype=tf.float32))

    def result(self):
        precisions = self.tp_count / (self.tp_count + self.fp_count)
        recalls = self.tp_count / (self.tp_count + self.fn_count)
        f1 = tf.constant(2, dtype=tf.float32) * (precisions * recalls) / (precisions + recalls)
        weighted_f1 = (f1 * self.support_total) / tf.reduce_sum(tf.cast(self.support_total, dtype=tf.float32))
        return weighted_f1
When I use this metric in model.fit I get this error: TypeError: Scalar tensor has no len().
Any explanation for this problem? Thanks.
EDIT:
The problem above was due to the type of y_true passed to the f1_function, which was <class 'tensorflow.python.framework.ops.EagerTensor'>. So I transformed it into a 1D array by doing: y_true = np.ravel(y_true).
However, during the fit it gives me the following error close to the end of the first epoch:
"Cannot assign to variable tp_count:0 due to variable shape (4,) and value shape () are incompatible."
The only thing I noticed is that the length of y_true and y_pred is no longer the same as the batch size (32) but less than that. That limit should be given by the size of the training set, so it shouldn't be the cause of the problem. Any idea?
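One hedged suggestion, not a confirmed fix: letting tf.math.confusion_matrix build the matrix with a fixed num_classes keeps every per-class tensor at shape (4,) regardless of which labels appear in a (possibly short, final) batch, and it avoids both len() and NumPy calls, so it also works in graph mode. A sketch, assuming y_true arrives as integer class labels as in the original f1_function:

def f1_function(y_true, y_pred, num_classes=4):
    y_true = tf.reshape(tf.cast(y_true, tf.int32), [-1])
    y_pred_lab = tf.argmax(y_pred, axis=1, output_type=tf.int32)
    # fixing num_classes guarantees a 4x4 matrix even if a class is absent from the batch
    conf_mat = tf.math.confusion_matrix(y_true, y_pred_lab, num_classes=num_classes)
    tp = tf.linalg.diag_part(conf_mat)
    fp = tf.reduce_sum(conf_mat, axis=0) - tp
    fn = tf.reduce_sum(conf_mat, axis=1) - tp
    support = tf.reduce_sum(conf_mat, axis=1)
    return tp, fp, fn, support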

TensorFlow 2 custom loss: "No gradients provided for any variable" error

I have an image segmentation problem I have to solve in TensorFlow 2.
In particular I have a training set composed of aerial images paired with their respective masks. In a mask the terrain is colored black and the buildings are colored white. The purpose is to predict the masks for the images in the test set.
I use a UNet with a final Conv2DTranspose with 1 filter and a sigmoid activation function. The prediction is made in the following way on the output of the final sigmoid layer: if y_pred>0.5, then it's a building, otherwise it's the background.
I want to implement a dice loss, so I wrote the following function:
def dice_loss(y_true, y_pred):
    print("[dice_loss] y_pred=", y_pred, "y_true=", y_true)
    y_pred = tf.cast(y_pred > 0.5, tf.float32)
    y_true = tf.cast(y_true, tf.float32)
    numerator = 2 * tf.reduce_sum(y_true * y_pred)
    denominator = tf.reduce_sum(y_true + y_pred)
    return 1 - numerator / denominator
which I pass to TensorFlow in the following way:
loss = dice_loss
optimizer = tf.keras.optimizers.Adam(learning_rate=config.learning_rate)
metrics = [my_IoU, 'acc']
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
but at training time TensorFlow throws the following error:
ValueError: No gradients provided for any variable:
The problem is in your loss function (obviously); in particular, the following operation:
y_pred = tf.cast(y_pred > 0.5, tf.float32)
This is not a differentiable operation, which results in the gradients being None. Change your loss function to the following and it will work:
def dice_loss(y_true, y_pred):
    print("[dice_loss] y_pred=", y_pred, "y_true=", y_true)
    y_true = tf.cast(y_true, tf.float32)
    numerator = 2 * tf.reduce_sum(y_true * y_pred)
    denominator = tf.reduce_sum(y_true + y_pred)
    return 1 - numerator / denominator
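An optional refinement of the corrected loss (a sketch, not part of the original answer): a small smoothing term keeps the loss defined when a batch contains only empty masks and both sums are zero.

def dice_loss(y_true, y_pred, smooth=1.0):
    # smooth avoids 0/0 when both the mask and the prediction are empty
    y_true = tf.cast(y_true, tf.float32)
    numerator = 2.0 * tf.reduce_sum(y_true * y_pred) + smooth
    denominator = tf.reduce_sum(y_true + y_pred) + smooth
    return 1.0 - numerator / denominator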
