ImportError when importing metric from sklearn - python

When I am trying to import a metric from sklearn, I get the following error:
from sklearn.metrics import mean_absolute_percentage_error

ImportError: cannot import name 'mean_absolute_percentage_error' from 'sklearn.metrics'
(/Users/carter/opt/anaconda3/lib/python3.8/site-packages/sklearn/metrics/__init__.py)
I have run conda update --all and reinstalled scikit-learn, to no avail. Are there any other reasons this might happen, and possible solutions?

The function mean_absolute_percentage_error is new in scikit-learn version 0.24 as noted in the documentation.
As of December 2020, the latest version of scikit-learn available from Anaconda is v0.23.2, so that's why you're not able to import mean_absolute_percentage_error.
You could try installing the latest version from source instead, or implement the function you need yourself. The source is available here if you'd like to take a look.
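Before going that route, it's worth confirming which version you actually have installed; a quick check:

import sklearn
print(sklearn.__version__)  # anything below 0.24 lacks mean_absolute_percentage_error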

The answer above is the right one. For those who cannot upgrade/install from source, below is the required code.
The function itself relies on other functions: one (_check_reg_targets) defined in the same module, and others from sklearn.utils.validation.
Here is the required code I pulled from the source - if anyone needs it (and I hope I am not violating any license):
import numpy as np
from sklearn.utils.validation import check_consistent_length, check_array


def mean_absolute_percentage_error(y_true, y_pred,
                                   sample_weight=None,
                                   multioutput='uniform_average'):
    """Mean absolute percentage error regression loss.

    Note here that we do not represent the output as a percentage in range
    [0, 100]. Instead, we represent it in range [0, 1/eps]. Read more in the
    :ref:`User Guide <mean_absolute_percentage_error>`.

    .. versionadded:: 0.24

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.
        If input is list then the shape must be (n_outputs,).

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats in the range [0, 1/eps]
        If multioutput is 'raw_values', then mean absolute percentage error
        is returned for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

        MAPE output is non-negative floating point. The best value is 0.0.
        But note the fact that bad predictions can lead to arbitrarily large
        MAPE values, especially if some y_true values are very close to zero.
        Note that we return a large value instead of `inf` when y_true is zero.

    Examples
    --------
    >>> from sklearn.metrics import mean_absolute_percentage_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_absolute_percentage_error(y_true, y_pred)
    0.3273...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> mean_absolute_percentage_error(y_true, y_pred)
    0.5515...
    >>> mean_absolute_percentage_error(y_true, y_pred, multioutput=[0.3, 0.7])
    0.6198...
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)
    check_consistent_length(y_true, y_pred, sample_weight)
    epsilon = np.finfo(np.float64).eps
    mape = np.abs(y_pred - y_true) / np.maximum(np.abs(y_true), epsilon)
    output_errors = np.average(mape,
                               weights=sample_weight, axis=0)
    if isinstance(multioutput, str):
        if multioutput == 'raw_values':
            return output_errors
        elif multioutput == 'uniform_average':
            # pass None as weights to np.average: uniform mean
            multioutput = None
    return np.average(output_errors, weights=multioutput)


def _check_reg_targets(y_true, y_pred, multioutput, dtype="numeric"):
    """Check that y_true and y_pred belong to the same regression task.

    Parameters
    ----------
    y_true : array-like

    y_pred : array-like

    multioutput : array-like or string in ['raw_values', 'uniform_average',
        'variance_weighted'] or None
        None is accepted due to backward compatibility of r2_score().

    dtype : str or list, default="numeric"
        the dtype argument passed to check_array.

    Returns
    -------
    type_true : one of {'continuous', 'continuous-multioutput'}
        The type of the true target data, as output by
        'utils.multiclass.type_of_target'.

    y_true : array-like of shape (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples, n_outputs)
        Estimated target values.

    multioutput : array-like of shape (n_outputs) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
        Custom output weights if ``multioutput`` is array-like or
        just the corresponding argument if ``multioutput`` is a
        correct keyword.
    """
    check_consistent_length(y_true, y_pred)
    y_true = check_array(y_true, ensure_2d=False, dtype=dtype)
    y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype)

    if y_true.ndim == 1:
        y_true = y_true.reshape((-1, 1))
    if y_pred.ndim == 1:
        y_pred = y_pred.reshape((-1, 1))

    if y_true.shape[1] != y_pred.shape[1]:
        raise ValueError("y_true and y_pred have different number of output "
                         "({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))

    n_outputs = y_true.shape[1]
    allowed_multioutput_str = ('raw_values', 'uniform_average',
                               'variance_weighted')
    if isinstance(multioutput, str):
        if multioutput not in allowed_multioutput_str:
            raise ValueError("Allowed 'multioutput' string values are {}. "
                             "You provided multioutput={!r}".format(
                                 allowed_multioutput_str, multioutput))
    elif multioutput is not None:
        multioutput = check_array(multioutput, ensure_2d=False)
        if n_outputs == 1:
            raise ValueError("Custom weights are useful only in "
                             "multi-output cases.")
        elif n_outputs != len(multioutput):
            raise ValueError(("There must be equally many custom weights "
                              "(%d) as outputs (%d).") %
                             (len(multioutput), n_outputs))
    y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'

    return y_type, y_true, y_pred, multioutput
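With the two functions above (and numpy imported), you can sanity-check the copy against the docstring example:

y_true = [3, -0.5, 2, 7]
y_pred = [2.5, 0.0, 2, 8]
print(mean_absolute_percentage_error(y_true, y_pred))  # ~0.3273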

You can go with one of these two solutions:

1. Upgrade your scikit-learn version:

!pip install scikit-learn==0.24

Then:

from sklearn.metrics import mean_absolute_percentage_error

2. Build your own function to calculate MAPE:
import numpy as np

def MAPE(y_true, y_pred):
    y_true, y_pred = np.array(y_true), np.array(y_pred)
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
But the problem with the above function is that when a true value is 0, the MAPE becomes infinite. So, to solve this problem, we use:
def MAPE(y_true, y_pred):
    y_true, y_pred = np.array(y_true), np.array(y_pred)
    return np.mean(np.abs((y_true - y_pred) / np.maximum(np.ones(len(y_true)), np.abs(y_true)))) * 100
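For example (a quick illustration, with numpy imported as above), a zero in y_true no longer produces inf because the denominator is clamped at 1:

y_true = [0, 2, 4]
y_pred = [1, 2, 5]
print(MAPE(y_true, y_pred))  # finite (~41.67), where the naive version returns inf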

I just had the same issue. Opening the Anaconda Prompt for the environment one is working on, and running

pip install scikit-learn

solved the problem. It updated scikit-learn's version (at this precise moment it was upgraded to version 1.0.2, but the function is present in versions starting at 0.24), and now one is able to import and run sklearn.metrics.mean_absolute_percentage_error.
Here is an example (Source):
>>> from sklearn.metrics import mean_absolute_percentage_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_percentage_error(y_true, y_pred)
0.3273...
Note: You may want to keep in mind that the MAPE can be problematic, as it may result in divisions by zero (see my answer here).

Related

How to implement the Residual Standard Error (RSE) as a metric in Keras?

How can I compute the Residual Standard Error (RSE) as a custom metric in Keras?
The RSE is given by: sqrt[RSS / (n-2)]
Where the RSS is: sum((y_true - y_pred)**2)
This question refers to a post on stackoverflow. In this post, a user by the name of Swain Subrat Kumar shows the implementation of the Residual Standard Error (RSE). He even provides a minimum working example (MWE) which I believe to be correct.
I repost a shortened version here:
import math
import numpy as np

def RSE(y_true, y_predicted):
    '''
    y_true, y_predicted: np.array()
    '''
    RSS = np.sum(np.square(y_true - y_predicted))
    return math.sqrt(RSS / (len(y_true) - 2))
I am trying to translate this code into keras/tensorflow so that I can use it as a metric. So far, I have this:
def rse(y_true, y_pred):
    '''
    y_true, y_pred: tensor
    '''
    tmp = tf.cast(len(y_true), tf.float32) - tf.constant(2.0)
    RSS = K.sum(K.square(y_true - y_pred))  # residual sum of squares
    return K.sqrt(tf.math.divide(RSS, tmp))
However, this is not correct. The RSS is ok. Where it all goes wrong is in dividing the RSS by (len(y_true)-2).
How can I fix this? Many thanks in advance.
P.S.: I am having similar problems when trying to create my own variance metric.
If you are using the rse function as a metric or a loss, it is applied to batches of data, i.e. tensors of shape (B, n), where B is the designated batch size and n is the number of elements in each vector (assuming each is 1-D). When you divide by len(y_true) - 2, the len function returns the number of samples in the batch, B (the first dimension), when it should use the size of the second dimension, n. If you change the rse function to use the second dimension of the tensor (y_true.shape[1]), the results are correct:
def rse(y_true, y_pred):
    '''
    y_true, y_pred: tensor
    '''
    tmp = tf.cast(y_true.shape[1], tf.float32) - tf.constant(2.0)
    RSS = K.sum(K.square(y_true - y_pred))  # residual sum of squares
    return K.sqrt(tf.math.divide(RSS, tmp))
In a fully reproducible dummy example:
import tensorflow as tf
import tensorflow.keras.backend as K
import numpy as np

def rse(y_true, y_pred):
    '''
    y_true, y_pred: tensor
    '''
    tmp = tf.cast(y_true.shape[1], tf.float32) - tf.constant(2.0)
    RSS = K.sum(K.square(y_true - y_pred))  # residual sum of squares
    return K.sqrt(tf.math.divide(RSS, tmp))

if __name__ == "__main__":
    # NOTE: call `expand_dims` to simulate the idea of a batch
    # (i.e. a 2D tensor with shape (1, 5)), so B = 1, n = 5
    y_true = np.expand_dims(np.array([1, 2, 3, 4, 6], dtype=np.float32), axis=0)
    y_pred = np.expand_dims(np.array([1, 2, 3, 4, 5], dtype=np.float32), axis=0)
    print(rse(y_true, y_pred))
Output is:
tf.Tensor(0.57735026, shape=(), dtype=float32)
Which is correct (simply the square root of 1/3, since we only have 1 error in the example data).
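Note that y_true.shape[1] requires the second dimension to be statically known; with fully dynamic shapes you may need tf.shape(y_true)[1] instead. A usage sketch (the model, optimizer, and loss here are placeholders, not from the original post):

model.compile(optimizer='adam', loss='mse', metrics=[rse])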

Need help implementing a custom loss function in lightGBM (Zero-inflated Log Normal Loss)

I'm trying to implement the zero-inflated log normal loss function from this paper (https://arxiv.org/pdf/1912.07753.pdf, page 5) in LightGBM. But, admittedly, I just don't know how: I don't understand how to get the gradient and Hessian of this function in order to implement it in LightGBM, and I've never needed to implement a custom loss function in the past.
The authors of this paper have open-sourced their code, and the function is available in TensorFlow (https://github.com/google/lifetime_value/blob/master/lifetime_value/zero_inflated_lognormal.py), but I'm unable to translate this to fit the parameters required for a custom loss function in LightGBM. As an example of how LightGBM accepts custom loss functions, a log-likelihood loss would be written as:
def loglikelihood(preds, train_data):
    labels = train_data.get_label()
    preds = 1. / (1. + np.exp(-preds))
    grad = preds - labels
    hess = preds * (1. - preds)
    return grad, hess
Similarly, I would need to define a custom eval metric to accompany it, such as:
def binary_error(preds, train_data):
    labels = train_data.get_label()
    preds = 1. / (1. + np.exp(-preds))
    return 'error', np.mean(labels != (preds > 0.5)), False
Both of the above examples are taken from the following repository:
https://github.com/microsoft/LightGBM/blob/e83042f20633d7f74dda0d18624721447a610c8b/examples/python-guide/advanced_example.py#L136
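These plug into training roughly like this (a sketch for LightGBM versions that still accept the fobj/feval keyword arguments; newer releases pass the objective through params instead):

import lightgbm as lgb

# train_set is a lgb.Dataset; fobj supplies (grad, hess), feval the eval metric
booster = lgb.train(params, train_set, fobj=loglikelihood, feval=binary_error)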
Would appreciate any help on this, and especially detailed guidance to help me learn how to do this on my own.
According to the LGBM documentation for custom loss functions:
It should have the signature objective(y_true, y_pred) -> grad, hess or objective(y_true, y_pred, group) -> grad, hess:

y_true : numpy 1-D array of shape = [n_samples]
    The target values.
y_pred : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
    The predicted values. Predicted values are returned before any transformation, e.g. they are raw margin instead of probability of positive class for binary task.
group : numpy 1-D array
    Group/query data. Only used in the learning-to-rank task. sum(group) = n_samples. For example, if you have a 100-document dataset with group = [10, 20, 40, 10, 10, 10], that means that you have 6 groups, where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
grad : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
    The value of the first order derivative (gradient) of the loss with respect to the elements of y_pred for each sample point.
hess : numpy 1-D array of shape = [n_samples] or numpy 2-D array of shape = [n_samples, n_classes] (for multi-class task)
    The value of the second order derivative (Hessian) of the loss with respect to the elements of y_pred for each sample point.
This is the "translation", as you defined it, of the tensorflow implementation. Most of the work is just defining the functions yourself (i.e. softplus, crossentropy, etc.)
The mean absolute percentage error is used in the linked paper, not sure if that is the eval metric you want to use.
import numpy as np
from scipy import stats

epsilon = 1e-7

def sigmoid(x):
    # numpy version so it works element-wise on arrays of logits
    return 1 / (1 + np.exp(-x))

def softplus(x, beta=1):
    return 1 / beta * np.log(1 + np.exp(beta * x))

def BinaryCrossEntropy(y_true, y_pred):
    y_pred = np.clip(y_pred, epsilon, 1 - epsilon)
    term_0 = (1 - y_true) * np.log(1 - y_pred + epsilon)
    term_1 = y_true * np.log(y_pred + epsilon)
    return -np.mean(term_0 + term_1, axis=0)

def zero_inflated_lognormal_pred(logits):
    positive_probs = sigmoid(logits[..., :1])
    loc = logits[..., 1:2]
    scale = softplus(logits[..., 2:])
    preds = (
        positive_probs *
        np.exp(loc + 0.5 * np.square(scale)))
    return preds

def mean_abs_pct_error(preds, train_data):
    labels = train_data.get_label()
    decile_labels = np.percentile(labels, np.linspace(10, 100, 10))
    decile_preds = np.percentile(preds, np.linspace(10, 100, 10))
    MAPE = sum(np.absolute(decile_preds - decile_labels) / decile_labels)
    return 'error', MAPE, False

def zero_inflated_lognormal_loss(train_data, logits):
    # reshape so the (n, 1) slices of logits broadcast against the labels
    labels = train_data.get_label().reshape(-1, 1)
    positive = (labels > 0).astype(np.float64)
    positive_logits = logits[..., :1]
    # BinaryCrossEntropy above expects probabilities, so squash the logits first
    classification_loss = BinaryCrossEntropy(
        y_true=positive, y_pred=sigmoid(positive_logits))
    loc = logits[..., 1:2]
    scale = np.maximum(
        softplus(logits[..., 2:]),
        np.sqrt(epsilon))
    safe_labels = positive * labels + (
        1 - positive) * np.ones(labels.shape)
    # scipy's lognormal is parameterized with s=sigma and scale=exp(mu)
    regression_loss = -np.mean(
        positive * stats.lognorm.logpdf(safe_labels, s=scale, scale=np.exp(loc)),
        axis=-1)
    return classification_loss + regression_loss
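Note that zero_inflated_lognormal_loss as written returns the loss value itself; to use it as a LightGBM objective you would still need its gradient and Hessian with respect to each of the three logits (analytically, or via automatic differentiation). The decile-MAPE eval metric, however, can be plugged in directly, e.g. (a sketch, with the same fobj/feval-era API caveat as above):

booster = lgb.train(params, train_set, feval=mean_abs_pct_error)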

How to correctly use the Tensorflow MeanIOU metric?

I want to use the MeanIoU metric in Keras (doc link), but I don't really understand how it can be integrated with the Keras API. In the example, the prediction and the ground truth are given as binary values, but with Keras we should get probabilities, especially because the loss is mse...
We should have something like:
m = tf.keras.metrics.MeanIoU(num_classes=2)
m.update_state([0, 0, 1, 1], [0.3, 0.6, 0.2, 0.9])
But now the result isn't the same; we have:

# <tf.Variable 'UnreadVariable' shape=(2, 2) dtype=float64,
#  numpy=array([[2., 0.],
#               [2., 0.]])>
m.result().numpy()  # 0.25
So my question is: how should we use this metric if the output of the model is probabilities, whether binary or even in a multi-class setting (one-hot)?
For the Accuracy there is a distinction between BinaryAccuracy and CategoricalAccuracy and they both take probabilities in y_pred. Shouldn't it be the same for MeanIoU?
I am having similar issues. Despite looking for examples online, all the demonstrations I found apply argmax on the model's output first.
The workaround I have for now is to subclass tf.keras.metrics.MeanIoU:
class MyMeanIOU(tf.keras.metrics.MeanIoU):
    def update_state(self, y_true, y_pred, sample_weight=None):
        return super().update_state(tf.argmax(y_true, axis=-1),
                                    tf.argmax(y_pred, axis=-1),
                                    sample_weight)
It is also possible to create your own function, but it is recommended to subclass tf.keras.metrics.Metric if you wish to benefit from the extra features such as distributed strategies.
I am still looking for cleaner solutions.
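A usage sketch (the model is a placeholder; assumes one-hot labels and a softmax output, so both argmax calls are meaningful):

# num_classes here is a placeholder for your own label count
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=[MyMeanIOU(num_classes=3)])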
I had the same problem, so I looked into the source code. In TF 2.0, at the end of the update_state function, there is:
current_cm = confusion_matrix.confusion_matrix(
    y_true,
    y_pred,
    self.num_classes,
    weights=sample_weight,
    dtype=dtypes.float64)
Looking into the confusion_matrix function:
with ops.name_scope(name, 'confusion_matrix',
                    (predictions, labels, num_classes, weights)) as name:
    labels, predictions = remove_squeezable_dimensions(
        ops.convert_to_tensor(labels, name='labels'),
        ops.convert_to_tensor(predictions, name='predictions'))
    predictions = math_ops.cast(predictions, dtypes.int64)
    labels = math_ops.cast(labels, dtypes.int64)

    # Sanity checks - underflow or overflow can cause memory corruption.
    labels = control_flow_ops.with_dependencies(
        [check_ops.assert_non_negative(
            labels, message='`labels` contains negative values')],
        labels)
    predictions = control_flow_ops.with_dependencies(
        [check_ops.assert_non_negative(
            predictions, message='`predictions` contains negative values')],
        predictions)

    if num_classes is None:
        num_classes = math_ops.maximum(math_ops.reduce_max(predictions),
                                       math_ops.reduce_max(labels)) + 1
    else:
        num_classes_int64 = math_ops.cast(num_classes, dtypes.int64)
        labels = control_flow_ops.with_dependencies(
            [check_ops.assert_less(
                labels, num_classes_int64, message='`labels` out of bound')],
            labels)
        predictions = control_flow_ops.with_dependencies(
            [check_ops.assert_less(
                predictions, num_classes_int64,
                message='`predictions` out of bound')],
            predictions)

    if weights is not None:
        weights = ops.convert_to_tensor(weights, name='weights')
        predictions.get_shape().assert_is_compatible_with(weights.get_shape())
        weights = math_ops.cast(weights, dtype)

    shape = array_ops.stack([num_classes, num_classes])
    indices = array_ops.stack([labels, predictions], axis=1)
    values = (array_ops.ones_like(predictions, dtype)
              if weights is None else weights)
    cm_sparse = sparse_tensor.SparseTensor(
        indices=indices,
        values=values,
        dense_shape=math_ops.cast(shape, dtypes.int64))
    zero_matrix = array_ops.zeros(math_ops.cast(shape, dtypes.int32), dtype)

    return sparse_ops.sparse_add(zero_matrix, cm_sparse)
The trick is in the line predictions = math_ops.cast(predictions, dtypes.int64): TF uses math_ops.cast to cast the predictions to int64, so when you send [0.3, 0.6, 0.2, 0.9] into the cast function, it returns [0, 0, 0, 0]. That's why you get the confusion matrix

[[2., 0.],
 [2., 0.]]
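So for probability outputs you have to discretize before updating the metric; for instance (a small sketch for the binary case, thresholding at 0.5):

import tensorflow as tf

m = tf.keras.metrics.MeanIoU(num_classes=2)
# threshold the probabilities first, so the cast to int is meaningful
preds = tf.cast(tf.constant([0.3, 0.6, 0.2, 0.9]) > 0.5, tf.int32)
m.update_state([0, 0, 1, 1], preds)
print(m.result().numpy())  # 0.333..., from a confusion matrix that now makes sense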

Weighted categorical cross entropy semantic segmentation

I want to use an FCN (a kind of U-Net) to perform semantic segmentation.
I built it using Python and Keras on the TensorFlow backend. I now have good results and am trying to improve them; I think one way to do so is by improving my loss computation.
I know that the several classes in my output are imbalanced, and using the default categorical_crossentropy function can be a problem.
My model's inputs and outputs are both in float32 format; inputs are channels_first and outputs are channels_last (the permutation is done at the end of the model).
In the binary case, when I only want to segment one class, I have changed the loss function in this way so it can add the weights case by case depending on the content of the output:
def weighted_loss(y_true, y_pred):
    def weighted_binary_cross_entropy(y_true, y_pred):
        w = tf.reduce_sum(y_true) / tf.cast(tf.size(y_true), tf.float32)
        real_th = 0.5 - th
        tf_th = tf.fill(tf.shape(y_pred), real_th)
        tf_zeros = tf.fill(tf.shape(y_pred), 0.)
        return (1.0 - w) * y_true * -tf.log(tf.maximum(tf_zeros, tf.sigmoid(y_pred) + tf_th)) + \
               (1 - y_true) * w * -tf.log(1 - tf.maximum(tf_zeros, tf.sigmoid(y_pred) + tf_th))
    return weighted_binary_cross_entropy
Note that th is the activation threshold, which is 1/nClasses by default, and which I have varied to see what value gives the best results.
What do you think about it?
What about changing it so it can compute the weighted categorical cross entropy (in the multi-class case)?
Your implementation will work for binary classes; for multi-class it would just be

-y_true * tf.log(tf.sigmoid(y_pred))

Use the built-in TensorFlow method for calculating categorical entropy, as it avoids overflow for y_pred < 0. You can view this answer, Unbalanced data and weighted cross entropy, which explains the weighted categorical cross entropy implementation.
The only change for categorical_crossentropy would be:
def weighted_loss(y_true, y_pred):
    def weighted_categorical_cross_entropy(y_true, y_pred):
        w = tf.reduce_sum(y_true) / tf.cast(tf.size(y_true), tf.float32)
        loss = w * tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred)
        return loss
    return weighted_categorical_cross_entropy
Extracting the prediction for an individual class:
def loss(y_true, y_pred):
    s = tf.shape(y_true)
    # if the number of output classes is last
    number_classes = s[-1]
    # this will give you the one-hot code of your prediction
    clf_pred = tf.one_hot(tf.argmax(y_pred, axis=-1), depth=number_classes, axis=-1)
    # extract the values of y_pred where y_pred is max among the classes
    prediction = tf.where(tf.equal(clf_pred, 1), y_pred, tf.zeros_like(y_pred))
    # if the one-hot code == 1 then class1_prediction == y_pred, else class1_prediction == 0
    class1_prediction = prediction[:, :, :, 0:1]
    # you can compute your loss on the individual class here and return it;
    # just for simplicity I am returning class1_prediction
    return class1_prediction
Output from the model:

y_pred = [[[[0.5, 0.3, 0.7],
            [0.6, 0.3, 0.2]],
           [[0.7, 0.9, 0.6],
            [0.3, 0.9, 0.3]]]]

Corresponding ground truth:

y_true = [[[[0, 1, 0],
            [1, 0, 0]],
           [[1, 0, 0],
            [0, 1, 0]]]]

Prediction for class 1:

prediction = loss(y_true, y_pred)
# prediction = [[[[0. ], [0.6]], [[0. ], [0. ]]]]

Use tf.metrics in Keras?

I'm especially interested in specificity_at_sensitivity. Looking through the Keras docs:
from keras import metrics
model.compile(loss='mean_squared_error',
              optimizer='sgd',
              metrics=[metrics.mae, metrics.categorical_accuracy])
But it looks like the metrics list must have functions of arity 2, accepting (y_true, y_pred) and returning a single tensor value.
EDIT: Currently here is how I do things:
from sklearn.metrics import confusion_matrix
predictions = model.predict(x_test)
y_test = np.argmax(y_test, axis=-1)
predictions = np.argmax(predictions, axis=-1)
c = confusion_matrix(y_test, predictions)
print('Confusion matrix:\n', c)
print('sensitivity', c[0, 0] / (c[0, 1] + c[0, 0]))
print('specificity', c[1, 1] / (c[1, 1] + c[1, 0]))
The disadvantage of this approach is that I only get the output I care about when training has finished. I would prefer to get metrics every 10 epochs or so.
I've found a related issue on github, and it seems that tf.metrics are still not supported by Keras models. However, in case you are very interested in using tf.metrics.specificity_at_sensitivity, I would suggest the following workaround (inspired by BogdanRuzh's solution):
def specificity_at_sensitivity(sensitivity, **kwargs):
    def metric(labels, predictions):
        # any tensorflow metric
        value, update_op = tf.metrics.specificity_at_sensitivity(labels, predictions, sensitivity, **kwargs)

        # find all variables created for this metric
        metric_vars = [i for i in tf.local_variables() if 'specificity_at_sensitivity' in i.name.split('/')[2]]

        # Add metric variables to GLOBAL_VARIABLES collection.
        # They will be initialized for new session.
        for v in metric_vars:
            tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, v)

        # force to update metric values
        with tf.control_dependencies([update_op]):
            value = tf.identity(value)
            return value
    return metric
model.compile(loss='mean_squared_error',
              optimizer='sgd',
              metrics=[metrics.mae,
                       metrics.categorical_accuracy,
                       specificity_at_sensitivity(0.5)])
UPDATE:
You can use model.evaluate to retrieve the metrics after training.
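If you want the confusion-matrix numbers during training instead (e.g. every 10 epochs, as asked), one option is a custom callback. A sketch, with x_test/y_test as defined in the question and an arbitrary interval:

import numpy as np
from keras.callbacks import Callback
from sklearn.metrics import confusion_matrix

class ConfusionMetrics(Callback):
    def __init__(self, x_val, y_val, every=10):
        super().__init__()
        self.x_val, self.y_val, self.every = x_val, y_val, every

    def on_epoch_end(self, epoch, logs=None):
        if (epoch + 1) % self.every:
            return  # only report every `every` epochs
        preds = np.argmax(self.model.predict(self.x_val), axis=-1)
        c = confusion_matrix(np.argmax(self.y_val, axis=-1), preds)
        print('sensitivity', c[0, 0] / (c[0, 1] + c[0, 0]))
        print('specificity', c[1, 1] / (c[1, 1] + c[1, 0]))

# model.fit(..., callbacks=[ConfusionMetrics(x_test, y_test)])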
I don't think there is a strict limit of exactly two arguments. In metrics.py, sparse_top_k_categorical_accuracy takes three arguments, with k defaulting to 5:

def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
    return K.mean(K.in_top_k(y_pred, K.cast(K.max(y_true, axis=-1), 'int32'), k), axis=-1)
