Keras custom metric with sample weights - Python

I am trying to define a custom metric in Keras that takes into account sample weights. When fitting the model I use the sample weights as follows:
training_history = model.fit(
    train_data,
    train_labels,
    sample_weight=train_weights,
    epochs=num_epochs,
    batch_size=128,
    validation_data=(validation_data, validation_labels, validation_weights),
)
An example of a custom metric I am using is the AUC (area under the ROC curve), which I defined as follows:
from keras import backend as K
import tensorflow as tf

def auc(true_labels, predictions, weights=None):
    auc = tf.metrics.auc(true_labels, predictions, weights=weights)[1]
    K.get_session().run(tf.local_variables_initializer())
    return auc
and I use this metric when compiling the model:
model.compile(
    optimizer=optimizer,
    loss='binary_crossentropy',
    metrics=['accuracy', auc]
)
But as far as I can tell, the metric does not take the sample weights into account. I verified this by comparing the metric value reported during training with the value I compute myself from the model output and the sample weights, and the two are indeed very different. How would I define the auc metric above so that it takes the sample weights into account?

You could wrap your metric with another function that takes sample_weights as an argument:
def auc(weights):
    def metric(true_labels, predictions):
        auc = tf.metrics.auc(true_labels, predictions, weights=weights)[1]
        K.get_session().run(tf.local_variables_initializer())
        return auc
    return metric
And then define an extra input placeholder that will receive the sample weights:
sample_weights = Input(shape=(1,))
Your model can then be compiled as follows:
model.compile(
    optimizer=optimizer,
    loss='binary_crossentropy',
    metrics=['accuracy', auc(sample_weights)]
)
NOTE: Not tested.
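For what it's worth (not part of the original answer, and also untested): with a recent tf.keras you can let Keras apply the weights for you by passing the metric through weighted_metrics in compile; metrics listed there are updated with the sample_weight given to fit (and with the weights in validation_data). A minimal sketch assuming TF 2.x and the built-in AUC metric:

import tensorflow as tf

model.compile(
    optimizer=optimizer,
    loss='binary_crossentropy',
    metrics=['accuracy'],                                    # unweighted metrics
    weighted_metrics=[tf.keras.metrics.AUC(name='auc_w')],   # weighted by sample_weight from fit()
)

# model.fit(train_data, train_labels,
#           sample_weight=train_weights,
#           validation_data=(validation_data, validation_labels, validation_weights))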

Related

Can't get GridSearchCV working with Keras

I'm trying to use GridSearchCV to optimise the hyperparameters in a custom model built with Keras. My code so far:
https://pastebin.com/ujYJf67c#9suyZ8vM
The model definition:
def build_nn_model(n, hyperparameters, loss, metrics, opt):
    model = keras.Sequential([
        keras.layers.Dense(hyperparameters[0], activation=hyperparameters[1],  # number of outputs to next layer
                           input_shape=[n]),  # number of features
        keras.layers.Dense(hyperparameters[2], activation=hyperparameters[3]),
        keras.layers.Dense(hyperparameters[4], activation=hyperparameters[5]),
        keras.layers.Dense(1)  # 1 output (redshift)
    ])
    model.compile(loss=loss,
                  optimizer=opt,
                  metrics=metrics)
    return model
and the grid search:
optimizer = ['SGD', 'RMSprop', 'Adagrad', 'Adadelta', 'Adam', 'Adamax', 'Nadam']
epochs = [10, 50, 100]
param_grid = dict(epochs=epochs, optimizer=optimizer)
grid = GridSearchCV(estimator=model, param_grid=param_grid, scoring='accuracy', n_jobs=-1, refit='boolean')
grid_result = grid.fit(X_train, y_train)
throws an error:
TypeError: Cannot clone object '<keras.engine.sequential.Sequential object at 0x0000028B8C50C0D0>' (type <class 'keras.engine.sequential.Sequential'>): it does not seem to be a scikit-learn estimator as it does not implement a 'get_params' method.
How can I get GridSearchCV to play nicely with the model as it's defined?
I'm assuming you are training a classifier, so you have to wrap it in KerasClassifier:
from scikeras.wrappers import KerasClassifier
...
model = KerasClassifier(build_nn_model)
# Do grid search
Remember to provide either a default value or a grid entry in GridSearchCV for each of build_nn_model's parameters; a sketch follows below.
For a regression model use KerasRegressor instead.
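For illustration, an untested sketch of how that can look with scikeras; the simplified build function and the parameter names below are placeholders, not the asker's exact code. Defaults cover everything the build function needs, and grid entries meant for the build function are prefixed with model__:

from scikeras.wrappers import KerasClassifier
from sklearn.model_selection import GridSearchCV
from tensorflow import keras

def build_nn_model(units=64, activation='relu', optimizer='adam', n_features=10):
    # simplified stand-in for the original build function
    model = keras.Sequential([
        keras.layers.Dense(units, activation=activation, input_shape=[n_features]),
        keras.layers.Dense(1, activation='sigmoid'),
    ])
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model

clf = KerasClassifier(model=build_nn_model, epochs=10, batch_size=32, verbose=0)

param_grid = {
    'model__units': [32, 64],                 # routed to build_nn_model
    'model__optimizer': ['adam', 'rmsprop'],  # routed to build_nn_model
    'epochs': [10, 50],                       # handled by the wrapper itself
}
grid = GridSearchCV(estimator=clf, param_grid=param_grid, scoring='accuracy', cv=3)
# grid_result = grid.fit(X_train, y_train)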

Hyperparameter Tuning (Keras) a Neural Network Regression

We have developed an Artificial Neural Network in Python, and we would now like to tune its hyperparameters with GridSearchCV to find the best possible values. The goal of our ANN is to predict temperature based on other relevant features, and so far this is the evaluation of the network's performance:
Coefficient of Determination (R2):    0.9808840288506496
Root Mean Square Error (RMSE):        0.7527763482280911
Mean Squared Error (MSE):             0.5666722304516204
Mean Absolute Percent Error (MAPE):   0.09142692180578049
Mean Absolute Error (MAE):            0.588041786518511
Mean Bias Error (MBE):                -0.07293321963266877
As of now, we have no clue how to use GridSearchCV correctly, so we are seeking help to move towards a solution that satisfies our goal. We have a function that might work, but we are not able to apply it correctly to our code.
This is the hyperparameter tuning function (GridSearchCV):
def hyperparameterTuning():
    # Listing all the parameters to try
    Parameter_Trials = {'batch_size': [10, 20, 30],
                        'epochs': [10, 20],
                        'Optimizer_trial': ['adam', 'rmsprop']
                        }
    # Creating the regression ANN model
    RegModel = KerasRegressor(make_regression_ann, verbose=0)
    # Creating the grid search space
    grid_search = GridSearchCV(estimator=RegModel,
                               param_grid=Parameter_Trials,
                               scoring=None,
                               cv=5)
    # Running the grid search over the different parameters
    grid_search.fit(X, y, verbose=1)
    print('### Printing Best parameters ###')
    grid_search.best_params_
Our main function:
if __name__ == '__main__':
    print('--------------')
    dataframe = pd.read_csv("/.../file.csv")
    # Splitting data into training and testing data
    X_train, X_test, y_train, y_test, PredictorScalerFit, TargetVarScalerFit = splitData(dataframe=dataframe)
    # Making the Regression Artificial Neural Network (ANN)
    ann = ANN(X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test, PredictorScalerFit=PredictorScalerFit, TargetVarScalerFit=TargetVarScalerFit)
    # Evaluation of the performance of the Artificial Neural Network (ANN)
    eval = evaluation(y_test_orig=ann['temp'], y_test_pred=ann['Predicted_temp'])
Our function to split data into training and testing data:
def splitData(dataframe):
    X = dataframe[Predictors].values
    y = dataframe[TargetVariable].values
    ### Standardization of data ###
    PredictorScaler = StandardScaler()
    TargetVarScaler = StandardScaler()
    # Storing the fit object for later reference
    PredictorScalerFit = PredictorScaler.fit(X)
    TargetVarScalerFit = TargetVarScaler.fit(y)
    # Generating the standardized values of X and y
    X = PredictorScalerFit.transform(X)
    y = TargetVarScalerFit.transform(y)
    # Split the data into training and testing sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
    return X_train, X_test, y_train, y_test, PredictorScalerFit, TargetVarScalerFit
Our function to fit the model and to utilize the Artificial Neural Network (ANN):
def ANN(X_train, y_train, X_test, y_test, TargetVarScalerFit, PredictorScalerFit):
    model = make_regression_ann()
    # Fitting the ANN to the training set
    model.fit(X_train, y_train, batch_size=5, epochs=100, verbose=1)
    # Generating predictions on the testing data
    Predictions = model.predict(X_test)
    # Scaling the predicted temp data back to the original temp scale
    Predictions = TargetVarScalerFit.inverse_transform(Predictions)
    # Scaling the y_test temp data back to the original temp scale
    y_test_orig = TargetVarScalerFit.inverse_transform(y_test)
    # Scaling the test data back to the original scale
    Test_Data = PredictorScalerFit.inverse_transform(X_test)
    TestingData = pd.DataFrame(data=Test_Data, columns=Predictors)
    TestingData['temp'] = y_test_orig
    TestingData['Predicted_temp'] = Predictions
    TestingData.head()
    # Computing the absolute percent error
    APE = 100 * (abs(TestingData['temp'] - TestingData['Predicted_temp']) / TestingData['temp'])
    TestingData['APE'] = APE
    # ...
    TestingData = TestingData.round(2)
    TestingData.to_csv("TestingData.csv")
    return TestingData
Our function to build the ANN model:
def make_regression_ann():
    # Create the ANN model
    model = Sequential()
    # Defining the input layer and FIRST hidden layer; both are the same!
    model.add(Dense(units=8, input_dim=7, kernel_initializer='normal', activation='sigmoid'))
    # Defining the second layer of the model
    # After the first layer we don't have to specify input_dim, as Keras configures it automatically
    model.add(Dense(units=6, kernel_initializer='normal', activation='sigmoid'))
    # The output neuron is a single fully connected node,
    # since we will be predicting a single number
    model.add(Dense(1, kernel_initializer='normal'))
    # Compiling the model
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
Our function to evaluate the performance of the ANN:
def evaluation(y_test_orig, y_test_pred):
    # Computing the Mean Absolute Percent Error
    MAPE = mean_absolute_percentage_error(y_test_orig, y_test_pred)
    # Computing R2 Score
    r2 = r2_score(y_test_orig, y_test_pred)
    # Computing Mean Square Error (MSE)
    MSE = mean_squared_error(y_test_orig, y_test_pred)
    # Computing Root Mean Square Error (RMSE)
    RMSE = mean_squared_error(y_test_orig, y_test_pred, squared=False)
    # Computing Mean Absolute Error (MAE)
    MAE = mean_absolute_error(y_test_orig, y_test_pred)
    # Computing Mean Bias Error (MBE)
    MBE = np.mean(y_test_pred - y_test_orig)
    print('--------------')
    print('The Coefficient of Determination (R2) of ANN model is:', r2)
    print("The Root Mean Squared Error (RMSE) of ANN model is:", RMSE)
    print("The Mean Squared Error (MSE) of ANN model is:", MSE)
    print('The Mean Absolute Percent Error (MAPE) of ANN model is:', MAPE)
    print("The Mean Absolute Error (MAE) of ANN model is:", MAE)
    print("The Mean Bias Error (MBE) of ANN model is:", MBE)
    print('--------------')
    eval_list = [r2, RMSE, MSE, MAPE, MAE, MBE]
    columns = ['Coefficient of Determination (R2)', 'Root Mean Square Error (RMSE)', 'Mean Squared Error (MSE)',
               'Mean Absolute Percent Error (MAPE)', 'Mean Absolute Error (MAE)', 'Mean Bias Error (MBE)']
    dataframe = pd.DataFrame([eval_list], columns=columns)
    return dataframe
Your code should work if you update the make_regression_ann function to accept as arguments any hyperparameters you want to optimize, with the exception of the fitting parameters (epochs and batch_size), which the wrapper handles. For example:
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import make_regression
def make_regression_ann(initializer='uniform', activation='relu', optimizer='adam', loss='mse'):
    model = Sequential()
    model.add(Dense(units=8, input_dim=7, kernel_initializer=initializer, activation=activation))
    model.add(Dense(units=6, kernel_initializer=initializer, activation=activation))
    model.add(Dense(1, kernel_initializer=initializer))
    model.compile(loss=loss, optimizer=optimizer)
    return model
param_grid = {
    'initializer': ['normal', 'uniform'],
    'activation': ['relu', 'sigmoid'],
    'optimizer': ['adam', 'rmsprop'],
    'loss': ['mse', 'mae'],
    'batch_size': [32, 64],
    'epochs': [5, 10],
}
grid_search = GridSearchCV(
    estimator=KerasRegressor(make_regression_ann, verbose=0),
    param_grid=param_grid,
    scoring='neg_mean_absolute_percentage_error',
    cv=3,
)
X, y = make_regression(n_features=7, n_samples=100, random_state=42)
grid_search.fit(X, y, verbose=1)
grid_search.best_params_
# {'activation': 'sigmoid',
# 'batch_size': 32,
# 'epochs': 10,
# 'initializer': 'normal',
# 'loss': 'mae',
# 'optimizer': 'adam'}
The way I recently used GridSearchCV successfully was:
tuned_parameters2 = {'C': [1,10,100,10000], 'max_iter':[5000,10000,50000]}
model2 = GridSearchCV(svm.LinearSVC(), tuned_parameters2)
model2.fit(features, y_train)
So: a separate dictionary with the hyperparameters, then hand your model to GridSearchCV, e.g. GridSearchCV(KerasRegressor(make_regression_ann), the_hyperparam_dict), and then fit it with the data.
In your case this approach would require more refactoring; it's up to you to decide whether it's better to feed the ANN to GridSearchCV this way.

Tensorflow 2.0: How are metrics computed when the output is sequential?

I have been working with binary sequential inputs and outputs using Tensorflow 2.0, and I've been wondering which approach Tensorflow uses to compute metrics such as recall or accuracy during training in those scenarios.
Each sample to my network consists of 60 timesteps, each with 300 features, and thus my expected output is a (60, 1) array of 1s and 0s. Suppose I have 2000 validation samples. When evaluating the validation set for each epoch, does TensorFlow concatenate all 2000 samples into a single (2000*60 = 120000, 1) array and then compare it to the concatenated ground-truth labels, or does it evaluate each (60, 1) output individually and then return the mean of those values? Is there any way to modify this behavior?
TensorFlow/Keras by default computes the metrics batch-wise for the training data, while it computes the same metrics on ALL the data passed in the validation_data parameter of the fit method.
This means that the metric printed during fitting for the training data is the mean of that score calculated over all the batches. In other words, for the training set Keras evaluates each batch individually and then returns the mean of those values. For the validation data it is different: Keras takes all the validation samples and compares them with the "concatenated" ground-truth labels.
To demonstrate this behavior in code, here is a dummy example. I provide a custom callback that computes the accuracy score on ALL the data passed to it at the end of each epoch (for training and, optionally, validation). This is useful for understanding the behavior of TensorFlow during training.
import numpy as np
from sklearn.metrics import accuracy_score
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from tensorflow.keras.callbacks import *

class ACC_custom(tf.keras.callbacks.Callback):
    def __init__(self, train, validation=None):
        super(ACC_custom, self).__init__()
        self.validation = validation
        self.train = train

    def on_epoch_end(self, epoch, logs={}):
        logs['ACC_score_train'] = float('-inf')
        X_train, y_train = self.train[0], self.train[1]
        y_pred = (self.model.predict(X_train).ravel() > 0.5) + 0
        score = accuracy_score(y_train.ravel(), y_pred)
        if (self.validation):
            logs['ACC_score_val'] = float('-inf')
            X_valid, y_valid = self.validation[0], self.validation[1]
            y_val_pred = (self.model.predict(X_valid).ravel() > 0.5) + 0
            val_score = accuracy_score(y_valid.ravel(), y_val_pred)
            logs['ACC_score_train'] = np.round(score, 5)
            logs['ACC_score_val'] = np.round(val_score, 5)
        else:
            logs['ACC_score_train'] = np.round(score, 5)
Create dummy data:
x_train = np.random.uniform(0,1, (1000,60,10))
y_train = np.random.randint(0,2, (1000,60,1))
x_val = np.random.uniform(0,1, (500,60,10))
y_val = np.random.randint(0,2, (500,60,1))
Define and fit the model:
inp = Input(shape=(60, 10), dtype='float32')
x = Dense(32, activation='relu')(inp)
out = Dense(1, activation='sigmoid')(x)
model = Model(inp, out)

es = EarlyStopping(patience=10, verbose=1, min_delta=0.001,
                   monitor='ACC_score_val', mode='max', restore_best_weights=True)

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=10, verbose=2,
                    callbacks=[ACC_custom(train=(x_train, y_train), validation=(x_val, y_val)), es],
                    validation_data=(x_val, y_val))
In the graphs below I compare the accuracies computed by our callback with the accuracies computed by Keras:
import matplotlib.pyplot as plt

plt.plot(history.history['ACC_score_train'], label='accuracy_callback_train')
plt.plot(history.history['accuracy'], label='accuracy_default_train')
plt.legend(); plt.title('train accuracy')

plt.plot(history.history['ACC_score_val'], label='accuracy_callback_valid')
plt.plot(history.history['val_accuracy'], label='accuracy_default_valid')
plt.legend(); plt.title('validation accuracy')
As we can see, the accuracy on the training data (first plot) differs between the default method and our callback. This means that the accuracy on the training data is calculated batch-wise.
The validation accuracy (second plot) calculated by our callback and by the default method is the same! This means that the score on the validation data is computed in one shot over the full set.

How to get the model loss in sklearn

Whenever an sklearn model is fit to some data, it minimizes some loss function. How can I obtain the model loss using that loss function?
e.g.
model = sklearn.linear_model.LogisticRegression().fit(X_train,y_train)
model.get_loss(X_train, y_train) #gives the loss for these values
model.get_loss(X_test, y_test) #gives the loss for other values
Note that the .score method does NOT do this.
LogisticRegression minimises log loss, so you might expect .score to return the (negated) loss; however, it actually returns the mean accuracy.
To calculate the log loss you need to use the log_loss metric:
I haven't tested it, but something like this:
from sklearn.metrics import log_loss

model = sklearn.linear_model.LogisticRegression().fit(X_train, y_train)
# log_loss takes the true labels and the predicted probabilities
loss = log_loss(y_test, model.predict_proba(X_test), eps=1e-15)
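A self-contained version of the same idea, with synthetic data (the dataset and variable names here are mine, just for illustration):

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split

# toy binary classification problem
X, y = make_classification(n_samples=500, n_features=10, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

model = LogisticRegression().fit(X_train, y_train)

# log_loss(y_true, predicted_probabilities) gives the average log loss on that data
print('train log loss:', log_loss(y_train, model.predict_proba(X_train)))
print('test log loss: ', log_loss(y_test, model.predict_proba(X_test)))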

How to calculate precision and recall in Keras

I am building a multi-class classifier with Keras 2.02 (with the TensorFlow backend), and I do not know how to calculate precision and recall in Keras. Please help me.
Python package keras-metrics could be useful for this (I'm the package's author).
import keras
import keras_metrics

model = keras.models.Sequential()
model.add(keras.layers.Dense(1, activation="sigmoid", input_dim=2))
model.add(keras.layers.Dense(1, activation="softmax"))

model.compile(optimizer="sgd",
              loss="binary_crossentropy",
              metrics=[keras_metrics.precision(), keras_metrics.recall()])
UPDATE: Starting with Keras version 2.3.0, metrics such as precision and recall are provided within the library distribution package.
The usage is the following:
model.compile(optimizer="sgd",
loss="binary_crossentropy",
metrics=[keras.metrics.Precision(), keras.metrics.Recall()])
As of Keras 2.0, precision and recall were removed from the master branch, so you will have to implement them yourself. Follow this guide to create custom metrics: Here.
The precision and recall equations can be found Here.
Or reuse the Keras code from before they were removed: Here.
These metrics were removed because they were computed batch-wise, so the values may or may not be correct.
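If you go the do-it-yourself route, here is a minimal batch-wise sketch in the spirit of the removed Keras 1.x code (these are per-batch approximations, which is exactly why they were dropped):

from keras import backend as K

def precision(y_true, y_pred):
    # fraction of predicted positives that are correct, for the current batch only
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())

def recall(y_true, y_pred):
    # fraction of actual positives that are recovered, for the current batch only
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())

# model.compile(optimizer='sgd', loss='binary_crossentropy',
#               metrics=['accuracy', precision, recall])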
My answer is based on a comment on a Keras GitHub issue. It calculates validation precision and recall at every epoch for a one-hot-encoded classification task. Also, please look at this SO answer to see how it can be done with keras.backend functionality.
import keras
import numpy as np
from keras.optimizers import SGD
from sklearn.metrics import precision_score, recall_score

model = keras.models.Sequential()
# ...
sgd = SGD(lr=0.001, momentum=0.9)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

class Metrics(keras.callbacks.Callback):
    def on_train_begin(self, logs={}):
        self._data = []

    def on_epoch_end(self, epoch, logs={}):
        X_val, y_val = self.validation_data[0], self.validation_data[1]
        y_predict = np.asarray(model.predict(X_val))
        y_val = np.argmax(y_val, axis=1)
        y_predict = np.argmax(y_predict, axis=1)
        self._data.append({
            'val_recall': recall_score(y_val, y_predict),
            'val_precision': precision_score(y_val, y_predict),
        })
        return

    def get_data(self):
        return self._data

metrics = Metrics()
history = model.fit(X_train, y_train, epochs=100, validation_data=(X_val, y_val), callbacks=[metrics])
metrics.get_data()
This thread is a little stale, but just in case it helps someone landing here: if you are willing to upgrade to Keras v2.1.6, there has been a lot of work on getting stateful metrics to work, though more work seems to be in progress (https://github.com/keras-team/keras/pull/9446).
Anyway, I found the best way to integrate precision/recall was to use a custom metric that subclasses Layer, shown by example in BinaryTruePositives.
For recall, this would look like:
import keras
from keras import backend as K

class Recall(keras.layers.Layer):
    """Stateful metric to count the total recall over all batches.

    Assumes predictions and targets of shape `(samples, 1)`.

    # Arguments
        name: String, name for the metric.
    """

    def __init__(self, name='recall', **kwargs):
        super(Recall, self).__init__(name=name, **kwargs)
        self.stateful = True
        self.recall = K.variable(value=0.0, dtype='float32')
        self.true_positives = K.variable(value=0, dtype='int32')
        self.false_negatives = K.variable(value=0, dtype='int32')

    def reset_states(self):
        K.set_value(self.recall, 0.0)
        K.set_value(self.true_positives, 0)
        K.set_value(self.false_negatives, 0)

    def __call__(self, y_true, y_pred):
        """Updates the running recall with the current batch.

        # Arguments
            y_true: Tensor, batch-wise labels
            y_pred: Tensor, batch-wise predictions

        # Returns
            The total recall seen this epoch at the completion of the batch.
        """
        y_true = K.cast(y_true, 'int32')
        y_pred = K.cast(K.round(y_pred), 'int32')

        # False negative calculations (label 1, prediction 0)
        false_neg = K.cast(K.sum(K.cast(K.greater(y_true, y_pred), 'int32')), 'int32')
        self.add_update(K.update_add(self.false_negatives, false_neg),
                        inputs=[y_true, y_pred])

        # True positive calculations
        correct_preds = K.cast(K.equal(y_pred, y_true), 'int32')
        true_pos = K.cast(K.sum(correct_preds * y_true), 'int32')
        self.add_update(K.update_add(self.true_positives, true_pos),
                        inputs=[y_true, y_pred])

        # Combine into recall = TP / (TP + FN)
        recall = (K.cast(self.true_positives, 'float32') /
                  (K.cast(self.true_positives, 'float32') +
                   K.cast(self.false_negatives, 'float32') +
                   K.cast(K.epsilon(), 'float32')))
        self.add_update(K.update(self.recall, recall),
                        inputs=[y_true, y_pred])
        return recall
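Not from the original answer, but for anyone on current tf.keras: the same stateful idea is normally expressed by subclassing tf.keras.metrics.Metric (or by simply using the built-in tf.keras.metrics.Recall()). A rough, untested sketch for the binary case:

import tensorflow as tf

class StatefulRecall(tf.keras.metrics.Metric):
    """Accumulates true positives and false negatives across batches."""

    def __init__(self, name='recall', **kwargs):
        super().__init__(name=name, **kwargs)
        self.true_positives = self.add_weight(name='tp', initializer='zeros')
        self.false_negatives = self.add_weight(name='fn', initializer='zeros')

    def update_state(self, y_true, y_pred, sample_weight=None):
        # sample_weight is ignored here for brevity
        y_true = tf.cast(y_true, tf.float32)
        y_pred = tf.cast(tf.round(y_pred), tf.float32)
        self.true_positives.assign_add(tf.reduce_sum(y_true * y_pred))
        self.false_negatives.assign_add(tf.reduce_sum(y_true * (1.0 - y_pred)))

    def result(self):
        denom = self.true_positives + self.false_negatives + tf.keras.backend.epsilon()
        return self.true_positives / denom

    def reset_state(self):  # called reset_states in older TF releases
        self.true_positives.assign(0.0)
        self.false_negatives.assign(0.0)

# model.compile(optimizer='adam', loss='binary_crossentropy',
#               metrics=[StatefulRecall(), tf.keras.metrics.Recall()])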
Use the scikit-learn framework for this:
import numpy as np
from sklearn.metrics import classification_report

history = model.fit(x_train, y_train, batch_size=32, epochs=10, verbose=1, validation_data=(x_test, y_test), shuffle=True)

pred = model.predict(x_test, batch_size=32, verbose=1)
predicted = np.argmax(pred, axis=1)
report = classification_report(np.argmax(y_test, axis=1), predicted)
print(report)
This blog is very useful.
