Custom metric function with additional parameter - Python

def custom_metric(y_prem):
    def score_func(y_true, y_pred):
        diff = y_pred - y_true
        return tf.reduce_sum(diff[y_prem >= y_pred])
    return score_func
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(32, input_shape=[len(X_train[0, :])], activation='tanh'),
    tf.keras.layers.Dense(8, input_shape=[len(X_train[0, :])], activation='linear'),
    tf.keras.layers.Dense(4, input_shape=[len(X_train[0, :])], activation='tanh'),
    tf.keras.layers.Dense(1, activation='relu'),
])
model.compile(optimizer='adam', loss='mean_squared_error', metrics=[custom_metric(y_prem)])
model.summary()
model.fit(X_train_minmax, y_train, epochs=30, batch_size=len(y_train))
y_prem and y_train are both the same size (50646).
I have tried to define this custom metric function, where y_prem is a vector the same size as the predictions. I want to sum the difference between the predictions and the true values only at the indices where the prediction is lower than y_prem, but when I trained the model I received this error message:
File "C:/Users/zehavi kelman/PycharmProjects/Accident_predicting/simpego_test.py", line 61, in score_func *
return K.sum(diff[y_prem>=y_pred])
ValueError: Shapes (50646, 1) and (50646, 50646) are incompatible
How can I fix that?

I am not sure exactly what you want to do, but I implemented a reproducible example that does not output an error message (pay attention to the x and y shapes):
import tensorflow as tf

x = tf.random.uniform(shape=[50646, 5], minval=0, maxval=1)
y = tf.random.uniform(shape=[50646, 1], minval=0, maxval=1)
y_prem = tf.random.uniform(shape=[50646, 1], minval=0, maxval=1)

def custom_metric(y_prem):
    def score_func(y_true, y_pred):
        diff = y_pred - y_true
        return tf.reduce_sum(diff[y_prem >= y_pred])
    return score_func

model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(32, input_shape=[len(x[0, :])], activation='tanh'),
    tf.keras.layers.Dense(8, activation='linear'),
    tf.keras.layers.Dense(4, activation='tanh'),
    tf.keras.layers.Dense(1, activation='relu'),
])
model.compile(optimizer='adam', loss='mean_squared_error', metrics=[custom_metric(y_prem)])
model.summary()
model.fit(x, y, epochs=30, batch_size=len(y))
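The original error most likely comes from broadcasting: if y_true arrives with shape (50646,) while y_pred has shape (50646, 1), then diff = y_pred - y_true broadcasts to (50646, 50646), which no longer matches the (50646, 1) boolean mask. A minimal defensive sketch, assuming y_true and y_pred should both be column vectors, reshapes inside the metric and uses tf.boolean_mask explicitly:

import tensorflow as tf

def custom_metric(y_prem):
    def score_func(y_true, y_pred):
        # Assumption: y_true may arrive as (batch,); force it to match y_pred
        # so the subtraction cannot broadcast to (batch, batch)
        y_true = tf.reshape(y_true, tf.shape(y_pred))
        diff = y_pred - y_true
        # Explicit equivalent of diff[y_prem >= y_pred]
        return tf.reduce_sum(tf.boolean_mask(diff, y_prem >= y_pred))
    return score_func

Also note that a metric only sees one batch at a time, so the closed-over y_prem (full length 50646) lines up with y_pred here only because batch_size=len(y) makes each batch the whole dataset.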

Related

Is there a way to print the calculated max gradient of each layer for a given mini-batch?

I am implementing a fully-connected model for classification on the MNIST dataset. Part of the code is the following:
model = tf.keras.models.Sequential([
    tf.keras.layers.Input(shape=(28, 28, 1)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(
    loss='categorical_crossentropy',
    optimizer=tf.optimizers.SGD(),
    metrics=["accuracy"]
)
model.fit(
    x_train,
    y_train,
    batch_size=64,
    epochs=3,
    validation_data=(x_test, y_test)
)
Is there a way to print the max gradient for each layer for a given mini-batch?
Define a custom training loop instead of invoking compile() and fit().
optimizer = tf.keras.optimizers.Adam(0.001)
# categorical_crossentropy to match the one-hot labels used in the question
loss = tf.keras.losses.CategoricalCrossentropy()
# iterate over mini-batches, not individual samples
dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(64)
for x, y in dataset:
    with tf.GradientTape() as tape:
        predictions = model(x)
        loss_value = loss(y, predictions)
    gradients = tape.gradient(loss_value, model.trainable_weights)
    optimizer.apply_gradients(zip(gradients, model.trainable_weights))
    for layer in range(0, 4):  # for 4 Dense layers
        # trainable_weights alternates kernel, bias per Dense layer:
        # kernel of layer i is at index 2*i, its bias at 2*i + 1
        print('max gradient of layer={}, kernel={}, bias={}'.format(
            layer, gradients[layer * 2].numpy().max(), gradients[layer * 2 + 1].numpy().max()))
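Note that .numpy().max() reports the largest signed entry, not the largest magnitude. If you care about how large the gradients are in absolute terms, take the maximum absolute value instead, e.g. (a small sketch):

# Largest absolute gradient entry per trainable variable
max_abs_grads = [tf.reduce_max(tf.abs(g)).numpy() for g in gradients]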

Tensorflow NN architecture incompatible shape

I am trying to create a Multitask NN using Tensorflow. Following is the architecture that I am trying to develop:
METRICS = [tf.keras.metrics.TruePositives(name='TP'),
           tf.keras.metrics.FalsePositives(name='FP'),
           tf.keras.metrics.TrueNegatives(name='TN'),
           tf.keras.metrics.FalseNegatives(name='FN'),
           tf.keras.metrics.Precision(name='precision'),
           tf.keras.metrics.Recall(name='recall'),
           tf.keras.metrics.AUC(curve='PR', name='PR-AUC')]

input_shape = (X_train.shape[1],)
inputlayer = tf.keras.layers.Input(shape=input_shape)
l1 = tf.keras.layers.Dense(input_shape[0]*2, activation='relu')(inputlayer)
l2 = tf.keras.layers.Dropout(0.1)(l1)
l3 = tf.keras.layers.Dense(int(input_shape[0]/2), activation='relu')(l2)
output1 = tf.keras.layers.Dense(1, activation='sigmoid', name='output1')(l3)
output2 = tf.keras.layers.Dense(10, activation='softmax', name='output2')(l3)
output3 = tf.keras.layers.Dense(12, activation='softmax', name='output3')(l3)
model = tf.keras.Model(inputs=inputlayer, outputs=[output1, output2, output3])
model.compile(loss={"output1": 'binary_crossentropy',
                    "output2": 'categorical_crossentropy',
                    "output3": 'categorical_crossentropy'},
              optimizer=tf.keras.optimizers.Adam(learning_rate=.01),
              metrics=METRICS, loss_weights=[1, 1e-1, 1e-1])
Then I tried to train the model like this:
BATCH_SIZE = 20
model.fit(X_train, [y1_train, y2_train, y3_train], batch_size=BATCH_SIZE, epochs=10, verbose=0)
But I got the following issue:
ValueError: Shapes (None, 1) and (None, 10) are incompatible
I already verified the labels of each output; they have 2, 10, and 12 classes respectively.
I can't understand what the problem is exactly. Can anyone give me a suggestion, please?
I think you might have mixed up the order of your labels. Here is a working example:
import tensorflow as tf

METRICS = [tf.keras.metrics.TruePositives(name='TP'),
           tf.keras.metrics.FalsePositives(name='FP'),
           tf.keras.metrics.TrueNegatives(name='TN'),
           tf.keras.metrics.FalseNegatives(name='FN'),
           tf.keras.metrics.Precision(name='precision'),
           tf.keras.metrics.Recall(name='recall'),
           tf.keras.metrics.AUC(curve='PR', name='PR-AUC')]

input_shape = (31,)
inputlayer = tf.keras.layers.Input(shape=input_shape)
l1 = tf.keras.layers.Dense(input_shape[0]*2, activation='relu')(inputlayer)
l2 = tf.keras.layers.Dropout(0.1)(l1)
l3 = tf.keras.layers.Dense(int(input_shape[0]/2), activation='relu')(l2)
output1 = tf.keras.layers.Dense(1, activation='sigmoid', name='output1')(l3)
output2 = tf.keras.layers.Dense(10, activation='softmax', name='output2')(l3)
output3 = tf.keras.layers.Dense(12, activation='softmax', name='output3')(l3)
model = tf.keras.Model(inputs=inputlayer, outputs=[output1, output2, output3])
model.compile(loss={"output1": 'binary_crossentropy',
                    "output2": 'categorical_crossentropy',
                    "output3": 'categorical_crossentropy'},
              optimizer=tf.keras.optimizers.Adam(learning_rate=.01),
              metrics=METRICS, loss_weights=[1, 1e-1, 1e-1])

y1_train = tf.random.uniform((50, 1), maxval=2)
y2_train = tf.random.uniform((50, 10), maxval=11)
y3_train = tf.random.uniform((50, 12), maxval=13)
model.fit(tf.random.normal((50, 31)), [y1_train, y2_train, y3_train], batch_size=20, epochs=10)
You need to make sure that y1_train, y2_train, and y3_train are in the correct order and have the correct shapes, that is (samples, 1), (samples, 10), and (samples, 12).
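If your labels are stored as integer class ids, one-hot encoding them to the widths of the softmax heads produces exactly these shapes. A minimal sketch, using hypothetical integer label vectors y2_int and y3_int:

import tensorflow as tf

# Hypothetical integer labels with 10 and 12 classes respectively
y2_int = tf.random.uniform((50,), maxval=10, dtype=tf.int32)
y3_int = tf.random.uniform((50,), maxval=12, dtype=tf.int32)

y2_train = tf.one_hot(y2_int, depth=10)  # shape (50, 10)
y3_train = tf.one_hot(y3_int, depth=12)  # shape (50, 12)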

Keras - min and max in predicting Y

How can I set min and max values so that the model only predicts Y outputs between them?
model = Sequential([
    Dense(units=100, input_shape=(3,), activation='tanh'),
    Dense(units=18, activation='tanh'),
    Dense(units=1, activation='tanh'),
    Dense(units=1, activation='softmax')
])
opt = keras.optimizers.SGD(learning_rate=0.0001, momentum=0.9999, nesterov=False)
model.compile(optimizer=opt, loss='mae', metrics=[tf.keras.metrics.MeanAbsoluteError()])
model.fit(df, target, epochs=300, shuffle=False, verbose=1, callbacks=[estop, rlronp], validation_split=0.2)
Use tf.clip_by_value:
tf.clip_by_value(t, clip_value_min, clip_value_max, name=None)
In your code:
Dense(units=1, activation=lambda x: tf.clip_by_value(x, -5, 5))
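If you would rather have the output vary smoothly inside the range instead of saturating hard at the clip boundaries, you can rescale a sigmoid instead. A minimal sketch, where the bounds y_min = -5 and y_max = 5 are placeholders for your own limits; note that the original model's final Dense(units=1, activation='softmax') layer should be replaced in any case, since softmax over a single unit always outputs 1:

import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense

y_min, y_max = -5.0, 5.0  # assumed bounds; substitute your own

model = Sequential([
    Dense(units=100, input_shape=(3,), activation='tanh'),
    Dense(units=18, activation='tanh'),
    # sigmoid outputs (0, 1); rescale to (y_min, y_max)
    Dense(units=1, activation=lambda x: y_min + (y_max - y_min) * tf.sigmoid(x)),
])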

How to get precision and recall for a Keras model?

I want to see precision and recall for my model for binary image classification, but I can't find out how to do that.
Here is my code:
x = base_model.output
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(1024, activation='relu')(x)
x = tf.keras.layers.Dense(1024, activation='relu')(x)
x = tf.keras.layers.Dense(512, activation='relu')(x)
preds = tf.keras.layers.Dense(2, activation='softmax')(x)
model = tf.keras.Model(inputs=base_model.input, outputs=preds)
for layer in model.layers[:175]:
    layer.trainable = False
for layer in model.layers[175:]:
    layer.trainable = True
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit_generator(generator=train_generator,
                              epochs=20,
                              steps_per_epoch=step_size_train,
                              validation_data=test_generator,
                              validation_steps=step_size_test)
If you want precision and recall during training, you can add precision and recall metrics to the metrics list during model compilation, as below:
model.compile(optimizer='Adam', loss='categorical_crossentropy',
              metrics=['accuracy',
                       tf.keras.metrics.Precision(),
                       tf.keras.metrics.Recall()])
Example:
import numpy as np
import tensorflow as tf

input = tf.keras.layers.Input(shape=(8,))
x = tf.keras.layers.Dense(4, activation='relu')(input)
output = tf.keras.layers.Dense(2, activation='softmax')(x)
model = tf.keras.Model(inputs=input, outputs=output)
model.compile(optimizer='Adam', loss='categorical_crossentropy',
              metrics=['accuracy',
                       tf.keras.metrics.Precision(),
                       tf.keras.metrics.Recall()])
X = np.random.randn(100, 8)
y = np.random.randint(0, 2, (100, 2))
model.fit(X, y, epochs=10)
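Note that tf.keras.metrics.Precision and tf.keras.metrics.Recall threshold each output independently, so with a 2-unit softmax they are computed over both columns. For binary classification it is often simpler to use a single sigmoid unit with binary labels; a minimal sketch of that variant, under the same random-data assumptions as the example above:

import numpy as np
import tensorflow as tf

# One sigmoid unit and binary labels in {0, 1}
inp = tf.keras.layers.Input(shape=(8,))
x = tf.keras.layers.Dense(4, activation='relu')(inp)
out = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.Model(inputs=inp, outputs=out)
model.compile(optimizer='Adam', loss='binary_crossentropy',
              metrics=['accuracy',
                       tf.keras.metrics.Precision(),
                       tf.keras.metrics.Recall()])
X = np.random.randn(100, 8)
y = np.random.randint(0, 2, (100, 1))
model.fit(X, y, epochs=10)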

Error in Keras sparse_categorical_crossentropy loss function

I am trying a deep neural network prediction but am getting this error:
InvalidArgumentError: logits and labels must have the same first dimension, got logits shape [32,4] and labels shape [128]
Here are the features:
new_features.shape
(19973, 8)
new_features[0].shape
(8,)
Here are the labels/outputs:
output.shape
(19973, 4)
output[0].shape
(4,)
Here is the Keras code:
model = Sequential(
    [
        Dense(units=8, input_shape=new_features[0].shape, name="layer1"),
        Dense(units=1024, activation="relu", name="layer2"),
        Dense(units=1024, activation="relu", name="layer3"),
        Dense(units=4, name="layer4", activation="softmax"),
    ]
)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.fit(new_features, output, epochs=2)
The features and labels contain float values.
The problem is in your target shape. First of all, the target in a classification problem must be integer class ids (or one-hot vectors, see below).
If you have a 1D integer-encoded target, you can use sparse_categorical_crossentropy as the loss function:
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

X = np.random.randint(0, 10, (1000, 100))
y = np.random.randint(0, 3, 1000)

model = Sequential([
    Dense(128, input_dim=100),
    Dense(3, activation='softmax'),
])
model.summary()
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X, y, epochs=3)
Otherwise, if you have one-hot encoded your target so that it has the 2D shape (n_samples, n_classes), you can use categorical_crossentropy:
import numpy as np
import pandas as pd
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

X = np.random.randint(0, 10, (1000, 100))
y = pd.get_dummies(np.random.randint(0, 3, 1000)).values

model = Sequential([
    Dense(128, input_dim=100),
    Dense(3, activation='softmax'),
])
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X, y, epochs=3)
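In the question above, output already has shape (19973, 4), which looks one-hot encoded, so you can either switch the loss to categorical_crossentropy or convert the targets back to integer class ids. A small sketch of the latter, assuming output really is one-hot:

import numpy as np

# Assumption: `output` is one-hot encoded with shape (19973, 4)
int_labels = np.argmax(output, axis=1)  # shape (19973,), integer class ids

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.fit(new_features, int_labels, epochs=2)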
