I have a doubt. I am also using Mask R-CNN, but with TensorFlow 2.0. When I run TensorBoard I only get a single loss curve instead of a separate graph for each loss, and I am not sure how to retrieve the individual losses.
This is the compile() method from my model.py:
def compile(self, learning_rate, momentum):
    """Gets the model ready for training. Adds losses, regularization, and
    metrics. Then calls the Keras compile() function.
    """
    # Optimizer object
    optimizer = keras.optimizers.SGD(
        lr=learning_rate, momentum=momentum,
        clipnorm=self.config.GRADIENT_CLIP_NORM)
    loss_names = [
        "rpn_class_loss", "rpn_bbox_loss",
        "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
    added_loss_name = []
    for name in loss_names:
        layer = self.keras_model.get_layer(name)
        if layer.output.name in added_loss_name:
            # if layer.output in self.keras_model.losses:
            continue
        loss = (
            tf.math.reduce_mean(layer.output, keepdims=True)
            * self.config.LOSS_WEIGHTS.get(name, 1.))
        self.keras_model.add_loss(loss)
        added_loss_name.append(layer.output.name)
    # Add L2 Regularization
    # Skip gamma and beta weights of batch normalization layers.
    reg_losses = [
        keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
        for w in self.keras_model.trainable_weights
        if 'gamma' not in w.name and 'beta' not in w.name]
    self.keras_model.add_loss(tf.add_n(reg_losses))
    # Compile
    self.keras_model.compile(
        optimizer=optimizer,
        loss=[None] * len(self.keras_model.outputs))
    # Add metrics for losses
    for name in loss_names:
        if name in self.keras_model.metrics_names:
            continue
        layer = self.keras_model.get_layer(name)
        self.keras_model.metrics_names.append(name)
        loss = (
            tf.reduce_mean(layer.output, keepdims=True)
            * self.config.LOSS_WEIGHTS.get(name, 1.))
        self.keras_model.metrics.append(loss)
I want to see each loss separately instead of one single loss, like in the image below (from matterport/Mask_RCNN).
[Image: separate loss curves in TensorBoard]
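(For reference, one commonly suggested approach in TF 2.x Keras is to register each loss tensor as a named metric with add_metric, so Keras logs a separate scalar per loss and the TensorBoard callback plots them individually. The following is only a hedged, unverified sketch of how the final loop in compile() above might be rewritten under that assumption; add_metric is a standard tf.keras.Model API, but this has not been checked against the full Mask R-CNN model.)

# Hedged sketch: replace the "Add metrics for losses" loop with add_metric()
# so each loss appears as its own named scalar in TensorBoard (TF 2.x Keras).
for name in loss_names:
    if name in self.keras_model.metrics_names:
        continue
    layer = self.keras_model.get_layer(name)
    loss = (
        tf.reduce_mean(layer.output, keepdims=True)
        * self.config.LOSS_WEIGHTS.get(name, 1.))
    # add_metric registers the tensor as a tracked metric under its own name,
    # so Keras reports it per epoch and the TensorBoard callback plots it.
    self.keras_model.add_metric(loss, name=name, aggregation='mean')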
TensorFlow version: 2.1
I want to get the gradients with respect to the input instead of the gradients with respect to the trainable weights. I adjusted the example from https://www.tensorflow.org/guide/keras/train_and_evaluate to:
import tensorflow as tf
import numpy as np

physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, 'Not enough GPU hardware devices available'
tf.config.experimental.set_memory_growth(physical_devices[0], True)

def loss_fun(y_true, y_pred):
    loss = tf.reduce_mean(tf.square(y_true - y_pred), axis=-1)
    return loss

# Create a dataset
x = np.random.rand(10, 180, 320, 3).astype(np.float32)
y = np.random.rand(10, 1).astype(np.float32)
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(1)

# Create a model
base_model = tf.keras.applications.MobileNet(input_shape=(180, 320, 3), weights=None, include_top=False)
x = tf.keras.layers.GlobalAveragePooling2D()(base_model.output)
output = tf.keras.layers.Dense(1)(x)
model = tf.keras.models.Model(inputs=base_model.input, outputs=output)

for input, target in dataset:
    for iteration in range(400):
        with tf.GradientTape() as tape:
            # Run the forward pass of the layer.
            # The operations that the layer applies
            # to its inputs are going to be recorded
            # on the GradientTape.
            prediction = model(input, training=False)  # Logits for this minibatch

            # Compute the loss value for this minibatch.
            loss_value = loss_fun(target, prediction)

        # Use the gradient tape to automatically retrieve
        # the gradients of the trainable variables with respect to the loss.
        grads = tape.gradient(loss_value, model.inputs)
        print(grads)  # output: [None]

        # Run one step of gradient descent by updating
        # the value of the variables to minimize the loss.
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
        optimizer.apply_gradients(zip(grads, model.inputs))
        print('Iteration {}'.format(iteration))
However, this does not work, because grads = tape.gradient(loss_value, model.inputs) returns [None]. Is this intended behaviour or not? If yes, what is the recommended way to get the gradients with respect to the input?
To get it working, two things need to be added:
Converting the image to a tf.Variable
Using tape.watch so the tape tracks the desired tensor
image = tf.Variable(input)
for iteration in range(400):
    with tf.GradientTape() as tape:
        tape.watch(image)
        # Run the forward pass of the layer.
        # The operations that the layer applies
        # to its inputs are going to be recorded
        # on the GradientTape.
        prediction = model(image, training=False)  # Logits for this minibatch

        # Compute the loss value for this minibatch.
        loss_value = loss_fun(target, prediction)

    # Use the gradient tape to automatically retrieve
    # the gradients of the loss with respect to the image.
    grads = tape.gradient(loss_value, image)

    # Run one step of gradient descent by updating
    # the value of the image to minimize the loss.
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
    optimizer.apply_gradients(zip([grads], [image]))
    print('Iteration {}'.format(iteration))
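As a side note, if you only need the gradient and do not want to update the input with an optimizer, a tf.Variable is not strictly required: watching the input tensor on the tape is enough. A minimal sketch, reusing model, loss_fun, and dataset from above:

# Hedged sketch: gradient of the loss w.r.t. the raw input tensor,
# without wrapping it in a tf.Variable (reuses model/loss_fun/dataset above).
for input, target in dataset:
    with tf.GradientTape() as tape:
        tape.watch(input)                        # explicitly track the non-variable input
        prediction = model(input, training=False)
        loss_value = loss_fun(target, prediction)
    input_grads = tape.gradient(loss_value, input)
    print(input_grads.shape)                     # same shape as the input batch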
I am doing a slight modification of a standard neural network by defining a custom loss function. The custom loss function depends not only on y_true and y_pred, but also on the training data. I implemented it using the wrapping solution described here.
Specifically, I wanted to define a custom loss function that is the standard mse plus the mse between the input and the square of y_pred:
def custom_loss(x_true):
    def loss(y_true, y_pred):
        return K.mean(K.square(y_pred - y_true) + K.square(y_true - x_true))
    return loss
Then I compile the model using
model_custom.compile(loss = custom_loss( x_true=training_data ), optimizer='adam')
fit the model using
model_custom.fit(training_data, training_label, epochs=100, batch_size = training_data.shape[0])
All of the above works fine, because the batch size is actually the number of all the training samples.
But if I set a different batch_size (e.g., 10) when I have 1000 training samples, there will be an error
Incompatible shapes: [1000] vs. [10].
It seems that Keras is able to automatically adjust the size of the inputs to its own loss functions based on the batch size, but cannot do so for the custom loss function.
Do you know how to solve this issue?
Thank you!
==========================================================================
* Update: the batch size issue is solved, but another issue occurred
Thank you, Ori, for the suggestion of concatenating the input and output layers! It "worked", in the sense that the code now runs with any batch size. However, the results from training the new model seem wrong... Below is a simplified version of the code to demonstrate the problem:
import numpy as np
import scipy.io
import keras
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Dense, Activation
from numpy.random import seed
from tensorflow import set_random_seed
def custom_loss(y_true, y_pred):  # this is essentially the mean_square_error
    mse = K.mean(K.square(y_pred[:, 2] - y_true))
    return mse
# set the seeds so that we get the same initialization across different trials
seed_numpy = 0
seed_tensorflow = 0
# generate data of x = [ y^3 y^2 ]
y = np.random.rand(5000+1000,1) * 2 # generate 5000 training and 1000 testing samples
x = np.concatenate( ( np.power(y, 3) , np.power(y, 2) ) , axis=1 )
training_data = x[0:5000:1,:]
training_label = y[0:5000:1]
testing_data = x[5000:6000:1,:]
testing_label = y[5000:6000:1]
# build the standard neural network with one hidden layer
seed(seed_numpy)
set_random_seed(seed_tensorflow)
input_standard = Input(shape=(2,)) # input
hidden_standard = Dense(10, activation='relu', input_shape=(2,))(input_standard) # hidden layer
output_standard = Dense(1, activation='linear')(hidden_standard) # output layer
model_standard = Model(inputs=[input_standard], outputs=[output_standard]) # build the model
model_standard.compile(loss='mean_squared_error', optimizer='adam') # compile the model
model_standard.fit(training_data, training_label, epochs=50, batch_size = 500) # train the model
testing_label_pred_standard = model_standard.predict(testing_data) # make prediction
# get the mean squared error
mse_standard = np.sum( np.power( testing_label_pred_standard - testing_label , 2 ) ) / 1000
# build the neural network with the custom loss
seed(seed_numpy)
set_random_seed(seed_tensorflow)
input_custom = Input(shape=(2,)) # input
hidden_custom = Dense(10, activation='relu', input_shape=(2,))(input_custom) # hidden layer
output_custom_temp = Dense(1, activation='linear')(hidden_custom) # output layer
output_custom = keras.layers.concatenate([input_custom, output_custom_temp])
model_custom = Model(inputs=[input_custom], outputs=[output_custom]) # build the model
model_custom.compile(loss = custom_loss, optimizer='adam') # compile the model
model_custom.fit(training_data, training_label, epochs=50, batch_size = 500) # train the model
testing_label_pred_custom = model_custom.predict(testing_data) # make prediction
# get the mean squared error
mse_custom = np.sum( np.power( testing_label_pred_custom[:,2:3:1] - testing_label , 2 ) ) / 1000
# compare the result
print( [ mse_standard , mse_custom ] )
Basically, I have a standard one-hidden-layer neural network, and a custom one-hidden-layer neural network whose output layer is concatenated with the input layer. For testing purposes, I did not use the concatenated input layer in the custom loss function, because I wanted to see whether the custom network could reproduce the standard neural network. Since the custom loss function is equivalent to the standard 'mean_squared_error' loss, both networks should have the same training results (I also reset the random seeds to make sure that they have the same initialization).
However, the training results are very different. It seems that the concatenation makes the training process different? Any ideas?
Thank you again for all your help!
Final update: Ori's approach of concatenating input and output layers works, and is verified by using the generator. Thanks!!
The problem is that when compiling the model, you set x_true to be a static tensor with the size of all the samples, while the inputs to Keras loss functions are y_true and y_pred, each of size [batch_size, :].
As I see it, there are two ways to solve this. The first is to use a generator to create the batches, so that you control which indices are evaluated each time; in the loss function you can then slice the x_true tensor to match the samples being evaluated:
def custom_loss(x_true):
    def loss(y_true, y_pred):
        x_true_samples = relevant_samples(x_true)  # slice x_true down to the current batch
        return K.mean(K.square(y_pred - y_true) + K.square(y_true - x_true_samples))
    return loss
This solution can get complicated, so I would suggest a simpler workaround: concatenate the input layer with the output layer, so that your new output has the form [original_output, input].
Now you can use a new, modified loss function:
def loss(y_true, y_pred):
    return K.mean(K.square(y_pred[:, :output_shape] - y_true[:, :output_shape]) +
                  K.square(y_true[:, :output_shape] - y_pred[:, output_shape:]))
Now your new loss function will take into account both the input data and the prediction.
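To make the workaround concrete, here is a minimal sketch of how the model could be wired up under this scheme. The layer sizes and the output_shape value are illustrative (matching the toy example later in this thread), not a verified drop-in:

# Minimal sketch (illustrative shapes; Keras functional API).
from keras.layers import Input, Dense, concatenate
from keras.models import Model
from keras import backend as K

output_shape = 1                               # size of the original output

inp = Input(shape=(2,))
hidden = Dense(10, activation='relu')(inp)
original_out = Dense(output_shape, activation='linear')(hidden)
new_out = concatenate([original_out, inp])     # new output = [original_output, input]

def loss(y_true, y_pred):
    # y_pred[:, :output_shape] is the original prediction,
    # y_pred[:, output_shape:] is the input forwarded through the concatenation.
    return K.mean(K.square(y_pred[:, :output_shape] - y_true[:, :output_shape]) +
                  K.square(y_true[:, :output_shape] - y_pred[:, output_shape:]))

model = Model(inputs=inp, outputs=new_out)
model.compile(loss=loss, optimizer='adam')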
Edit:
Note that while you set the seed, your models are not exactly the same, and since you did not use a generator, you let Keras choose the batches, and for different models it might pick different samples.
As your model does not converge, different samples can lead to different results.
I added a generator to your code so that we control which samples are used for training; now you can see that both results are the same:
def custom_loss(y_true, y_pred):  # this is essentially the mean_square_error
    mse = keras.losses.mean_squared_error(y_true, y_pred[:, 2])
    return mse

def generator(x, y, batch_size):
    # Walk through the training set in order, wrapping around at the end,
    # so the samples used in each batch are deterministic.
    curIndex = 0
    batch_x = np.zeros((batch_size, 2))
    batch_y = np.zeros((batch_size, 1))
    while True:
        for i in range(batch_size):
            batch_x[i] = x[curIndex, :]
            batch_y[i] = y[curIndex, :]
            curIndex += 1
            if curIndex == 5000:
                curIndex = 0
        yield batch_x, batch_y
# set the seeds so that we get the same initialization across different trials
seed_numpy = 0
seed_tensorflow = 0
# generate data of x = [ y^3 y^2 ]
y = np.random.rand(5000+1000,1) * 2 # generate 5000 training and 1000 testing samples
x = np.concatenate( ( np.power(y, 3) , np.power(y, 2) ) , axis=1 )
training_data = x[0:5000:1,:]
training_label = y[0:5000:1]
testing_data = x[5000:6000:1,:]
testing_label = y[5000:6000:1]
batch_size = 32
# build the standard neural network with one hidden layer
seed(seed_numpy)
set_random_seed(seed_tensorflow)
input_standard = Input(shape=(2,)) # input
hidden_standard = Dense(10, activation='relu', input_shape=(2,))(input_standard) # hidden layer
output_standard = Dense(1, activation='linear')(hidden_standard) # output layer
model_standard = Model(inputs=[input_standard], outputs=[output_standard]) # build the model
model_standard.compile(loss='mse', optimizer='adam') # compile the model
#model_standard.fit(training_data, training_label, epochs=50, batch_size = 10) # train the model
model_standard.fit_generator(generator(training_data,training_label,batch_size), steps_per_epoch= 32, epochs= 100)
testing_label_pred_standard = model_standard.predict(testing_data) # make prediction
# get the mean squared error
mse_standard = np.sum( np.power( testing_label_pred_standard - testing_label , 2 ) ) / 1000
# build the neural network with the custom loss
seed(seed_numpy)
set_random_seed(seed_tensorflow)
input_custom = Input(shape=(2,)) # input
hidden_custom = Dense(10, activation='relu', input_shape=(2,))(input_custom) # hidden layer
output_custom_temp = Dense(1, activation='linear')(hidden_custom) # output layer
output_custom = keras.layers.concatenate([input_custom, output_custom_temp])
model_custom = Model(inputs=input_custom, outputs=output_custom) # build the model
model_custom.compile(loss = custom_loss, optimizer='adam') # compile the model
#model_custom.fit(training_data, training_label, epochs=50, batch_size = 10) # train the model
model_custom.fit_generator(generator(training_data,training_label,batch_size), steps_per_epoch= 32, epochs= 100)
testing_label_pred_custom = model_custom.predict(testing_data)
# get the mean squared error
mse_custom = np.sum( np.power( testing_label_pred_custom[:,2:3:1] - testing_label , 2 ) ) / 1000
# compare the result
print( [ mse_standard , mse_custom ] )
I have been working on deploying a custom estimator (a TensorFlow model). Training on ml-engine works fine, but when I use ml-engine batch predictions I cannot get the key (or any id of the original input); as you know, batch prediction runs in distributed mode, and "keys" help to understand which prediction corresponds to which input. I found this post that solves the problem, but it uses a pre-made (canned) TensorFlow model (the census use case).
How can I adapt my custom model (tf.contrib.learn.Estimator()) to get "keys" in the predictions?
An example of my output file:
{"predicted": [0.04930919408798218, 0.05402487516403198, 0.059984803199768066, 0.017936021089553833]}
And my model function is as follows:
SEQ_LEN = 12
DEFAULTS = [[0.0] for x in range(0, SEQ_LEN)]
BATCH_SIZE = 32
TIMESERIES_COL = 'rawdata'
N_OUTPUTS = 4  # in each sequence, 1-8 are features, and 9-12 are labels
N_INPUTS = SEQ_LEN - N_OUTPUTS
LSTM_SIZE = 10  # number of hidden layers in each of the LSTM cells
LAMBDA_L2_REG = 0  # regularization coefficient

def simple_rnn(features, targets, mode):
    # 0. Reformat input shape to become a sequence
    x = tf.split(features[TIMESERIES_COL], N_INPUTS, 1)
    #print 'x={}'.format(x)

    # 1. Configure the RNN
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(LSTM_SIZE, forget_bias=1.0)
    outputs, _ = tf.contrib.rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    # Slice to keep only the last cell of the RNN
    outputs = outputs[-1]
    #print 'last outputs={}'.format(outputs)

    # Output is result of linear activation of last layer of RNN
    w = tf.Variable(tf.random_normal([LSTM_SIZE, N_OUTPUTS]))
    b = tf.Variable(tf.random_normal([N_OUTPUTS]))
    predictions = tf.matmul(outputs, w) + b

    # 2. Loss function, training/eval ops
    if mode == tf.contrib.learn.ModeKeys.TRAIN or mode == tf.contrib.learn.ModeKeys.EVAL:
        l2_reg = tf.reduce_mean(tf.nn.l2_loss(w))
        loss = tf.losses.mean_squared_error(targets, predictions) + LAMBDA_L2_REG * l2_reg
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            #learning_rate=0.01,
            learning_rate=tf.train.exponential_decay(0.01, tf.contrib.framework.get_global_step(), 500, 0.96, staircase=True),
            optimizer="Adam",
            clip_gradients=2.5)
        eval_metric_ops = {
            "rmse": tf.metrics.root_mean_squared_error(targets, predictions)
        }
    else:
        loss = None
        train_op = None
        eval_metric_ops = None

    # 3. Create predictions
    predictions_dict = {"predicted": predictions}

    # 4. Return ModelFnOps
    return tf.contrib.learn.ModelFnOps(
        mode=mode,
        predictions=predictions_dict,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops)
I use Python 2.7 and TensorFlow 1.6.
Thanks in advance!
What you are looking for is forward_features. However, there is a bug in that function where the model export doesn't work correctly; the fix looks like it won't land until TF 1.8.
There is more info in this answer, including a potential workaround, repeated here for your convenience (taken from this code sample):
def forward_key_to_export(estimator):
    estimator = tf.contrib.estimator.forward_features(estimator, KEY_COLUMN)
    # return estimator

    ## This shouldn't be necessary (I've filed CL/187793590 to update extenders.py with this code)
    config = estimator.config
    def model_fn2(features, labels, mode):
        estimatorSpec = estimator._call_model_fn(features, labels, mode, config=config)
        if estimatorSpec.export_outputs:
            for ekey in ['predict', 'serving_default']:
                if (ekey in estimatorSpec.export_outputs and
                    isinstance(estimatorSpec.export_outputs[ekey],
                               tf.estimator.export.PredictOutput)):
                    estimatorSpec.export_outputs[ekey] = \
                        tf.estimator.export.PredictOutput(estimatorSpec.predictions)
        return estimatorSpec
    return tf.estimator.Estimator(model_fn=model_fn2, config=config)
    ##
To use it, you would do something like this:
estimator = build_estimator(...)
estimator = forward_key_to_export(estimator)
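For forward_features to have something to forward, the key must also be present in the input features at serving time. Below is a hedged sketch of what a serving input function with a pass-through key might look like; KEY_COLUMN and the placeholder shapes are illustrative, and this uses the tf.estimator export API (matching the workaround above) rather than tf.contrib.learn:

# Hedged sketch: a serving input function that accepts a string key alongside
# the time-series input, so forward_features can copy it into the predictions.
KEY_COLUMN = 'key'  # illustrative name; must match the field in your input JSON

def serving_input_fn():
    feature_placeholders = {
        TIMESERIES_COL: tf.placeholder(tf.float32, [None, N_INPUTS]),
        KEY_COLUMN: tf.placeholder(tf.string, [None]),  # passed through, not used by the model
    }
    return tf.estimator.export.ServingInputReceiver(
        features=feature_placeholders,
        receiver_tensors=feature_placeholders)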
I'm using Tensorboard 1.5 and I would like to see how my gradients are doing.
Here is an example of layer I am using:
net = tf.layers.dense(features, 40, activation=tf.nn.relu, kernel_regularizer=regularizer,
                      kernel_initializer=tf.contrib.layers.xavier_initializer())
And here is my optimizer:
train_op = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(loss)
For my model parameters I create summaries this way:
for var in tf.trainable_variables():
    tf.summary.histogram(var.name, var)
Is there a similar way to get all the gradients in a for loop to create my summaries?
You should first get the gradients using compute_gradients of the optimizer and then pass them to summary:
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)

# Calculate the gradients for the batch of data
grads = opt.compute_gradients(loss)

# Add histograms for gradients.
summaries = []  # collect the summary ops so they can be merged later
for grad, var in grads:
    if grad is not None:
        summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))
And then to perform the training, you can call the apply_gradients of optimizer:
# Apply the gradients to adjust the shared variables.
train_op = opt.apply_gradients(grads, global_step=global_step)
For more, you can look at the TensorFlow CIFAR-10 tutorial.
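To tie it together, here is a small self-contained toy example of the same pattern in TF 1.x graph mode: compute the gradients, attach a histogram per variable, merge the summaries, and write them during training. The model, data, and log directory here are illustrative, not taken from the question:

# Toy example of gradient histograms in TensorBoard (TF 1.x graph mode).
import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 4])
y = tf.placeholder(tf.float32, [None, 1])

net = tf.layers.dense(x, 40, activation=tf.nn.relu)
pred = tf.layers.dense(net, 1)
loss = tf.losses.mean_squared_error(y, pred)

opt = tf.train.AdamOptimizer(learning_rate=1e-3)
grads = opt.compute_gradients(loss)

summaries = []
for grad, var in grads:
    if grad is not None:
        summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))
summary_op = tf.summary.merge(summaries)
train_op = opt.apply_gradients(grads)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('/tmp/grad_logs', sess.graph)
    for step in range(100):
        batch_x = np.random.rand(32, 4).astype(np.float32)
        batch_y = np.random.rand(32, 1).astype(np.float32)
        _, summ = sess.run([train_op, summary_op], feed_dict={x: batch_x, y: batch_y})
        writer.add_summary(summ, step)  # point TensorBoard at /tmp/grad_logs to view
    writer.close()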
I want to turn my _model_fn for Estimator into a multi GPU solution.
Is there a way to do it within the Estimator API, or do I have to explicitly code device placement and synchronization?
I know I can use tf.device('gpu:X') to place my model on GPU X. I also know I can loop over available GPU names to replicate my model across multiple GPUs. I also know I can use a single input queue for multiple GPUs.
What I do not know is which parts (optimizer, loss calculation), I can actually move to a GPU and where I have to synchronize the computation.
From the Cifar10 example I figure that I have to only synchronize the gradient.
Especially when using
train_op = tf.contrib.layers.optimize_loss(
    loss=loss,
    global_step=tf.contrib.framework.get_global_step(),
    learning_rate=learning_rate,
    learning_rate_decay_fn=_learning_rate_decay_fn,
    optimizer=optimizer)
I cannot call optimizer.compute_gradients() or optimizer.apply_gradients() manually anymore as this is internally handled by .optimize_loss(..)
I am wondering how to average the gradients like it is done in the cifar10 example Cifar10-MultiGPU or if this is even the right approach for Estimator.
Actually, you can implement multi-GPU support in the model_fn function the same way as before.
You can find the full code here. It supports a multi-threaded queue reader and multiple GPUs for very fast training with the Estimator API.
Code snippet (see the link above for the full code):
def model_fn(features, labels, mode, params):
    # network
    network_fn = nets_factory.get_network_fn(
        FLAGS.model_name,
        num_classes=params['num_classes'],
        weight_decay=0.00004,
        is_training=(mode == tf.estimator.ModeKeys.TRAIN))

    # if predict. Provide an estimator spec for `ModeKeys.PREDICT`.
    if mode == tf.estimator.ModeKeys.PREDICT:
        logits, end_points = network_fn(features)
        return tf.estimator.EstimatorSpec(mode=mode, predictions={"output": logits})

    # Create global_step and lr
    global_step = tf.train.get_global_step()
    learning_rate = get_learning_rate("exponential", FLAGS.base_lr,
                                      global_step, decay_steps=10000)

    # Create optimizer
    optimizer = get_optimizer(FLAGS.optimizer, learning_rate)

    # Multi GPU support - need to make sure that the splits sum up to
    # the batch size (in case the batch size is not divisible by
    # the number of gpus. This code will put remaining samples in the
    # last gpu. E.g. for a batch size of 15 with 2 gpus, the splits
    # will be [7, 8].
    batch_size = tf.shape(features)[0]
    split_size = batch_size // len(params['gpus_list'])
    splits = [split_size, ] * (len(params['gpus_list']) - 1)
    splits.append(batch_size - split_size * (len(params['gpus_list']) - 1))

    # Split the features and labels
    features_split = tf.split(features, splits, axis=0)
    labels_split = tf.split(labels, splits, axis=0)
    tower_grads = []
    eval_logits = []

    with tf.variable_scope(tf.get_variable_scope()):
        for i in xrange(len(params['gpus_list'])):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('%s_%d' % ("classification", i)) as scope:
                    # model and loss
                    logits, end_points = network_fn(features_split[i])
                    tf.losses.softmax_cross_entropy(labels_split[i], logits)
                    update_ops = tf.get_collection(
                        tf.GraphKeys.UPDATE_OPS, scope)
                    updates_op = tf.group(*update_ops)
                    with tf.control_dependencies([updates_op]):
                        losses = tf.get_collection(tf.GraphKeys.LOSSES, scope)
                        total_loss = tf.add_n(losses, name='total_loss')

                    # reuse var
                    tf.get_variable_scope().reuse_variables()

                    # grad compute
                    grads = optimizer.compute_gradients(total_loss)
                    tower_grads.append(grads)

                    # for eval metric ops
                    eval_logits.append(logits)

    # We must calculate the mean of each gradient. Note that this is the
    # synchronization point across all towers.
    grads = average_gradients(tower_grads)

    # Apply the gradients to adjust the shared variables.
    apply_gradient_op = optimizer.apply_gradients(
        grads, global_step=global_step)

    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(0.9999, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    # Group all updates into a single train op.
    train_op = tf.group(apply_gradient_op, variables_averages_op)

    # Create eval metric ops
    _predictions = tf.argmax(tf.concat(eval_logits, 0), 1)
    _labels = tf.argmax(labels, 1)
    eval_metric_ops = {
        "acc": slim.metrics.streaming_accuracy(_predictions, _labels)}

    # Provide an estimator spec for `ModeKeys.EVAL` and `ModeKeys.TRAIN` modes.
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=total_loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops)
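The snippet relies on an average_gradients helper that is not shown. For completeness, here is a sketch in the style of the CIFAR-10 multi-GPU tutorial, assuming each entry of tower_grads is the (gradient, variable) list returned by compute_gradients on one tower and that no gradient is None:

def average_gradients(tower_grads):
    """Average gradients across towers; variables are shared, so we keep
    the variable reference from the first tower."""
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # grad_and_vars is ((grad0_gpu0, var0), (grad0_gpu1, var0), ...)
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        grad = tf.reduce_mean(tf.concat(grads, axis=0), 0)
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads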