Saving and restoring a Keras BLSTM CTC model (Python)

I have been working on a deep neural network for speech emotion recognition. I used a Keras bidirectional LSTM with CTC loss. I trained the model and saved it:
model_json = model.to_json()
with open("ctc_model.json", "w") as json_file:
    json_file.write(model_json)
model.save_weights("ctc_weights.h5")
The problem is that I cannot use this model to test on unseen data, because the model accepts four inputs and outputs the CTC loss; it is built only for training. So how can I save the model in such a way that it requires only the one real input, not the labels and lengths? Basically, how can I save the equivalent of this function: test_func = K.function([net_input], [output])?
def ctc_lambda_func(args):
    y_pred, labels, input_length, label_length = args
    shift = 2
    y_pred = y_pred[:, shift:, :]
    input_length -= shift
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
def build_model(nb_feat, nb_class, optimizer='Adadelta'):
    net_input = Input(name="the_input", shape=(200, nb_feat))
    forward_lstm1 = LSTM(output_dim=64,
                         return_sequences=True,
                         activation="tanh"
                         )(net_input)
    backward_lstm1 = LSTM(output_dim=64,
                          return_sequences=True,
                          activation="tanh",
                          go_backwards=True
                          )(net_input)
    blstm_output1 = Merge(mode='concat')([forward_lstm1, backward_lstm1])

    forward_lstm2 = LSTM(output_dim=64,
                         return_sequences=True,
                         activation="tanh"
                         )(blstm_output1)
    backward_lstm2 = LSTM(output_dim=64,
                          return_sequences=True,
                          activation="tanh",
                          go_backwards=True
                          )(blstm_output1)
    blstm_output2 = Merge(mode='concat')([forward_lstm2, backward_lstm2])

    hidden = TimeDistributed(Dense(512, activation='tanh'))(blstm_output2)
    output = TimeDistributed(Dense(nb_class + 1, activation='softmax'))(hidden)

    labels = Input(name='the_labels', shape=[1], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')

    loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name="ctc")([output, labels, input_length, label_length])

    model = Model(input=[net_input, labels, input_length, label_length], output=[loss_out])
    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=optimizer, metrics=[])
    test_func = K.function([net_input], [output])
    return model, test_func
model, test_func = build_model(nb_feat=nb_feat, nb_class=nb_class, optimizer=optimizer)

for epoch in range(number_epoches):
    # (the per-epoch accumulator reset and the batching loop over i are elided in the original snippet)
    inputs_train = {'the_input': X_train[i:i+batch_size],
                    'the_labels': y_train[i:i+batch_size],
                    'input_length': np.sum(X_train_mask[i:i+batch_size], axis=1, dtype=np.int32),
                    'label_length': np.squeeze(y_train_mask[i:i+batch_size]),
                    }
    outputs_train = {'ctc': np.zeros([inputs_train["the_labels"].shape[0]])}
    ctcloss = model.train_on_batch(x=inputs_train, y=outputs_train)
    total_ctcloss += ctcloss * inputs_train["the_input"].shape[0] * 1.
    loss_train[epoch] = total_ctcloss / X_train.shape[0]
Here is my model summary:

Try the following solution:
import keras.backend as K

def get_prediction_function(model):
    input_tensor = model.layers[0].input
    output_tensor = model.layers[-5].output
    net_function = K.function([input_tensor, K.learning_phase()], [output_tensor])
    def _result_function(x):
        return net_function([x, 0])[0]
    return _result_function
Now your network function might be obtained by:
test_function = get_prediction_function(model)
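Alternatively, since the softmax output tensor is available while the graph is being built, you can wrap it in a second, prediction-only Model that shares the trained layers. A minimal sketch (untested, Keras 1 style as in the question), assuming build_model is extended so that the net_input and output tensors are accessible:
pred_model = Model(input=net_input, output=output)
# The layers are shared with the training model, so after training this
# single-input model already holds the trained weights.
pred_model.save_weights("ctc_pred_weights.h5")
probs = pred_model.predict(X_test)  # softmax outputs of shape (batch, time, nb_class + 1)
This avoids rebuilding K.function by hand every time the weights are reloaded.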

Related

How to change Keras tensor values while building the model

I am trying to make a model that has a Lambda layer executing a custom function, whose output is to be of shape `(batch_size, 10, 5)`:
def yolo_head(x):
    boxes_ = x[:, :, :, :, 1:5]
    confidence_ = x[:, :, :, :, :1]
    classes_ = x[:, :, :, :, 5:]
    scores, boxes, classes = yolo_filter_boxes(boxes_, confidence_, classes_, threshold)
    scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, max_box, threshold_iou)
    classes = tf.cast(classes, tf.float32, name=None)
    preds = scores * classes
    second_use = 0
    use = len(preds.get_shape().as_list())
    if use > 1:
        second_use = preds.get_shape().as_list()[1]
    if use == 1:
        preds = tf.zeros((preds.get_shape().as_list()[0], 10, 5), dtype=tf.dtypes.float32, name=None)
    elif second_use < 10:
        current = tf.zeros((preds.get_shape().as_list()[0], 10 - second_use, 5), dtype=tf.dtypes.float32, name=None)
        preds = tf.reshape(preds, (-1, 1), name=None)
        preds = tf.concat(axis=1, values=[preds, boxes])
        preds = tf.concat(axis=0, values=[preds, current])
    else:
        preds = tf.reshape(preds, (-1, 1), name=None)
        preds = tf.concat(axis=1, values=[preds, boxes])
    return preds
def create_model():
    total_ = 19 * 19 * no_of_anchors * 6
    pretrained_model = tf.keras.applications.MobileNetV2(
        input_shape=(512, 512, 3),
        include_top=False,
        weights='imagenet',
        pooling='max'
    )
    pretrained_model.trainable = False
    inputs = pretrained_model.input
    x = tf.keras.layers.Flatten()(pretrained_model.output)
    x = tf.keras.layers.Dense(total_, activation='relu',
                              bias_initializer=tf.zeros_initializer(),
                              kernel_initializer=tf.keras.initializers.Ones())(x)
    x = tf.keras.layers.Reshape((19, 19, no_of_anchors, 6))(x)
    outputs = tf.keras.layers.Lambda(yolo_head)(x)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    model.compile(
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=['accuracy']
    )
    return model
[This is the output, which shows the error][1]
[1]: https://i.stack.imgur.com/rJXNp.png

Graph disconnected: cannot obtain value for tensor "x" at layer "x". The following previous layers were accessed without issue: []

I am building a small network using some custom network blocks for each use case. It looks like this:
def top_block(dropout=None, training=None):
    # scaled input
    input_1 = tf.keras.Input(shape=(1, 15), dtype='float32')
    input_2 = tf.keras.Input(shape=(1, 15), dtype='float32')
    if dropout:
        layer_one = tf.keras.layers.Dropout(rate=dropout)(input_1, training=training)
        layer_two = tf.keras.layers.Dropout(rate=dropout)(input_2, training=training)
        return [layer_one, layer_two]
    return [input_1, input_2]

def bottom_layer(input_layers):
    data = tf.reduce_mean(input_layers, 0)
    cls_layer = tf.keras.layers.Dense(1,
                                      kernel_initializer=keras.initializers.glorot_uniform(seed=200),
                                      activation='sigmoid')(data)
    model = tf.keras.Model([input_layers[0], input_layers[1]], cls_layer, name='model_1')
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()
    return model
If I build this network without dropout, it works fine:
top_ = top_block()
model = bottom_layer(top_ )
But if I build it with dropout, it gives an error:
top_ = top_block(dropout = 0.2, training = True)
model = bottom_layer(top_ )
ValueError: Graph disconnected: cannot obtain value for tensor Tensor("input_72:0", shape=(None, 1, 15), dtype=float32) at layer "input_72". The following previous layers were accessed without issue: []
How can I build the model with the dropout layer?
How can I ensure training = False during evaluate? Do I need to load the full model and the old model weights?
Thank you!
I just realized my model inputs were coming from an intermediate layer (the dropout layer); they should come directly from the Input layers:
def top_block():
    # scaled input
    input_1 = tf.keras.Input(shape=(1, 15), dtype='float32')
    input_2 = tf.keras.Input(shape=(1, 15), dtype='float32')
    return [input_1, input_2]

def apply_dropout(layers_data, dropout_val, training):
    layer_one = tf.keras.layers.Dropout(rate=dropout_val)(layers_data[0], training=training)
    layer_two = tf.keras.layers.Dropout(rate=dropout_val)(layers_data[1], training=training)
    return [layer_one, layer_two]

def bottom_layer(input_layers, data):
    data = tf.reduce_mean(data, 0)
    cls_layer = tf.keras.layers.Dense(1,
                                      kernel_initializer=keras.initializers.glorot_uniform(seed=200),
                                      activation='sigmoid')(data)
    model = tf.keras.Model(input_layers, cls_layer, name='model_1')
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()
    return model
It works now:
top_ = top_block()
dropout_ = apply_dropout(top_, 0.2, True)
model = bottom_layer(top_ , dropout_)
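On the follow-up question about evaluation: if you want dropout active during fit() but automatically disabled in evaluate() and predict(), a sketch (my suggestion, not from the original post) is to not force training=True at call time and let Keras toggle it via the learning phase:
def apply_dropout(layers_data, dropout_val):
    # Leaving `training` unset lets Keras enable dropout in fit() and
    # disable it in evaluate()/predict(); training=True would force it on everywhere.
    layer_one = tf.keras.layers.Dropout(rate=dropout_val)(layers_data[0])
    layer_two = tf.keras.layers.Dropout(rate=dropout_val)(layers_data[1])
    return [layer_one, layer_two]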

Re-implementing the TF 1.0 sampled_softmax_loss function for a seq2seq model in a TF 2 Keras model

I have TF 1.0.1 code for a seq2seq model, and I am trying to rewrite it using TensorFlow Keras.
The TF 1.0.1 code has the following decoder architecture:
with tf.variable_scope("decoder_scope") as decoder_scope:
    # output projection
    # we need to specify the output projection manually, because sampled softmax
    # needs access to the projection matrix
    output_projection_w_t = tf.get_variable("output_projection_w", [vocabulary_size, state_size], dtype=DTYPE)
    output_projection_w = tf.transpose(output_projection_w_t)
    output_projection_b = tf.get_variable("output_projection_b", [vocabulary_size], dtype=DTYPE)

    decoder_cell = tf.contrib.rnn.LSTMCell(num_units=state_size)
    decoder_cell = DtypeDropoutWrapper(cell=decoder_cell, output_keep_prob=tf_keep_probabiltiy, dtype=DTYPE)
    decoder_cell = contrib_rnn.MultiRNNCell(cells=[decoder_cell] * num_lstm_layers, state_is_tuple=True)

    # define decoder train network
    decoder_outputs_tr, _, _ = dynamic_rnn_decoder(
        cell=decoder_cell,
        decoder_fn=simple_decoder_fn_train(last_encoder_state, name=None),
        inputs=decoder_inputs,
        sequence_length=decoder_sequence_lengths,
        parallel_iterations=None,
        swap_memory=False,
        time_major=False)

    # define decoder inference network
    decoder_scope.reuse_variables()
Here is how the sampled_softmax_loss is calculated:
decoder_forward_outputs = tf.reshape(decoder_outputs_tr, [-1, state_size])
decoder_target_labels = tf.reshape(decoder_labels, [-1, 1])  # decoder_labels is the target sequence of the decoder
sampled_softmax_losses = tf.nn.sampled_softmax_loss(
    weights=output_projection_w_t,
    biases=output_projection_b,
    inputs=decoder_forward_outputs,
    labels=decoder_target_labels,
    num_sampled=500,
    num_classes=vocabulary_size,
    num_true=1,
)
total_loss_op = tf.reduce_mean(sampled_softmax_losses)
And, this is my decoder in Keras:
decoder_inputs = tf.keras.Input(shape=(None,), name='decoder_input')
emb_layer = tf.keras.layers.Embedding(vocabulary_size, state_size)
x_d = emb_layer(decoder_inputs)
d_lstm_layer = tf.keras.layers.LSTM(embed_dim, return_sequences=True)
d_lstm_out = d_lstm_layer(x_d, initial_state=encoder_states)
This is the sampled_softmax_loss wrapper I use for the Keras model:
class SampledSoftmaxLoss(object):
    def __init__(self, model):
        self.model = model
        output_layer = model.layers[-1]
        self.input = output_layer.input
        self.weights = output_layer.weights

    def loss(self, y_true, y_pred, **kwargs):
        loss = tf.nn.sampled_softmax_loss(
            weights=self.weights[0],
            biases=self.weights[1],
            labels=tf.reshape(y_true, [-1, 1]),
            inputs=tf.reshape(d_lstm_out, [-1, state_size]),
            num_sampled=500,
            num_classes=vocabulary_size
        )
        return loss
But it does not work.
Can anyone help me implement the sampled softmax loss function in Keras correctly?
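One way to get this working, as a hedged sketch (untested; vocabulary_size, state_size, and d_lstm_out are the names from the question), is a custom layer that owns the projection weights, registers the sampled softmax loss via add_loss, and returns full-softmax logits for inference:
import tensorflow as tf

class SampledSoftmaxLayer(tf.keras.layers.Layer):
    def __init__(self, vocabulary_size, state_size, num_sampled=500, **kwargs):
        super().__init__(**kwargs)
        self.vocabulary_size = vocabulary_size
        self.state_size = state_size
        self.num_sampled = num_sampled

    def build(self, input_shape):
        # Counterparts of output_projection_w_t / output_projection_b in the TF 1.x code
        self.proj_w_t = self.add_weight(name="output_projection_w",
                                        shape=(self.vocabulary_size, self.state_size))
        self.proj_b = self.add_weight(name="output_projection_b",
                                      shape=(self.vocabulary_size,),
                                      initializer="zeros")

    def call(self, inputs):
        decoder_outputs, labels = inputs
        flat_outputs = tf.reshape(decoder_outputs, [-1, self.state_size])
        flat_labels = tf.reshape(tf.cast(labels, tf.int64), [-1, 1])
        sampled = tf.nn.sampled_softmax_loss(
            weights=self.proj_w_t, biases=self.proj_b,
            labels=flat_labels, inputs=flat_outputs,
            num_sampled=self.num_sampled, num_classes=self.vocabulary_size)
        self.add_loss(tf.reduce_mean(sampled))
        # Full logits for inference/metrics
        return tf.matmul(flat_outputs, self.proj_w_t, transpose_b=True) + self.proj_b
The decoder targets then enter the model as an extra Input (a hypothetical target_labels tensor), e.g. logits = SampledSoftmaxLayer(vocabulary_size, state_size)([d_lstm_out, target_labels]), and the model is compiled without a separate loss, since the layer adds its own.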

Custom loss function - Keras

I'm trying to implement a mixed model where part of it is a variational autoencoder and the other part takes the latent space and makes predictions on the properties of the input. I'd like to train these two models jointly. Here are my models:
# build encoder model
inputs = Input(shape=input_shape, name='encoder_input')
x = Dense(intermediate_dim1, activation='relu')(inputs)
x1 = Dense(intermediate_dim2, activation='relu')(x)
x2 = Dense(intermediate_dim3, activation='relu')(x1)
z_mean = Dense(latent_dim, name='z_mean')(x2)
z_log_var = Dense(latent_dim, name='z_log_var')(x2)
# use reparameterization trick to push the sampling out as input
# note that "output_shape" isn't necessary with the TensorFlow backend
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
# instantiate encoder model
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
# build decoder model
latent_inputs = Input(shape=(latent_dim,), name='z_sampling1')
x1 = Dense(intermediate_dim3, activation='relu')(latent_inputs)
x2 = Dense(intermediate_dim2, activation='relu')(x1)
x3 = Dense(intermediate_dim1, activation='relu')(x2)
outputs = Dense(2*original_dim+1, activation='sigmoid')(x3)
# instantiate decoder model
decoder = Model(latent_inputs, outputs, name='decoder')
#build property predictor model
latent_inputs = Input(shape=(latent_dim,), name='z_sampling2')
x1 = Dense(64, activation='relu')(latent_inputs)
x2 = Dense(128, activation='relu')(x1)
outputs = Dense(property_dim, activation='sigmoid')(x2)
predModel = Model(latent_inputs, outputs, name='predictor')
This is the full model; it takes the inputs of the encoder and produces the outputs of the decoder and the predictor model.
#build full model
vaeOutputs = decoder(encoder(inputs)[2])
predOutputs = predModel(encoder(inputs)[0])
vaePred = Model(inputs, [vaeOutputs,predOutputs], name='vae_fullimage')
vaePred.summary()
Now I have trouble defining the loss function and training the model:
This is my attempt:
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    help_ = "Load h5 model trained weights"
    parser.add_argument("-w", "--weights", help=help_)
    help_ = "Use mse loss instead of binary cross entropy (default)"
    parser.add_argument("-m",
                        "--mse",
                        help=help_, action='store_true')
    #args = parser.parse_args()
    args = parser.parse_known_args()[0]
    models = (encoder, decoder)

    def custom_loss(y_true, y_pred):
        kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
        kl_loss = K.sum(kl_loss, axis=-1)
        kl_loss *= -0.5
        reconstruction_loss = binary_crossentropy(y_true[0], y_pred[0])
        reconstruction_loss *= original_dim
        #y_pred = predOutputs
        prediction_loss = K.square(y_pred[1] - y_true[1])
        total_loss = K.mean(prediction_loss, axis=-1) + K.mean(reconstruction_loss) + K.mean(kl_loss)
        return total_loss

    optimizer = keras.optimizers.Adam(learning_rate=0.001)
    vaePred.compile(optimizer, custom_loss)
    vaePred.summary()

    if args.weights:
        vaePred.load_weights(args.weights)
    else:
        # train the autoencoder
        history = vaePred.fit(x=x_train, y=[x_train, property_train],
                              epochs=epochs,
                              callbacks=callbacks,
                              batch_size=batch_size,
                              validation_data=(x_test, [x_test, property_test]))
It appears that you are training an autoencoder (AE), a generative model that seeks to predict itself. The outputs of an AE should equal the inputs if it is perfect. Therefore you should change y_true to be inputs.
change:
prediction_loss = mse(y_true, predOutputs)
to be:
prediction_loss = mse(inputs, predOutputs)
Note: I have not run or tested any of this code. It appears to be example code from Keras.
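One more note beyond the answer above: Keras calls a loss function once per model output rather than handing it lists, so indexing y_true[0] / y_pred[1] inside a single custom_loss does not select outputs. A minimal sketch (untested, reusing the names from the question, and depending on your Keras version for Model.add_loss) that splits the loss per output and attaches the KL term to the model directly:
def reconstruction_loss(y_true, y_pred):
    # applied to the decoder output only
    return original_dim * binary_crossentropy(y_true, y_pred)

def prediction_loss(y_true, y_pred):
    # applied to the property-predictor output only
    return K.mean(K.square(y_pred - y_true), axis=-1)

# The KL term depends only on encoder tensors, so it is added as a model loss
kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
vaePred.add_loss(K.mean(kl_loss))

# One loss per output, in the order [vaeOutputs, predOutputs]
vaePred.compile(optimizer, loss=[reconstruction_loss, prediction_loss])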

How to visualize mean edit distance in Tensorboard using Keras callback?

So far, I have been experimenting with TensorFlow and Keras. I took code from image_ocr.py, which allowed me to train a printed-text OCR model. I want to see the training progress as it goes, and I have successfully visualized the accuracy and loss of the training model. However, from what I have heard, an OCR RNN should not use accuracy for validation but the mean edit distance instead to validate the accuracy of the words. In this case, I have been trying to get the variables mean_ed and mean_norm_ed from the VizCallback class visualized in TensorBoard. I have tried the method from this link, but it still does not work. Can anyone help me visualize the mean edit distance variables? Here are the code snippets from my code:
class VizCallback(keras.callbacks.Callback):
    def __init__(self, run_name, test_func, text_img_gen, num_display_words=6):
        self.test_func = test_func
        self.output_dir = os.path.join(OUTPUT_DIR, run_name)
        self.text_img_gen = text_img_gen
        self.num_display_words = num_display_words
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

    def on_train_begin(self, logs={}):
        self.med = []
        self.nmed = []

    def show_edit_distance(self, num, logs={}):
        num_left = num
        mean_norm_ed = 0.0
        mean_ed = 0.0
        while num_left > 0:
            word_batch = next(self.text_img_gen)[0]
            num_proc = min(word_batch['the_input'].shape[0], num_left)
            decoded_res = decode_batch(self.test_func, word_batch['the_input'][0:num_proc])
            for j in range(num_proc):
                edit_dist = editdistance.eval(decoded_res[j], word_batch['source_str'][j])
                mean_ed += float(edit_dist)
                mean_norm_ed += float(edit_dist) / len(word_batch['source_str'][j])
            num_left -= num_proc
        mean_norm_ed = mean_norm_ed / num
        mean_ed = mean_ed / num
        # Create scalar summaries for both mean edit distance and normalized mean edit distance
        tf_med_ph = tf.placeholder(tf.float32, shape=None, name='med_summary')
        tf_nmed_ph = tf.placeholder(tf.float32, shape=None, name='nmed_summary')
        tf_med = tf.summary.scalar('med', tf_med_ph)
        tf_nmed = tf.summary.scalar('nmed', tf_nmed_ph)
        performance_summaries = tf.summary.merge([tf_med, tf_nmed])
        # Create a session for displaying the summary
        config = tf.ConfigProto(allow_soft_placement=True)
        session = tf.InteractiveSession(config=config)
        summ_writer = tf.summary.FileWriter(os.path.join('summaries', 'first'), session.graph)
        # Execute the summaries defined above
        summ = session.run(performance_summaries, feed_dict={tf_med_ph: mean_ed, tf_nmed_ph: mean_norm_ed})
        # Write the obtained summaries to the file, so they can be displayed in TensorBoard
        summ_writer.add_summary(summ, epoch)
        session.close()
        print('\nOut of %d samples: Mean edit distance: %.3f Mean normalized edit distance: %0.3f'
              % (num, mean_ed, mean_norm_ed))

    def on_epoch_end(self, epoch, logs={}):
        self.model.save_weights(os.path.join(self.output_dir, 'weights%02d.h5' % (epoch)))
        self.show_edit_distance(256)
        word_batch = next(self.text_img_gen)[0]
        res = decode_batch(self.test_func, word_batch['the_input'][0:self.num_display_words])
        if word_batch['the_input'][0].shape[0] < 256:
            cols = 2
        else:
            cols = 1
        for i in range(self.num_display_words):
            plt.subplot(self.num_display_words // cols, cols, i + 1)
            if K.image_data_format() == 'channels_first':
                the_input = word_batch['the_input'][i, 0, :, :]
            else:
                the_input = word_batch['the_input'][i, :, :, 0]
            plt.imshow(the_input.T, cmap='Greys_r')
            plt.xlabel('Truth = \'%s\'\nDecoded = \'%s\'' % (word_batch['source_str'][i], res[i]))
        fig = plt.gcf()
        fig.set_size_inches(10, 13)
        plt.savefig(os.path.join(self.output_dir, 'e%02d.png' % (epoch)))
        plt.close()
def train(run_name, start_epoch, stop_epoch, img_w):
    # Input Parameters
    img_h = 64
    words_per_epoch = 16000
    val_split = 0.2
    val_words = int(words_per_epoch * (val_split))
    # Network parameters
    conv_filters = 16
    kernel_size = (3, 3)
    pool_size = 2
    time_dense_size = 32
    rnn_size = 512
    minibatch_size = 32
    if K.image_data_format() == 'channels_first':
        input_shape = (1, img_w, img_h)
    else:
        input_shape = (img_w, img_h, 1)
    fdir = os.path.dirname(get_file('wordlists.tgz',
                                    origin='http://test.com/wordlist.tgz', untar=True))
    img_gen = TextImageGenerator(monogram_file=os.path.join(fdir, 'wordlist_mono_clean.txt'),
                                 bigram_file=os.path.join(fdir, 'wordlist_bi_clean.txt'),
                                 minibatch_size=minibatch_size,
                                 img_w=img_w,
                                 img_h=img_h,
                                 downsample_factor=(pool_size ** 2),
                                 val_split=words_per_epoch - val_words
                                 )
    act = 'relu'
    input_data = Input(name='the_input', shape=input_shape, dtype='float32')
    inner = Conv2D(conv_filters, kernel_size, padding='same',
                   activation=act, kernel_initializer='he_normal',
                   name='conv1')(input_data)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max1')(inner)
    inner = Conv2D(conv_filters, kernel_size, padding='same',
                   activation=act, kernel_initializer='he_normal',
                   name='conv2')(inner)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max2')(inner)
    conv_to_rnn_dims = (img_w // (pool_size ** 2), (img_h // (pool_size ** 2)) * conv_filters)
    inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)
    # cuts down input size going into RNN:
    inner = Dense(time_dense_size, activation=act, name='dense1')(inner)
    # Two layers of bidirectional GRUs
    # GRU seems to work as well, if not better than LSTM:
    gru_1 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru1')(inner)
    gru_1b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru1_b')(inner)
    gru1_merged = add([gru_1, gru_1b])
    gru_2 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru2')(gru1_merged)
    gru_2b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru2_b')(gru1_merged)
    # transforms RNN output to character activations:
    inner = Dense(img_gen.get_output_size(), kernel_initializer='he_normal',
                  name='dense2')(concatenate([gru_2, gru_2b]))
    y_pred = Activation('softmax', name='softmax')(inner)
    Model(inputs=input_data, outputs=y_pred).summary()
    labels = Input(name='the_labels', shape=[img_gen.absolute_max_string_len], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')
    # Keras doesn't currently support loss funcs with extra parameters,
    # so CTC loss is implemented in a lambda layer
    loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred, labels, input_length, label_length])
    # clipnorm seems to speed up convergence
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)
    model = Model(inputs=[input_data, labels, input_length, label_length], outputs=loss_out)
    # Make TensorBoard instance
    init_op = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init_op)
    tbname = "tensorboard-of-{}".format(int(time.time()))
    tensorboard = keras.callbacks.TensorBoard(
        log_dir="logs/{}".format(tbname),
        histogram_freq=0,
        write_images=True)
    # the loss calc occurs elsewhere, so use a dummy lambda func for the loss
    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd,
                  metrics=['accuracy'])
    if start_epoch > 0:
        weight_file = os.path.join(OUTPUT_DIR, os.path.join(run_name, 'weights%02d.h5' % (start_epoch - 1)))
        model.load_weights(weight_file)
    # captures output of softmax so we can decode the output during visualization
    test_func = K.function([input_data], [y_pred])
    viz_cb = VizCallback(run_name, test_func, img_gen.next_val())
    model.fit_generator(generator=img_gen.next_train(),
                        steps_per_epoch=(words_per_epoch - val_words) // minibatch_size,
                        epochs=stop_epoch,
                        validation_data=img_gen.next_val(),
                        validation_steps=val_words // minibatch_size,
                        callbacks=[tensorboard, viz_cb, img_gen],
                        initial_epoch=start_epoch)
Any help would be much appreciated. Thank you!
P.S. I am using TensorFlow 1.9.0 and Python 3.6.8.
UPDATE
Now it is just a matter of passing the variable performance_summaries from the VizCallback class to the metrics in the train function. Any help here?
You could modify show_edit_distance to add the summaries every time this function is called:
def show_edit_distance(self, num, epoch):
    ...
    summary = tf.Summary()
    summary.value.add(tag='mean_ed', simple_value=mean_ed)
    summ_writer.add_summary(summary, epoch)
    summary = tf.Summary()
    summary.value.add(tag='mean_norm_ed', simple_value=mean_norm_ed)
    summ_writer.add_summary(summary, epoch)
    ...
Note that you will need an extra argument epoch:
def on_epoch_end(self, epoch, logs={}):
    ...
    self.show_edit_distance(256, epoch)
    ...
The Tensorboard callback should automatically pick up these summaries, as they're being added to the GraphKeys.SUMMARIES collection.
NOTE: Unfortunately, I couldn't test the solution. Please let me know if there is something I am missing.
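If summ_writer is otherwise undefined in the modified show_edit_distance, a condensed sketch (untested, TF 1.x as in the question; OUTPUT_DIR and keras come from the original code) is to create one FileWriter in __init__ and reuse it:
import os
import tensorflow as tf

class VizCallback(keras.callbacks.Callback):
    def __init__(self, run_name, test_func, text_img_gen, num_display_words=6):
        self.test_func = test_func
        self.output_dir = os.path.join(OUTPUT_DIR, run_name)
        self.text_img_gen = text_img_gen
        self.num_display_words = num_display_words
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        # One writer for the whole run, instead of a new session per call
        self.summ_writer = tf.summary.FileWriter(os.path.join('summaries', 'first'))

    def write_edit_distances(self, epoch, mean_ed, mean_norm_ed):
        summary = tf.Summary()
        summary.value.add(tag='mean_ed', simple_value=mean_ed)
        summary.value.add(tag='mean_norm_ed', simple_value=mean_norm_ed)
        self.summ_writer.add_summary(summary, epoch)
        self.summ_writer.flush()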
