Tensorflow Keras multiple input model - python

I need to adapt this model to take two text columns as input (instead of one column):
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text  # registers the ops used by the preprocessing SavedModel (imports assumed, not shown in the original snippet)
from tensorflow.keras.optimizers import Adam

tfhub_handle_encoder = \
    "https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-512_A-8/1"
tfhub_handle_preprocess = \
    "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"

def build_classifier_model():
    text_input = tf.keras.layers.Input(
        shape=(), dtype=tf.string, name='text')
    preprocessing_layer = hub.KerasLayer(
        tfhub_handle_preprocess, name='preprocessing')
    encoder_inputs = preprocessing_layer(text_input)
    encoder = hub.KerasLayer(
        tfhub_handle_encoder, trainable=True, name='BERT_encoder')
    outputs = encoder(encoder_inputs)
    net = outputs['pooled_output']
    net = tf.keras.layers.Dropout(0.1)(net)
    net = tf.keras.layers.Dense(
        6, activation='softmax', name='classifier')(net)
    model = tf.keras.Model(text_input, net)

    loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False)  # (from_logits=True)
    metric = tf.metrics.CategoricalAccuracy('accuracy')
    optimizer = Adam(
        learning_rate=5e-05, epsilon=1e-08, decay=0.01, clipnorm=1.0)
    model.compile(
        optimizer=optimizer, loss=loss, metrics=metric)
    model.summary()
    return model

history = classifier_model.fit(
    x=X_train['f'].values,
    y=y_train_c,
    validation_data=(X_valid['f'].values, y_valid_c),
    epochs=15)
This model seems to come from this tutorial: https://www.tensorflow.org/text/tutorials/classify_text_with_bert
I have tried modifying the code to take two input layers, but I get an error because the tensor dimensions are wrong after the concatenation:
def build_classifier_model():
    input1 = tf.keras.layers.Input(
        shape=(), dtype=tf.string, name='text')
    input2 = tf.keras.layers.Input(
        shape=(), dtype=tf.string, name='text1')
    text_input = tf.keras.layers.concatenate([input1, input2], axis=-1)
    preprocessing_layer = hub.KerasLayer(
        tfhub_handle_preprocess, name='preprocessing')
    encoder_inputs = preprocessing_layer(text_input)
    encoder = hub.KerasLayer(
        tfhub_handle_encoder, trainable=True, name='BERT_encoder')
    outputs = encoder(encoder_inputs)
    net = outputs['pooled_output']
    net = tf.keras.layers.Dropout(0.1)(net)
    net = tf.keras.layers.Dense(
        6, activation='softmax', name='classifier')(net)
    model = tf.keras.Model([input1, input2], net)

    loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False)  # (from_logits=True)
    metric = tf.metrics.CategoricalAccuracy('accuracy')
    optimizer = Adam(
        learning_rate=5e-05, epsilon=1e-08, decay=0.01, clipnorm=1.0)
    model.compile(
        optimizer=optimizer, loss=loss, metrics=metric)
    model.summary()
    return model
Error:
InvalidArgumentError: logits and labels must be broadcastable: logits_size=[64,6] labels_size=[32,6]
[[node categorical_crossentropy/softmax_cross_entropy_with_logits (defined at tmp/ipykernel_39/1837193519.py:5) ]] [Op:__inference_train_function_271676]
If I use concatenate along another axis, the model doesn't compile at all.

Weirdly enough, replacing your Concatenation layer with tf.strings.join inside your model seems to work:
def build_classifier_model():
    input1 = tf.keras.layers.Input(
        shape=(), dtype=tf.string, name='text')
    input2 = tf.keras.layers.Input(
        shape=(), dtype=tf.string, name='text1')
    text_input = tf.strings.join([input1, input2])
    preprocessing_layer = hub.KerasLayer(
        tfhub_handle_preprocess, name='preprocessing')
    encoder_inputs = preprocessing_layer(text_input)
    encoder = hub.KerasLayer(
        tfhub_handle_encoder, trainable=True, name='BERT_encoder')
    outputs = encoder(encoder_inputs)
    net = outputs['pooled_output']
    net = tf.keras.layers.Dropout(0.1)(net)
    output = tf.keras.layers.Dense(
        6, activation='softmax', name='classifier')(net)
    model = tf.keras.Model([input1, input2], output)

    loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False)  # (from_logits=True)
    metric = tf.metrics.CategoricalAccuracy('accuracy')
    optimizer = Adam(
        learning_rate=5e-05, epsilon=1e-08, decay=0.01, clipnorm=1.0)
    model.compile(
        optimizer=optimizer, loss=loss, metrics=metric)
    model.summary()
    return model
Epoch 1/5
497/1094 [============>.................] - ETA: 2:14 - loss: 1.8664 - accuracy: 0.1641
You could also consider simply doing text_input = input1 + input2, since the Concatenate layer seems to mess up the batch dimension: each string input has shape (batch_size,), so concatenating them along the last axis stacks them along the batch axis, which is why the logits end up with a batch size of 64 while the labels still have 32. Or you could feed each input to your encoder and concatenate the results afterwards:
def build_classifier_model():
    input1 = tf.keras.layers.Input(
        shape=(), dtype=tf.string, name='text')
    input2 = tf.keras.layers.Input(
        shape=(), dtype=tf.string, name='text1')
    preprocessing_layer = hub.KerasLayer(
        tfhub_handle_preprocess, name='preprocessing')
    encoder_input1 = preprocessing_layer(input1)
    encoder_input2 = preprocessing_layer(input2)
    encoder = hub.KerasLayer(
        tfhub_handle_encoder, trainable=True, name='BERT_encoder')
    output1 = encoder(encoder_input1)
    output2 = encoder(encoder_input2)
    net = tf.keras.layers.Concatenate(axis=-1)([output1['pooled_output'], output2['pooled_output']])
    net = tf.keras.layers.Dropout(0.1)(net)
    output = tf.keras.layers.Dense(
        6, activation='softmax', name='classifier')(net)
    model = tf.keras.Model([input1, input2], output)

    loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False)  # (from_logits=True)
    metric = tf.metrics.CategoricalAccuracy('accuracy')
    optimizer = Adam(
        learning_rate=5e-05, epsilon=1e-08, decay=0.01, clipnorm=1.0)
    model.compile(
        optimizer=optimizer, loss=loss, metrics=metric)
    model.summary()
    return model
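Whichever variant you use, fit now needs two arrays, one per input. A minimal sketch, assuming the second text column in your DataFrame is called 'f1' (a placeholder name; adjust to your data); you can pass either a list in the order [input1, input2] or a dict keyed by the Input names:

classifier_model = build_classifier_model()
history = classifier_model.fit(
    x={'text': X_train['f'].values, 'text1': X_train['f1'].values},
    y=y_train_c,
    validation_data=(
        {'text': X_valid['f'].values, 'text1': X_valid['f1'].values}, y_valid_c),
    epochs=15)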

Related

Graph disconnected error even though it is not

I have trained the following model:
K.clear_session()
latent_dim = 500
encoder_inputs = Input(shape=(INPUT_MAX_LENGTH-1,))
enc_emb = Embedding(vocab_size_source, latent_dim, trainable=True, mask_zero=True)(encoder_inputs)
encoder_lstm1 = LSTM(latent_dim, return_sequences=True, return_state=True)
encoder_output1, state_h1, state_c1 = encoder_lstm1(enc_emb)
encoder_lstm2 = LSTM(latent_dim, return_sequences=True, return_state=True)
encoder_output2, state_h2, state_c2 = encoder_lstm2(encoder_output1)
encoder_lstm3 = LSTM(latent_dim, return_state=True, return_sequences=True)
encoder_outputs, state_h, state_c = encoder_lstm3(encoder_output2)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(INPUT_MAX_LENGTH-1,))
dec_emb_layer = Embedding(vocab_size_target, latent_dim, trainable=True, mask_zero=True)
dec_emb = dec_emb_layer(decoder_inputs)
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(dec_emb, initial_state=[state_h, state_c])
attn_layer = BahdanauAttention(units=latent_dim)
attn_out, attn_states = attn_layer(encoder_outputs, decoder_outputs)
decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([decoder_outputs, attn_out])
decoder_dense = TimeDistributed(Dense(vocab_size_target, activation='softmax'))
decoder_outputs = decoder_dense(decoder_concat_input)
# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.summary()
And now I am trying to create an inference pipeline:
# Define sampling models
# Restore the model and construct the encoder and decoder.
#model = keras.models.load_model("s2s")
encoder_inputs = model.input[0] # input_1
encoder_outputs, state_h_enc, state_c_enc = model.layers[6].output # lstm_1
encoder_states = [state_h_enc, state_c_enc]
encoder_model = Model(encoder_inputs, encoder_states)
dec_inputs = model.input[1] # input_2
dec_emd_layer = model.layers[5]
dec_embedding = dec_emd_layer(dec_inputs)
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_lstm = model.layers[7]
decoder_outputs, state_h_dec, state_c_dec = decoder_lstm(
    dec_embedding, initial_state=decoder_states_inputs
)
attn_layer = model.layers[8]
attn_out_inf, attn_states_inf = attn_layer(encoder_outputs, decoder_outputs)
concat_layer = model.layers[9]
decoder_concat_input = concat_layer([decoder_outputs, attn_out_inf])
decoder_states = [state_h_dec, state_c_dec]
decoder_dense = model.layers[10]
decoder_outputs = decoder_dense(decoder_concat_input)
decoder_model = Model(
    [dec_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states
)
But I keep getting the following error:
ValueError: Graph disconnected: cannot obtain value for tensor KerasTensor(type_spec=TensorSpec(shape=(None, 99), dtype=tf.float32, name='input_1'), name='input_1', description="created by layer 'input_1'") at layer "embedding". The following previous layers were accessed without issue: []
I see no disconnect in the graph. I have also changed variable names to ensure there are no conflicts.
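For what it's worth, the disconnect usually comes from the attention call: attn_out_inf is computed from encoder_outputs, and encoder_outputs traces back to the training model's input_1, which is not one of decoder_model's inputs (also double-check that model.layers[5], [6], etc. really are the layers you expect, since layer order depends on how the graph was built). A common workaround, sketched below as an assumption rather than a verified fix, is to expose the encoder sequence output as an explicit Input of the inference decoder:

# Sketch: feed the encoder outputs into the inference decoder through a new Input,
# so the attention layer no longer reaches back to input_1.
encoder_model = Model(encoder_inputs, [encoder_outputs] + encoder_states)

decoder_hidden_state_input = Input(shape=(INPUT_MAX_LENGTH - 1, latent_dim))
attn_out_inf, attn_states_inf = attn_layer(decoder_hidden_state_input, decoder_outputs)
decoder_concat_input = concat_layer([decoder_outputs, attn_out_inf])
inf_outputs = decoder_dense(decoder_concat_input)
decoder_model = Model(
    [dec_inputs, decoder_hidden_state_input] + decoder_states_inputs,
    [inf_outputs] + decoder_states)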

How to log a tensorflow layer output in tf.estimator.Estimator()

In this tutorial, they successfully log the softmax function by giving a name to the tf.nn.softmax node.
tf.nn.softmax(logits, name="softmax_tensor")  # giving name to the node
...
tensors_to_log = {"probabilities": "softmax_tensor"}  # logging the node
logging_hook = tf.train.LoggingTensorHook(
    tensors=tensors_to_log, every_n_iter=50)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"x": eval_data},
    y=eval_labels,
    num_epochs=1,
    shuffle=False)
Now, instead of the softmax, I would like to also log the output of the last Dense layer.
logits = tf.layers.dense(inputs=dropout, units=10, name='logits')
...
tensors_to_log = {"last_layer": "logits"}
But it gives me the following error
KeyError: "The name 'logits:0' refers to a Tensor which does not
exist. The operation, 'logits', does not exist in the graph."
My question is: how to log the layer output in tensorflow?
My code
import tensorflow as tf
import numpy as np
import os

tf.logging.set_verbosity(tf.logging.INFO)

def cnn_model_fn(features, labels, mode):
    """Model function for CNN."""
    # Input Layer
    input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])
    # Convolutional Layer #1
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=128,
        kernel_size=[7, 7],
        padding="same",
        activation=tf.nn.relu)
    # Pooling Layer #1
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    # Convolutional Layer #2 and Pooling Layer #2
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=256,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    # Dense Layer
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 256])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
    # Logits Layer
    logits = tf.layers.dense(inputs=dropout, units=10, name='logits')
    # Predictions dict (as in the tutorial; needed by the PREDICT and EVAL branches below)
    predictions = {
        "classes": tf.argmax(input=logits, axis=1),
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Calculate Loss (for both TRAIN and EVAL modes)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    # Add evaluation metrics (for EVAL mode)
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels, predictions=predictions["classes"])
    }
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)

((train_data, train_labels),
 (eval_data, eval_labels)) = tf.keras.datasets.mnist.load_data()
train_data = train_data/np.float32(255)
train_labels = train_labels.astype(np.int32)  # not required
eval_data = eval_data/np.float32(255)
eval_labels = eval_labels.astype(np.int32)

mnist_classifier = tf.estimator.Estimator(
    model_fn=cnn_model_fn, model_dir="./mnist_convnet_model")

# Set up logging for predictions
tensors_to_log = {"last_layer": "logits"}
logging_hook = tf.train.LoggingTensorHook(
    tensors=tensors_to_log, every_n_iter=50)

# Train the model
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"x": train_data},
    y=train_labels,
    batch_size=100,
    num_epochs=None,
    shuffle=True)

# train one step and display the probabilities
mnist_classifier.train(
    input_fn=train_input_fn,
    steps=10,
    hooks=[logging_hook])
In the tf.official ResNet implementation, they use tf.identity for this purpose:
logits = tf.identity(logits, 'logits')
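The original hook fails because name='logits' in tf.layers.dense names the layer's variable scope, not its output tensor; the output ends up with a name like 'logits/BiasAdd:0', so there is no tensor called 'logits:0'. A minimal sketch of how the tf.identity trick would slot into the model_fn above:

# Logits Layer
logits = tf.layers.dense(inputs=dropout, units=10)
logits = tf.identity(logits, name='logits')  # the output tensor itself is now 'logits:0'
...
# The LoggingTensorHook key now resolves:
tensors_to_log = {"last_layer": "logits"}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=50)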

How to construct encoder from a loaded model in Keras?

I have an encoder-decoder model whose structure is the same as the one at machinelearningmastery.com with num_encoder_tokens = 1949,
num_decoder_tokens = 1944, and latent_dim = 2048.
I would like to construct the encoder and decoder models by loading the already trained model and try decoding some samples, but I get the error "Graph disconnected: cannot obtain value for tensor Tensor("input_1_1:0", shape=(?,?, 1949), dtype=float32) at layer "input_1". The following previous layers were accessed without issue: [].
Part of my code is the following:
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]
decoder_inputs = Input(shape=(None, num_decoder_tokens))
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                     initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
          batch_size=batch_size,
          epochs=epochs,
          validation_split=0.2)
model.save('modelname.h5')
# ...from here different python file for inference...
encoder = LSTM(latent_dim, return_state=True)
model = load_model('modelname.h5')
encoder_model = Model(model.output, encoder(model.output)) # I get the error here
And what I would like to do here is:
encoder_inputs = Input(shape=(None, 1949))
encoder = LSTM(2048, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]
encoder_model = Model(encoder_inputs, encoder_states)
I would highly appreciate it if anyone could help me.
Take a look at Robert Sim's answer to this post in stack overflow: Restore keras seq2seq model
And to this post in github: https://github.com/keras-team/keras/pull/9119.
He also provides an example in: https://github.com/simra/keras/blob/simra/s2srestore/examples/lstm_seq2seq_restore.py where you can see how the model is loaded. The following code has been taken from that example.
# Restore the model and construct the encoder and decoder.
model = load_model('s2s.h5')
encoder_inputs = model.input[0] # input_1
encoder_outputs, state_h_enc, state_c_enc = model.layers[2].output # lstm_1
encoder_states = [state_h_enc, state_c_enc]
encoder_model = Model(encoder_inputs, encoder_states)
decoder_inputs = model.input[1] # input_2
decoder_state_input_h = Input(shape=(latent_dim,), name='input_3')
decoder_state_input_c = Input(shape=(latent_dim,), name='input_4')
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_lstm = model.layers[3]
decoder_outputs, state_h_dec, state_c_dec = decoder_lstm(
    decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h_dec, state_c_dec]
decoder_dense = model.layers[4]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model(
    [decoder_inputs] + decoder_states_inputs,
    [decoder_outputs] + decoder_states)
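If you then want to decode some samples, the greedy decoding loop from that same lstm_seq2seq example looks roughly like the sketch below. The names target_token_index, reverse_target_char_index, num_decoder_tokens and max_decoder_seq_length come from your own preprocessing and are only assumed here; adapt them to your tokenization.

import numpy as np

def decode_sequence(input_seq):
    # Encode the input to get the initial decoder states.
    states_value = encoder_model.predict(input_seq)
    # Start with the start-of-sequence token ('\t' in the example).
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    target_seq[0, 0, target_token_index['\t']] = 1.0
    decoded = ''
    while True:
        output_tokens, h, c = decoder_model.predict([target_seq] + states_value)
        sampled_index = np.argmax(output_tokens[0, -1, :])
        sampled_char = reverse_target_char_index[sampled_index]
        decoded += sampled_char
        if sampled_char == '\n' or len(decoded) > max_decoder_seq_length:
            break
        # Feed the sampled token and updated states back in.
        target_seq = np.zeros((1, 1, num_decoder_tokens))
        target_seq[0, 0, sampled_index] = 1.0
        states_value = [h, c]
    return decoded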

Tensorflow 1.6 get the prediction output from estimator.predict()

I have this code, which is just the MNIST TensorFlow example, and I would like to get the predictions for the test data:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# Imports
import numpy as np
import tensorflow as tf

tf.logging.set_verbosity(tf.logging.INFO)

# Our application logic will be added here

def cnn_model_fn(features, labels, mode):
    """Model function for CNN."""
    # Input Layer
    input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])
    # Convolutional Layer #1
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pooling Layer #1
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    # Convolutional Layer #2 and Pooling Layer #2
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    # Dense Layer
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
    # Logits Layer
    logits = tf.layers.dense(inputs=dropout, units=10)
    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "classes": tf.argmax(input=logits, axis=1),
        # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
        # `logging_hook`.
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Calculate Loss (for both TRAIN and EVAL modes)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          train_op=train_op)
    # Add evaluation metrics (for EVAL mode)
    if mode == tf.estimator.ModeKeys.EVAL:
        eval_metric_ops = {
            "accuracy": tf.metrics.accuracy(
                labels=labels, predictions=predictions["classes"])}
        return tf.estimator.EstimatorSpec(
            mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
    predicted_classes = tf.argmax(logits, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'class_ids': predicted_classes[:, tf.newaxis],
            'probabilities': tf.nn.softmax(logits),
            'logits': logits,
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

def main(unused_argv):
    # Load training and eval data
    mnist = tf.contrib.learn.datasets.load_dataset("mnist")
    train_data = mnist.train.images[:54000]  # Returns np.array
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)[:54000]
    eval_data = train_data[:500]  # Returns np.array
    eval_labels = train_labels[:500]  # np.asarray(mnist.test.labels, dtype=np.int32)
    test_data = train_data[1000:]
    test_label = train_labels[1000:]
    # eval_data = mnist.test.images  # Returns np.array
    # eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)

    # Create the Estimator
    mnist_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="./tmp/mnist_convnet_model")

    # Set up logging for predictions
    tensors_train_to_log = {"probabilities": "softmax_tensor"}
    # tensors_eval_to_log = {"accuracy": "classes"}
    logging_train_hook = tf.train.LoggingTensorHook(
        tensors=tensors_train_to_log, every_n_iter=6000)
    # logging_eval_hook = tf.train.LoggingTensorHook(
    #     tensors=tensors_eval_to_log, every_n_iter=1000)

    # Train the model
    print("Training Data length:", np.shape(train_data))
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": train_data},
        y=train_labels,
        batch_size=10,
        num_epochs=1,
        shuffle=True)
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": eval_data},
        y=eval_labels,
        num_epochs=1,
        shuffle=True)
    # input_fn=train_input_fn,
    # steps=20000,
    # hooks=[logging_hook])
    # Evaluate the model and print results
    # eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
    # print(eval_results)
    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=6500)
    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)
    tf.estimator.train_and_evaluate(estimator=mnist_classifier,
                                    train_spec=train_spec, eval_spec=eval_spec)

    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": test_data[0]},
        y=test_label,
        num_epochs=1,
        shuffle=True)
    # mnist_classifier.train(
    test_spec = tf.estimator.EvalSpec(input_fn=test_input_fn)
    predictions = mnist_classifier.predict(test_spec)
    print(predictions["logits"][0])
    # print(predictions["logits"])
    # I got an error when I tried to print this

if __name__ == "__main__":
    tf.app.run()
The code works well and I got a trained model, but I still couldn't find a way to print the predictions. Has anyone run this example and printed the predicted data, not just the evaluation accuracy?
try this:
training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
training_predictions = np.array([item['predictions'][0] for item in training_predictions])
validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])
It is a generator object and, to print it, you should use
print(list(predictions)[0])
The following should print all the predictions (materialise the generator once, otherwise it is exhausted after the first list() call):
predictions_list = list(predictions)
for i in range(300):
    print(predictions_list[i])
This should work
outputs = [list(next(predictions).values())[0] for i in range(300)]
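For completeness, a small sketch of the more idiomatic pattern for this particular model_fn: predict expects an input_fn keyword argument (not an EvalSpec), and the first PREDICT branch in cnn_model_fn is the one that actually returns, so the yielded dicts have the keys "classes" and "probabilities" rather than "logits". Setting shuffle=False in test_input_fn keeps the predictions aligned with test_label.

predictions = mnist_classifier.predict(input_fn=test_input_fn)
for pred in predictions:  # generator: one dict per test example
    print(pred["classes"], pred["probabilities"])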

Saving and restoring Keras BLSTM CTC model

I have been working on a speech emotion recognition deep neural network. I used a Keras bidirectional LSTM with CTC loss. I trained the model and saved it:
model_json = model.to_json()
with open("ctc_model.json", "w") as json_file:
    json_file.write(model_json)
model.save_weights("ctc_weights.h5")
The problem is that I cannot use this model to test on unseen data, because the model accepts four inputs and only computes the CTC loss; it is built purely for training. So how can I save the model in such a way that it requires only one input, not the labels and the lengths? Basically, how can I save a model that behaves like this function: test_func = K.function([net_input], [output])?
def ctc_lambda_func(args):
    y_pred, labels, input_length, label_length = args
    shift = 2
    y_pred = y_pred[:, shift:, :]
    input_length -= shift
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)

def build_model(nb_feat, nb_class, optimizer='Adadelta'):
    net_input = Input(name="the_input", shape=(200, nb_feat))
    forward_lstm1 = LSTM(output_dim=64,
                         return_sequences=True,
                         activation="tanh"
                         )(net_input)
    backward_lstm1 = LSTM(output_dim=64,
                          return_sequences=True,
                          activation="tanh",
                          go_backwards=True
                          )(net_input)
    blstm_output1 = Merge(mode='concat')([forward_lstm1, backward_lstm1])
    forward_lstm2 = LSTM(output_dim=64,
                         return_sequences=True,
                         activation="tanh"
                         )(blstm_output1)
    backward_lstm2 = LSTM(output_dim=64,
                          return_sequences=True,
                          activation="tanh",
                          go_backwards=True
                          )(blstm_output1)
    blstm_output2 = Merge(mode='concat')([forward_lstm2, backward_lstm2])
    hidden = TimeDistributed(Dense(512, activation='tanh'))(blstm_output2)
    output = TimeDistributed(Dense(nb_class + 1, activation='softmax'))(hidden)

    labels = Input(name='the_labels', shape=[1], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')
    loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name="ctc")([output, labels, input_length, label_length])

    model = Model(input=[net_input, labels, input_length, label_length], output=[loss_out])
    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=optimizer, metrics=[])
    test_func = K.function([net_input], [output])
    return model, test_func

model, test_func = build_model(nb_feat=nb_feat, nb_class=nb_class, optimizer=optimizer)

for epoch in range(number_epoches):
    inputs_train = {'the_input': X_train[i:i+batch_size],
                    'the_labels': y_train[i:i+batch_size],
                    'input_length': np.sum(X_train_mask[i:i+batch_size], axis=1, dtype=np.int32),
                    'label_length': np.squeeze(y_train_mask[i:i+batch_size]),
                    }
    outputs_train = {'ctc': np.zeros([inputs_train["the_labels"].shape[0]])}
    ctcloss = model.train_on_batch(x=inputs_train, y=outputs_train)
    total_ctcloss += ctcloss * inputs_train["the_input"].shape[0] * 1.
    loss_train[epoch] = total_ctcloss / X_train.shape[0]
Here is my model summary:
Try the following solution:
import keras.backend as K

def get_prediction_function(model):
    input_tensor = model.layers[0].input
    output_tensor = model.layers[-5].output
    net_function = K.function([input_tensor, K.learning_phase()], [output_tensor])
    def _result_function(x):
        return net_function([x, 0])[0]
    return _result_function
Now your network function might be obtained by:
test_function = get_prediction_function(model)
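If you also need something you can save and reload with a single input (rather than a function rebuilt at runtime), one option, sketched here under the assumption that you still have the net_input and output tensors from build_model (or re-derive them from model.layers as above), is to build a second Model that shares the trained layers and save that:

# Sketch: an inference-only model that shares weights with the training model
# (Keras 1-style input=/output= keywords, matching the code in the question).
inference_model = Model(input=net_input, output=output)
inference_model.save("ctc_inference.h5")

# Later, in a separate script (X_test is a placeholder for your feature array):
# from keras.models import load_model
# inference_model = load_model("ctc_inference.h5")
# y_pred = inference_model.predict(X_test)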
