Following the instructions on TF custom Estimators, I created a CNN Estimator and tried to train it. While training, I launched TensorBoard, hoping to see some visualizations of the training steps. However, TensorBoard only showed the graph of my custom Estimator and none of the scalar values I had defined.
Here's roughly what I have in code:
def model_fn(features, labels, mode, params=None):
    tf.logging.set_verbosity(tf.logging.INFO)
    n_classes = params['n_classes']
    base_learning_rate = params['learning_rate']
    decay_rate = params['decay_rate']
    embedding_dim = params['embedding_dim']
    x = VGG_block1(features, (3, 3), 64, name='block1_1')
    x = VGG_block1(x, (3, 3), 128, name='block1_2')
    x = VGG_block1(x, (3, 3), 256, name='block1_3', regularizer=tf.contrib.layers.l1_regularizer(.1))
    x = VGG_block2(x, (3, 3), 512, name='block2_4')
    x = VGG_block2(x, (3, 3), 1024, name='block2_5')
    x = conv2d(x, 512, (5, 5), padding='valid', normalizer_fn=batch_norm, activation_fn=tf.nn.leaky_relu,
               weights_initializer=he_uniform())
    x = flatten(x)
    embedding = fully_connected(x, embedding_dim)
    logits = fully_connected(embedding, n_classes)
    # make predictions
    predictions = {
        'classes': tf.argmax(logits, axis=1, name='classes'),
        'probabilities': tf.nn.softmax(logits, name='softmax'),
        'embeddings': embedding
    }
    # if we are in prediction mode
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # otherwise define losses for training
    c_loss, center = center_loss(embedding, labels, .9, n_classes)
    xent_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
    total_loss = xent_loss + 0.5 * c_loss
    # evaluation methods
    accuracy, update_op = tf.metrics.accuracy(labels=labels, predictions=predictions['classes'], name='accuracy')
    batch_acc = tf.reduce_mean(tf.cast(tf.equal(tf.cast(labels, tf.int64), predictions['classes']), tf.float32))
    tf.summary.scalar('batch_acc', batch_acc)
    tf.summary.scalar('streaming_acc', update_op)
    tf.summary.scalar('total_loss', total_loss)
    tf.summary.scalar('center_loss', c_loss)
    tf.summary.scalar('xent_loss', xent_loss)
    # training mode
    if mode == tf.estimator.ModeKeys.TRAIN:
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        global_step = tf.Variable(0, trainable=False)
        global_step_op = tf.assign(global_step, global_step + 1)
        learning_rate = tf.train.exponential_decay(base_learning_rate, global_step, 8000, decay_rate, staircase=True)
        optimizer = tf.train.AdamOptimizer(learning_rate)
        with tf.control_dependencies(update_ops + [global_step_op]):
            objective = optimizer.minimize(total_loss)
        return tf.estimator.EstimatorSpec(mode=mode, loss=total_loss, train_op=objective)
    eval_metric_ops = {
        'accuracy': (accuracy, update_op)
    }
    return tf.estimator.EstimatorSpec(mode=mode, loss=total_loss, eval_metric_ops=eval_metric_ops)

X_train, X_test, y_train, y_test = load_data()
epochs = 10
batch_size = 64
n_classes = len(classes)
model_params = {'n_classes': n_classes,
                'learning_rate': 0.0001,
                'decay_rate': 0.5,
                'embedding_dim': 128}
model_dir = 'output'
face_classifier = tf.estimator.Estimator(model_fn=model_fn, params=model_params, model_dir=model_dir)
My TensorFlow version is 1.12.0.
Edit
Forgot to mention that I was using eager execution for this exercise; for reasons unknown, that turned out to be the cause of this bug.
As mentioned in the edit, disabling eager execution solved the problem.
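For reference, a minimal sketch of the working setup, under the assumption that the script originally called tf.enable_eager_execution() at startup. In TF 1.12 eager mode is opt-in and cannot be turned off again within the same process, so "disabling" it simply means removing that call; in graph mode the Estimator writes the tf.summary.scalar values to model_dir on its own.

import tensorflow as tf

# tf.enable_eager_execution()  # removing this call keeps graph mode

# save_summary_steps controls how often summaries are written
# (100 is the default, shown here only for illustration)
config = tf.estimator.RunConfig(save_summary_steps=100)
face_classifier = tf.estimator.Estimator(model_fn=model_fn,
                                         params=model_params,
                                         config=config,
                                         model_dir=model_dir)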
Related
I am working on the CIFAR-10 dataset to build a CNN and evaluate loss and accuracy. What I am trying to do is split the dataset into training and test data using keras and then train the model. But on the very last step, it gives me a dimension error, and nothing I tried could fix it. Please help!
Here is the code:
import numpy as np
import pickle
import tensorflow as tf
import os
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import sklearn

path = 'cifar-10-batches-py'

def load_cfar10_batch(path):
    with open(path + '/data_batch_1', mode='rb') as file:
        batch = pickle.load(file, encoding='latin1')
    features = batch['data']
    labels = batch['labels']
    return features, labels

x = features.reshape((len(features), 3, 32, 32)).transpose(0, 2, 3, 1)
x.shape
y = labels

def one_hot_encode(y):
    encoded = np.zeros((len(y), 10))
    for index, val in enumerate(y):
        encoded[index][val] = 1
    return encoded

def normalize(x):
    x_norm = x / 255
    return x_norm

from sklearn import preprocessing
scaler = preprocessing.StandardScaler()
scaled_df = scaler.fit_transform(features)
scaled_df = scaled_df.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1)
plt.imshow(scaled_df[9999])

def _preprocess_and_save(normalize_and_standardize, one_hot_encode, features, labels, filename):
    features = normalize(x)
    labels = one_hot_encode(y)
    pickle.dump((features, labels), open(filename, 'wb'))

features, labels = load_cfar10_batch(path)

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.2)

def preprocess_and_save_data(path, normalize, one_hot_encode):
    # check where the code for _preprocess_and_save is
    _preprocess_and_save(normalize, one_hot_encode, np.array(x_test), np.array(y_test), 'preprocess_test.p')
    _preprocess_and_save(normalize, one_hot_encode, np.array(x_train), np.array(y_train), 'preprocess_training.p')

preprocess_and_save_data(path, normalize, one_hot_encode)

x_test, y_test = pickle.load(open('preprocess_test.p', mode='rb'))
y_train, y_train = pickle.load(open('preprocess_training.p', mode='rb'))

def tf_reset():
    try:
        sess.close()
    except:
        pass
    tf.reset_default_graph()
    return tf.Session()

sess = tf_reset()

x = tf.placeholder(tf.float32, shape=(None, 32, 32, 3), name='input_x')
y = tf.placeholder(tf.float32, shape=(None, 10), name='output_y')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')

def conv_net(x, keep_prob):
    # x = tf.reshape(x, [-1, 32, 32, 3])
    conv1_filter = tf.Variable(tf.truncated_normal(shape=[3, 3, 3, 64], mean=0, stddev=0.08))
    conv2_filter = tf.Variable(tf.truncated_normal(shape=[3, 3, 64, 128], mean=0, stddev=0.08))
    conv3_filter = tf.Variable(tf.truncated_normal(shape=[5, 5, 128, 256], mean=0, stddev=0.08))
    conv4_filter = tf.Variable(tf.truncated_normal(shape=[5, 5, 256, 512], mean=0, stddev=0.08))
    # Layer 1
    conv1 = tf.nn.conv2d(x, conv1_filter, strides=[1, 1, 1, 1], padding='SAME')
    conv1 = tf.nn.relu(conv1)
    conv1_pool = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    conv1_bn = tf.layers.batch_normalization(conv1_pool)
    # Layer 2
    conv2 = tf.nn.conv2d(conv1_bn, conv2_filter, strides=[1, 1, 1, 1], padding='SAME')
    conv2 = tf.nn.relu(conv2)
    conv2_pool = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    conv2_bn = tf.layers.batch_normalization(conv2_pool)
    # Layer 3
    conv3 = tf.nn.conv2d(conv2_bn, conv3_filter, strides=[1, 1, 1, 1], padding='SAME')
    conv3 = tf.nn.relu(conv3)
    conv3_pool = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    conv3_bn = tf.layers.batch_normalization(conv3_pool)
    # Layer 4
    conv4 = tf.nn.conv2d(conv3_bn, conv4_filter, strides=[1, 1, 1, 1], padding='SAME')
    conv4 = tf.nn.relu(conv4)
    conv4_pool = tf.nn.max_pool(conv4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    conv4_bn = tf.layers.batch_normalization(conv4_pool)
    flat = tf.contrib.layers.flatten(conv4_bn)
    full1 = tf.contrib.layers.fully_connected(inputs=flat, num_outputs=128, activation_fn=tf.nn.relu)
    full1 = tf.nn.dropout(full1, keep_prob)
    full1 = tf.layers.batch_normalization(full1)
    full2 = tf.contrib.layers.fully_connected(inputs=full1, num_outputs=256, activation_fn=tf.nn.relu)
    full2 = tf.nn.dropout(full2, keep_prob)
    full2 = tf.layers.batch_normalization(full2)
    full3 = tf.contrib.layers.fully_connected(inputs=full2, num_outputs=512, activation_fn=tf.nn.relu)
    full3 = tf.nn.dropout(full3, keep_prob)
    full3 = tf.layers.batch_normalization(full3)
    full4 = tf.contrib.layers.fully_connected(inputs=full3, num_outputs=1024, activation_fn=tf.nn.relu)
    full4 = tf.nn.dropout(full4, keep_prob)
    full4 = tf.layers.batch_normalization(full4)
    out = tf.contrib.layers.fully_connected(inputs=full3, num_outputs=10, activation_fn=None)
    return out

iterations = 101
batch_size = 128
keep_probability = 0.7
learning_rate = 0.001

logits = conv_net(x, keep_prob)

# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')

def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
    session.run(optimizer,
                feed_dict={
                    x: feature_batch,
                    y: label_batch,
                    keep_prob: keep_probability
                })

def print_stats(sess, feature_batch, label_batch, cost, accuracy):
    loss = sess.run(cost,
                    feed_dict={
                        x: feature_batch,
                        y: label_batch,
                        keep_prob: 1.
                    })
    valid_acc = sess.run(accuracy,
                         feed_dict={
                             x: x_train,
                             y: y_train,
                             keep_prob: 1.
                         })
    print('Loss: {:>10.4f} Validation Accuracy: {:.6f}'.format(loss, valid_acc))

def batch_features_labels(features, labels, batch_size):
    """
    Split features and labels
    """
    for start in range(0, len(features), batch_size):
        end = min(start + batch_size, len(features))
        yield features[start:end], labels[start:end]

def load_preprocess_training(batch_size):
    """
    Load the Preprocessed Training data and return them in batches of <batch_size> or less
    """
    features = features.reshape((len(features), 3, 32, 32)).transpose(0, 2, 3, 1)
    filename = 'preprocess_training.p'
    features, labels = pickle.load(open(filename, mode='rb'))
    # Return the training data in batches of size <batch_size> or less
    return batch_features_labels(features, labels, batch_size)

print('Training...')
with tf.Session() as sess:
    # Initializing the variables
    sess.run(tf.global_variables_initializer())
    # Training cycle
    for i in range(iterations):
        for batch_features, batch_labels in load_preprocess_training(batch_size):
            train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
        if i % 10 == 0:
            print('Iterations {}, CIFAR-10 Batch {}: '.format(i, 1), end='')
            print_stats(sess, batch_features, batch_labels, cost, accuracy)
ValueError: Cannot feed value of shape (8000, 3072) for Tensor 'input_x:0', which has shape '(?, 32, 32, 3)'
The problem is located here:
def batch_features_labels(features, labels, batch_size):
    """
    Split features and labels
    """
    for start in range(0, len(features), batch_size):
        end = min(start + batch_size, len(features))
        yield features[start:end], labels[start:end]
You should reshape each item in features from a flat 3072 vector to [32, 32, 3].
Good luck!
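For concreteness, a minimal sketch of that fix applied inside batch_features_labels; the reshape/transpose mirrors the one already used earlier in the question:

def batch_features_labels(features, labels, batch_size):
    """Split features and labels, reshaping each flat batch to NHWC."""
    for start in range(0, len(features), batch_size):
        end = min(start + batch_size, len(features))
        batch = features[start:end]
        # CIFAR-10 stores images flattened and channel-first; restore
        # [N, 32, 32, 3] so batches match the input_x placeholder.
        batch = batch.reshape((len(batch), 3, 32, 32)).transpose(0, 2, 3, 1)
        yield batch, labels[start:end]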
I'm trying to implement a network for the MNIST dataset using custom Estimators.
Here is my input function:
def input_train_fn():
    train, test = tf.keras.datasets.mnist.load_data()
    mnist_x, mnist_y = train
    mnist_y = tf.cast(mnist_y, tf.int32)
    mnist_x = tf.cast(mnist_x, tf.int32)
    features = {'image': mnist_x}
    labels = mnist_y
    dataset = tf.data.Dataset.from_tensor_slices((features, labels))
    return dataset
Here is how I define my model:
def my_model(features, labels, mode, params):
    # create net
    net = tf.feature_column.input_layer(features, params['feature_columns'])
    # create hidden layers
    for unit in params['hidden_units']:
        net = tf.layers.dense(net, unit, tf.nn.relu)
    # create output layer
    legits = tf.layers.dense(net, params['n_classes'], activation=None)
    # predict (if in predict mode)
    predicted_classes = tf.arg_max(legits, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'class_ids': predicted_classes,
            'probabilities': tf.nn.softmax(legits),
            'logits': legits
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)
    # define loss function
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=legits)
    # evaluation metrics
    accuracy = tf.metrics.accuracy(labels=labels,
                                   predictions=predicted_classes,
                                   name='acc_op')
    metrics = {'accuracy': accuracy}
    tf.summary.scalar('accuracy', accuracy[1])
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
And this is how I call the train function:
feature_columns = [tf.feature_column.numeric_column('image', shape=[28, 28], dtype=tf.int32), ]
classifier = tf.estimator.Estimator(model_fn=my_model,
                                    params={
                                        'feature_columns': feature_columns,
                                        'hidden_units': [10, 10],
                                        'n_classes': 10,
                                    }, model_dir='/model')
classifier.train(input_fn=input_train_fn, steps=10)
As far as I can see, I'm doing everything by the book, both for Estimators and for feature_columns, but I get the error:
ValueError: Cannot reshape a tensor with 784 elements to shape [28,784] (21952 elements) for 'input_layer/image/Reshape' (op: 'Reshape') with input shapes: [28,28], 2 and with input tensors computed as partial shapes: input1 = [28,784].
Is there anything I'm missing?
Thanks in advance; any help is appreciated.
First, you need to produce batches. For more detail, see https://www.tensorflow.org/guide/datasets
...
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
dataset = dataset.batch(size)
return dataset
Then reshape your image and cast it to float; -1 stands for the batch size and will be filled in during training. Casting the labels is optional, depending on the datatype provided.
# features is the dict produced by the input function, so pull out the image tensor
net = tf.cast(tf.reshape(features['image'], [-1, 28 * 28]), tf.float32)
labels = tf.cast(labels, tf.int64)
net = tf.layers.dense(net, 10, tf.nn.relu)
legits = tf.layers.dense(net, 10, activation=None)
predicted_classes = tf.arg_max(legits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
    predictions = {
        'class_ids': predicted_classes,
        'probabilities': tf.nn.softmax(legits),
        'logits': legits
    }
    return tf.estimator.EstimatorSpec(mode, predictions=predictions)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=legits)
if mode == tf.estimator.ModeKeys.EVAL:
    return tf.estimator.EstimatorSpec(mode, loss=loss)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
classifier = tf.estimator.Estimator(model_fn=my_model)
classifier.train(input_fn=lambda: input_train_fn(), steps=10)
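Putting both fixes together, the corrected input function might look like the sketch below; the shuffle buffer and the batch size of 32 are arbitrary illustrative choices:

def input_train_fn():
    (mnist_x, mnist_y), _ = tf.keras.datasets.mnist.load_data()
    features = {'image': mnist_x}
    dataset = tf.data.Dataset.from_tensor_slices((features, mnist_y))
    # batching is what gives the input layer its leading batch dimension
    return dataset.shuffle(60000).batch(32)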
I built a neural network in TensorFlow and trained it.
I extracted the weights and biases from the estimator:
weights1 = self.model.get_variable_value('dense/kernel')
bias1 = self.model.get_variable_value('dense/bias')
weights2 = self.model.get_variable_value('dense_1/kernel')
bias1 = self.model.get_variable_value('dense_1/bias')
...
And I built an MLP with numpy in Python:
layer1 = np.dot(inputs, weight1)
layer1 = np.add(layer1, bias1)
layer1 = np.maximum(layer1, layer1 * 0.2, layer1)
...
I used the leaky_relu activation function, so I implemented it as well, but the output is completely different from TensorFlow's. I don't know what's wrong.
Edit:
def my_dnn_regression_fn(features, labels, mode, params):
    top = tf.feature_column.input_layer(features, params["feature_columns"])
    for units in params.get("hidden_units", [20]):
        top = tf.layers.dense(inputs=top, units=units, activation=tf.nn.leaky_relu)
    output_layer = tf.layers.dense(inputs=top, units=1)
    predictions = tf.squeeze(output_layer, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(
            mode=mode, predictions={"label": predictions})
    average_loss = tf.losses.mean_squared_error(labels, predictions)
    batch_size = tf.shape(labels)[0]
    total_loss = tf.to_float(batch_size) * average_loss
    if mode == tf.estimator.ModeKeys.TRAIN:
        mse = tf.metrics.mean_squared_error(labels, predictions)
        rmse = tf.metrics.root_mean_squared_error(labels, predictions)
        absolute_error = tf.metrics.mean_absolute_error(labels, predictions)
        mre = tf.metrics.mean_relative_error(labels, predictions, labels)
        tf.summary.scalar('mse', mse[1])
        tf.summary.scalar('mre', mre[1])
        tf.summary.scalar('rmse', rmse[1])
        tf.summary.scalar('absolute', absolute_error[1])
        # vars = tf.trainable_variables()
        # lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in vars]) * 0.001
        l1_regularizer = tf.contrib.layers.l1_regularizer(
            scale=0.001, scope=None
        )
        weights = tf.trainable_variables()  # all vars of your graph
        lossL1 = tf.contrib.layers.apply_regularization(l1_regularizer, weights)
        # average_loss = tf.add(average_loss, lossL2)
        average_loss = tf.add(average_loss, lossL1)
        total_loss = tf.to_float(batch_size) * average_loss
        optimizer = params.get("optimizer", tf.train.AdamOptimizer)
        optimizer = optimizer(params.get("learning_rate", None))
        train_op = optimizer.minimize(
            loss=average_loss, global_step=tf.train.get_global_step())
        # eval_metrics = {"rmse": rmse, "absolute": absolute_error, "mre": mre}
        eval_metrics = {"mse": mse, "rmse": rmse, "absolute": absolute_error, "mre": mre}
        return tf.estimator.EstimatorSpec(
            mode=mode, loss=total_loss, train_op=train_op, eval_metric_ops=eval_metrics)
    assert mode == tf.estimator.ModeKeys.EVAL
    mse = tf.metrics.mean_squared_error(labels, predictions)
    rmse = tf.metrics.root_mean_squared_error(labels, predictions)
    absolute_error = tf.metrics.mean_absolute_error(labels, predictions)
    mre = tf.metrics.mean_relative_error(labels, predictions, labels)
    eval_metrics = {"mse": mse, "rmse": rmse, "absolute": absolute_error, "mre": mre}
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=total_loss,
        eval_metric_ops=eval_metrics)
My DNN regression code!
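For comparison, a minimal NumPy sketch of the forward pass this model_fn defines, under stated assumptions: one hidden layer (the default hidden_units of [20]), tf.nn.leaky_relu with its default alpha of 0.2, and a linear output layer. Note that the output layer above has no activation, so applying leaky_relu to the final layer in NumPy would by itself explain a mismatch.

import numpy as np

def leaky_relu(x, alpha=0.2):
    # same definition tf.nn.leaky_relu uses by default
    return np.maximum(x, alpha * x)

def forward(inputs, weights1, bias1, weights2, bias2):
    hidden = leaky_relu(np.dot(inputs, weights1) + bias1)
    # the output layer is linear, so no activation is applied here
    output = np.dot(hidden, weights2) + bias2
    return np.squeeze(output, axis=1)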
I have this code, which is just the MNIST TensorFlow example, and I would like to get the predictions for the test data:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# Imports
import numpy as np
import tensorflow as tf

tf.logging.set_verbosity(tf.logging.INFO)

# Our application logic will be added here

def cnn_model_fn(features, labels, mode):
    """Model function for CNN."""
    # Input Layer
    input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])
    # Convolutional Layer #1
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pooling Layer #1
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    # Convolutional Layer #2 and Pooling Layer #2
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    # Dense Layer
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
    # Logits Layer
    logits = tf.layers.dense(inputs=dropout, units=10)
    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "classes": tf.argmax(input=logits, axis=1),
        # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
        # `logging_hook`.
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Calculate Loss (for both TRAIN and EVAL modes)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          train_op=train_op)
    # Add evaluation metrics (for EVAL mode)
    if mode == tf.estimator.ModeKeys.EVAL:
        eval_metric_ops = {
            "accuracy": tf.metrics.accuracy(
                labels=labels, predictions=predictions["classes"])}
        return tf.estimator.EstimatorSpec(
            mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
    predicted_classes = tf.argmax(logits, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'class_ids': predicted_classes[:, tf.newaxis],
            'probabilities': tf.nn.softmax(logits),
            'logits': logits,
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

def main(unused_argv):
    # Load training and eval data
    mnist = tf.contrib.learn.datasets.load_dataset("mnist")
    train_data = mnist.train.images[:54000]  # Returns np.array
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)[:54000]
    eval_data = train_data[:500]  # Returns np.array
    eval_labels = train_labels[:500]  # np.asarray(mnist.test.labels, dtype=np.int32)
    test_data = train_data[1000:]
    test_label = train_labels[1000:]
    # eval_data = mnist.test.images  # Returns np.array
    # eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
    # Create the Estimator
    mnist_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="./tmp/mnist_convnet_model")
    # Set up logging for predictions
    tensors_train_to_log = {"probabilities": "softmax_tensor"}
    # tensors_eval_to_log = {"accuracy": "classes"}
    logging_train_hook = tf.train.LoggingTensorHook(
        tensors=tensors_train_to_log, every_n_iter=6000)
    # logging_eval_hook = tf.train.LoggingTensorHook(
    #     tensors=tensors_eval_to_log, every_n_iter=1000)
    # Train the model
    print("Training Data length:", np.shape(train_data))
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": train_data},
        y=train_labels,
        batch_size=10,
        num_epochs=1,
        shuffle=True)
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": eval_data},
        y=eval_labels,
        num_epochs=1,
        shuffle=True)
    # input_fn=train_input_fn,
    # steps=20000,
    # hooks=[logging_hook])
    # Evaluate the model and print results
    # eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
    # print(eval_results)
    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=6500)
    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)
    tf.estimator.train_and_evaluate(estimator=mnist_classifier,
                                    train_spec=train_spec, eval_spec=eval_spec)
    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": test_data[0]},
        y=test_label,
        num_epochs=1,
        shuffle=True)
    # mnist_classifier.train(
    test_spec = tf.estimator.EvalSpec(input_fn=test_input_fn)
    predictions = mnist_classifier.predict(test_spec)
    print(predictions["logits"][0])
    # print(predictions["logits"])
    # I got an error when I tried to print this

if __name__ == "__main__":
    tf.app.run()
The code works well, in the sense that I get a trained model, but I could not find a way to print the predictions. So, has anyone done this example and printed the predicted data, not just the evaluation accuracy?
Try this:
training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
training_predictions = np.array([item['predictions'][0] for item in training_predictions])
validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])
It is a generator object and, to print it, you should use
print(list(predictions)[0])
The following should print all the predictions:
predictions = list(predictions)  # materialize the generator once
for i in range(300):
    print(predictions[i])
This should work
outputs = [list(next(predictions).values())[0] for i in range(300)]
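Tying these answers back to the question's code: predict() expects an input_fn, not an EvalSpec, and returns a generator that yields one dict per example, with the keys defined in the model's predictions dict ("classes" and "probabilities" here). A minimal sketch reusing the question's variables:

pred_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"x": test_data},  # labels are not needed for prediction
    num_epochs=1,
    shuffle=False)       # keep order so outputs line up with test_label

for pred in mnist_classifier.predict(input_fn=pred_input_fn):
    print(pred["classes"], pred["probabilities"].max())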
I have been working on a speech emotion recognition deep neural network. I have used a Keras bidirectional LSTM with CTC loss. I trained the model and saved it:
model_json = model.to_json()
with open("ctc_model.json", "w") as json_file:
    json_file.write(model_json)
model.save_weights("ctc_weights.h5")
The problem is that I cannot use this model to test on unseen data, because the model accepts 4 arguments as input and computes the CTC loss; it is only built for training. So how can I save the model in such a way that it requires only one input, not the labels and lengths? Basically, how can I save a model equivalent to this function: test_func = K.function([net_input], [output])
def ctc_lambda_func(args):
    y_pred, labels, input_length, label_length = args
    shift = 2
    y_pred = y_pred[:, shift:, :]
    input_length -= shift
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)

def build_model(nb_feat, nb_class, optimizer='Adadelta'):
    net_input = Input(name="the_input", shape=(200, nb_feat))
    forward_lstm1 = LSTM(output_dim=64,
                         return_sequences=True,
                         activation="tanh"
                         )(net_input)
    backward_lstm1 = LSTM(output_dim=64,
                          return_sequences=True,
                          activation="tanh",
                          go_backwards=True
                          )(net_input)
    blstm_output1 = Merge(mode='concat')([forward_lstm1, backward_lstm1])
    forward_lstm2 = LSTM(output_dim=64,
                         return_sequences=True,
                         activation="tanh"
                         )(blstm_output1)
    backward_lstm2 = LSTM(output_dim=64,
                          return_sequences=True,
                          activation="tanh",
                          go_backwards=True
                          )(blstm_output1)
    blstm_output2 = Merge(mode='concat')([forward_lstm2, backward_lstm2])
    hidden = TimeDistributed(Dense(512, activation='tanh'))(blstm_output2)
    output = TimeDistributed(Dense(nb_class + 1, activation='softmax'))(hidden)
    labels = Input(name='the_labels', shape=[1], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')
    loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name="ctc")([output, labels, input_length, label_length])
    model = Model(input=[net_input, labels, input_length, label_length], output=[loss_out])
    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=optimizer, metrics=[])
    test_func = K.function([net_input], [output])
    return model, test_func

model, test_func = build_model(nb_feat=nb_feat, nb_class=nb_class, optimizer=optimizer)

for epoch in range(number_epoches):
    inputs_train = {'the_input': X_train[i:i+batch_size],
                    'the_labels': y_train[i:i+batch_size],
                    'input_length': np.sum(X_train_mask[i:i+batch_size], axis=1, dtype=np.int32),
                    'label_length': np.squeeze(y_train_mask[i:i+batch_size]),
                    }
    outputs_train = {'ctc': np.zeros([inputs_train["the_labels"].shape[0]])}
    ctcloss = model.train_on_batch(x=inputs_train, y=outputs_train)
    total_ctcloss += ctcloss * inputs_train["the_input"].shape[0] * 1.
    loss_train[epoch] = total_ctcloss / X_train.shape[0]
Here is my model summary:
Try the following solution:
import keras.backend as K

def get_prediction_function(model):
    input_tensor = model.layers[0].input
    output_tensor = model.layers[-5].output
    net_function = K.function([input_tensor, K.learning_phase()], [output_tensor])

    def _result_function(x):
        # learning phase 0 runs the network in inference mode
        return net_function([x, 0])[0]

    return _result_function
Now your network function might be obtained by:
test_function = get_prediction_function(model)
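A hypothetical usage example, assuming X_test has the same shape as the training input, (num_samples, 200, nb_feat), and that model.layers[-5] is indeed the TimeDistributed softmax layer:

softmax_outputs = test_function(X_test)  # shape: (num_samples, 200, nb_class + 1)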