I'm trying to adapt the CNN from the tutorial "Build a Convolutional Neural Network using Estimators" to a dataset of mine, and I don't know how to fix the error below.
The input files should be fine: they are already tested and OK, as I'm currently running them on another, much simpler CNN (it works fine, but I want to switch to this one to add extra features like dropout).
The problem is that the error (I use Spyder as my IDE) is quite cryptic. I have done some tests to narrow it down, but I'm only getting more confused, so let me ask you guys.
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
#----- global variables Start ------
nb_of_neurons=1024
model_learning_rate=0.001
#----- global variables End ------
def run_cnn(mymode, last_date, names, mydata, mylabels, run_id):

    def cnn_model_fn(cnndata, mylabels, mode):
        input_layer = tf.reshape(cnndata, [-1, 4, 5, 1])
        conv = tf.layers.conv2d(
            inputs=input_layer,
            filters=16,
            kernel_size=[2, 3],
            padding="same",
            activation=tf.nn.relu)
        print(conv.shape.dims)
        pool = tf.layers.max_pooling2d(inputs=conv, pool_size=[2, 2], strides=2)
        pool_dims = pool.shape.as_list()[1] * pool.shape.as_list()[2] * pool.shape.as_list()[3]
        pool_flat = tf.reshape(pool, [-1, pool_dims])
        dense = tf.layers.dense(inputs=pool_flat, units=nb_of_neurons, activation=tf.nn.relu)
        dropout = tf.layers.dropout(
            inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
        logits = tf.layers.dense(inputs=dropout, units=2)
        predictions = {
            "classes": tf.argmax(input=logits, axis=1),
            "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
        }
        if mode == tf.estimator.ModeKeys.PREDICT:
            return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
        loss = tf.losses.sparse_softmax_cross_entropy(labels=mylabels, logits=logits)
        print(loss)
        if mode == tf.estimator.ModeKeys.TRAIN:
            optimizer = tf.train.GradientDescentOptimizer(learning_rate=model_learning_rate)
            train_op = optimizer.minimize(
                loss=loss,
                global_step=tf.train.get_global_step())
            return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
        eval_metric_ops = {
            "accuracy": tf.metrics.accuracy(
                labels=mylabels, predictions=predictions["classes"])
        }
        return tf.estimator.EstimatorSpec(
            mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)

    if mymode == 'TRAIN':
        mode = tf.estimator.ModeKeys.TRAIN
        cnn_classifier = tf.estimator.Estimator(
            model_fn=cnn_model_fn(mydata, mylabels, mode), model_dir="/sess")
        tensors_to_log = {"probabilities": "softmax_tensor"}
        logging_hook = tf.train.LoggingTensorHook(
            tensors=tensors_to_log, every_n_iter=50)
        train_input_fn = tf.estimator.inputs.numpy_input_fn(
            x=mydata,
            y=mylabels,
            batch_size=100,
            num_epochs=None,
            shuffle=True)
        cnn_classifier.train(
            input_fn=train_input_fn,
            steps=1,
            hooks=[logging_hook])
        cnn_classifier.train(input_fn=train_input_fn, steps=1000)
    elif mymode == 'PREDICT':
        mode = tf.estimator.ModeKeys.PREDICT
        cnn_classifier = tf.estimator.Estimator(
            model_fn=cnn_model_fn(mydata, mylabels, mode), model_dir="/sess")
        tensors_to_log = {"probabilities": "softmax_tensor"}
        logging_hook = tf.train.LoggingTensorHook(
            tensors=tensors_to_log, every_n_iter=50)
        eval_input_fn = tf.estimator.inputs.numpy_input_fn(
            x=mydata,
            y=mylabels,
            num_epochs=1,
            shuffle=False)
        eval_results = cnn_classifier.evaluate(input_fn=eval_input_fn)
    else:
        print('**** ->*** ???? ***')
This is called as a module from another Python script, which passes in all the input data, as follows:
mymode: one of ['PREDICT', 'TRAIN']
last_date: not relevant
names: not relevant
mydata: np array of shape (3195, 20), of float values in [0., 1.]
mylabels: np array of shape (3195,), of int values in [0, 1]
run_id: not relevant
Finally, the error appears right after the train_op is built (i.e. at tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)), as follows:
...
File "C:\Users\Fulviooo\Anaconda3\lib\site-packages\tensorflow\python\util\function_utils.py", line 56, in fn_args
args = tf_inspect.getfullargspec(fn).args
File "C:\Users\Fulviooo\Anaconda3\lib\site-packages\tensorflow\python\util\tf_inspect.py", line 216, in getfullargspec
if d.decorator_argspec is not None), _getfullargspec(target))
File "C:\Users\Fulviooo\Anaconda3\lib\inspect.py", line 1095, in getfullargspec
raise TypeError('unsupported callable') from ex
TypeError: unsupported callable
I hope that someone can enlighten me about where the error is and how to fix it.
Furthermore, I'd be pleased to receive any other suggestions for improvement.
Thanks
In practice, the problem is that this Estimator is quite rigid and expects variables with predefined names and formats.
That is, setting up the expected names:
train_data = mydata
train_labels = mylabels
and the expected format (a dict):
x={"x": train_data}
then it runs.
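For completeness, a minimal sketch of the wiring once the names and dict format are in place. This assumes a model_fn with the standard (features, labels, mode) signature, passed uncalled so the Estimator can inspect its argument list itself, which is what fn_args was choking on in the traceback above:

cnn_classifier = tf.estimator.Estimator(
    model_fn=cnn_model_fn, model_dir="/sess")
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"x": train_data},  # dict format expected by numpy_input_fn
    y=train_labels,
    batch_size=100,
    num_epochs=None,
    shuffle=True)
cnn_classifier.train(input_fn=train_input_fn, steps=1000)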
Related
In this tutorial, they successfully log the softmax function by giving a name to the tf.nn.softmax node.
tf.nn.softmax(logits, name="softmax_tensor") # giving name to the node
.
.
.
tensors_to_log = {"probadfabilities": "softmax_tensor"} # logging the node
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=50)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": eval_data},
y=eval_labels,
num_epochs=1,
shuffle=False)
Now, instead of the softmax, I would like to also log the output of the last Dense layer.
logits = tf.layers.dense(inputs=dropout, units=10, name='logits')
.
.
.
tensors_to_log = {"last_layer": "logits"}
But it gives me the following error
KeyError: "The name 'logits:0' refers to a Tensor which does not
exist. The operation, 'logits', does not exist in the graph."
My question is: how do I log the output of a layer in TensorFlow?
My code
import tensorflow as tf
import numpy as np
import os
tf.logging.set_verbosity(tf.logging.INFO)
def cnn_model_fn(features, labels, mode):
    """Model function for CNN."""
    # Input Layer
    input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])
    # Convolutional Layer #1
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=128,
        kernel_size=[7, 7],
        padding="same",
        activation=tf.nn.relu)
    # Pooling Layer #1
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    # Convolutional Layer #2 and Pooling Layer #2
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=256,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    # Dense Layer
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 256])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
    # Logits Layer
    logits = tf.layers.dense(inputs=dropout, units=10, name='logits')
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Calculate Loss (for both TRAIN and EVAL modes)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    # Add evaluation metrics (for EVAL mode)
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels, predictions=predictions["classes"])
    }
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)

((train_data, train_labels),
 (eval_data, eval_labels)) = tf.keras.datasets.mnist.load_data()
train_data = train_data/np.float32(255)
train_labels = train_labels.astype(np.int32)  # not required
eval_data = eval_data/np.float32(255)
eval_labels = eval_labels.astype(np.int32)
mnist_classifier = tf.estimator.Estimator(
    model_fn=cnn_model_fn, model_dir="./mnist_convnet_model")
# Set up logging for predictions
tensors_to_log = {"last_layer": "logits"}
logging_hook = tf.train.LoggingTensorHook(
    tensors=tensors_to_log, every_n_iter=50)
# Train the model
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"x": train_data},
    y=train_labels,
    batch_size=100,
    num_epochs=None,
    shuffle=True)
# train one step and display the probabilities
mnist_classifier.train(
    input_fn=train_input_fn,
    steps=10,
    hooks=[logging_hook])
In the official TensorFlow ResNet implementation, they use tf.identity for this purpose:
logits = tf.identity(logits, 'logits')
This works because name='logits' on tf.layers.dense names the layer's scope, so its output tensor ends up with a name like 'logits/BiasAdd:0' rather than 'logits:0'; wrapping the output in tf.identity creates an op named exactly 'logits', which LoggingTensorHook can then resolve.
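A minimal sketch of the combination, using the same names as the code above:

logits = tf.layers.dense(inputs=dropout, units=10)
logits = tf.identity(logits, 'logits')  # creates an op named exactly 'logits'
tensors_to_log = {"last_layer": "logits"}  # now resolvable in the graph
logging_hook = tf.train.LoggingTensorHook(
    tensors=tensors_to_log, every_n_iter=50)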
I'm trying to implement a network for the MNIST dataset using custom Estimators.
Here is my input function:
def input_train_fn():
    train, test = tf.keras.datasets.mnist.load_data()
    mnist_x, mnist_y = train
    mnist_y = tf.cast(mnist_y, tf.int32)
    mnist_x = tf.cast(mnist_x, tf.int32)
    features = {'image': mnist_x}
    labels = mnist_y
    dataset = tf.data.Dataset.from_tensor_slices((features, labels))
    return dataset
Here is how I define my model:
def my_model(features, labels, mode, params):
    # create net
    net = tf.feature_column.input_layer(features, params['feature_columns'])
    # create hidden layers
    for unit in params['hidden_units']:
        net = tf.layers.dense(net, unit, tf.nn.relu)
    # create output layer
    legits = tf.layers.dense(net, params['n_classes'], activation=None)
    # predict (if in predict mode)
    predicted_classes = tf.arg_max(legits, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'class_ids': predicted_classes,
            'probabilities': tf.nn.softmax(legits),
            'logits': legits
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)
    # define loss function
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=legits)
    # evaluation metrics
    accuracy = tf.metrics.accuracy(labels=labels,
                                   predictions=predicted_classes,
                                   name='acc_op')
    metrics = {'accuracy': accuracy}
    tf.summary.scalar('accuracy', accuracy[1])
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
And this is how I call the train function:
feature_columns = [tf.feature_column.numeric_column('image', shape=[28, 28], dtype=tf.int32), ]
classifier = tf.estimator.Estimator(model_fn=my_model,
                                    params={
                                        'feature_columns': feature_columns,
                                        'hidden_units': [10, 10],
                                        'n_classes': 10,
                                    }, model_dir='/model')
classifier.train(input_fn=input_train_fn, steps=10)
As far as I can see, I'm doing everything by the book, both for Estimators and feature columns, but I get the error:
ValueError: Cannot reshape a tensor with 784 elements to shape [28,784] (21952 elements) for 'input_layer/image/Reshape' (op: 'Reshape') with input shapes: [28,28], 2 and with input tensors computed as partial shapes: input1 = [28,784].
Is there anything I'm missing?
Thanks in advance; any help is appreciated.
First, you need to produce batches: without .batch(), the dataset yields single (28, 28) examples, and the feature-column input layer then misreads the first image dimension as the batch dimension, which is where the [28, 784] reshape in the error comes from. For more detail see https://www.tensorflow.org/guide/datasets
...
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
dataset = dataset.batch(size)
return dataset
Then reshape your image and cast it to float. The -1 stands for the batch size; it will be filled in during training. Casting the labels is optional, depending on the dtype provided.
net = tf.cast(tf.reshape(features['image'], [-1, 28 * 28]), tf.float32)  # features is the dict from input_train_fn, keyed 'image'
labels = tf.cast(labels, tf.int64)
net = tf.layers.dense(net, 10, tf.nn.relu)
legits = tf.layers.dense(net, 10, activation=None)
predicted_classes = tf.arg_max(legits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
    predictions = {
        'class_ids': predicted_classes,
        'probabilities': tf.nn.softmax(legits),
        'logits': legits
    }
    return tf.estimator.EstimatorSpec(mode, predictions=predictions)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=legits)
if mode == tf.estimator.ModeKeys.EVAL:
    return tf.estimator.EstimatorSpec(mode, loss=loss)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
classifier = tf.estimator.Estimator(model_fn=my_model)
classifier.train(input_fn=lambda: input_train_fn(), steps=10)
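Putting the two fixes together, the input function might look like this (a sketch; the batch size of 32 and shuffle buffer of 1000 are arbitrary choices on my part):

def input_train_fn(batch_size=32):
    train, test = tf.keras.datasets.mnist.load_data()
    mnist_x, mnist_y = train
    features = {'image': mnist_x}
    dataset = tf.data.Dataset.from_tensor_slices((features, mnist_y))
    dataset = dataset.shuffle(1000).batch(batch_size)  # batch before returning
    return dataset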
I am trying to feed serialized data to my saved model.
I suspect either the model is not exported correctly, or serialization of the data is not done correctly. Any hints or suggestions would be appreciated.
import tensorflow as tf
import numpy as np
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': np.array([[1,2,3,4,3], [1,3,4,2,4], [10,2,4,1.3,4], [1,3,5.2,9, 0.3]]).astype(np.float32)},
    y=np.array([0,0,1,1]).astype(np.float32),
    batch_size=2,
    shuffle=True
)

eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': np.array([[1,2,3,1,4], [1,23,4,1,90]]).astype(np.float32)},
    y=np.array([0,1]).astype(np.float32),
    batch_size=2,
    num_epochs=1,
    shuffle=False
)

def my_model(features, labels, mode, params):
    net = features['x']
    net = tf.layers.dense(net, 32, activation=tf.nn.relu)
    logits = tf.layers.dense(net, 1)

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=logits,
            export_outputs={'logits': tf.estimator.export.PredictOutput(logits)}
        )

    loss = tf.reduce_sum(tf.square(labels - logits))

    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer(learning_rate=0.02)
        train_op = optimizer.minimize(loss=loss,
                                      global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss)

train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=5)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)
my_estimator = tf.estimator.Estimator(model_fn=my_model)
tf.estimator.train_and_evaluate(my_estimator, train_spec, eval_spec)

saved_model = my_estimator.export_savedmodel(
    'foo',
    tf.estimator.export.build_parsing_serving_input_receiver_fn(
        {'x': tf.FixedLenFeature([5], tf.float32)}))
predict_fn = tf.contrib.predictor.from_saved_model(saved_model)
features = tf.train.Features(feature={'x': tf.train.Feature(float_list=tf.train.FloatList(value=[1,2,3,4,1]))})
ex = tf.train.Example(features=features)
prediction = predict_fn({'examples': ex.SerializeToString()})
Here is the error message:
ValueError: Cannot feed value of shape () for Tensor 'input_example_tensor:0', which has shape '(?,)'
It turned out that a list of serialized tf.train.Example protos had to be provided.
In the last line, prediction = predict_fn({'examples': ex.SerializeToString()}) is changed to prediction = predict_fn({'examples': [ex.SerializeToString()]}).
I figured this out thanks to this nice tutorial:
http://shzhangji.com/blog/2018/05/14/serve-tensorflow-estimator-with-savedmodel/
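For completeness, the corrected prediction call, using the same variables as above:

features = tf.train.Features(
    feature={'x': tf.train.Feature(float_list=tf.train.FloatList(value=[1, 2, 3, 4, 1]))})
ex = tf.train.Example(features=features)
# A list of serialized examples matches the (?,) shape of 'input_example_tensor:0'.
prediction = predict_fn({'examples': [ex.SerializeToString()]})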
I have this code, which is just the MNIST TensorFlow example, and I would like to get the predictions for the test data.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Imports
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
# Our application logic will be added here
def cnn_model_fn(features, labels, mode):
    """Model function for CNN."""
    # Input Layer
    input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])
    # Convolutional Layer #1
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pooling Layer #1
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    # Convolutional Layer #2 and Pooling Layer #2
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    # Dense Layer
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
    # Logits Layer
    logits = tf.layers.dense(inputs=dropout, units=10)
    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "classes": tf.argmax(input=logits, axis=1),
        # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
        # `logging_hook`.
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Calculate Loss (for both TRAIN and EVAL modes)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          train_op=train_op)
    # Add evaluation metrics (for EVAL mode)
    if mode == tf.estimator.ModeKeys.EVAL:
        eval_metric_ops = {
            "accuracy": tf.metrics.accuracy(
                labels=labels, predictions=predictions["classes"])}
        return tf.estimator.EstimatorSpec(
            mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
    predicted_classes = tf.argmax(logits, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'class_ids': predicted_classes[:, tf.newaxis],
            'probabilities': tf.nn.softmax(logits),
            'logits': logits,
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)
def main(unused_argv):
    # Load training and eval data
    mnist = tf.contrib.learn.datasets.load_dataset("mnist")
    train_data = mnist.train.images[:54000]  # Returns np.array
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)[:54000]
    eval_data = train_data[:500]  # Returns np.array
    eval_labels = train_labels[:500]  # np.asarray(mnist.test.labels, dtype=np.int32)
    test_data = train_data[1000:]
    test_label = train_labels[1000:]
    # eval_data = mnist.test.images  # Returns np.array
    # eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
    # Create the Estimator
    mnist_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="./tmp/mnist_convnet_model")
    # Set up logging for predictions
    tensors_train_to_log = {"probabilities": "softmax_tensor"}
    # tensors_eval_to_log = {"accuracy": "classes"}
    logging_train_hook = tf.train.LoggingTensorHook(
        tensors=tensors_train_to_log, every_n_iter=6000)
    # logging_eval_hook = tf.train.LoggingTensorHook(
    #     tensors=tensors_eval_to_log, every_n_iter=1000)
    # Train the model
    print("Training Data length:", np.shape(train_data))
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": train_data},
        y=train_labels,
        batch_size=10,
        num_epochs=1,
        shuffle=True)
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": eval_data},
        y=eval_labels,
        num_epochs=1,
        shuffle=True)
    # input_fn=train_input_fn,
    # steps=20000,
    # hooks=[logging_hook])
    # Evaluate the model and print results
    # eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
    # print(eval_results)
    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=6500)
    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)
    tf.estimator.train_and_evaluate(estimator=mnist_classifier,
                                    train_spec=train_spec, eval_spec=eval_spec)
    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": test_data[0]},
        y=test_label,
        num_epochs=1,
        shuffle=True)
    # mnist_classifier.train(
    test_spec = tf.estimator.EvalSpec(input_fn=test_input_fn)
    predictions = mnist_classifier.predict(test_spec)
    print(predictions["logits"][0])
    # print(predictions["logits"])
    # I got an error when I tried to print this

if __name__ == "__main__":
    tf.app.run()
The code works well and I get a trained model, but I couldn't find a way to print the predictions. Has anyone run this example and printed the predicted data, not just the evaluation accuracy?
try this:
training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
training_predictions = np.array([item['predictions'][0] for item in training_predictions])
validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])
It is a generator object and, to print an element, you should use
print(list(predictions)[0])
Note that list(predictions) consumes the generator, so call it only once. The following should print all the predictions:
preds = list(predictions)
for p in preds:
    print(p)
This should also work, pulling 300 predictions from the generator one at a time:
outputs = [list(next(predictions).values())[0] for i in range(300)]
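For this particular model_fn, whose predictions dict has the keys "classes" and "probabilities", the simplest pattern is to iterate the generator directly (note that predict takes an input_fn, not an EvalSpec):

predictions = mnist_classifier.predict(input_fn=test_input_fn)
for pred in predictions:
    print(pred["classes"], pred["probabilities"])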
I have a model function which accepts features, targets, and mode, but when I add tf.keras layers I get the exception pred must be a Tensor, a Variable, or a Python bool.
However, when I run the same code using keras directly (i.e. from keras.layers) instead of tf.keras, it works.
Code:
def model_fn(features, labels, mode):
    if mode == tf.estimator.ModeKeys.TRAIN:
        tf.keras.backend.set_learning_phase(1)
    else:
        tf.keras.backend.set_learning_phase(0)

    input_feature = features['x']
    table = lookup.index_table_from_file(vocabulary_file='vocab.txt', num_oov_buckets=1, default_value=-1)
    text = tf.squeeze(input_feature, [1])
    words = tf.string_split(text)
    densewords = tf.sparse_tensor_to_dense(words, default_value=PADWORD)
    numbers = table.lookup(densewords)
    padding = tf.constant([[0, 0], [0, MAX_FEATURES]])
    padded = tf.pad(numbers, padding)
    sliced = tf.slice(padded, [0, 0], [-1, MAX_FEATURES])
    print('words_sliced={}'.format(words))

    # embeds = tf.keras.layers.Embedding(MAX_FEATURES, 50, input_length=MAX_FEATURES)(sliced)
    embeds = tf.contrib.layers.embed_sequence(sliced, vocab_size=MAX_FEATURES, embed_dim=50)
    print('words_embed={}'.format(embeds))

    f1 = tf.keras.layers.Dropout(0.2)(embeds)
    f1 = tf.keras.layers.Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1)(f1)
    f1 = tf.keras.layers.GlobalAveragePooling1D()(f1)
    # f1 = layers.BatchNormalization()(f1)
    f1 = tf.keras.layers.Dense(hidden_dims)(f1)
    f1 = tf.keras.layers.Dropout(0.5)(f1)
    f1 = tf.keras.layers.Activation('relu')(f1)
    logits = tf.keras.layers.Dense(11)(f1)

    predictions_dict = {
        'class': tf.argmax(logits, 1),
        'prob': tf.nn.softmax(logits)
    }
    prediction_output = tf.estimator.export.PredictOutput({
        "classes": tf.argmax(input=logits, axis=1),
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")})

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions_dict, export_outputs={
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: prediction_output
        })

    # one_hot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=11)
    loss = tf.losses.sparse_softmax_cross_entropy(labels, logits=logits)

    if mode == tf.contrib.learn.ModeKeys.TRAIN:
        train_op = tf.contrib.layers.optimize_loss(loss, tf.contrib.framework.get_global_step(), optimizer='Adam',
                                                   learning_rate=0.001)
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    eval_metrics_ops = {
        'accuracy': tf.metrics.accuracy(labels=labels, predictions=predictions_dict['class'])
    }
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics_ops)
When I execute the above script, I get this exception:
TypeError: pred must be a Tensor, a Variable, or a Python bool.
But when I use keras directly (from keras) instead of tf.keras, it works. What is going wrong here?
Code:
if mode == tf.estimator.ModeKeys.TRAIN:
    tf.keras.backend.set_learning_phase(True)
else:
    tf.keras.backend.set_learning_phase(False)
Setting learning_phase to True or False (Python bools, rather than the ints 1 and 0 used in the question) solves the problem: the dropout layers route the learning phase into a conditional whose predicate must be a Tensor, a Variable, or a Python bool, which is exactly what the error message is complaining about.
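For what it's worth, an alternative that sidesteps the global learning phase entirely (my own suggestion, not part of the original answer) is to pass the training flag explicitly when calling the layers that behave differently at train time; tf.keras.layers.Dropout accepts a training argument in its call:

# Hypothetical variant: drive dropout from the Estimator mode directly.
f1 = tf.keras.layers.Dropout(0.2)(embeds, training=(mode == tf.estimator.ModeKeys.TRAIN))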