tensorflow feature_column tries to reshape features - python

I'm trying to implement a network for the MNIST dataset using custom estimators.
Here is my input function:
def input_train_fn():
    train, test = tf.keras.datasets.mnist.load_data()
    mnist_x, mnist_y = train
    mnist_y = tf.cast(mnist_y, tf.int32)
    mnist_x = tf.cast(mnist_x, tf.int32)
    features = {'image': mnist_x}
    labels = mnist_y
    dataset = tf.data.Dataset.from_tensor_slices((features, labels))
    return dataset
Here is how I define my model:
def my_model(features, labels, mode, params):
    # create net
    net = tf.feature_column.input_layer(features, params['feature_columns'])
    # create hidden layers
    for unit in params['hidden_units']:
        net = tf.layers.dense(net, unit, tf.nn.relu)
    # create output layer
    legits = tf.layers.dense(net, params['n_classes'], activation=None)
    # predict (if in predict mode)
    predicted_classes = tf.arg_max(legits, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'class_ids': predicted_classes,
            'probabilities': tf.nn.softmax(legits),
            'logits': legits
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)
    # define loss function
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=legits)
    # evaluation metrics
    accuracy = tf.metrics.accuracy(labels=labels,
                                   predictions=predicted_classes,
                                   name='acc_op')
    metrics = {'accuracy': accuracy}
    tf.summary.scalar('accuracy', accuracy[1])
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
And this is how I call the train function:
feature_columns = [tf.feature_column.numeric_column('image', shape=[28, 28], dtype=tf.int32)]
classifier = tf.estimator.Estimator(model_fn=my_model,
                                    params={
                                        'feature_columns': feature_columns,
                                        'hidden_units': [10, 10],
                                        'n_classes': 10,
                                    }, model_dir='/model')
classifier.train(input_fn=input_train_fn, steps=10)
As far as I can see, I'm doing everything by the book both for estimators and feature columns, but I get the error:
ValueError: Cannot reshape a tensor with 784 elements to shape [28,784] (21952 elements) for 'input_layer/image/Reshape' (op: 'Reshape') with input shapes: [28,28], 2 and with input tensors computed as partial shapes: input1 = [28,784].
Is there anything I'm missing?
Thanks in advance; any help is appreciated.

First, you need to produce batches: without dataset.batch(...), from_tensor_slices yields single [28, 28] images, so input_layer treats that first dimension of 28 as the batch dimension while expecting 784 features per example, which is exactly the reshape error you see. For more detail, see https://www.tensorflow.org/guide/datasets
...
    dataset = tf.data.Dataset.from_tensor_slices((features, labels))
    dataset = dataset.batch(size)
    return dataset
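Put together, a minimal complete version of the input function might look like this (a sketch; the shuffle buffer and batch size of 32 are my own arbitrary choices, not from the original post):

def input_train_fn(batch_size=32):
    train, _ = tf.keras.datasets.mnist.load_data()
    mnist_x, mnist_y = train
    features = {'image': mnist_x}
    dataset = tf.data.Dataset.from_tensor_slices((features, mnist_y))
    # Shuffle, repeat, and batch so the model receives [batch, 28, 28] tensors.
    dataset = dataset.shuffle(1000).repeat().batch(batch_size)
    return dataset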
Then reshape your image and cast it to float. The -1 stands for batch_size and will be filled in during training. Casting the labels is optional, depending on the datatype provided.
net = tf.cast(tf.reshape(features['image'], [-1, 28 * 28]), tf.float32)  # features is the dict from the input_fn
labels = tf.cast(labels, tf.int64)
net = tf.layers.dense(net, 10, tf.nn.relu)
legits = tf.layers.dense(net, 10, activation=None)
predicted_classes = tf.argmax(legits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
    predictions = {
        'class_ids': predicted_classes,
        'probabilities': tf.nn.softmax(legits),
        'logits': legits
    }
    return tf.estimator.EstimatorSpec(mode, predictions=predictions)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=legits)
if mode == tf.estimator.ModeKeys.EVAL:
    return tf.estimator.EstimatorSpec(mode, loss=loss)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
classifier = tf.estimator.Estimator(model_fn=my_model)
classifier.train(input_fn=lambda: input_train_fn(), steps=10)
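For completeness, the feature-column approach from the question should also work once the input is batched — a sketch under that assumption (numeric_column converts the int32 pixels to floats internally, so batching is the only change):

# Assumes input_train_fn now batches, so features['image'] is [batch, 28, 28].
feature_columns = [
    tf.feature_column.numeric_column('image', shape=[28, 28], dtype=tf.int32)
]
classifier = tf.estimator.Estimator(model_fn=my_model,  # the original model_fn
                                    params={
                                        'feature_columns': feature_columns,
                                        'hidden_units': [10, 10],
                                        'n_classes': 10,
                                    })
classifier.train(input_fn=input_train_fn, steps=10)
# input_layer now flattens each [28, 28] example to the 784 features the
# column declares, so the Reshape error goes away.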

Related

tensorflow:Error reported to Coordinator: Items of feature_columns must be a _FeatureColumn. Given (type <class 'collections.NumericColumn'>)

I tried to use tf estimator to build a logistic regression model. I used the iris dataset and it ran successfully on my computer. However, when I tried to apply this model in a cluster (using train_and_evaluate instead of classifier.train), I ran into this problem.
Python version: 3.6.8
TensorFlow version: 1.13.1
Here is the code running locally:
The iris dataset only contains numeric data, so feature_columns is a list of NumericColumn.
FUTURES = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth', 'Species']
feature_columns = []
for key in FUTURES:
    feature_columns.append(tf.feature_column.numeric_column(key=key))
Define the estimator, passing feature_columns into params:
classifier = tf.estimator.Estimator(
    model_fn=my_model_fn,
    model_dir=models_path,
    params={
        'feature_columns': feature_columns,
        'n_classes': 3,
    })
Define model_fn:
def my_model_fn(features, labels, mode, params):
    net = tf.feature_column.input_layer(features, params['feature_columns'])
    logits = tf.layers.dense(net, params['n_classes'], activation=None)
    predicted_classes = tf.argmax(logits, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {'logits': logits}
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
        train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
    accuracy = tf.metrics.accuracy(labels=labels,
                                   predictions=predicted_classes,
                                   name='acc_op')
    metrics = {'accuracy': accuracy}
    tf.summary.scalar('accuracy', accuracy[1])
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)
This code works well and generates some results.
-------------------------------------------------------------
Then I wanted to train it in a cluster. my_model_fn is the same as the previous one, and self._feature_numeric_col is still a list of NumericColumn.
class LogisticReg():
    def __init__(self):
        self._feature_col = x.columns.tolist()
        self._feature_numeric_col = []
        for key in self._feature_col:
            self._feature_numeric_col.append(tf.feature_column.numeric_column(key=key))
        self.estimator = tf.estimator.Estimator(model_fn=self.my_model_fn,
                                                model_dir=self.model_path,
                                                config=self.config,
                                                params={'feature_columns': self._feature_numeric_col})

    def my_model_fn(self, features, labels, mode, params):
        net = tf.feature_column.input_layer(features, params['feature_columns'])
        logits = tf.layers.dense(net, self.n_class, activation=None)
        predicted_classes = tf.argmax(logits, 1)
        if mode == tf.estimator.ModeKeys.PREDICT:
            predictions = {'logits': logits}
            return tf.estimator.EstimatorSpec(mode, predictions=predictions)
        loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
        if mode == tf.estimator.ModeKeys.TRAIN:
            optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
            train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
            return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
        accuracy = tf.metrics.accuracy(labels=labels, predictions=predicted_classes)
        metrics = {'accuracy': accuracy}
        tf.summary.scalar('accuracy', accuracy[1])
        if mode == tf.estimator.ModeKeys.EVAL:
            return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)
Use the train_and_evaluate function instead of train/eval/predict:
# input_fn
def input_fn(self, X, y, mode, batch_size):
    y = y.astype(np.int32)
    X = X.astype(np.float32)
    dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))  # X, y: pandas
    if mode == 'train':
        dataset = dataset.shuffle(500)
        dataset = dataset.repeat()
    dataset = dataset.batch(batch_size)
    return dataset

# train_spec
train_spec = tf.estimator.TrainSpec(input_fn=lambda: self.input_fn(x_train, y_train, 'train', batch_size),
                                    max_steps=n_epochs)
# eval_spec
eval_spec = tf.estimator.EvalSpec(input_fn=lambda: self.input_fn(x_valid, y_valid, 'valid', batch_size),
                                  start_delay_secs=30, throttle_secs=30, steps=None)
tf.estimator.train_and_evaluate(self.estimator, train_spec, eval_spec)
I expected the cluster version to generate similar output to the local one. However, I got this error:
Traceback (most recent call last):
  File "/usr/local/bin/python3/lib/python3.6/site-packages/tensorflow/python/training/coordinator.py", line 297, in stop_on_exception
    yield
  File "/usr/local/bin/python3/lib/python3.6/site-packages/tensorflow/python/distribute/mirrored_strategy.py", line 852, in run
    self.main_result = self.main_fn(*self.main_args, **self.main_kwargs)
  File "/usr/local/bin/python3/lib/python3.6/site-packages/tensorflow_estimator/python/estimator/estimator.py", line 1112, in _call_model_fn
    model_fn_results = self._model_fn(features=features, **kwargs)
  File "/mnt/glusterfs/model-center/train/classify.py", line 51, in my_model_fn
    net = tf.feature_column.input_layer(features, params['feature_columns'])
  File "/usr/local/bin/python3/lib/python3.6/site-packages/tensorflow/python/feature_column/feature_column.py", line 302, in input_layer
    cols_to_output_tensors=cols_to_output_tensors)
  File "/usr/local/bin/python3/lib/python3.6/site-packages/tensorflow/python/feature_column/feature_column.py", line 181, in _internal_input_layer
    feature_columns = _normalize_feature_columns(feature_columns)
  File "/usr/local/bin/python3/lib/python3.6/site-packages/tensorflow/python/feature_column/feature_column.py", line 2263, in _normalize_feature_columns
    'Given (type {}): {}.'.format(type(column), column))
ValueError: Items of feature_columns must be a _FeatureColumn. Given (type <class 'collections.NumericColumn'>): NumericColumn(key='sepal_length', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None).
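No accepted fix is shown here, but one hedged sketch of a workaround: the error type <class 'collections.NumericColumn'> suggests the column objects were serialized and rebuilt as plain namedtuples when shipped to the workers, losing the _FeatureColumn base class. Rebuilding the columns inside model_fn from plain string keys (an assumption on my part, not a confirmed solution) would keep them genuine feature columns in every process:

# Hypothetical workaround: pass plain strings through params and rebuild the
# columns on each worker, instead of passing pickled column objects.
params = {'feature_keys': self._feature_col}  # plain strings survive serialization

def my_model_fn(self, features, labels, mode, params):
    feature_columns = [tf.feature_column.numeric_column(key=key)
                       for key in params['feature_keys']]
    net = tf.feature_column.input_layer(features, feature_columns)
    logits = tf.layers.dense(net, self.n_class, activation=None)
    # ... the rest of my_model_fn stays exactly as above ...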

Tensorboard does not show any scalar summary from estimator

Following the instructions on tf custom estimator,
I have created a CNN estimator and tried to train it. While training, I initialized TensorBoard and was hoping to see some visualizations about the training steps. However, TensorBoard only showed the graph of my custom estimator, but none of the scalar values I have defined.
Here's roughly what I have in code:
def model_fn(features, labels, mode, params=None):
    tf.logging.set_verbosity(tf.logging.INFO)
    n_classes = params['n_classes']
    base_learning_rate = params['learning_rate']
    decay_rate = params['decay_rate']
    embedding_dim = params['embedding_dim']
    x = VGG_block1(features, (3, 3), 64, name='block1_1')
    x = VGG_block1(x, (3, 3), 128, name='block1_2')
    x = VGG_block1(x, (3, 3), 256, name='block1_3', regularizer=tf.contrib.layers.l1_regularizer(.1))
    x = VGG_block2(x, (3, 3), 512, name='block2_4')
    x = VGG_block2(x, (3, 3), 1024, name='block2_5')
    x = conv2d(x, 512, (5, 5), padding='valid', normalizer_fn=batch_norm, activation_fn=tf.nn.leaky_relu,
               weights_initializer=he_uniform())
    x = flatten(x)
    embedding = fully_connected(x, embedding_dim)
    logits = fully_connected(embedding, n_classes)
    # make predictions
    predictions = {
        'classes': tf.argmax(logits, axis=1, name='classes'),
        'probabilities': tf.nn.softmax(logits, name='softmax'),
        'embeddings': embedding
    }
    # if we are in prediction mode
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # otherwise define losses for training
    c_loss, center = center_loss(embedding, labels, .9, n_classes)
    xent_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
    total_loss = xent_loss + 0.5 * c_loss
    # evaluation methods
    accuracy, update_op = tf.metrics.accuracy(labels=labels, predictions=predictions['classes'], name='accuracy')
    batch_acc = tf.reduce_mean(tf.cast(tf.equal(tf.cast(labels, tf.int64), predictions['classes']), tf.float32))
    tf.summary.scalar('batch_acc', batch_acc)
    tf.summary.scalar('streaming_acc', update_op)
    tf.summary.scalar('total_loss', total_loss)
    tf.summary.scalar('center_loss', c_loss)
    tf.summary.scalar('xent_loss', xent_loss)
    # training mode
    if mode == tf.estimator.ModeKeys.TRAIN:
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        global_step = tf.Variable(0, trainable=False)
        global_step_op = tf.assign(global_step, global_step + 1)
        learning_rate = tf.train.exponential_decay(base_learning_rate, global_step, 8000, decay_rate, staircase=True)
        optimizer = tf.train.AdamOptimizer(learning_rate)
        with tf.control_dependencies(update_ops + [global_step_op]):
            objective = optimizer.minimize(total_loss)
        return tf.estimator.EstimatorSpec(mode=mode, loss=total_loss, train_op=objective)
    eval_metric_ops = {
        'accuracy': (accuracy, update_op)
    }
    return tf.estimator.EstimatorSpec(mode=mode, loss=total_loss, eval_metric_ops=eval_metric_ops)

X_train, X_test, y_train, y_test = load_data()
epochs = 10
batch_size = 64
n_classes = len(classes)
model_params = {'n_classes': n_classes,
                'learning_rate': 0.0001,
                'decay_rate': 0.5,
                'embedding_dim': 128}
model_dir = 'output'
face_classifier = tf.estimator.Estimator(model_fn=model_fn, params=model_params, model_dir=model_dir)
My TensorFlow version is 1.12.0.
Edit
Forgot to mention that I was using eager execution for this exercise; for unknown reasons, that was the cause of this bug.

As mentioned in the edit, disabling eager execution solved the problem.
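For reference, a minimal sketch of what that means in TF 1.x, where eager execution is opt-in, so the fix is simply not enabling it before the Estimator is built:

import tensorflow as tf

# Removing (or commenting out) this call keeps the program in graph mode,
# which lets the Estimator's summary hooks write the scalars to model_dir:
# tf.enable_eager_execution()

# In TF 2.x the explicit equivalent is:
# tf.compat.v1.disable_eager_execution()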

How to feed serialized data to tf saved model?

I am trying to feed serialized data to my saved model.
I suspect either the model is not exported correctly, or serialization of the data is not done correctly. Any hints or suggestions would be appreciated.
import tensorflow as tf
import numpy as np

train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': np.array([[1,2,3,4,3], [1,3,4,2,4], [10,2,4,1.3,4], [1,3,5.2,9,0.3]]).astype(np.float32)},
    y=np.array([0,0,1,1]).astype(np.float32),
    batch_size=2,
    shuffle=True
)

eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': np.array([[1,2,3,1,4], [1,23,4,1,90]]).astype(np.float32)},
    y=np.array([0,1]).astype(np.float32),
    batch_size=2,
    num_epochs=1,
    shuffle=False
)

def my_model(features, labels, mode, params):
    net = features['x']
    net = tf.layers.dense(net, 32, activation=tf.nn.relu)
    logits = tf.layers.dense(net, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=logits,
            export_outputs={'logits': tf.estimator.export.PredictOutput(logits)}
        )
    loss = tf.reduce_sum(tf.square(labels - logits))
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer(learning_rate=0.02)
        train_op = optimizer.minimize(loss=loss,
                                      global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss)

train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=5)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)
my_estimator = tf.estimator.Estimator(model_fn=my_model)
tf.estimator.train_and_evaluate(my_estimator, train_spec, eval_spec)

saved_model = my_estimator.export_savedmodel(
    'foo', tf.estimator.export.build_parsing_serving_input_receiver_fn({'x': tf.FixedLenFeature([5], tf.float32)}))
predict_fn = tf.contrib.predictor.from_saved_model(saved_model)
features = tf.train.Features(feature={'x': tf.train.Feature(float_list=tf.train.FloatList(value=[1,2,3,4,1]))})
ex = tf.train.Example(features=features)
prediction = predict_fn({'examples': ex.SerializeToString()})
Here is the error message:
ValueError: Cannot feed value of shape () for Tensor 'input_example_tensor:0', which has shape '(?,)'
It turned out that a list of serialized tf.train.Example protos had to be provided.
In the last line, prediction = predict_fn({'examples': ex.SerializeToString()}) is changed to prediction = predict_fn({'examples': [ex.SerializeToString()]}).
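For example, several inputs can be serialized and fed as one batch — a small sketch built on the same feature spec (the second feature vector is made up for illustration):

def make_example(values):
    # Build a tf.train.Example with the single float-list feature 'x'.
    feature = {'x': tf.train.Feature(float_list=tf.train.FloatList(value=values))}
    return tf.train.Example(features=tf.train.Features(feature=feature))

batch = [make_example(v).SerializeToString()
         for v in ([1, 2, 3, 4, 1], [0, 1, 0, 2, 5])]
prediction = predict_fn({'examples': batch})  # one output row per example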
I figured this out thanks to this nice tutorial:
http://shzhangji.com/blog/2018/05/14/serve-tensorflow-estimator-with-savedmodel/

Tensorflow 1.6 get the prediction output from estimator.predict()

I have this code, which is just the MNIST TensorFlow example, and I would like to get the predictions for the test data:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# Imports
import numpy as np
import tensorflow as tf

tf.logging.set_verbosity(tf.logging.INFO)

# Our application logic will be added here
def cnn_model_fn(features, labels, mode):
    """Model function for CNN."""
    # Input Layer
    input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])
    # Convolutional Layer #1
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pooling Layer #1
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    # Convolutional Layer #2 and Pooling Layer #2
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    # Dense Layer
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
    # Logits Layer
    logits = tf.layers.dense(inputs=dropout, units=10)
    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        "classes": tf.argmax(input=logits, axis=1),
        # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
        # `logging_hook`.
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # Calculate Loss (for both TRAIN and EVAL modes)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                          train_op=train_op)
    # Add evaluation metrics (for EVAL mode)
    if mode == tf.estimator.ModeKeys.EVAL:
        eval_metric_ops = {
            "accuracy": tf.metrics.accuracy(
                labels=labels, predictions=predictions["classes"])}
        return tf.estimator.EstimatorSpec(
            mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
    predicted_classes = tf.argmax(logits, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'class_ids': predicted_classes[:, tf.newaxis],
            'probabilities': tf.nn.softmax(logits),
            'logits': logits,
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

def main(unused_argv):
    # Load training and eval data
    mnist = tf.contrib.learn.datasets.load_dataset("mnist")
    train_data = mnist.train.images[:54000]  # Returns np.array
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)[:54000]
    eval_data = train_data[:500]  # Returns np.array
    eval_labels = train_labels[:500]  # np.asarray(mnist.test.labels, dtype=np.int32)
    test_data = train_data[1000:]
    test_label = train_labels[1000:]
    # eval_data = mnist.test.images  # Returns np.array
    # eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
    # Create the Estimator
    mnist_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="./tmp/mnist_convnet_model")
    # Set up logging for predictions
    tensors_train_to_log = {"probabilities": "softmax_tensor"}
    # tensors_eval_to_log = {"accuracy": "classes"}
    logging_train_hook = tf.train.LoggingTensorHook(
        tensors=tensors_train_to_log, every_n_iter=6000)
    # logging_eval_hook = tf.train.LoggingTensorHook(
    #     tensors=tensors_eval_to_log, every_n_iter=1000)
    # Train the model
    print("Training Data length:", np.shape(train_data))
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": train_data},
        y=train_labels,
        batch_size=10,
        num_epochs=1,
        shuffle=True)
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": eval_data},
        y=eval_labels,
        num_epochs=1,
        shuffle=True)
    # input_fn=train_input_fn,
    # steps=20000,
    # hooks=[logging_hook])
    # Evaluate the model and print results
    # eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
    # print(eval_results)
    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=6500)
    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)
    tf.estimator.train_and_evaluate(estimator=mnist_classifier,
                                    train_spec=train_spec, eval_spec=eval_spec)
    test_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": test_data[0]},
        y=test_label,
        num_epochs=1,
        shuffle=True)
    # mnist_classifier.train(
    test_spec = tf.estimator.EvalSpec(input_fn=test_input_fn)
    predictions = mnist_classifier.predict(test_spec)
    print(predictions["logits"][0])
    # print(predictions["logits"])
    # I got an error when I tried to print this

if __name__ == "__main__":
    tf.app.run()
The code works well in that I got a trained model, but I still couldn't find a way to print the predictions. Has anyone run this example and printed the predicted data, not just the evaluation accuracy?
Try this:
training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
training_predictions = np.array([item['predictions'][0] for item in training_predictions])
validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])
It is a generator object and, to print it, you should use
print(list(predictions)[0])
The following should print all the predictions; note that list(predictions) consumes the generator, so materialize it once and iterate:
preds = list(predictions)
for p in preds:
    print(p)
This should work
outputs = [list(next(predictions).values())[0] for i in range(300)]
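Applied to the question's estimator, a minimal sketch (note that Estimator.predict takes an input_fn, not an EvalSpec, and yields one dict per example with the keys defined in cnn_model_fn):

predictions = mnist_classifier.predict(input_fn=test_input_fn)
for pred in predictions:  # consume the generator exactly once
    print(pred['classes'], pred['probabilities'])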

Tensorflow 1.4 Keras issue with respect to model_fn

I have a model function which accepts features, targets, and mode, but when I add tf.keras layers I get the exception "pred must be a Tensor, a Variable, or a Python bool."
But when I run the same code without tf.keras, importing directly from keras (i.e. from keras.layers), it works.
Code:
def model_fn(features, labels, mode):
    if mode == tf.estimator.ModeKeys.TRAIN:
        tf.keras.backend.set_learning_phase(1)
    else:
        tf.keras.backend.set_learning_phase(0)
    input_feature = features['x']
    table = lookup.index_table_from_file(vocabulary_file='vocab.txt', num_oov_buckets=1, default_value=-1)
    text = tf.squeeze(input_feature, [1])
    words = tf.string_split(text)
    densewords = tf.sparse_tensor_to_dense(words, default_value=PADWORD)
    numbers = table.lookup(densewords)
    padding = tf.constant([[0, 0], [0, MAX_FEATURES]])
    padded = tf.pad(numbers, padding)
    sliced = tf.slice(padded, [0, 0], [-1, MAX_FEATURES])
    print('words_sliced={}'.format(words))
    # embeds = tf.keras.layers.Embedding(MAX_FEATURES, 50, input_length=MAX_FEATURES)(sliced)
    embeds = tf.contrib.layers.embed_sequence(sliced, vocab_size=MAX_FEATURES, embed_dim=50)
    print('words_embed={}'.format(embeds))
    f1 = tf.keras.layers.Dropout(0.2)(embeds)
    f1 = tf.keras.layers.Conv1D(filters, kernel_size, padding='valid', activation='relu', strides=1)(f1)
    f1 = tf.keras.layers.GlobalAveragePooling1D()(f1)
    # f1 = layers.BatchNormalization()(f1)
    f1 = tf.keras.layers.Dense(hidden_dims)(f1)
    f1 = tf.keras.layers.Dropout(0.5)(f1)
    f1 = tf.keras.layers.Activation('relu')(f1)
    logits = tf.keras.layers.Dense(11)(f1)
    predictions_dict = {
        'class': tf.argmax(logits, 1),
        'prob': tf.nn.softmax(logits)
    }
    prediction_output = tf.estimator.export.PredictOutput({
        "classes": tf.argmax(input=logits, axis=1),
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")})
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions_dict, export_outputs={
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: prediction_output
        })
    # one_hot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=11)
    loss = tf.losses.sparse_softmax_cross_entropy(labels, logits=logits)
    if mode == tf.contrib.learn.ModeKeys.TRAIN:
        train_op = tf.contrib.layers.optimize_loss(loss, tf.contrib.framework.get_global_step(), optimizer='Adam',
                                                   learning_rate=0.001)
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    eval_metrics_ops = {
        'accuracy': tf.metrics.accuracy(labels=labels, predictions=predictions_dict['class'])
    }
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics_ops)
When I execute the above script, I get this exception:
TypeError: pred must be a Tensor, a Variable, or a Python bool.
But when I use keras directly (from keras) without tf.keras, it works. What is going wrong here?
Code:
if mode == tf.estimator.ModeKeys.TRAIN:
    tf.keras.backend.set_learning_phase(True)
else:
    tf.keras.backend.set_learning_phase(False)
Setting learning_phase to True or False (rather than 1 or 0) solves the problem, presumably because tf.keras passes the learning phase as the predicate of an internal tf.cond, which accepts a Tensor, a Variable, or a Python bool, but not a plain int.
