TensorFlow GradientBoostedDecisionTreeClassifier error: "Dense float feature must be a matrix" - python

I am getting the error
“tensorflow.python.framework.errors_impl.InvalidArgumentError: Dense float feature must be a matrix.” when training with the estimator tensorflow.contrib.boosted_trees.estimator_batch.estimator.GradientBoostedDecisionTreeClassifier. I am using TensorFlow version 1.4.0. The same code works correctly if I change the estimator to tf.contrib.learn.DNNClassifier. In the code, a dictionary of features is passed via train_input_fn to tf.contrib.learn.Experiment.
Has anyone faced a similar error before?
#'tensorflow==1.4.0'
import tensorflow as tf
import argparse
import sys
import os
from tensorflow.contrib.boosted_trees.estimator_batch.estimator import GradientBoostedDecisionTreeClassifier
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow_transform.tf_metadata import metadata_io
from tensorflow_transform.saved import input_fn_maker
from tensorflow.contrib.learn.python.learn import learn_runner

RAW_METADATA_DIR = "raw_metadata"
CONTRACTED_METADATA_DIR = "contracted_metadata"
TRANSFORMED_METADATA_DIR = "transformed_metadata"
TRANSFORMED_TRAIN_DATA_FILE_PREFIX = "train"
TRANSFORMED_EVAL_DATA_FILE_PREFIX = "eval"
DATA_FILE_SUFFIX = ".tfrecord.gz"
TRANSFORM_FN_DIR = "transform_fn"
TARGET_FEATURE_COLUMN = 'target_field'

FEATURE_NUMERICAL_COLUMN_NAMES = [
    'feature1',
    'feature2',
    'feature3',
    'feature4',
    'feature5'
]
FEATURE_INTEGER_COLUMN_NAMES = [  # comment out fields that are not features
    'feature6',
    'feature7',
    'feature8',
    'feature9',
    'feature10'
]
def _parse_arguments(argv):
    """Parses command line arguments."""
    parser = argparse.ArgumentParser(
        description="Runs training on data.")
    parser.add_argument(
        "--model_dir", required=True, type=str,
        help="The directory where model outputs will be written")
    parser.add_argument(
        "--input_dir", required=True, type=str,
        help="GCS or local directory containing tensorflow-transform outputs.")
    parser.add_argument(
        "--batch_size", default=30, required=False, type=int,
        help="Batch size to use during training.")
    parser.add_argument(
        "--num_epochs", default=100, required=False, type=int,
        help="Number of epochs through the training set")
    args, _ = parser.parse_known_args(args=argv[1:])
    return args
def get_eval_metrics():
    return {
        "accuracy":
            tf.contrib.learn.MetricSpec(
                metric_fn=tf.contrib.metrics.streaming_accuracy,
                prediction_key=tf.contrib.learn.PredictionKey.CLASSES),
        "precision":
            tf.contrib.learn.MetricSpec(
                metric_fn=tf.contrib.metrics.streaming_precision,
                prediction_key=tf.contrib.learn.PredictionKey.CLASSES),
        "recall":
            tf.contrib.learn.MetricSpec(
                metric_fn=tf.contrib.metrics.streaming_recall,
                prediction_key=tf.contrib.learn.PredictionKey.CLASSES)
    }
def read_and_decode_single_record(input_dir, num_epochs,
                                  mode=tf.contrib.learn.ModeKeys.TRAIN):
    if mode == tf.contrib.learn.ModeKeys.TRAIN:
        num_epochs = num_epochs
        file_prefix = TRANSFORMED_TRAIN_DATA_FILE_PREFIX
    else:
        num_epochs = 1
        file_prefix = TRANSFORMED_EVAL_DATA_FILE_PREFIX
    transformed_metadata = metadata_io.read_metadata(
        os.path.join(input_dir, TRANSFORMED_METADATA_DIR))
    input_file_names = tf.train.match_filenames_once(
        os.path.join(input_dir, '{}*{}'.format(file_prefix, DATA_FILE_SUFFIX)))
    filename_queue = tf.train.string_input_producer(
        input_file_names, num_epochs=num_epochs, shuffle=True)
    reader = tf.TFRecordReader(options=tf.python_io.TFRecordOptions(
        tf.python_io.TFRecordCompressionType.GZIP))
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized=serialized_example,
        features=transformed_metadata.schema.as_feature_spec()
    )
    return features
def read_dataset(input_dir, num_epochs, batch_size,
                 mode=tf.contrib.learn.ModeKeys.TRAIN):
    def _input_fn():
        min_after_dequeue = 10000
        features = read_and_decode_single_record(input_dir, num_epochs, mode)
        features = tf.train.shuffle_batch(
            tensors=features,
            batch_size=batch_size,
            min_after_dequeue=min_after_dequeue,
            capacity=(min_after_dequeue + 3) * batch_size)
        target = features.pop(TARGET_FEATURE_COLUMN)
        return features, target
    return _input_fn
def specify_feature_columns():
    feature_columns = [
        tf.contrib.layers.real_valued_column(column_name=column_name)
        for column_name in FEATURE_NUMERICAL_COLUMN_NAMES]
    feature_columns.extend([
        tf.contrib.layers.real_valued_column(column_name=column_name)
        for column_name in FEATURE_INTEGER_COLUMN_NAMES])
    return feature_columns
def build_estimator(model_dir, config, params):
    print "Using gradient boosted decision trees estimator \n"
    learner_config = learner_pb2.LearnerConfig()
    learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
    learner_config.regularization.l1 = 0.0
    learner_config.regularization.l2 = 4.0 / params.batch_size
    learner_config.constraints.max_tree_depth = 4
    learner_config.growing_mode = learner_pb2.LearnerConfig.WHOLE_TREE
    return GradientBoostedDecisionTreeClassifier(
        learner_config=learner_config,
        examples_per_layer=params.batch_size,
        num_trees=100,
        center_bias=False,
        feature_columns=specify_feature_columns()
        # feature_engineering_fn=feature_engineering_fn
    )
def get_experiment_fn(args):
    config = tf.contrib.learn.RunConfig(save_checkpoints_steps=1000)
    def experiment_fn(output_dir):
        return tf.contrib.learn.Experiment(
            estimator=build_estimator(model_dir=output_dir,
                                      config=config,
                                      params=args),
            train_input_fn=read_dataset(args.input_dir,
                                        args.num_epochs, args.batch_size,
                                        mode=tf.contrib.learn.ModeKeys.TRAIN),
            eval_input_fn=read_dataset(args.input_dir,
                                       args.num_epochs, args.batch_size,
                                       mode=tf.contrib.learn.ModeKeys.EVAL),
            eval_metrics=get_eval_metrics())
    return experiment_fn
def run(args):
    learn_runner.run(get_experiment_fn(args), args.model_dir)

if __name__ == '__main__':
    args = _parse_arguments(sys.argv)
    run(args)
The full error trace:
WARNING:tensorflow:Using temporary folder as model directory: /var/folders/mg/sd4_qlyj4_lbh5ggfn6frvcr00fk8_/T/tmpPFhins
WARNING:tensorflow:From /Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/monitors.py:267: __init__ (from tensorflow.contrib.learn.python.learn.monitors) is deprecated and will be removed after 2016-12-05.
Instructions for updating:
Monitors are deprecated. Please use tf.train.SessionRunHook.
WARNING:tensorflow:Casting <dtype: 'int64'> labels to bool.
WARNING:tensorflow:Casting <dtype: 'int64'> labels to bool.
WARNING:tensorflow:Error encountered when serializing resources.
Type is unsupported, or the types of the items don't match field type in CollectionDef.
'_Resource' object has no attribute 'name'
2017-11-16 13:38:39.919664: I tensorflow/core/platform/cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA
WARNING:tensorflow:Error encountered when serializing resources.
Type is unsupported, or the types of the items don't match field type in CollectionDef.
'_Resource' object has no attribute 'name'
2017-11-16 13:38:48.810825: W tensorflow/core/framework/op_kernel.cc:1192] Invalid argument: Dense float feature must be a matrix.
2017-11-16 13:38:48.810825: W tensorflow/core/framework/op_kernel.cc:1192] Invalid argument: Dense float feature must be a matrix.
Traceback (most recent call last):
File "./trainer/task.py", line 162, in <module>
run(args)
File "./trainer/task.py", line 157, in run
learn_runner.run(get_experiment_fn(args), args.model_dir)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/learn_runner.py", line 218, in run
return _execute_schedule(experiment, schedule)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/learn_runner.py", line 46, in _execute_schedule
return task()
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/experiment.py", line 625, in train_and_evaluate
self.train(delay_secs=0)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/experiment.py", line 367, in train
hooks=self._train_monitors + extra_hooks)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/experiment.py", line 812, in _call_train
monitors=hooks)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/python/util/deprecation.py", line 316, in new_func
return func(*args, **kwargs)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py", line 480, in fit
loss = self._train_model(input_fn=input_fn, hooks=hooks)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py", line 1040, in _train_model
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 521, in run
run_metadata=run_metadata)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 892, in run
run_metadata=run_metadata)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 967, in run
raise six.reraise(*original_exc_info)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 952, in run
return self._sess.run(*args, **kwargs)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 1024, in run
run_metadata=run_metadata)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 827, in run
return self._sess.run(*args, **kwargs)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 889, in run
run_metadata_ptr)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1120, in _run
feed_dict_tensor, options, run_metadata)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1317, in _do_run
options, run_metadata)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1336, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Dense float feature must be a matrix.
[[Node: gbdt_1/GradientTreesPartitionExamples = GradientTreesPartitionExamples[num_dense_float_features=10, num_sparse_float_features=0, num_sparse_int_features=0, use_locking=true, _device="/job:localhost/replica:0/task:0/device:CPU:0"](ensemble_model, shuffle_batch:16, shuffle_batch:18, shuffle_batch:20, shuffle_batch:21, shuffle_batch:22, shuffle_batch:23, shuffle_batch:24, shuffle_batch:25, shuffle_batch:26, shuffle_batch:27, ^gbdt_1/TreeEnsembleStats)]]
Caused by op u'gbdt_1/GradientTreesPartitionExamples', defined at:
File "./trainer/task.py", line 162, in <module>
run(args)
File "./trainer/task.py", line 157, in run
learn_runner.run(get_experiment_fn(args), args.model_dir)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/learn_runner.py", line 218, in run
return _execute_schedule(experiment, schedule)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/learn_runner.py", line 46, in _execute_schedule
return task()
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/experiment.py", line 625, in train_and_evaluate
self.train(delay_secs=0)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/experiment.py", line 367, in train
hooks=self._train_monitors + extra_hooks)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/experiment.py", line 812, in _call_train
monitors=hooks)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/python/util/deprecation.py", line 316, in new_func
return func(*args, **kwargs)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py", line 480, in fit
loss = self._train_model(input_fn=input_fn, hooks=hooks)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py", line 986, in _train_model
model_fn_ops = self._get_train_ops(features, labels)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py", line 1202, in _get_train_ops
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py", line 1166, in _call_model_fn
model_fn_results = self._model_fn(features, labels, **kwargs)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/boosted_trees/estimator_batch/model.py", line 98, in model_builder
predictions_dict = gbdt_model.predict(mode)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py", line 463, in predict
ensemble_stamp, mode)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py", line 392, in _predict_and_return_dict
use_locking=True)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/contrib/boosted_trees/python/ops/gen_prediction_ops.py", line 117, in gradient_trees_partition_examples
use_locking=use_locking, name=name)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2956, in create_op
op_def=op_def)
File "/Users/amolsharma/anaconda/envs/oldpython/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1470, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
InvalidArgumentError (see above for traceback): Dense float feature must be a matrix.
[[Node: gbdt_1/GradientTreesPartitionExamples = GradientTreesPartitionExamples[num_dense_float_features=10, num_sparse_float_features=0, num_sparse_int_features=0, use_locking=true, _device="/job:localhost/replica:0/task:0/device:CPU:0"](ensemble_model, shuffle_batch:16, shuffle_batch:18, shuffle_batch:20, shuffle_batch:21, shuffle_batch:22, shuffle_batch:23, shuffle_batch:24, shuffle_batch:25, shuffle_batch:26, shuffle_batch:27, ^gbdt_1/TreeEnsembleStats)]]

I am guessing that the parsing spec created by tf.transform is different from what we normally get.
Can you share the output of transformed_metadata.schema.as_feature_spec()?
As a workaround, try adding this line to your input_fn after features = tf.train.shuffle_batch(...):

features = {feature_name: tf.reshape(feature_value, [-1, 1])
            for feature_name, feature_value in features.items()}
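For context on why the reshape helps (my reading of the error, not confirmed output from the asker): tf.transform feature specs typically use FixedLenFeature with shape [], so after tf.train.shuffle_batch each dense feature has shape [batch_size] (rank 1), while the boosted-trees ops require rank-2 [batch_size, 1] matrices; DNNClassifier tolerates the rank-1 tensors, which would explain why only the GBDT estimator fails. A minimal sketch of the workaround in place inside the question's _input_fn:

def _input_fn():
    min_after_dequeue = 10000
    features = read_and_decode_single_record(input_dir, num_epochs, mode)
    features = tf.train.shuffle_batch(
        tensors=features,
        batch_size=batch_size,
        min_after_dequeue=min_after_dequeue,
        capacity=(min_after_dequeue + 3) * batch_size)
    # Turn each rank-1 [batch_size] tensor into the rank-2 [batch_size, 1]
    # matrix that the boosted-trees ops expect.
    features = {feature_name: tf.reshape(feature_value, [-1, 1])
                for feature_name, feature_value in features.items()}
    target = features.pop(TARGET_FEATURE_COLUMN)
    return features, target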

Related

How to load a model with tf.keras?

I am using TensorFlow 2.0 (import tensorflow as tf).
I saw that I can load a model from TensorFlow like this:
image_model = tf.keras.applications.MobileNet(include_top=True, weights='imagenet', pooling='avg')
Now I want to be able to load models from the local machine. My issue is that I cannot find a pretrained model that works like this:
image_model = tf.keras.models.load_model('inception_v4.h5')
(I used the first h5 model from here: https://github.com/titu1994/Inception-v4/releases?fbclid=IwAR0pK_CZaB9RwA92nvawNOha6DjY5xI0vtkc9Ff5HTATcFT9x5vGYBUXt5Q)
future: <Task finished coro=<server_task.<locals>.server_work() done, defined at ....\x.py:249> exception=ValueError('No model found in config file.')>
Traceback (most recent call last):
File "....\x.py", line 280, in server_work
image_model, layers_indices = init(model_choice, layers_to_see)
File "....\x.py", line 146, in init
image_model = options[choice]()  #tf.keras.applications.MobileNetV2(include_top=True, weights='imagenet', pooling='avg')
File "....\x.py", line 119, in model_H5_model
image_model = tf.keras.models.load_model('..../inception_v4.h5')
File "...\Python\Python37\lib\site-packages\tensorflow_core\python\keras\saving\save.py", line 146, in load_model
return hdf5_format.load_model_from_hdf5(filepath, custom_objects, compile)
File "...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\saving\hdf5_format.py", line 165, in load_model_from_hdf5
raise ValueError('No model found in config file.')
ValueError: No model found in config file.
I also tried with a model like this:
image_model = tf.keras.models.load_model('model.pb')
File "....\x.py", line 280, in server_work
image_model, layers_indices = init(model_choice, layers_to_see)
File "....\x.py", line 146, in init
image_model = options[choice]()  #tf.keras.applications.MobileNetV2(include_top=True, weights='imagenet', pooling='avg')
File "....\x.py", line 119, in model_H5_model
image_model = tf.keras.models.load_model('.../model/inceptionv4.pb')
File "...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\saving\save.py", line 149, in load_model
loader_impl.parse_saved_model(filepath)
File "...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\saved_model\loader_impl.py", line 83, in parse_saved_model
constants.SAVED_MODEL_FILENAME_PB))
OSError: SavedModel file does not exist at: .../model/inceptionv4.pb/{saved_model.pbtxt|saved_model.pb}
What I also tried was something like this:
image_model = tf.keras.applications.MobileNet(include_top=True,
                                              weights='imagenet', pooling='avg')
image_model.save('test')
When trying to save, I receive this error:
File "\Python\Python37\lib\site-
packages\tensorflow_core\python\framework\func_graph.py", line 905, in
wrapper
raise e.ag_error_metadata.to_exception(e)
TypeError: in converted code:
relative to ...\Programs\Python\Python37\lib\site-packages:
tensorflow_core\python\eager\def_function.py:606 initialize_variables *
for v, init in initializer_map.items():
tensorflow_core\python\autograph\impl\api.py:438 converted_call
if not options.user_requested and
conversion.is_whitelisted_for_graph(f):
m = tf_inspect.getmodule(o)
tensorflow_core\python\util\tf_inspect.py:337 getmodule
return _inspect.getmodule(object)
pycallgraph\tracer.py:372 wrapper
if rest not in cache:
TypeError: unhashable type: 'ObjectIdentityDictionary'
tf.keras.models.load_model('test_model')
I am wondering where I can find an h5 or pb file (pretrained model) that actually works with tf.keras.models.load_model().
Based on the first comment:
future: <Task finished coro=<server_task.<locals>.server_work() done, defined at c:\Users\...\Desktop\PrivateStuff\...\...\xx.py:249> exception=TypeError("in converted code:\n relative to C:\\Users\\...\\AppData\\Local\\Programs\\Python\\Python37\\lib\\site-packages:\n\n tensorflow_core\\python\\eager\\def_function.py:606 initialize_variables *\n for v, init in initializer_map.items():\n tensorflow_core\\python\\autograph\\impl\\api.py:438 converted_call\n if not options.user_requested and conversion.is_whitelisted_for_graph(f):\n tensorflow_core\\python\\autograph\\impl\\conversion.py:352 is_whitelisted_for_graph\n m = tf_inspect.getmodule(o)\n tensorflow_core\\python\\util\\tf_inspect.py:337 getmodule\n return _inspect.getmodule(object)\n pycallgraph\\tracer.py:372 wrapper\n if rest not in cache:\n\n TypeError: unhashable type: 'ObjectIdentityDictionary'\n")>
Traceback (most recent call last):
File "c:\Users\...\Desktop\PrivateStuff\...\...\xx.py", line 280, in server_work
image_model, layers_indices = init(model_choice, layers_to_see)
File "c:\Users\...\Desktop\PrivateStuff\...\...\xx.py", line 146, in init
image_model = options[choice]() #tf.keras.applications.MobileNetV2(include_top=True, weights='imagenet', pooling='avg')
File "c:\Users\...\Desktop\PrivateStuff\...\...\xx.py", line 55, in model_VGG16
image_model.save(r'c:\test')
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\engine\network.py", line 975, in save
signatures, options)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\saving\save.py", line 115, in save_model
signatures, options)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\saving\saved_model\save.py", line 74, in save
save_lib.save(model, filepath, signatures, options)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\saved_model\save.py", line 870, in save
checkpoint_graph_view)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\saved_model\signature_serialization.py", line 64, in find_function_to_export
functions = saveable_view.list_functions(saveable_view.root)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\saved_model\save.py", line 141, in list_functions
self._serialization_cache)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\engine\base_layer.py", line 2422, in _list_functions_for_serialization
.list_functions_for_serialization(serialization_cache))
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\saving\saved_model\base_serialization.py", line 91, in list_functions_for_serialization
fns = self.functions_to_serialize(serialization_cache)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\saving\saved_model\layer_serialization.py", line 79, in
functions_to_serialize
serialization_cache).functions_to_serialize)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\saving\saved_model\layer_serialization.py", line 94, in
_get_serialized_attributes
serialization_cache)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\saving\saved_model\model_serialization.py", line 47, in
_get_serialized_attributes_internal
default_signature = save_impl.default_save_signature(self.obj)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\keras\saving\saved_model\save_impl.py", line 206, in default_save_signature
fn.get_concrete_function()
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\eager\def_function.py", line 777, in get_concrete_function
self._initialize_uninitialized_variables(initializer_map)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\eager\def_function.py", line 616, in _initialize_uninitialized_variables
return initialize_variables.get_concrete_function()()
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\eager\function.py", line 1891, in get_concrete_function
graph_function, args, kwargs = self._maybe_define_function(args, kwargs)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\eager\function.py", line 2150, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\eager\function.py", line 2041, in _create_graph_function
capture_by_value=self._capture_by_value),
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\framework\func_graph.py", line 915, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow_core\python\framework\func_graph.py", line 905, in wrapper
raise e.ag_error_metadata.to_exception(e)
TypeError: in converted code:
relative to C:\Users\...\AppData\Local\Programs\Python\Python37\lib\site-packages:
tensorflow_core\python\eager\def_function.py:606 initialize_variables *
for v, init in initializer_map.items():
tensorflow_core\python\autograph\impl\api.py:438 converted_call
if not options.user_requested and conversion.is_whitelisted_for_graph(f):
tensorflow_core\python\autograph\impl\conversion.py:352 is_whitelisted_for_graph
m = tf_inspect.getmodule(o)
tensorflow_core\python\util\tf_inspect.py:337 getmodule
return _inspect.getmodule(object)
pycallgraph\tracer.py:372 wrapper
if rest not in cache:
TypeError: unhashable type: 'ObjectIdentityDictionary'
I copied your code to load MobileNet. It works if you provide a full path to save the model; see the code below. Note that when you load a model with weights='imagenet', the weights are those of the model trained on the ImageNet data set, so you don't need to load any weights. If you want to load weights for the model pre-trained on some other data set, first instantiate the model as shown below, then load the specific weights using model.load_weights.
image_model = tf.keras.applications.MobileNet(include_top=True,
                                              weights='imagenet', pooling='avg')
image_model.save(r'c:\test')
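A round trip based on that answer (a sketch; the c:\test path comes from the answer, and the load_weights usage is general tf.keras behavior rather than something shown in the thread):

import tensorflow as tf

# Instantiate with ImageNet weights, save to a full path, then reload.
image_model = tf.keras.applications.MobileNet(include_top=True,
                                              weights='imagenet', pooling='avg')
image_model.save(r'c:\test')                      # writes a SavedModel directory
restored = tf.keras.models.load_model(r'c:\test')

# To use weights trained on some other data set, instantiate first and then
# load them explicitly (hypothetical weights file name):
# image_model.load_weights(r'c:\my_mobilenet_weights.h5')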

How can I view tensor values that cause a TensorFlow crash?

I am trying to get names of Tensors using a Tensor hashtable, but I keep getting the default value because the key was not found in the table. I have no idea why the key I am looking for is not in the table, and the problem is that I have no way to actually see which key is causing the error.
table = tf.contrib.lookup.HashTable(
    tf.contrib.lookup.KeyValueTensorInitializer(tensor_ids, indexs), -1)
pids_toget = []
for i in range(FLAGS.batch_size):
    name = tf.gather(filenames, i)
    index_toget = table.lookup(name)
    pids_toget.append(index_toget)
tensor_vals = tf.Variable(np_clinical, dtype=tf.float32)
clinic_features_tensor = tf.gather(tensor_vals, pids_toget)
The error I get:
Traceback (most recent call last):
File "/home/ubuntu/scipts/01_training/xClasses/bazel-bin/inception/imagenet_train.runfiles/inception/inception/imagenet_train.py", line 50, in <module>
tf.app.run()
File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/tensorflow/python/platform/app.py", line 40, in run
_run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)
File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/absl/app.py", line 299, in run
_run_main(main, args)
File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/absl/app.py", line 250, in _run_main
sys.exit(main(argv))
File "/home/ubuntu/scipts/01_training/xClasses/bazel-bin/inception/imagenet_train.runfiles/inception/inception/imagenet_train.py", line 46, in main
inception_train.train(dataset, pids, np_clinical)
File "/home/ubuntu/scipts/01_training/xClasses/bazel-bin/inception/imagenet_train.runfiles/inception/inception/inception_train.py", line 373, in train
_, loss_value = sess.run([train_op, loss])
File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 950, in run
run_metadata_ptr)
File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 1173, in _run
feed_dict_tensor, options, run_metadata)
File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 1350, in _do_run
run_metadata)
File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 1370, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: 2 root error(s) found.
(0) Invalid argument: indices[348] = -1 is not in [0, 891)
[[node GatherV2_400 (defined at /scipts/01_training/xClasses/bazel-bin/inception/imagenet_train.runfiles/inception/inception/inception_train.py:253) ]]
[[tower_7/mixed_35x35x288a/branch_pool/Conv/BatchNorm/AssignMovingAvg_1/AssignSub/_6375]]
(1) Invalid argument: indices[348] = -1 is not in [0, 891)
[[node GatherV2_400 (defined at /scipts/01_training/xClasses/bazel-bin/inception/imagenet_train.runfiles/inception/inception/inception_train.py:253) ]]
Input Source operations connected to node GatherV2_400:
Variable/read (defined at /scipts/01_training/xClasses/bazel-bin/inception/imagenet_train.runfiles/inception/inception/inception_train.py:251)
Input Source operations connected to node GatherV2_400:
Variable/read (defined at /scipts/01_training/xClasses/bazel-bin/inception/imagenet_train.runfiles/inception/inception/inception_train.py:251)
Original stack trace for 'GatherV2_400':
File "/scipts/01_training/xClasses/bazel-bin/inception/imagenet_train.runfiles/inception/inception/imagenet_train.py", line 50, in <module>
tf.app.run()
File "/anaconda3/lib/python3.7/site-packages/tensorflow/python/platform/app.py", line 40, in run
_run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)
File "/anaconda3/lib/python3.7/site-packages/absl/app.py", line 299, in run
_run_main(main, args)
File "/anaconda3/lib/python3.7/site-packages/absl/app.py", line 250, in _run_main
sys.exit(main(argv))
File "/scipts/01_training/xClasses/bazel-bin/inception/imagenet_train.runfiles/inception/inception/imagenet_train.py", line 46, in main
inception_train.train(dataset, pids, np_clinical)
File "/scipts/01_training/xClasses/bazel-bin/inception/imagenet_train.runfiles/inception/inception/inception_train.py", line 253, in train
clinic_features_tensor = tf.gather(tensor_vals, pids_toget)
File "/anaconda3/lib/python3.7/site-packages/tensorflow/python/util/dispatch.py", line 180, in wrapper
return target(*args, **kwargs)
File "/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/array_ops.py", line 3475, in gather
return gen_array_ops.gather_v2(params, indices, axis, name=name)
File "/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/gen_array_ops.py", line 4097, in gather_v2
batch_dims=batch_dims, name=name)
File "/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/op_def_library.py", line 788, in _apply_op_helper
op_def=op_def)
File "/anaconda3/lib/python3.7/site-packages/tensorflow/python/util/deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 3616, in create_op
op_def=op_def)
File "/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 2005, in __init__
self._traceback = tf_stack.extract_stack()
I run the session here. I have tried using printnames = tf.Print(filenames) to print the filenames for each batch, but it does not work for the batch that crashes.
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
tbinit = tf.initializers.tables_initializer()
# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
sess = tf.Session(config=tf.ConfigProto(
    allow_soft_placement=True,
    log_device_placement=FLAGS.log_device_placement))
sess.run(init)
sess.run(tbinit)

if FLAGS.pretrained_model_checkpoint_path:
    try:
        assert tf.gfile.Exists(FLAGS.pretrained_model_checkpoint_path)
        variables_to_restore = slim.variables.get_variables_to_retore_except_logits()
        restorer = tf.train.Saver(variables_to_restore)
        restorer.restore(sess, FLAGS.pretrained_model_checkpoint_path)
        print('%s: Pre-trained model restored from %s' %
              (datetime.now(), FLAGS.pretrained_model_checkpoint_path))
    except:
        #restorer = tf.train.import_meta_graph(FLAGS.pretrained_model_checkpoint_path + '.meta')
        variables_to_restore = slim.variables.get_variables_to_retore_except_logits()
        restorer = tf.train.Saver(variables_to_restore)
        restorer.restore(sess, FLAGS.pretrained_model_checkpoint_path)

# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.summary.FileWriter(
    FLAGS.train_dir,
    graph=sess.graph)

for step in range(FLAGS.max_steps):
    start_time = time.time()
    p2, _, loss_value = sess.run([printnames, train_op, loss])
    print(p2)
    duration = time.time() - start_time
I have tried using try/except statements, but I cannot get those to produce the names I need either. Is it even possible? It seems like sess.run just crashes.
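One way to surface the offending key (a sketch of an approach, not an answer from the original thread; it assumes table, filenames, and tensor_vals as defined in the question): look up the whole batch at once and assert that no lookup returned the default, attaching the filenames to the assertion so they are printed when the check fails.

# TF 1.x graph-mode sketch.
indices = table.lookup(filenames)  # shape [batch_size]; -1 where the key is missing

# Fail fast with the offending filenames in the error message instead of
# letting tf.gather crash later with "indices[i] = -1 is not in [0, N)".
check = tf.Assert(
    tf.reduce_all(tf.greater_equal(indices, 0)),
    ["missing keys in batch:", filenames, "looked-up indices:", indices],
    summarize=100)
with tf.control_dependencies([check]):
    clinic_features_tensor = tf.gather(tensor_vals, indices)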

Different 'RuntimeError: Attempted to use a closed Session.'

I am programming a chatbot with 'extended' functionalities, and I keep ending up with this error. I know that many other people have already asked about it, but my code is entirely different. This started happening when I tried to re-train the model with a new vocabulary. Warning: I found the code online and modified it (the final part, which I don't show here).
I tried changing the directory, deleting all the files that it created (the model and the data), removing the model.load("...") call, and other things, and I am really desperate.
Some other info:
I use a Conda virtual env with Python 3.6
I work on the C drive but use another hard disk to store things
Some of the imported modules need to be installed with pip
This is part of the code:
# coding: utf-8
import time, pickle, tflearn, nltk, tensorflow, json, random, numpy, os, platform, sys, pyttsx3, speech_recognition, winsound, webbrowser
from nltk.stem.lancaster import LancasterStemmer

stemmer = LancasterStemmer()

with open("intents.json") as file:
    data = json.load(file)

file = open("configuration.settings", "r", encoding='utf-8')
leggi = file.readlines()
file.close()
def cleaner():
    try:
        if platform.system().lower() == "linux" or platform.system().lower() == "darwin":
            os.system("clear")
        elif platform.system().lower() == "windows":
            os.system("cls")
    except:
        pass
try:
    with open("data.pickle", "rb") as f:
        words, labels, training, output = pickle.load(f)
except:
    words = []
    labels = []
    docs_x = []
    docs_y = []
    for intent in data["intents"]:
        for pattern in intent["patterns"]:
            wrds = nltk.word_tokenize(pattern)
            words.extend(wrds)
            docs_x.append(wrds)
            docs_y.append(intent["tag"])
        if intent["tag"] not in labels:
            labels.append(intent["tag"])
    words = [stemmer.stem(w.lower()) for w in words if w not in "?"]
    words = sorted(list(set(words)))
    labels = sorted(labels)
    training = []
    output = []
    out_empty = [0 for _ in range(len(labels))]
    for x, doc in enumerate(docs_x):
        bag = []
        wrds = [stemmer.stem(w) for w in doc]
        for w in words:
            if w in wrds:
                bag.append(1)
            else:
                bag.append(0)
        output_row = out_empty[:]
        output_row[labels.index(docs_y[x])] = 1
        training.append(bag)
        output.append(output_row)
    training = numpy.array(training)
    output = numpy.array(output)
    with open("data.pickle", "wb") as f:
        pickle.dump((words, labels, training, output), f)
tensorflow.reset_default_graph()

net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)

model = tflearn.DNN(net)
try:
    model.load("cbot.tflearn")
except:
    model.fit(training, output, n_epoch=1500, batch_size=8, show_metric=True)
    model.save("cbot.tflearn")
def bag_of_words(s, words):
    bag = [0 for _ in range(len(words))]
    s_words = nltk.word_tokenize(s)
    s_words = [stemmer.stem(word.lower()) for word in s_words]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return numpy.array(bag)
### OTHER CODE ###
[...]
And this is the complete traceback:
Instructions for updating:
Use standard file APIs to check for files with this prefix.
---------------------------------
Run id: OK6TM7
Log directory: /tmp/tflearn_logs/
---------------------------------
Training samples: 57
Validation samples: 0
--
Traceback (most recent call last):
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\client\session.py", line 1356, in _do_call
return fn(*args)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\client\session.py", line 1341, in _run_fn
options, feed_dict, fetch_list, target_list, run_metadata)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\client\session.py", line 1429, in _call_tf_sessionrun
run_metadata)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Assign requires shapes of both tensors to match. lhs shape= [8,15] rhs shape= [8,12]
[[{{node save_1/Assign_16}}]]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\training\saver.py", line 1286, in restore
{self.saver_def.filename_tensor_name: save_path})
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\client\session.py", line 950, in run
run_metadata_ptr)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\client\session.py", line 1173, in _run
feed_dict_tensor, options, run_metadata)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\client\session.py", line 1350, in _do_run
run_metadata)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\client\session.py", line 1370, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Assign requires shapes of both tensors to match. lhs shape= [8,15] rhs shape= [8,12]
[[node save_1/Assign_16 (defined at S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tflearn\helpers\trainer.py:147) ]]
Errors may have originated from an input operation.
Input Source operations connected to node save_1/Assign_16:
FullyConnected_2/W (defined at S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tflearn\variables.py:65)
Original stack trace for 'save_1/Assign_16':
File "D:\\cbot-tts_stt.py", line 94, in <module>
model = tflearn.DNN(net)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tflearn\models\dnn.py", line 65, in __init__
best_val_accuracy=best_val_accuracy)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tflearn\helpers\trainer.py", line 147, in __init__
allow_empty=True)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\training\saver.py", line 825, in __init__
self.build()
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\training\saver.py", line 837, in build
self._build(self._filename, build_save=True, build_restore=True)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\training\saver.py", line 875, in _build
build_restore=build_restore)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\training\saver.py", line 508, in _build_internal
restore_sequentially, reshape)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\training\saver.py", line 350, in _AddRestoreOps
assign_ops.append(saveable.restore(saveable_tensors, shapes))
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\training\saving\saveable_object_util.py", line 72, in restore
self.op.get_shape().is_fully_defined())
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\ops\state_ops.py", line 227, in assign
validate_shape=validate_shape)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\ops\gen_state_ops.py", line 69, in assign
use_locking=use_locking, name=name)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 788, in _apply_op_helper
op_def=op_def)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\util\deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\framework\ops.py", line 3616, in create_op
op_def=op_def)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\framework\ops.py", line 2005, in __init__
self._traceback = tf_stack.extract_stack()
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "D:\\cbot-tts_stt.py", line 97, in <module>
model.load("cbot.tflearn")
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tflearn\models\dnn.py", line 308, in load
self.trainer.restore(model_file, weights_only, **optargs)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tflearn\helpers\trainer.py", line 490, in restore
self.restorer.restore(self.session, model_file)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\training\saver.py", line 1322, in restore
err, "a mismatch between the current graph and the graph")
tensorflow.python.framework.errors_impl.InvalidArgumentError: Restoring from checkpoint failed. This is most likely due to a mismatch between the current graph and the graph from the checkpoint. Please ensure that you have not altered the graph expected based on the checkpoint. Original error:
Assign requires shapes of both tensors to match. lhs shape= [8,15] rhs shape= [8,12]
[[node save_1/Assign_16 (defined at S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tflearn\helpers\trainer.py:147) ]]
Errors may have originated from an input operation.
Input Source operations connected to node save_1/Assign_16:
FullyConnected_2/W (defined at S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tflearn\variables.py:65)
Original stack trace for 'save_1/Assign_16':
File "D:\\cbot-tts_stt.py", line 94, in <module>
model = tflearn.DNN(net)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tflearn\models\dnn.py", line 65, in __init__
best_val_accuracy=best_val_accuracy)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tflearn\helpers\trainer.py", line 147, in __init__
allow_empty=True)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\training\saver.py", line 825, in __init__
self.build()
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\training\saver.py", line 837, in build
self._build(self._filename, build_save=True, build_restore=True)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\training\saver.py", line 875, in _build
build_restore=build_restore)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\training\saver.py", line 508, in _build_internal
restore_sequentially, reshape)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\training\saver.py", line 350, in _AddRestoreOps
assign_ops.append(saveable.restore(saveable_tensors, shapes))
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\training\saving\saveable_object_util.py", line 72, in restore
self.op.get_shape().is_fully_defined())
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\ops\state_ops.py", line 227, in assign
validate_shape=validate_shape)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\ops\gen_state_ops.py", line 69, in assign
use_locking=use_locking, name=name)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 788, in _apply_op_helper
op_def=op_def)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\util\deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\framework\ops.py", line 3616, in create_op
op_def=op_def)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\framework\ops.py", line 2005, in __init__
self._traceback = tf_stack.extract_stack()
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "D:\\cbot-tts_stt.py", line 99, in <module>
model.fit(training, output, n_epoch=1500, batch_size=8, show_metric=True)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tflearn\models\dnn.py", line 216, in fit
callbacks=callbacks)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tflearn\helpers\trainer.py", line 339, in fit
show_metric)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tflearn\helpers\trainer.py", line 816, in _train
tflearn.is_training(True, session=self.session)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tflearn\config.py", line 95, in is_training
tf.get_collection('is_training_ops')[0].eval(session=session)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\framework\ops.py", line 731, in eval
return _eval_using_default_session(self, feed_dict, self.graph, session)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\framework\ops.py", line 5579, in _eval_using_default_session
return session.run(tensors, feed_dict)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\client\session.py", line 950, in run
run_metadata_ptr)
File "S:\WindowsPrograms\Anaconda3\envs\ptg\lib\site-packages\tensorflow\python\client\session.py", line 1096, in _run
raise RuntimeError('Attempted to use a closed Session.')
RuntimeError: Attempted to use a closed Session.
Thank you for reading this, and I am really sorry for some errors in the text.
Thanks for giving me your time!
The traceback has all the information you need, but you have to read it from top to bottom, because the lowest reported error is not always the actual error message.
Assign requires shapes of both tensors to match. lhs shape= [8,15] rhs shape= [8,12]
and further down:
Restoring from checkpoint failed. This is most likely due to a mismatch between the current graph and the graph from the checkpoint. Please ensure that you have not altered the graph expected based on the checkpoint.
You're running the model in a "dirty" folder (it contains results from previous attempts with a different model). Delete your old checkpoints or change the training directory.
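A minimal sketch of that cleanup (the file names come from the question's code; treating these as the only stale artifacts is an assumption): delete the cached data.pickle and the old cbot.tflearn checkpoint files before re-training, so model.load() cannot restore a graph built for the old vocabulary size.

import glob
import os

# Remove the cached training data and the tflearn checkpoint files written
# by earlier runs; tflearn/TF saves 'cbot.tflearn.*' files plus 'checkpoint'.
for stale in ["data.pickle", "checkpoint"] + glob.glob("cbot.tflearn*"):
    if os.path.exists(stale):
        os.remove(stale)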

Tensorflow handling arrays as feature_columns

I'm trying to build a classifier which takes an array of floats as an input.
Despite following the steps here and here to include an array as the input feature, I keep getting a TypeError whereby the estimator doesn't recognise the shape of the input.
How do you include an array as a feature for an estimator? Can you simply pass in a numeric_column with an appropriate shape, as expected from the docs?
Sample code here:
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import feature_column

z = [[1, 2], [3, 4]]
df = pd.DataFrame(z)
df = df.apply(lambda x: np.array(x), axis=1)

feature_columns = []
for col in ['feature']:
    feature_columns.append(feature_column.numeric_column(col, shape=(2, )))

df = pd.DataFrame(df)
df.columns = ['feature']
df['target'] = 1
y_train = df.pop('target')

def make_input_fn(X, y, n_epochs=None, shuffle=True):
    def input_fn():
        dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))
        if shuffle:
            dataset = dataset.shuffle(20)
        # For training, cycle thru dataset as many times as needed (n_epochs=None).
        dataset = dataset.repeat(n_epochs)
        # In-memory training doesn't use batching.
        dataset = dataset.batch(5)
        return dataset
    return input_fn

train_input_fn = make_input_fn(df, y_train)
linear_est = tf.estimator.LinearRegressor(feature_columns)
linear_est.train(train_input_fn, max_steps=100)
which gives a stack trace of:
Traceback (most recent call last):
File "/Applications/PyCharm.app/Contents/helpers/pydev/_pydevd_bundle/pydevd_exec2.py", line 3, in Exec
exec(exp, global_vars, local_vars)
File "<string>", line 39, in <module>
File "/Users/nicholashilton/.virtualenvs/fantifi/lib/python3.7/site-packages/tensorflow_estimator/python/estimator/estimator.py", line 359, in train
loss = self._train_model(input_fn, hooks, saving_listeners)
File "/Users/nicholashilton/.virtualenvs/fantifi/lib/python3.7/site-packages/tensorflow_estimator/python/estimator/estimator.py", line 1139, in _train_model
return self._train_model_default(input_fn, hooks, saving_listeners)
File "/Users/nicholashilton/.virtualenvs/fantifi/lib/python3.7/site-packages/tensorflow_estimator/python/estimator/estimator.py", line 1166, in _train_model_default
input_fn, ModeKeys.TRAIN))
File "/Users/nicholashilton/.virtualenvs/fantifi/lib/python3.7/site-packages/tensorflow_estimator/python/estimator/estimator.py", line 1003, in _get_features_and_labels_from_input_fn
self._call_input_fn(input_fn, mode))
File "/Users/nicholashilton/.virtualenvs/fantifi/lib/python3.7/site-packages/tensorflow_estimator/python/estimator/estimator.py", line 1094, in _call_input_fn
return input_fn(**kwargs)
File "<string>", line 23, in input_fn
File "/Users/nicholashilton/.virtualenvs/fantifi/lib/python3.7/site-packages/tensorflow/python/data/ops/dataset_ops.py", line 279, in from_tensor_slices
return TensorSliceDataset(tensors)
File "/Users/nicholashilton/.virtualenvs/fantifi/lib/python3.7/site-packages/tensorflow/python/data/ops/dataset_ops.py", line 2091, in __init__
for i, t in enumerate(nest.flatten(tensors))
File "/Users/nicholashilton/.virtualenvs/fantifi/lib/python3.7/site-packages/tensorflow/python/data/ops/dataset_ops.py", line 2091, in <listcomp>
for i, t in enumerate(nest.flatten(tensors))
File "/Users/nicholashilton/.virtualenvs/fantifi/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 1050, in convert_to_tensor
return convert_to_tensor_v2(value, dtype, preferred_dtype, name)
File "/Users/nicholashilton/.virtualenvs/fantifi/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 1108, in convert_to_tensor_v2
as_ref=False)
File "/Users/nicholashilton/.virtualenvs/fantifi/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 1186, in internal_convert_to_tensor
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
File "/Users/nicholashilton/.virtualenvs/fantifi/lib/python3.7/site-packages/tensorflow/python/framework/constant_op.py", line 304, in _constant_tensor_conversion_function
return constant(v, dtype=dtype, name=name)
File "/Users/nicholashilton/.virtualenvs/fantifi/lib/python3.7/site-packages/tensorflow/python/framework/constant_op.py", line 245, in constant
allow_broadcast=True)
File "/Users/nicholashilton/.virtualenvs/fantifi/lib/python3.7/site-packages/tensorflow/python/framework/constant_op.py", line 283, in _constant_impl
allow_broadcast=allow_broadcast))
File "/Users/nicholashilton/.virtualenvs/fantifi/lib/python3.7/site-packages/tensorflow/python/framework/tensor_util.py", line 574, in make_tensor_proto
append_fn(tensor_proto, proto_values)
File "tensorflow/python/framework/fast_tensor_util.pyx", line 127, in tensorflow.python.framework.fast_tensor_util.AppendObjectArrayToTensorProto
File "/Users/nicholashilton/.virtualenvs/fantifi/lib/python3.7/site-packages/tensorflow/python/util/compat.py", line 61, in as_bytes
(bytes_or_text,))
TypeError: Expected binary or unicode string, got array([1, 2])
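The immediate failure, visible in the last frame, is that the pandas column holds object-dtype numpy arrays, which from_tensor_slices cannot convert to a tensor. A sketch of one workaround (my assumption, not an answer from the thread): hand tf.data a rectangular float array directly, keeping the numeric_column shape (2,).

import numpy as np
import tensorflow as tf
from tensorflow import feature_column

# Rectangular float matrix instead of a column of object-dtype arrays.
z = np.array([[1., 2.], [3., 4.]], dtype=np.float32)   # shape (2, 2)
y = np.array([1., 1.], dtype=np.float32)

feature_columns = [feature_column.numeric_column('feature', shape=(2,))]

def input_fn():
    # Each element is ({'feature': <float32 [2]>}, <float32 scalar>).
    dataset = tf.data.Dataset.from_tensor_slices(({'feature': z}, y))
    return dataset.repeat().batch(5)

linear_est = tf.estimator.LinearRegressor(feature_columns)
linear_est.train(input_fn, max_steps=100)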

Output files returned after training an alexnet model...?

The code is written in Python 3.5.x.
Please try to keep the answer simple for a 3rd-year Computer Science student.
The output files from train_model.py seem to be model.meta files, but test_model.py is asking for a .model file. The tutorial author has a .model file as well; I can't understand why I am getting a file with .model.meta.
I am trying to play GTA San Andreas through Python, or more precisely, the car in GTA is driven by the model.
It takes screen frames as input, and the keys I pressed were recorded during training. This training data is used to train the model.
Code for training the model:

import numpy as np
from alexnet import alexnet

WIDTH = 80
HEIGHT = 60
LR = 1e-3
EPOCHS = 8
MODEL_NAME = 'pygta_sa-car-{}-{}-{}-epochs.model'.format(LR, 'alextnetv2', EPOCHS)

model = alexnet(WIDTH, HEIGHT, LR)

train_data = np.load('training_data_v2.npy')
train = train_data[:-500]
test = train_data[-500:]

X = np.array([i[0] for i in train]).reshape(-1, WIDTH, HEIGHT, 1)
Y = [i[1] for i in train]
test_x = np.array([i[0] for i in test]).reshape(-1, WIDTH, HEIGHT, 1)
test_y = [i[1] for i in test]

model.fit({'input': X}, {'targets': Y}, n_epoch=EPOCHS,
          validation_set=({'input': test_x}, {'targets': test_y}),
          snapshot_step=500, show_metric=True, run_id=MODEL_NAME)

# tensorboard --logdir=foo:F:\play_gta_sa\log

model.save(MODEL_NAME)
Training completes successfully and returns files. (Screenshots of my returned files and of the files shown in the tutorial video, sentdex's, are omitted.)
Content of the checkpoint file:
model_checkpoint_path: "F:\play_gta_sa\pygta_sa-car-0.001-alextnetv2-8-epochs.model"
all_model_checkpoint_paths: "F:\play_gta_sa\pygta_sa-car-0.001-alextnetv2-8-epochs.model"
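For context (general TF 1.x Saver behavior, not something stated in the thread): tflearn's model.save('NAME.model') goes through tf.train.Saver, which writes several files sharing the NAME.model prefix rather than a single NAME.model file, so seeing NAME.model.meta on disk is expected.

# Files written by tflearn's model.save('foo.model') via tf.train.Saver (V2):
#
#   foo.model.meta                  -> serialized graph definition
#   foo.model.index                 -> index of the saved variables
#   foo.model.data-00000-of-00001   -> the variable values
#   checkpoint                      -> text file naming the latest prefix
#
# model.load('foo.model') takes the *prefix*, not a real file name, so it
# succeeds as long as the prefix matches those files exactly.
model.load('foo.model')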
Code for testing the model on the game:
import numpy as np
import cv2
import time
from grabscreen import grab_screen
from getkeys import key_check
from directkeys import PressKey, ReleaseKey, W, A, S, D
from alexnet import alexnet

WIDTH = 80
HEIGHT = 60
LR = 1e-3
EPOCHS = 8
MODEL_NAME = 'pygta_sa-car-{}-{}-{}-epochs.model'.format(LR, 'alexnetv2', EPOCHS)

def straight():
    PressKey(W)
    ReleaseKey(A)
    ReleaseKey(D)

def left():
    PressKey(W)
    PressKey(A)
    ReleaseKey(D)

def right():
    PressKey(W)
    PressKey(D)
    ReleaseKey(A)

model = alexnet(WIDTH, HEIGHT, LR)
model.load(MODEL_NAME)

def main():
    for i in list(range(10))[::-1]:
        print(i + 1)
        time.sleep(1)
    last_time = time.time()
    paused = False
    while True:
        if not paused:
            screen = grab_screen(region=(0, 40, 800, 640))
            screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
            screen = cv2.resize(screen, (80, 60))
            print('Frame took {} seconds'.format(time.time() - last_time))
            last_time = time.time()
            moves = list(np.around(model.predict([screen.reshape(80, 60, 1)])[0]))
            print(moves)
            if moves == [1, 0, 0]:
                left()
            elif moves == [0, 1, 0]:
                straight()
            elif moves == [0, 0, 1]:
                right()
        keys = key_check()
        # p pauses game and can get annoying.
        if 'T' in keys:
            if paused:
                paused = False
                time.sleep(1)
            else:
                paused = True
                ReleaseKey(A)
                ReleaseKey(W)
                ReleaseKey(D)
                time.sleep(1)

main()
The error message on running the test model:
Traceback (most recent call last):
File "C:\Program Files\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1039, in _do_call
return fn(*args)
File "C:\Program Files\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1021, in _run_fn
status, run_metadata)
File "C:\Program Files\Python35\lib\contextlib.py", line 66, in __exit__
next(self.gen)
File "C:\Program Files\Python35\lib\site-packages\tensorflow\python\framework\errors_impl.py", line 466, in raise_exception_on_not_ok_status
pywrap_tensorflow.TF_GetCode(status))
tensorflow.python.framework.errors_impl.NotFoundError: Unsuccessful TensorSliceReader constructor: Failed to find any matching files for F:\play_gta_sa\pygta_sa-car-0.001-alexnetv2-8-epochs.model
[[Node: save_1/RestoreV2 = RestoreV2[dtypes=[DT_FLOAT], _device="/job:localhost/replica:0/task:0/cpu:0"](_recv_save_1/Const_0, save_1/RestoreV2/tensor_names, save_1/RestoreV2/shape_and_slices)]]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "F:\play_gta_sa\test_model.py", line 33, in <module>
model.load(MODEL_NAME)
File "C:\Program Files\Python35\lib\site-packages\tflearn\models\dnn.py", line 282, in load
self.trainer.restore(model_file, weights_only, **optargs)
File "C:\Program Files\Python35\lib\site-packages\tflearn\helpers\trainer.py", line 452, in restore
self.restorer.restore(self.session, model_file)
File "C:\Program Files\Python35\lib\site-packages\tensorflow\python\training\saver.py", line 1457, in restore
{self.saver_def.filename_tensor_name: save_path})
File "C:\Program Files\Python35\lib\site-packages\tensorflow\python\client\session.py", line 778, in run
run_metadata_ptr)
File "C:\Program Files\Python35\lib\site-packages\tensorflow\python\client\session.py", line 982, in _run
feed_dict_string, options, run_metadata)
File "C:\Program Files\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1032, in _do_run
target_list, options, run_metadata)
File "C:\Program Files\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1052, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.NotFoundError: Unsuccessful TensorSliceReader constructor: Failed to find any matching files for F:\play_gta_sa\pygta_sa-car-0.001-alexnetv2-8-epochs.model
[[Node: save_1/RestoreV2 = RestoreV2[dtypes=[DT_FLOAT], _device="/job:localhost/replica:0/task:0/cpu:0"](_recv_save_1/Const_0, save_1/RestoreV2/tensor_names, save_1/RestoreV2/shape_and_slices)]]
Caused by op 'save_1/RestoreV2', defined at:
File "<string>", line 1, in <module>
File "C:\Program Files\Python35\lib\idlelib\run.py", line 124, in main
ret = method(*args, **kwargs)
File "C:\Program Files\Python35\lib\idlelib\run.py", line 351, in runcode
exec(code, self.locals)
File "F:\play_gta_sa\test_model.py", line 32, in <module>
model = alexnet(WIDTH, HEIGHT, LR)
File "F:\play_gta_sa\alexnet.py", line 40, in alexnet
max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')
File "C:\Program Files\Python35\lib\site-packages\tflearn\models\dnn.py", line 64, in __init__
best_val_accuracy=best_val_accuracy)
File "C:\Program Files\Python35\lib\site-packages\tflearn\helpers\trainer.py", line 147, in __init__
allow_empty=True)
File "C:\Program Files\Python35\lib\site-packages\tensorflow\python\training\saver.py", line 1056, in __init__
self.build()
File "C:\Program Files\Python35\lib\site-packages\tensorflow\python\training\saver.py", line 1086, in build
restore_sequentially=self._restore_sequentially)
File "C:\Program Files\Python35\lib\site-packages\tensorflow\python\training\saver.py", line 691, in build
restore_sequentially, reshape)
File "C:\Program Files\Python35\lib\site-packages\tensorflow\python\training\saver.py", line 407, in _AddRestoreOps
tensors = self.restore_op(filename_tensor, saveable, preferred_shard)
File "C:\Program Files\Python35\lib\site-packages\tensorflow\python\training\saver.py", line 247, in restore_op
[spec.tensor.dtype])[0])
File "C:\Program Files\Python35\lib\site-packages\tensorflow\python\ops\gen_io_ops.py", line 669, in restore_v2
dtypes=dtypes, name=name)
File "C:\Program Files\Python35\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 768, in apply_op
op_def=op_def)
File "C:\Program Files\Python35\lib\site-packages\tensorflow\python\framework\ops.py", line 2336, in create_op
original_op=self._default_original_op, op_def=op_def)
File "C:\Program Files\Python35\lib\site-packages\tensorflow\python\framework\ops.py", line 1228, in __init__
self._traceback = _extract_stack()
NotFoundError (see above for traceback): Unsuccessful TensorSliceReader constructor: Failed to find any matching files for F:\play_gta_sa\pygta_sa-car-0.001-alexnetv2-8-epochs.model
[[Node: save_1/RestoreV2 = RestoreV2[dtypes=[DT_FLOAT], _device="/job:localhost/replica:0/task:0/cpu:0"](_recv_save_1/Const_0, save_1/RestoreV2/tensor_names, save_1/RestoreV2/shape_and_slices)]]
Thanks for the update.
Edit:
Try adding tf.reset_default_graph() before loading the model. That is:

import tensorflow as tf

tf.reset_default_graph()
model = alexnet(WIDTH, HEIGHT, LR)
model.load(MODEL_NAME)
The crux of this error is:
Unsuccessful TensorSliceReader constructor: Failed to find any matching files for F:\play_gta_sa\pygta_sa-car-0.001-alexnetv2-8-epochs.model
Okay, so a typical "file not found." Are we confident that we have this file? Maybe, but if it was there, it would have been found. Our first guess should be that we've typoed or otherwise made a mistake. Let's look at your model files:
For training the model, you have:
MODEL_NAME = 'pygta_sa-car-{}-{}-{}-epochs.model'.format(LR, 'alextnetv2', EPOCHS)
For testing the model, you have:
MODEL_NAME = 'pygta_sa-car-{}-{}-{}-epochs.model'.format(LR, 'alexnetv2', EPOCHS)
Do you see the difference yet? There's a typo: alextnetv2 vs alexnetv2.
Fix that, and the file will at least be found.
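One way to make this class of typo impossible (a suggestion of mine, not part of the original answer): define the name once in a small shared module, say a hypothetical model_config.py, and import it from both scripts.

# model_config.py (hypothetical shared module)
LR = 1e-3
EPOCHS = 8
MODEL_NAME = 'pygta_sa-car-{}-{}-{}-epochs.model'.format(LR, 'alexnetv2', EPOCHS)

Both train_model.py and test_model.py would then start with `from model_config import MODEL_NAME, LR, EPOCHS`, so the two scripts can never disagree about the checkpoint prefix.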
