In TensorFlow, I am running into an error when I try to get the output of my last layer by running it in a session. I am running Python 3.8.5 and TensorFlow 1.15 (because of some hardware issues).
Here is my code:
import warnings
warnings.filterwarnings("ignore")

import numpy as np
import tensorflow as tf


def main():
    x_inputs = tf.Variable(np.array([[1, 2, 3], [1, 2, 3]]), dtype=tf.float32)
    y_outputs = tf.Variable(np.array([[1, 2, 4], [2, 3, 4]]), dtype=tf.float32)
    model = create_model(x_inputs, y_outputs)


def create_model(x: np.array, y: np.array):
    var_init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(var_init)
        x_val = sess.run(x).flatten()
        y_val = sess.run(y).flatten()

        # neural network architecture
        input_layer = tf.layers.dense(x, units=len(x_val), activation=tf.nn.sigmoid)
        hidden_layer = tf.layers.dense(input_layer, units=5, activation=tf.nn.relu)
        output_layer = tf.layers.dense(hidden_layer, units=2, activation=tf.nn.softmax)

        print(sess.run(output_layer))


if __name__ == "__main__":
    main()
Here's the error:
Traceback (most recent call last):
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1350, in _do_call
return fn(*args)
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1327, in _run_fn
return tf_session.TF_Run(session, options,
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/errors_impl.py", line 470, in __exit__
raise _make_specific_exception(
tensorflow.python.framework.errors_impl.FailedPreconditionError: Attempting to use uninitialized value dense_2/bias
[[Node: dense_2/bias/read = Identity[T=DT_FLOAT, _class=["loc:#dense_2/bias"], _device="/job:localhost/replica:0/task:0/device:CPU:0"](dense_2/bias)]]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "learning.py", line 29, in <module>
main()
File "learning.py", line 10, in main
model = create_model(x_inputs, y_outputs)
File "learning.py", line 26, in create_model
print(sess.run(output_layer))
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 894, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1127, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1343, in _do_run
return self._do_call(_run_fn, self._session, feeds, fetches, targets,
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1363, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.FailedPreconditionError: Attempting to use uninitialized value dense_2/bias
[[Node: dense_2/bias/read = Identity[T=DT_FLOAT, _class=["loc:#dense_2/bias"], _device="/job:localhost/replica:0/task:0/device:CPU:0"](dense_2/bias)]]
Caused by op 'dense_2/bias/read', defined at:
File "learning.py", line 29, in <module>
main()
File "learning.py", line 10, in main
model = create_model(x_inputs, y_outputs)
File "learning.py", line 24, in create_model
output_layer = tf.layers.dense(hidden_layer, units=2, activation=tf.nn.softmax)
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/layers/core.py", line 253, in dense
return layer.apply(inputs)
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/layers/base.py", line 762, in apply
return self.__call__(inputs, *args, **kwargs)
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/layers/base.py", line 636, in __call__
self.build(input_shapes)
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/layers/core.py", line 139, in build
self.bias = self.add_variable('bias',
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/layers/base.py", line 498, in add_variable
variable = vs.get_variable(name,
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variable_scope.py", line 1256, in get_variable
return get_variable_scope().get_variable(
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variable_scope.py", line 1091, in get_variable
return var_store.get_variable(
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variable_scope.py", line 429, in get_variable
return _true_getter(
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variable_scope.py", line 399, in _true_getter
return self._get_single_variable(
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variable_scope.py", line 798, in _get_single_variable
v = variables.Variable(
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variables.py", line 220, in __init__
self._init_from_args(
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variables.py", line 376, in _init_from_args
self._snapshot = array_ops.identity(self._variable, name="read")
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py", line 127, in identity
return gen_array_ops.identity(input, name=name)
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/gen_array_ops.py", line 2133, in identity
_, _, _op = _op_def_lib._apply_op_helper(
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/op_def_library.py", line 785, in _apply_op_helper
op = g.create_op(op_type_name, inputs, output_types, name=scope,
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/ops.py", line 3152, in create_op
ret = Operation(
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/ops.py", line 1625, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
FailedPreconditionError (see above for traceback): Attempting to use uninitialized value dense_2/bias
[[Node: dense_2/bias/read = Identity[T=DT_FLOAT, _class=["loc:#dense_2/bias"], _device="/job:localhost/replica:0/task:0/device:CPU:0"](dense_2/bias)]]
Please help me, I'm really struggling.
Thanks in advance!
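For reference, a minimal sketch (not the original poster's code) of the same script reordered so the dense layers are defined before the initializer is created; tf.global_variables_initializer() only covers variables that already exist when it is built, so building it before the layers leaves dense_2/bias uninitialized. This is the same ordering fix described for the transposed-convolution question further down; units=6 stands in for len(x_val).

import numpy as np
import tensorflow as tf


def create_model(x, y):
    # build the whole graph first
    input_layer = tf.layers.dense(x, units=6, activation=tf.nn.sigmoid)
    hidden_layer = tf.layers.dense(input_layer, units=5, activation=tf.nn.relu)
    output_layer = tf.layers.dense(hidden_layer, units=2, activation=tf.nn.softmax)

    # ... then create and run the initializer, so it also covers the layer variables
    var_init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(var_init)
        print(sess.run(output_layer))
    return output_layer


x_inputs = tf.Variable(np.array([[1, 2, 3], [1, 2, 3]]), dtype=tf.float32)
y_outputs = tf.Variable(np.array([[1, 2, 4], [2, 3, 4]]), dtype=tf.float32)
model = create_model(x_inputs, y_outputs)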
Related
I have TensorFlow 1.14 and I want to compute some classification metrics.
I am using tf.keras.metrics in the following manner:
tf.keras.metrics.Accuracy(tf.argmax(tf.nn.softmax(support_pred, dim=1), axis=1),
tf.argmax(support_y, axis=1))
This gives me the error:
{TypeError}Using a tf.Tensor as a Python bool is not allowed. Use if t is not None: instead of if t: to test if a tensor is defined, and use TensorFlow ops such as tf.cond to execute subgraphs conditioned on the value of a tensor.
I tried to use tf.contrib.metrics instead, but it only has precision_at_recall and recall_at_precision rather than standalone precision and recall.
EDIT 1
I have tried the following but it did not work:
import tensorflow as tf
a = tf.random.uniform((32, 10), 0, 1, dtype=tf.float32)
b = tf.random.uniform((32, 10), 0, 1, dtype=tf.float32)
a_softmax = tf.nn.softmax(a)
b_softmax = tf.nn.softmax(b)
a_argmax = tf.argmax(a_softmax, axis=-1)
b_argmax = tf.argmax(b_softmax, axis=-1)
acc = tf.keras.metrics.Accuracy()(a_argmax, b_argmax)
with tf.Session() as sess:
    sess.run([acc])
It gave me the following error:
Traceback (most recent call last):
File "C:\Users\96171\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 1356, in _do_call
return fn(*args)
File "C:\Users\96171\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 1341, in _run_fn
options, feed_dict, fetch_list, target_list, run_metadata)
File "C:\Users\96171\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 1429, in _call_tf_sessionrun
run_metadata)
tensorflow.python.framework.errors_impl.NotFoundError: Container localhost does not exist. (Could not find resource: localhost/total)
[[{{node AssignAddVariableOp}}]]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:/Users/96171/Desktop/dementia_cleanedup/dementia/maml_finn_copy/try_tf.py", line 15, in <module>
sess.run(acc)
File "C:\Users\96171\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 950, in run
run_metadata_ptr)
File "C:\Users\96171\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 1173, in _run
feed_dict_tensor, options, run_metadata)
File "C:\Users\96171\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 1350, in _do_run
run_metadata)
File "C:\Users\96171\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 1370, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.NotFoundError: Container localhost does not exist. (Could not find resource: localhost/total)
[[node AssignAddVariableOp (defined at /Users/96171/Desktop/dementia_cleanedup/dementia/maml_finn_copy/try_tf.py:12) ]]
Original stack trace for 'AssignAddVariableOp':
File "/Users/96171/Desktop/dementia_cleanedup/dementia/maml_finn_copy/try_tf.py", line 12, in <module>
acc = tf.keras.metrics.Accuracy()(a_argmax, b_argmax)
File "\Users\96171\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\keras\metrics.py", line 170, in __call__
update_op = self.update_state(*args, **kwargs) # pylint: disable=not-callable
File "\Users\96171\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\keras\utils\metrics_utils.py", line 73, in decorated
update_op = update_state_fn(*args, **kwargs)
File "\Users\96171\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\keras\metrics.py", line 551, in update_state
matches, sample_weight=sample_weight)
File "\Users\96171\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\keras\metrics.py", line 314, in update_state
update_total_op = self.total.assign_add(value_sum)
File "\Users\96171\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\ops\resource_variable_ops.py", line 1108, in assign_add
name=name)
File "\Users\96171\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\ops\gen_resource_variable_ops.py", line 68, in assign_add_variable_op
"AssignAddVariableOp", resource=resource, value=value, name=name)
File "\Users\96171\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 788, in _apply_op_helper
op_def=op_def)
File "\Users\96171\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\util\deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "\Users\96171\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\framework\ops.py", line 3616, in create_op
op_def=op_def)
File "\Users\96171\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\framework\ops.py", line 2005, in __init__
self._traceback = tf_stack.extract_stack()
Process finished with exit code 1
tf.keras.metrics.Accuracy creates an object with internal state that is usually updated several times, so it can't be constructed with y_pred and y_true as arguments. Instantiate it and then call the instance:
tf.keras.metrics.Accuracy()(tf.argmax(tf.nn.softmax(support_pred, dim=1), axis=1),
tf.argmax(support_y, axis=1))
It works if you pass the argmax of the softmax of both your predictions and the labels:
import tensorflow as tf
tf.random.set_seed(0)
a = tf.random.uniform((32, 10), 0, 1, dtype=tf.float32)
b = tf.random.uniform((32, 10), 0, 1, dtype=tf.float32)
a_softmax = tf.nn.softmax(a)
b_softmax = tf.nn.softmax(b)
a_argmax = tf.argmax(a_softmax, axis=-1)
b_argmax = tf.argmax(b_softmax, axis=-1)
tf.keras.metrics.Accuracy()(a_argmax, b_argmax)
<tf.Tensor: shape=(), dtype=float32, numpy=0.1875>
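For the graph-mode error in EDIT 1 (Container localhost does not exist), a likely cause is that the metric's internal total and count variables are never initialized in the session. A minimal sketch of one workaround, assuming TF 1.14 graph mode and that the metric exposes its variables via .variables (a guess, not verified against the poster's setup):

import tensorflow as tf

a = tf.random.uniform((32, 10), 0, 1, dtype=tf.float32)
b = tf.random.uniform((32, 10), 0, 1, dtype=tf.float32)
a_argmax = tf.argmax(tf.nn.softmax(a), axis=-1)
b_argmax = tf.argmax(tf.nn.softmax(b), axis=-1)

metric = tf.keras.metrics.Accuracy()   # keep a handle so its variables can be initialized
acc = metric(a_argmax, b_argmax)       # update op + result tensor

with tf.Session() as sess:
    # the metric's total/count variables may not be covered by
    # tf.global_variables_initializer(), so initialize them explicitly
    sess.run(tf.variables_initializer(metric.variables))
    print(sess.run(acc))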
I made an estimation model using TensorFlow and Keras, and wrapped it in a function that restores the weights with tf.train.Saver.
However, calling this function repeatedly raises the following error.
What is the reason for this error and how can I fix it?
The estimation model function in 'Model.py':
def LSTM_Model(input_data):
    x = tf.placeholder(tf.float32, [None, 20, 121], name="input")
    with tf.variable_scope('LSTM'):
        y_pred = Lsss(x)
    with tf.Session() as sess:
        saver = tf.train.Saver(max_to_keep=4)
        tf.global_variables_initializer().run()
        ckpt = tf.train.get_checkpoint_state(
            'C:\LSTM\LSTM_model3\LSTM_paper_2_mean20_4_3_epoch_31_look_back_20_1\Result3')
        saver.restore(sess, ckpt.model_checkpoint_path)
        K.set_learning_phase(False)
        height = sess.run([y_pred], {x: input_data})
    return height[0][0]
The main function:
from Model import LSTM_Model
from numpy import *
import hdf5storage
import numpy as np


def main():
    filename = 'Datafile'
    mat_file = hdf5storage.loadmat('C:/LSTM/Data/'+filename+'.mat')
    TrainSet = mat_file['Trainingset'][0]
    Train_time = mat_file['Train_time'][0]
    trainnum = 0
    data1 = TrainSet[trainnum]
    before_data = data1[Train_time[trainnum]-20:Train_time[trainnum], 0:121]
    input_data = np.reshape(before_data, (1, 20, 121))

    height = LSTM_Model(input_data)*1000
    print(height)
    height = LSTM_Model(input_data)*1000
    print(height)
The error that occurs:
C:\Users\username\Anaconda3\python.exe C:/LSTM/main.py
[544.562]
Traceback (most recent call last):
File "C:\Users\username\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1356, in _do_call
return fn(*args)
File "C:\Users\username\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1341, in _run_fn
options, feed_dict, fetch_list, target_list, run_metadata)
File "C:\Users\username\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1429, in _call_tf_sessionrun
run_metadata)
tensorflow.python.framework.errors_impl.NotFoundError: 2 root error(s) found.
(0) Not found: Key LSTM_1/cu_dnnlstm_3/bias not found in checkpoint
[[{{node save_1/RestoreV2}}]]
(1) Not found: Key LSTM_1/cu_dnnlstm_3/bias not found in checkpoint
[[{{node save_1/RestoreV2}}]]
[[save_1/RestoreV2/_37]]
0 successful operations.
0 derived errors ignored.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\username\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 1286, in restore
{self.saver_def.filename_tensor_name: save_path})
File "C:\Users\username\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 950, in run
run_metadata_ptr)
File "C:\Users\username\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1173, in _run
feed_dict_tensor, options, run_metadata)
File "C:\Users\username\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1350, in _do_run
run_metadata)
File "C:\Users\username\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1370, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.NotFoundError: 2 root error(s) found.
(0) Not found: Key LSTM_1/cu_dnnlstm_3/bias not found in checkpoint
[[node save_1/RestoreV2 (defined at \LSTM\Model.py:35) ]]
(1) Not found: Key LSTM_1/cu_dnnlstm_3/bias not found in checkpoint
[[node save_1/RestoreV2 (defined at \LSTM\Model.py:35) ]]
[[save_1/RestoreV2/_37]]
0 successful operations.
0 derived errors ignored.
Original stack trace for 'save_1/RestoreV2':
File "/LSTM/main.py", line 114, in <module>
main()
File "/LSTM/main.py", line 96, in main
height = LSTM_Model(input_data)*1000
File "\LSTM\Model.py", line 35, in LSTM_Model
saver = tf.train.Saver(max_to_keep=4)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 825, in __init__
self.build()
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 837, in build
self._build(self._filename, build_save=True, build_restore=True)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 875, in _build
build_restore=build_restore)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 508, in _build_internal
restore_sequentially, reshape)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 328, in _AddRestoreOps
restore_sequentially)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 575, in bulk_restore
return io_ops.restore_v2(filename_tensor, names, slices, dtypes)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\ops\gen_io_ops.py", line 1780, in restore_v2
name=name)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 788, in _apply_op_helper
op_def=op_def)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\util\deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 3616, in create_op
op_def=op_def)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 2005, in __init__
self._traceback = tf_stack.extract_stack()
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\username\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 1296, in restore
names_to_keys = object_graph_key_mapping(save_path)
File "C:\Users\username\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 1614, in object_graph_key_mapping
object_graph_string = reader.get_tensor(trackable.OBJECT_GRAPH_PROTO_KEY)
File "C:\Users\username\Anaconda3\lib\site-packages\tensorflow\python\pywrap_tensorflow_internal.py", line 678, in get_tensor
return CheckpointReader_GetTensor(self, compat.as_bytes(tensor_str))
tensorflow.python.framework.errors_impl.NotFoundError: Key _CHECKPOINTABLE_OBJECT_GRAPH not found in checkpoint
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:/LSTM/main.py", line 114, in <module>
main()
File "C:/LSTM/main.py", line 96, in main
height = LSTM_Model(input_data)*1000
File "C:\LSTM\Model.py", line 39, in LSTM_Model
saver.restore(sess, ckpt.model_checkpoint_path)
File "C:\Users\username\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 1302, in restore
err, "a Variable name or other graph key that is missing")
tensorflow.python.framework.errors_impl.NotFoundError: Restoring from checkpoint failed. This is most likely due to a Variable name or other graph key that is missing from the checkpoint. Please ensure that you have not altered the graph expected based on the checkpoint. Original error:
2 root error(s) found.
(0) Not found: Key LSTM_1/cu_dnnlstm_3/bias not found in checkpoint
[[node save_1/RestoreV2 (defined at \LSTM\Model.py:35) ]]
(1) Not found: Key LSTM_1/cu_dnnlstm_3/bias not found in checkpoint
[[node save_1/RestoreV2 (defined at \LSTM\Model.py:35) ]]
[[save_1/RestoreV2/_37]]
0 successful operations.
0 derived errors ignored.
Original stack trace for 'save_1/RestoreV2':
File "/LSTM/main.py", line 114, in <module>
main()
File "/LSTM/main.py", line 96, in main
height = LSTM_Model(input_data)*1000
File "\LSTM\Model.py", line 35, in LSTM_Model
saver = tf.train.Saver(max_to_keep=4)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 825, in __init__
self.build()
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 837, in build
self._build(self._filename, build_save=True, build_restore=True)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 875, in _build
build_restore=build_restore)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 508, in _build_internal
restore_sequentially, reshape)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 328, in _AddRestoreOps
restore_sequentially)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 575, in bulk_restore
return io_ops.restore_v2(filename_tensor, names, slices, dtypes)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\ops\gen_io_ops.py", line 1780, in restore_v2
name=name)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 788, in _apply_op_helper
op_def=op_def)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\util\deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 3616, in create_op
op_def=op_def)
File "\Users\username\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 2005, in __init__
self._traceback = tf_stack.extract_stack()
Process finished
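The traceback points at a common cause: the second call to LSTM_Model adds a second copy of the network to the same default graph, so its variables are created under the scope LSTM_1/..., which does not exist in the checkpoint. A minimal sketch of one pattern that keeps variable names stable across calls (a sketch only, not verified against the poster's full code; depending on how Lsss uses Keras, K.clear_session() may be needed instead of tf.reset_default_graph()):

import tensorflow as tf
from tensorflow.keras import backend as K   # assumed import; the poster's K may come from keras directly

CKPT_DIR = r'C:\path\to\Result3'   # placeholder for the poster's checkpoint directory


def LSTM_Model(input_data):
    # start from a clean graph so variables are named LSTM/... on every call,
    # instead of LSTM_1/..., LSTM_2/... on repeated calls
    tf.reset_default_graph()
    x = tf.placeholder(tf.float32, [None, 20, 121], name="input")
    with tf.variable_scope('LSTM'):
        y_pred = Lsss(x)   # Lsss: the poster's network-building function
    saver = tf.train.Saver(max_to_keep=4)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(CKPT_DIR)
        saver.restore(sess, ckpt.model_checkpoint_path)
        K.set_learning_phase(False)
        height = sess.run([y_pred], {x: input_data})
    return height[0][0]

Alternatively, building the graph, Saver, and Session once (outside the function) and reusing them for every prediction avoids rebuilding and re-restoring entirely.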
I am trying to build a model that uses a transposed convolution operation, but when I try to pass the weights and bias as parameters to the model function it gives an error.
import tensorflow as tf
import cv2


class WeighsTest:
    def __model_1(self, plh_var1, weights, bias):
        conv = tf.nn.conv2d(plh_var1, weights["v1"], [1, 1, 1, 1], padding="SAME")
        conv = tf.add(conv, bias["b1"])
        conv = tf.nn.relu(conv)
        tran_conv = tf.layers.conv2d_transpose(conv, 32, 4, 3, padding="valid")
        return tran_conv

    def train(self, input_img):
        plh = tf.placeholder(dtype=tf.float32, shape=(None, 84, 150, 3), name="input_img")
        with tf.variable_scope("test", reuse=tf.AUTO_REUSE):
            var_dict_1 = {
                "v1": tf.get_variable("v1", shape=(2, 2, 3, 32), initializer=tf.contrib.layers.xavier_initializer())
            }
            bias_1 = {
                "b1": tf.get_variable("b1", shape=32, initializer=tf.contrib.layers.xavier_initializer())
            }
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init)
            out_p = sess.run([self.__model_1(plh, var_dict_1, bias_1)], feed_dict={plh: [input_img]})
        return out_p


if __name__ == '__main__':
    obj = WeighsTest()
    img = cv2.imread('./1.jpg')
    output = obj.train(img)
The traceback is given below:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1365, in _do_call
return fn(*args)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1350, in _run_fn
target_list, run_metadata)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1443, in _call_tf_sessionrun
run_metadata)
tensorflow.python.framework.errors_impl.FailedPreconditionError: Attempting to use uninitialized value conv2d_transpose/bias
[[{{node conv2d_transpose/bias/read}}]]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/strange/DEV/Python_Projects/slmv/testing_unit.py", line 43, in <module>
output = obj.train(img)
File "/home/strange/DEV/Python_Projects/slmv/testing_unit.py", line 36, in train
out_p = sess.run([self.__model_1(x1, var_dict_1, bias_1)], feed_dict={x1: [input_img]})
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 956, in run
run_metadata_ptr)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1180, in _run
feed_dict_tensor, options, run_metadata)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1359, in _do_run
run_metadata)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.FailedPreconditionError: Attempting to use uninitialized value conv2d_transpose/bias
[[node conv2d_transpose/bias/read (defined at usr/local/lib/python3.7/dist-packages/tensorflow_core/python/framework/ops.py:1748) ]]
Original stack trace for 'conv2d_transpose/bias/read':
File "home/strange/DEV/Python_Projects/slmv/testing_unit.py", line 43, in <module>
output = obj.train(img)
File "home/strange/DEV/Python_Projects/slmv/testing_unit.py", line 36, in train
out_p = sess.run([self.__model_1(x1, var_dict_1, bias_1)], feed_dict={x1: [input_img]})
File "home/strange/DEV/Python_Projects/slmv/testing_unit.py", line 10, in __model_1
tran_conv = tf.layers.conv2d_transpose(conv,32, 4, 3, padding="valid")
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/util/deprecation.py", line 324, in new_func
return func(*args, **kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/layers/convolutional.py", line 1279, in conv2d_transpose
return layer.apply(inputs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/util/deprecation.py", line 324, in new_func
return func(*args, **kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 1700, in apply
return self.__call__(inputs, *args, **kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/layers/base.py", line 548, in __call__
outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 824, in __call__
self._maybe_build(inputs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 2146, in _maybe_build
self.build(input_shapes)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/layers/convolutional.py", line 787, in build
dtype=self.dtype)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/layers/base.py", line 461, in add_weight
**kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 529, in add_weight
aggregation=aggregation)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/training/tracking/base.py", line 712, in _add_variable_with_custom_getter
**kwargs_for_getter)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variable_scope.py", line 1500, in get_variable
aggregation=aggregation)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variable_scope.py", line 1243, in get_variable
aggregation=aggregation)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variable_scope.py", line 567, in get_variable
aggregation=aggregation)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variable_scope.py", line 519, in _true_getter
aggregation=aggregation)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variable_scope.py", line 933, in _get_single_variable
aggregation=aggregation)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variables.py", line 258, in __call__
return cls._variable_v1_call(*args, **kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variables.py", line 219, in _variable_v1_call
shape=shape)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variables.py", line 197, in <lambda>
previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variable_scope.py", line 2519, in default_variable_creator
shape=shape)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variables.py", line 262, in __call__
return super(VariableMetaclass, cls).__call__(*args, **kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variables.py", line 1688, in __init__
shape=shape)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variables.py", line 1872, in _init_from_args
self._snapshot = array_ops.identity(self._variable, name="read")
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/util/dispatch.py", line 180, in wrapper
return target(*args, **kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/array_ops.py", line 203, in identity
ret = gen_array_ops.identity(input, name=name)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/gen_array_ops.py", line 4239, in identity
"Identity", input=input, name=name)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/framework/op_def_library.py", line 794, in _apply_op_helper
op_def=op_def)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/framework/ops.py", line 3357, in create_op
attrs, op_def, compute_device)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/framework/ops.py", line 3426, in _create_op_internal
op_def=op_def)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/framework/ops.py", line 1748, in __init__
self._traceback = tf_stack.extract_stack()
It is important to pass the bias and weights as parameters to the model.
I used tensorflow-cpu 1.15.2 for the model.
Any idea how to solve this?
Thank you
The model needs to be called before tf.global_variables_initializer() is used,
i.e. the train function is changed as below:
def train(self, input_img):
    plh = tf.placeholder(dtype=tf.float32, shape=(None, 84, 150, 3), name="input_img")
    with tf.variable_scope("test", reuse=tf.AUTO_REUSE):
        var_dict_1 = {
            "v1": tf.get_variable("v1", shape=(2, 2, 3, 32), initializer=tf.contrib.layers.xavier_initializer())
        }
        bias_1 = {
            "b1": tf.get_variable("b1", shape=32, initializer=tf.contrib.layers.xavier_initializer())
        }

    # model is called before variable initialization
    model = self.__model_1(plh, var_dict_1, bias_1)

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        out_p = sess.run([model], feed_dict={plh: [input_img]})
    return out_p
the line given below:
out_p = sess.run([self.__model_1(plh, var_dict_1, bias_1)], feed_dict={plh: [input_img]})
is changed into
out_p = sess.run([model], feed_dict={plh: [input_img]})
I am trying to use my laptop as a server for face recognition and speech synthesis for my project on a Raspberry Pi. So I created a program which loads both models at startup and then waits for requests. When I start the program, the face-recognition model loads successfully, but while loading the speech-synthesis model it gives me an error regarding the tf.train.Saver.
Code:
Server:
def findFaceMatch():
    image_file = request.files.get("imagefile")
    image_file.save("image.jpg")
    print("sent for check")
    response = face_match_demo.recognizeFace(os.path.join(os.getcwd(), "image.jpg"))
    return response, 200

#api.route("/synthesize/<string:text>")
def synthesizeVoice(text):
    print(text)
    with open("F:/file.wav", 'wb') as f:
        f.write(synthesizer.synthesize(text))
    return send_from_directory("F:/", "file.wav", as_attachment=True), 200
Face Recognition:
import tensorflow as tf
import numpy as np
from . import facenet
from .align import detect_face
import cv2
import imutils
import os
import pickle
import time

minsize = 20
threshold = [0.6, 0.7, 0.7]
factor = 0.709
margin = 44
input_image_size = 160


def load_models(session):
    global sess
    sess = session
    global pnet, rnet, onet
    pnet, rnet, onet = detect_face.create_mtcnn(sess, os.path.join(os.getcwd(), "Face_recognition", "align"))
    facenet.load_model(os.path.join(os.getcwd(), "Face_recognition", "20170512-110547\\20170512-110547.pb"))
    global images_placeholder
    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
    global embeddings
    embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
    global phase_train_placeholder
    phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
    global embedding_size
    embedding_size = embeddings.get_shape()[1]


def getFace(img):
    faces = []
    img_size = np.asarray(img.shape)[0:2]
    bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
    if not len(bounding_boxes) == 0:
        for face in bounding_boxes:
            if face[4] > 0.50:
                det = np.squeeze(face[0:4])
                bb = np.zeros(4, dtype=np.int32)
                bb[0] = np.maximum(det[0] - margin / 2, 0)
                bb[1] = np.maximum(det[1] - margin / 2, 0)
                bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
                bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
                cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
                resized = cv2.resize(cropped, (input_image_size, input_image_size), interpolation=cv2.INTER_CUBIC)
                prewhitened = facenet.prewhiten(resized)
                faces.append(getEmbedding(prewhitened))
    return faces


def getEmbedding(resized):
    reshaped = resized.reshape(-1, input_image_size, input_image_size, 3)
    feed_dict = {images_placeholder: reshaped, phase_train_placeholder: False}
    embedding = sess.run(embeddings, feed_dict=feed_dict)
    return embedding


def compare2face(img1):
    print("checking")
    face2 = getFace(img1)
    face1 = []
    with open(os.path.join(os.getcwd(), "Face_recognition", "trained_knn_model.PB"), 'rb') as f:
        for i in range(4):
            face1.append(pickle.load(f))
    names = ["x", "y", "z", "p"]
    print("verifying name")
    for i in range(0, len(face1)):
        if face1[i] and face2:
            # calculate Euclidean distance
            dist = np.sqrt(np.sum(np.square(np.subtract(face1[i], face2[0]))))
            if dist <= 0.8:
                return "dist: "+str(dist)+"\nhello "+names[i]
    return "Person not found"


def recognizeFace(image_path):
    image = cv2.imread(image_path)
    response = compare2face(image)
    return response
Speech Synthesis:
import io
import numpy as np
import tensorflow as tf
from .hparams import hparams
from librosa import effects
from .models import create_model
from .text import text_to_sequence
from .util import audio


class Synthesizer:
    def load(self, checkpoint_path, sess, model_name='tacotron'):
        print('Constructing model: %s' % model_name)
        inputs = tf.placeholder(tf.int32, [1, None], 'inputs')
        input_lengths = tf.placeholder(tf.int32, [1], 'input_lengths')
        with tf.variable_scope('model') as scope:
            self.model = create_model(model_name, hparams)
            self.model.initialize(inputs, input_lengths)
            self.wav_output = audio.inv_spectrogram_tensorflow(self.model.linear_outputs[0])

        print('Loading checkpoint: %s' % checkpoint_path)
        # self.session = tf.Session()
        self.session = sess
        self.session.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(self.session, checkpoint_path)

    def synthesize(self, text):
        cleaner_names = [x.strip() for x in hparams.cleaners.split(',')]
        seq = text_to_sequence(text, cleaner_names)
        feed_dict = {
            self.model.inputs: [np.asarray(seq, dtype=np.int32)],
            self.model.input_lengths: np.asarray([len(seq)], dtype=np.int32)
        }
        wav = self.session.run(self.wav_output, feed_dict=feed_dict)
        wav = audio.inv_preemphasis(wav)
        wav = wav[:audio.find_endpoint(wav)]
        out = io.BytesIO()
        audio.save_wav(wav, out)
        return out.getvalue()
The error is as follows:
2019-11-11 21:48:04.408636: W tensorflow/core/framework/op_kernel.cc:1502] OP_REQUIRES failed at save_restore_v2_ops.cc:184 : Not found: Key onet/conv1/biases not found in checkpoint
Traceback (most recent call last):
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1356, in _do_call
return fn(*args)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1341, in _run_fn
options, feed_dict, fetch_list, target_list, run_metadata)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1429, in _call_tf_sessionrun
run_metadata)
tensorflow.python.framework.errors_impl.NotFoundError: 2 root error(s) found.
(0) Not found: Key onet/conv1/biases not found in checkpoint
[[{{node save/RestoreV2}}]]
[[save/RestoreV2/_617]]
(1) Not found: Key onet/conv1/biases not found in checkpoint
[[{{node save/RestoreV2}}]]
0 successful operations.
0 derived errors ignored.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 1286, in restore
{self.saver_def.filename_tensor_name: save_path})
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 950, in run
run_metadata_ptr)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1173, in _run
feed_dict_tensor, options, run_metadata)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1350, in _do_run
run_metadata)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1370, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.NotFoundError: 2 root error(s) found.
(0) Not found: Key onet/conv1/biases not found in checkpoint
[[node save/RestoreV2 (defined at F:\Backend\Text_To_Speech\synthesizer.py:25) ]]
[[save/RestoreV2/_617]]
(1) Not found: Key onet/conv1/biases not found in checkpoint
[[node save/RestoreV2 (defined at F:\Backend\Text_To_Speech\synthesizer.py:25) ]]
0 successful operations.
0 derived errors ignored.
Original stack trace for 'save/RestoreV2':
File "commonServer.py", line 38, in <module>
synthesizer.load(model_path,sess)
File "F:\Backend\Text_To_Speech\synthesizer.py", line 25, in load
saver = tf.train.Saver()
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 825, in __init__
self.build()
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 837, in build
self._build(self._filename, build_save=True, build_restore=True)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 875, in _build
build_restore=build_restore)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 508, in _build_internal
restore_sequentially, reshape)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 328, in _AddRestoreOps
restore_sequentially)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 575, in bulk_restore
return io_ops.restore_v2(filename_tensor, names, slices, dtypes)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\ops\gen_io_ops.py", line 1696, in restore_v2
name=name)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 788, in _apply_op_helper
op_def=op_def)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\util\deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 3616, in create_op
op_def=op_def)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 2005, in __init__
self._traceback = tf_stack.extract_stack()
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 1296, in restore
names_to_keys = object_graph_key_mapping(save_path)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 1614, in object_graph_key_mapping
object_graph_string = reader.get_tensor(trackable.OBJECT_GRAPH_PROTO_KEY)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\pywrap_tensorflow_internal.py", line 678, in get_tensor
return CheckpointReader_GetTensor(self, compat.as_bytes(tensor_str))
tensorflow.python.framework.errors_impl.NotFoundError: Key _CHECKPOINTABLE_OBJECT_GRAPH not found in checkpoint
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "commonServer.py", line 38, in <module>
synthesizer.load(model_path,sess)
File "F:\Backend\Text_To_Speech\synthesizer.py", line 26, in load
saver.restore(self.session, checkpoint_path)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 1302, in restore
err, "a Variable name or other graph key that is missing")
tensorflow.python.framework.errors_impl.NotFoundError: Restoring from checkpoint failed. This is most likely due to a Variable name or other graph key that is missing from the checkpoint. Please ensure that you have not altered the graph expected based on the checkpoint. Original error:
2 root error(s) found.
(0) Not found: Key onet/conv1/biases not found in checkpoint
[[node save/RestoreV2 (defined at F:\Backend\Text_To_Speech\synthesizer.py:25) ]]
[[save/RestoreV2/_617]]
(1) Not found: Key onet/conv1/biases not found in checkpoint
[[node save/RestoreV2 (defined at F:\Backend\Text_To_Speech\synthesizer.py:25) ]]
0 successful operations.
0 derived errors ignored.
Original stack trace for 'save/RestoreV2':
File "commonServer.py", line 38, in <module>
synthesizer.load(model_path,sess)
File "F:\Backend\Text_To_Speech\synthesizer.py", line 25, in load
saver = tf.train.Saver()
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 825, in __init__
self.build()
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 837, in build
self._build(self._filename, build_save=True, build_restore=True)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 875, in _build
build_restore=build_restore)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 508, in _build_internal
restore_sequentially, reshape)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 328, in _AddRestoreOps
restore_sequentially)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\training\saver.py", line 575, in bulk_restore
return io_ops.restore_v2(filename_tensor, names, slices, dtypes)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\ops\gen_io_ops.py", line 1696, in restore_v2
name=name)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 788, in _apply_op_helper
op_def=op_def)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\util\deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 3616, in create_op
op_def=op_def)
File "C:\Users\Jaydip Bari\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 2005, in __init__
self._traceback = tf_stack.extract_stack()
At least up to TF 2.0 (not sure about the version you have), loading multiple models into one graph can cause problems. See:
load multiple models in Tensorflow
I hope this helps.
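A minimal sketch of that idea, using the loader functions shown above (load_models and Synthesizer.load) and assuming model_path is the Tacotron checkpoint path from commonServer.py: give each model its own Graph and Session so their variables, placeholders, and Saver objects never share one default graph.

import tensorflow as tf

# face-recognition model in its own graph/session
face_graph = tf.Graph()
with face_graph.as_default():
    face_sess = tf.Session(graph=face_graph)
    face_match_demo.load_models(face_sess)   # loader from the face-recognition module above

# speech-synthesis model in a separate graph/session
tts_graph = tf.Graph()
with tts_graph.as_default():
    tts_sess = tf.Session(graph=tts_graph)
    synthesizer = Synthesizer()
    synthesizer.load(model_path, tts_sess)   # model_path: Tacotron checkpoint path

# when serving a request, enter the matching graph before running ops, e.g.
# with tts_graph.as_default():
#     wav_bytes = synthesizer.synthesize(text)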
I want to test a new network structure which requires changing some of the elements of a tensor in a Keras model. If I could find a way to convert/copy the tensor to a NumPy array and then later transform it back into a tensor, I should be able to make the model work.
I tried using the .eval() method to convert the tensor to a NumPy array, but it gives me errors. I am also using this model with a DQN agent from keras-rl, so it is possible the error comes from how keras-rl uses the model. Here is my code:
def Filter_Features(F):
    sess = Session()
    with sess.as_default():
        F_np = F.eval()
        min_pos = np.argmin(F_np)
        F_np[min_pos] = 0
        return convert_to_tensor(F_np)


def create_model(nb_actions, num_frames=4):
    inputs = Input(shape=(num_frames, 84, 84))
    F = Conv2D(16, (8, 8), activation='relu', strides=(4, 4), data_format="channels_first")(inputs)
    ...
    F_k = Lambda(Filter_Features)(F)
    actions = Dense(nb_actions, activation='linear')(F_k)
    nnf_model = Model(inputs=inputs, outputs=actions)
    return nnf_model
Note that the code runs if I remove the Lambda layer, so the issue must originate there. I received the error:
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 1356, in _do_call
return fn(*args)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 1341, in _run_fn
options, feed_dict, fetch_list, target_list, run_metadata)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 1429, in _call_tf_sessionrun
run_metadata)
tensorflow.python.framework.errors_impl.FailedPreconditionError: Attempting to use uninitialized value conv2d_1/bias
[[{{node conv2d_1/bias/read}}]]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "Atari_Test.py", line 32, in <module>
model = Model_StackExchange.create_model(nb_actions = nb_actions)
File "/Users/j/deep-rl/Model_StackExchange.py", line 26, in create_model
F_k = Lambda(Filter_Features)(F)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/keras/engine/base_layer.py", line 457, in __call__
output = self.call(inputs, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/keras/layers/core.py", line 687, in call
return self.function(inputs, **arguments)
File "/Users/j/deep-rl/Model_StackExchange.py", line 11, in Filter_Features
F_np = F.eval()
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 731, in eval
return _eval_using_default_session(self, feed_dict, self.graph, session)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 5579, in _eval_using_default_session
return session.run(tensors, feed_dict)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 950, in run
run_metadata_ptr)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 1173, in _run
feed_dict_tensor, options, run_metadata)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 1350, in _do_run
run_metadata)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 1370, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.FailedPreconditionError: Attempting to use uninitialized value conv2d_1/bias
[[node conv2d_1/bias/read (defined at /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:402) ]]
Original stack trace for 'conv2d_1/bias/read':
File "Atari_Test.py", line 32, in <module>
model = Model_StackExchange.create_model(nb_actions = nb_actions)
File "/Users/j/deep-rl/Model_StackExchange.py", line 21, in create_model
F = Conv2D(16,(8,8), activation='relu', strides=(4,4), data_format = "channels_first")(inputs)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/keras/engine/base_layer.py", line 431, in __call__
self.build(unpack_singleton(input_shapes))
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/keras/layers/convolutional.py", line 147, in build
constraint=self.bias_constraint)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/keras/legacy/interfaces.py", line 91, in wrapper
return func(*args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/keras/engine/base_layer.py", line 252, in add_weight
constraint=constraint)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py", line 402, in variable
v = tf.Variable(value, dtype=tf.as_dtype(dtype), name=name)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/ops/variables.py", line 259, in __call__
return cls._variable_v1_call(*args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/ops/variables.py", line 220, in _variable_v1_call
shape=shape)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/ops/variables.py", line 198, in <lambda>
previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/ops/variable_scope.py", line 2511, in default_variable_creator
shape=shape)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/ops/variables.py", line 263, in __call__
return super(VariableMetaclass, cls).__call__(*args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/ops/variables.py", line 1568, in __init__
shape=shape)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/ops/variables.py", line 1755, in _init_from_args
self._snapshot = array_ops.identity(self._variable, name="read")
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/util/dispatch.py", line 180, in wrapper
return target(*args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/ops/array_ops.py", line 86, in identity
ret = gen_array_ops.identity(input, name=name)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/ops/gen_array_ops.py", line 4253, in identity
"Identity", input=input, name=name)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/framework/op_def_library.py", line 788, in _apply_op_helper
op_def=op_def)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/util/deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 3616, in create_op
op_def=op_def)
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 2005, in __init__
self._traceback = tf_stack.extract_stack()
Please let me know if you know how to access and change the elements of a tensor within a Keras model. Thank you.
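One way to sidestep .eval() entirely is to express the change with symbolic TensorFlow ops inside the Lambda layer, so no session is needed while the graph is being built. The sketch below (an illustration under that assumption, not the poster's method) zeroes each sample's smallest element, which is what the NumPy version appears to intend:

import tensorflow as tf


def filter_features(f):
    # f: symbolic tensor of shape (batch, channels, height, width); zero out
    # the smallest element of each sample using graph ops instead of .eval()
    shape = tf.shape(f)
    flat = tf.reshape(f, [shape[0], -1])   # (batch, n)
    n = tf.shape(flat)[1]
    min_idx = tf.argmin(flat, axis=1)      # per-sample index of the minimum
    mask = 1.0 - tf.one_hot(min_idx, depth=n, dtype=f.dtype)
    return tf.reshape(flat * mask, shape)

# inside create_model, the Lambda layer would then be:
# F_k = Lambda(filter_features)(F)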