I'm having difficulty running this LSTM model in TensorFlow. I'm relatively new to the library, so forgive me.
from tensorflow.contrib import ffmpeg, rnn
import tensorflow as tf
import os
import time
# hyperparameters
learning_rate = 0.01
total_iterations = 100
layer_width = 5
network_depth = 10
# path variables
mp3_output_path = "/Users/espresso/Documents/Projects/Martin/ouput"
mp3_input_path = "/Users/espresso/Documents/Projects/Martin/input/Caravan.mp3"
# Function Definitions
def import_audio(mp3_location):
    binary_audio = tf.read_file(mp3_location)
    audio_tensor = ffmpeg.decode_audio(binary_audio, file_format="mp3", samples_per_second=int(44100/2), channel_count=1)
    reformatted_audio_tensor = tf.reshape(audio_tensor, [1, tf.shape(audio_tensor)[0], 1])
    return reformatted_audio_tensor

def export_audio(audio_tensor, mp3_target_dest):
    encoded_audio = ffmpeg.encode_audio(audio_tensor, file_format="mp3", samples_per_second=int(44100/2))
    mus_file = open(mp3_target_dest, 'wb+')
    mus_file.write(encoded_audio)
    mus_file.close()
    print("Export complete. File located at {}".format(mp3_target_dest))
    return 0

def lstm_layer(cell_quantity):
    return rnn.LSTMCell(cell_quantity)

def stacked_lstm(layer_width, depth):
    if isinstance(layer_width, list):
        # iterate over the widths themselves rather than using them as indices
        lstm_layer_lib = [lstm_layer(width) for width in layer_width]
    else:
        lstm_layer_lib = [lstm_layer(layer_width) for element in range(depth)]
    lstm_layer_lib.insert(0, lstm_layer(3))
    lstm_layer_lib.append(lstm_layer(1))
    return rnn.MultiRNNCell(lstm_layer_lib)

def inaccuracy(net_loss):
    return net_loss
# variable definitions
input = tf.placeholder(dtype=tf.float32, shape=[1, 5282816, 1], name="Audio_Input")
y_labels = tf.placeholder(dtype=tf.float32, shape=[1, 5282816, 1], name="Y_Labels")
init_op = tf.global_variables_initializer()
network_outline = stacked_lstm(layer_width, network_depth)
hypothesis, state = tf.nn.dynamic_rnn(network_outline, input, dtype=tf.float32)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_labels, logits=hypothesis))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
with tf.Session() as sess:
    sess.run(init_op)
    input_mp3_tensor = sess.run(import_audio(mp3_input_path))
    vector_size = int(sess.run(tf.shape(input_mp3_tensor))[1]) - 1
    y_labels_tensor = sess.run(tf.convert_to_tensor([input_mp3_tensor[0][element+1][0] for element in range(vector_size)] + [0]))
    reformatted_y_labels_tensor = sess.run(tf.reshape(y_labels_tensor, [1, tf.shape(y_labels_tensor)[0], 1]))
    for iteration in range(total_iterations):
        loss, _ = sess.run([cross_entropy, optimizer], feed_dict={input: input_mp3_tensor, y_labels: reformatted_y_labels_tensor})
As you can see, I declare tf.global_variables_initializer() in the variable init_op and I run it before executing any other operation in the Session. Still, when the code runs, the following error message is returned:
Caused by op 'rnn/multi_rnn_cell/cell_4/lstm_cell/bias/read', defined at:
File "martin.py", line 41, in <module>
hypothesis, state = tf.nn.dynamic_rnn(network_outline, input, dtype=tf.float32)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/rnn.py", line 635, in dynamic_rnn
dtype=dtype)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/rnn.py", line 832, in _dynamic_rnn_loop
swap_memory=swap_memory)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/control_flow_ops.py", line 3202, in while_loop
result = loop_context.BuildLoop(cond, body, loop_vars, shape_invariants)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/control_flow_ops.py", line 2940, in BuildLoop
pred, body, original_loop_vars, loop_vars, shape_invariants)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/control_flow_ops.py", line 2877, in _BuildLoop
body_result = body(*packed_vars_for_body)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/control_flow_ops.py", line 3178, in <lambda>
body = lambda i, lv: (i + 1, orig_body(*lv))
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/rnn.py", line 803, in _time_step
(output, new_state) = call_cell()
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/rnn.py", line 789, in <lambda>
call_cell = lambda: cell(input_t, state)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 191, in __call__
return super(RNNCell, self).__call__(inputs, state)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/layers/base.py", line 714, in __call__
outputs = self.call(inputs, *args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 1242, in call
cur_inp, new_state = cell(cur_inp, cur_state)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 298, in __call__
*args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/layers/base.py", line 696, in __call__
self.build(input_shapes)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 730, in build
initializer=init_ops.zeros_initializer(dtype=self.dtype))
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/layers/base.py", line 546, in add_variable
partitioner=partitioner)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/training/checkpointable.py", line 415, in _add_variable_with_custom_getter
**kwargs_for_getter)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py", line 1297, in get_variable
constraint=constraint)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py", line 1093, in get_variable
constraint=constraint)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py", line 431, in get_variable
return custom_getter(**custom_getter_kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/rnn_cell_impl.py", line 194, in _rnn_get_variable
variable = getter(*args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py", line 408, in _true_getter
use_resource=use_resource, constraint=constraint)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py", line 800, in _get_single_variable
use_resource=use_resource)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py", line 2157, in variable
use_resource=use_resource)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py", line 2147, in <lambda>
previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/variable_scope.py", line 2130, in default_variable_creator
constraint=constraint)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/variables.py", line 235, in __init__
constraint=constraint)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/variables.py", line 388, in _init_from_args
self._snapshot = array_ops.identity(self._variable, name="read")
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/array_ops.py", line 142, in identity
return gen_array_ops.identity(input, name=name)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/gen_array_ops.py", line 3053, in identity
"Identity", input=input, name=name)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 3290, in create_op
op_def=op_def)
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1654, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
FailedPreconditionError (see above for traceback): Attempting to use uninitialized value rnn/multi_rnn_cell/cell_4/lstm_cell/bias
[[Node: rnn/multi_rnn_cell/cell_4/lstm_cell/bias/read = Identity[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"](rnn/multi_rnn_cell/cell_4/lstm_cell/bias)]]
I'm not entirely sure whether I'm actually supposed to create the weights myself and pass them into the TensorFlow functions as parameters, but I have no other idea why the program fails.
The reason for the error is that you defined init_op before defining your variables (i.e. before calling stacked_lstm and tf.nn.dynamic_rnn). This means that the variables you defined after creating init_op won't be initialised.
You should do something like:
network_outline = stacked_lstm(layer_width, network_depth)
hypothesis, state = tf.nn.dynamic_rnn(network_outline, input, dtype=tf.float32)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_labels, logits=hypothesis))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
init_op = tf.global_variables_initializer()
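More generally, the rule in graph-mode TensorFlow 1.x is: build every op that creates variables first, create the initializer last, and only then run it inside the session. A minimal sketch with made-up variables (independent of your model), just to show the ordering:
import tensorflow as tf
# two toy variables, only to illustrate the ordering
w = tf.Variable(tf.zeros([3, 3]), name="w")
b = tf.Variable(tf.zeros([3]), name="b")
y = tf.matmul(tf.ones([1, 3]), w) + b
# the initializer only covers variables that already exist in the graph,
# so it must be created after the last variable-creating op
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init_op)   # initialise first
    print(sess.run(y))  # then evaluate ops that read the variables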
Related
In TensorFlow, I am experiencing an issue where I try to get the output of my final layer by running it in a session, but it gives me an error. I am running Python 3.8.5 and TensorFlow 1.15 (because of some hardware issues).
Here is my code:
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import tensorflow as tf
def main():
    x_inputs = tf.Variable(np.array([[1, 2, 3], [1, 2, 3]]), dtype=tf.float32)
    y_outputs = tf.Variable(np.array([[1, 2, 4], [2, 3, 4]]), dtype=tf.float32)
    model = create_model(x_inputs, y_outputs)

def create_model(x: np.array, y: np.array):
    var_init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(var_init)
        x_val = sess.run(x).flatten()
        y_val = sess.run(y).flatten()
        # neural network architecture
        input_layer = tf.layers.dense(x, units=len(x_val), activation=tf.nn.sigmoid)
        hidden_layer = tf.layers.dense(input_layer, units=5, activation=tf.nn.relu)
        output_layer = tf.layers.dense(hidden_layer, units=2, activation=tf.nn.softmax)
        print(sess.run(output_layer))

if __name__ == "__main__":
    main()
Here's the error:
Traceback (most recent call last):
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1350, in _do_call
return fn(*args)
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1327, in _run_fn
return tf_session.TF_Run(session, options,
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/errors_impl.py", line 470, in __exit__
raise _make_specific_exception(
tensorflow.python.framework.errors_impl.FailedPreconditionError: Attempting to use uninitialized value dense_2/bias
[[Node: dense_2/bias/read = Identity[T=DT_FLOAT, _class=["loc:#dense_2/bias"], _device="/job:localhost/replica:0/task:0/device:CPU:0"](dense_2/bias)]]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "learning.py", line 29, in <module>
main()
File "learning.py", line 10, in main
model = create_model(x_inputs, y_outputs)
File "learning.py", line 26, in create_model
print(sess.run(output_layer))
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 894, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1127, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1343, in _do_run
return self._do_call(_run_fn, self._session, feeds, fetches, targets,
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1363, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.FailedPreconditionError: Attempting to use uninitialized value dense_2/bias
[[Node: dense_2/bias/read = Identity[T=DT_FLOAT, _class=["loc:#dense_2/bias"], _device="/job:localhost/replica:0/task:0/device:CPU:0"](dense_2/bias)]]
Caused by op 'dense_2/bias/read', defined at:
File "learning.py", line 29, in <module>
main()
File "learning.py", line 10, in main
model = create_model(x_inputs, y_outputs)
File "learning.py", line 24, in create_model
output_layer = tf.layers.dense(hidden_layer, units=2, activation=tf.nn.softmax)
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/layers/core.py", line 253, in dense
return layer.apply(inputs)
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/layers/base.py", line 762, in apply
return self.__call__(inputs, *args, **kwargs)
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/layers/base.py", line 636, in __call__
self.build(input_shapes)
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/layers/core.py", line 139, in build
self.bias = self.add_variable('bias',
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/layers/base.py", line 498, in add_variable
variable = vs.get_variable(name,
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variable_scope.py", line 1256, in get_variable
return get_variable_scope().get_variable(
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variable_scope.py", line 1091, in get_variable
return var_store.get_variable(
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variable_scope.py", line 429, in get_variable
return _true_getter(
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variable_scope.py", line 399, in _true_getter
return self._get_single_variable(
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variable_scope.py", line 798, in _get_single_variable
v = variables.Variable(
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variables.py", line 220, in __init__
self._init_from_args(
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/variables.py", line 376, in _init_from_args
self._snapshot = array_ops.identity(self._variable, name="read")
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py", line 127, in identity
return gen_array_ops.identity(input, name=name)
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/gen_array_ops.py", line 2133, in identity
_, _, _op = _op_def_lib._apply_op_helper(
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/op_def_library.py", line 785, in _apply_op_helper
op = g.create_op(op_type_name, inputs, output_types, name=scope,
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/ops.py", line 3152, in create_op
ret = Operation(
File "/Applications/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/ops.py", line 1625, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
FailedPreconditionError (see above for traceback): Attempting to use uninitialized value dense_2/bias
[[Node: dense_2/bias/read = Identity[T=DT_FLOAT, _class=["loc:#dense_2/bias"], _device="/job:localhost/replica:0/task:0/device:CPU:0"](dense_2/bias)]]
Please help me, I'm really struggling. Thanks in advance!
I am trying to build a model that uses a transposed convolution operation, but when I try to pass the weights and bias as parameters to the model function it gives an error.
import tensorflow as tf
import cv2
class WeighsTest:
    def __model_1(self, plh_var1, weights, bias):
        conv = tf.nn.conv2d(plh_var1, weights["v1"], [1, 1, 1, 1], padding="SAME")
        conv = tf.add(conv, bias["b1"])
        conv = tf.nn.relu(conv)
        tran_conv = tf.layers.conv2d_transpose(conv, 32, 4, 3, padding="valid")
        return tran_conv

    def train(self, input_img):
        plh = tf.placeholder(dtype=tf.float32, shape=(None, 84, 150, 3), name="input_img")
        with tf.variable_scope("test", reuse=tf.AUTO_REUSE):
            var_dict_1 = {
                "v1": tf.get_variable("v1", shape=(2, 2, 3, 32), initializer=tf.contrib.layers.xavier_initializer())
            }
            bias_1 = {
                "b1": tf.get_variable("b1", shape=32, initializer=tf.contrib.layers.xavier_initializer())
            }
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init)
            out_p = sess.run([self.__model_1(plh, var_dict_1, bias_1)], feed_dict={plh: [input_img]})
        return out_p

if __name__ == '__main__':
    obj = WeighsTest()
    img = cv2.imread('./1.jpg')
    output = obj.train(img)
The traceback is given below:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1365, in _do_call
return fn(*args)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1350, in _run_fn
target_list, run_metadata)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1443, in _call_tf_sessionrun
run_metadata)
tensorflow.python.framework.errors_impl.FailedPreconditionError: Attempting to use uninitialized value conv2d_transpose/bias
[[{{node conv2d_transpose/bias/read}}]]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/strange/DEV/Python_Projects/slmv/testing_unit.py", line 43, in <module>
output = obj.train(img)
File "/home/strange/DEV/Python_Projects/slmv/testing_unit.py", line 36, in train
out_p = sess.run([self.__model_1(x1, var_dict_1, bias_1)], feed_dict={x1: [input_img]})
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 956, in run
run_metadata_ptr)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1180, in _run
feed_dict_tensor, options, run_metadata)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1359, in _do_run
run_metadata)
File "/usr/local/lib/python3.7/dist-packages/tensorflow_core/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.FailedPreconditionError: Attempting to use uninitialized value conv2d_transpose/bias
[[node conv2d_transpose/bias/read (defined at usr/local/lib/python3.7/dist-packages/tensorflow_core/python/framework/ops.py:1748) ]]
Original stack trace for 'conv2d_transpose/bias/read':
File "home/strange/DEV/Python_Projects/slmv/testing_unit.py", line 43, in <module>
output = obj.train(img)
File "home/strange/DEV/Python_Projects/slmv/testing_unit.py", line 36, in train
out_p = sess.run([self.__model_1(x1, var_dict_1, bias_1)], feed_dict={x1: [input_img]})
File "home/strange/DEV/Python_Projects/slmv/testing_unit.py", line 10, in __model_1
tran_conv = tf.layers.conv2d_transpose(conv,32, 4, 3, padding="valid")
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/util/deprecation.py", line 324, in new_func
return func(*args, **kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/layers/convolutional.py", line 1279, in conv2d_transpose
return layer.apply(inputs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/util/deprecation.py", line 324, in new_func
return func(*args, **kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 1700, in apply
return self.__call__(inputs, *args, **kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/layers/base.py", line 548, in __call__
outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 824, in __call__
self._maybe_build(inputs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 2146, in _maybe_build
self.build(input_shapes)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/layers/convolutional.py", line 787, in build
dtype=self.dtype)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/layers/base.py", line 461, in add_weight
**kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 529, in add_weight
aggregation=aggregation)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/training/tracking/base.py", line 712, in _add_variable_with_custom_getter
**kwargs_for_getter)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variable_scope.py", line 1500, in get_variable
aggregation=aggregation)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variable_scope.py", line 1243, in get_variable
aggregation=aggregation)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variable_scope.py", line 567, in get_variable
aggregation=aggregation)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variable_scope.py", line 519, in _true_getter
aggregation=aggregation)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variable_scope.py", line 933, in _get_single_variable
aggregation=aggregation)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variables.py", line 258, in __call__
return cls._variable_v1_call(*args, **kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variables.py", line 219, in _variable_v1_call
shape=shape)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variables.py", line 197, in <lambda>
previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variable_scope.py", line 2519, in default_variable_creator
shape=shape)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variables.py", line 262, in __call__
return super(VariableMetaclass, cls).__call__(*args, **kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variables.py", line 1688, in __init__
shape=shape)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/variables.py", line 1872, in _init_from_args
self._snapshot = array_ops.identity(self._variable, name="read")
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/util/dispatch.py", line 180, in wrapper
return target(*args, **kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/array_ops.py", line 203, in identity
ret = gen_array_ops.identity(input, name=name)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/ops/gen_array_ops.py", line 4239, in identity
"Identity", input=input, name=name)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/framework/op_def_library.py", line 794, in _apply_op_helper
op_def=op_def)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/framework/ops.py", line 3357, in create_op
attrs, op_def, compute_device)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/framework/ops.py", line 3426, in _create_op_internal
op_def=op_def)
File "usr/local/lib/python3.7/dist-packages/tensorflow_core/python/framework/ops.py", line 1748, in __init__
self._traceback = tf_stack.extract_stack()
It is important to pass the bias and weights as parameters to the model.
I used tensorflow-cpu 1.15.2 for the model.
Any idea how to solve this?
Thank you
The model needs to be called before tf.global_variables_initializer() is used,
i.e. the train function is changed as below:
def train(self, input_img):
    plh = tf.placeholder(dtype=tf.float32, shape=(None, 84, 150, 3), name="input_img")
    with tf.variable_scope("test", reuse=tf.AUTO_REUSE):
        var_dict_1 = {
            "v1": tf.get_variable("v1", shape=(2, 2, 3, 32), initializer=tf.contrib.layers.xavier_initializer())
        }
        bias_1 = {
            "b1": tf.get_variable("b1", shape=32, initializer=tf.contrib.layers.xavier_initializer())
        }
    # model is called before variable initialization
    model = self.__model_1(plh, var_dict_1, bias_1)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        out_p = sess.run([model], feed_dict={plh: [input_img]})
    return out_p
The line given below:
out_p = sess.run([self.__model_1(plh, var_dict_1, bias_1)], feed_dict={plh: [input_img]})
is changed into
out_p = sess.run([model], feed_dict={plh: [input_img]})
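If you want to double-check that the new ordering covers everything, tf.report_uninitialized_variables() can be evaluated after running init; it returns the names of any variables that are still uninitialized. This check is optional and not part of the fix, just a small sketch:
with tf.Session() as sess:
    sess.run(init)
    # prints an empty array once every variable is covered by init
    print(sess.run(tf.report_uninitialized_variables()))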
I compiled TensorFlow from source with MKL in order to accelerate my DNN training. I have a ResNet model which is copied from tensorflow/models, and the dataset is CIFAR-10. When I run the model with the channels-last format, everything is fine. But in order to use MKL, which is said to accelerate only the channels-first format, I added some code to transpose the data into NCHW format and ran it. Then I get:
Caused by op 'stage/residual_v1/conv2d/Conv2D', defined at:
File "main.py", line 182, in <module>
main(args)
File "main.py", line 83, in main
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
File "/home/holmescn/.pyenv/versions/anaconda35.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/training.py", line 447, in train_and_evaluate
return executor.run()
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/training.py", line 531, in run
return self.run_local()
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/training.py", line 669, in run_local
hooks=train_hooks)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py", line 366, in train
loss = self._train_model(input_fn, hooks, saving_listeners)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py", line 1119, in _train_model
return self._train_model_default(input_fn, hooks, saving_listeners)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py", line 1132, in _train_model_default
features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py", line 1107, in _call_model_fn
model_fn_results = self._model_fn(features=features, **kwargs)
File "/home/holmescn/Work/deep-learning-practice/tensorflow/estimator/utils.py", line 18, in _model_fn
logits = build_model(input_layer, mode == tf.estimator.ModeKeys.TRAIN, params=params, args=args)
File "/home/holmescn/Work/deep-learning-practice/tensorflow/estimator/estimators/resnet.py", line 175, in build_model
return resnet.build_model(input_layer, args.num_layers)
File "/home/holmescn/Work/deep-learning-practice/tensorflow/estimator/estimators/resnet.py", line 56, in build_model
x = res_func(x, 3, filters[i], filters[i + 1], strides[i])
File "/home/holmescn/Work/deep-learning-practice/tensorflow/estimator/estimators/resnet.py", line 79, in _residual_v1
x = self._conv(x, kernel_size, out_filter, stride)
File "/home/holmescn/Work/deep-learning-practice/tensorflow/estimator/estimators/base.py", line 59, in _conv
name=name)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/layers/convolutional.py", line 427, in conv2d
return layer.apply(inputs)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py", line 759, in apply
return self.__call__(inputs, *args, **kwargs)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/layers/base.py", line 329, in __call__
outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py", line 688, in __call__
outputs = self.call(inputs, *args, **kwargs)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/keras/layers/convolutional.py", line 184, in call
outputs = self._convolution_op(inputs, self.kernel)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/ops/nn_ops.py", line 868, in __call__
return self.conv_op(inp, filter)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/ops/nn_ops.py", line 520, in __call__
return self.call(inp, filter)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/ops/nn_ops.py", line 204, in __call__
name=self.name)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/ops/gen_nn_ops.py", line 956, in conv2d
data_format=data_format, dilations=dilations, name=name)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 3414, in create_op
op_def=op_def)
File "/home/holmescn/.pyenv/versions/anaconda3-5.2.0/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1740, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
InvalidArgumentError (see above for traceback): input and filter must have the same depth: 32 vs 16
[[Node: stage/residual_v1/conv2d/Conv2D = _MklConv2D[T=DT_FLOAT, _kernel="MklOp", data_format="NCHW", dilations=[1, 1, 1, 1], padding="SAME", strides=[1, 1, 1, 1], use_cudnn_on_gpu=true, _device="/job:localhost/replica:0/task:0/device:CPU:0"](Relu, conv2d/kernel/read, Relu:1, DMT/_6)]]
The last line of the traceback says input and filter must have the same depth, which I understand to mean that the depth dimension of the input tensor and of the filter should be the same. But how can that be if I want to generate more feature maps? What should I do?
There is no need to transpose the data to change the data format.
You can pass the data format as channels-first or channels-last as an argument.
For example,
python cifar10_main.py --data-dir=${PWD}/cifar-10-data --data-format=channels_first --job-dir=/tmp/cifar10
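If you are building the convolution layers yourself rather than running the reference script, the same idea applies inside the model: tell each layer the layout via its data_format argument instead of transposing the tensors by hand. A minimal sketch (the shape and filter count below are illustrative, not taken from the ResNet in question):
import tensorflow as tf
# a batch of CIFAR-10-sized images already stored as NCHW: (batch, channels, height, width)
x = tf.placeholder(tf.float32, shape=[None, 3, 32, 32])
# the layer interprets the layout itself, so no manual transpose is needed
y = tf.layers.conv2d(x, filters=16, kernel_size=3, padding="same",
                     data_format="channels_first")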
I have two datasets in TFRecords: one holds around 20,000 entries, the other around 1.2 million.
This code works perfectly when I use the TFRecord with 20,000 entries, but gives an out-of-range error when I use the one with 1.2 million.
def parse(serialized):
    features = {
        'train/image': tf.FixedLenFeature([], tf.string),
        'train/label': tf.FixedLenFeature([], tf.int64)
    }
    parsed_example = tf.parse_single_example(serialized=serialized,
                                             features=features)
    image_raw = parsed_example['train/image']
    image = tf.decode_raw(image_raw, tf.uint8)
    image = tf.cast(image, tf.float32)
    label = parsed_example['train/label']
    return image, label

def input_fn(filenames, train, batch_size=32, buffer_size=2048):
    dataset = tf.data.TFRecordDataset(filenames=filenames)
    dataset = dataset.map(parse)
    if train:
        dataset = dataset.shuffle(buffer_size=buffer_size)
        num_repeat = None
    else:
        num_repeat = 1
    dataset = dataset.repeat(num_repeat)
    dataset = dataset.batch(batch_size)
    iterator = dataset.make_one_shot_iterator()
    images_batch, labels_batch = iterator.get_next()
    x = {'image': images_batch}
    y = labels_batch
    return x, y

x, y = input_fn('train.tfrecords', False)
print(x)
with tf.Session() as sess:
    for i in range(10):
        print(sess.run(x))
The error is this:
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1374, in _do_call
raise type(e)(node_def, op, message)
OutOfRangeError: End of sequence
[[Node: IteratorGetNext_2 = IteratorGetNext[output_shapes=[[?,?], [?]], output_types=[DT_FLOAT, DT_INT64], _device="/job:localhost/replica:0/task:0/device:CPU:0"](OneShotIterator_2)]]
Caused by op 'IteratorGetNext_2', defined at:
File "C:\ProgramData\Anaconda3\lib\site-packages\spyder\utils\ipython\start_kernel.py", line 268, in <module>
main()
File "C:\ProgramData\Anaconda3\lib\site-packages\spyder\utils\ipython\start_kernel.py", line 264, in main
kernel.start()
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelapp.py", line 478, in start
self.io_loop.start()
File "C:\ProgramData\Anaconda3\lib\site-packages\zmq\eventloop\ioloop.py", line 177, in start
super(ZMQIOLoop, self).start()
File "C:\ProgramData\Anaconda3\lib\site-packages\tornado\ioloop.py", line 888, in start
handler_func(fd_obj, events)
File "C:\ProgramData\Anaconda3\lib\site-packages\tornado\stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\zmq\eventloop\zmqstream.py", line 440, in _handle_events
self._handle_recv()
File "C:\ProgramData\Anaconda3\lib\site-packages\zmq\eventloop\zmqstream.py", line 472, in _handle_recv
self._run_callback(callback, msg)
File "C:\ProgramData\Anaconda3\lib\site-packages\zmq\eventloop\zmqstream.py", line 414, in _run_callback
callback(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\tornado\stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 233, in dispatch_shell
handler(stream, idents, msg)
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\ipkernel.py", line 208, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\zmqshell.py", line 537, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2728, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2856, in run_ast_nodes
if self.run_code(code, result):
File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2910, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-3-23a4ed6f3a2e>", line 1, in <module>
runfile('C:/Users/kakus/Desktop/landmark/tfrecord_test_outputv2.py', wdir='C:/Users/kakus/Desktop/landmark')
File "C:\ProgramData\Anaconda3\lib\site-packages\spyder\utils\site\sitecustomize.py", line 705, in runfile
execfile(filename, namespace)
File "C:\ProgramData\Anaconda3\lib\site-packages\spyder\utils\site\sitecustomize.py", line 102, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "C:/Users/kakus/Desktop/landmark/tfrecord_test_outputv2.py", line 84, in <module>
x,y = input_fn('train.tfrecords',False)
File "C:/Users/kakus/Desktop/landmark/tfrecord_test_outputv2.py", line 76, in input_fn
images_batch, labels_batch = iterator.get_next()
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\data\ops\iterator_ops.py", line 330, in get_next
name=name)), self._output_types,
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\ops\gen_dataset_ops.py", line 895, in iterator_get_next
output_shapes=output_shapes, name=name)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 3271, in create_op
op_def=op_def)
File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 1650, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
OutOfRangeError (see above for traceback): End of sequence
[[Node: IteratorGetNext_2 = IteratorGetNext[output_shapes=[[?,?], [?]], output_types=[DT_FLOAT, DT_INT64], _device="/job:localhost/replica:0/task:0/device:CPU:0"](OneShotIterator_2)]]
dataset.repeat(num_epochs) repeats the data for the number of epochs specified. In train mode you have set it to None; change it to the number of epochs you want to train on the dataset.
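A minimal sketch of that change inside input_fn (the epoch count of 10 is an arbitrary placeholder):
if train:
    dataset = dataset.shuffle(buffer_size=buffer_size)
    num_repeat = 10   # repeat for a fixed number of epochs instead of None
else:
    num_repeat = 1
dataset = dataset.repeat(num_repeat)
dataset = dataset.batch(batch_size)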
I want to create a bidirectional RNN encoder in embedding_attention_seq2seq in seq2seq_model.py.
Here is the original code:
def embedding_attention_seq2seq(encoder_inputs, decoder_inputs, cell,
                                num_encoder_symbols, num_decoder_symbols,
                                num_heads=1, output_projection=None,
                                feed_previous=False, dtype=dtypes.float32,
                                scope=None, initial_state_attention=False):
    with variable_scope.variable_scope(scope or "embedding_attention_seq2seq"):
        # Encoder.
        encoder_cell = rnn_cell.EmbeddingWrapper(cell, num_encoder_symbols)
        encoder_outputs, encoder_state = rnn.rnn(
            encoder_cell, encoder_inputs, dtype=dtype)
        # First calculate a concatenation of encoder outputs to put attention on.
        top_states = [array_ops.reshape(e, [-1, 1, cell.output_size])
                      for e in encoder_outputs]
        attention_states = array_ops.concat(1, top_states)
        ....
Here is the code I changed, borrowed from https://github.com/ematvey/tensorflow-seq2seq-tutorials/blob/master/2-seq2seq-advanced.ipynb:
# Encoder.
encoder_cell = copy.deepcopy(cell)
encoder_cell = core_rnn_cell.EmbeddingWrapper(
    encoder_cell,
    embedding_classes=num_encoder_symbols,
    embedding_size=embedding_size)
(encoder_outputs,
 encoder_fw_final_state,
 encoder_bw_final_state) = rnn.bidirectional_rnn(
    cell_fw=encoder_cell,
    cell_bw=encoder_cell,
    inputs=encoder_inputs,
    dtype=dtype)
encoder_final_state_c = tf.concat(
    (encoder_fw_final_state.c, encoder_bw_final_state.c), 1)
encoder_final_state_h = tf.concat(
    (encoder_fw_final_state.h, encoder_bw_final_state.h), 1)
encoder_state = LSTMStateTuple(
    c=encoder_final_state_c,
    h=encoder_final_state_h)
Here is the list of errors:
Traceback (most recent call last):
File "translate.py", line 301, in <module>
tf.app.run()
File "/home/tensorflow/anaconda3/envs/tf/lib/python3.4/site-packages/tensorflow/python/platform/app.py", line 43, in run
sys.exit(main(sys.argv[:1] + flags_passthrough))
File "translate.py", line 297, in main
train()
File "translate.py", line 156, in train
model = create_model(sess, False)
File "translate.py", line 134, in create_model
dtype=dtype)
File "/home/tensorflow/Downloads/NMT-jp-ch-master/seq2seq_model.py", line 185, in __init__
softmax_loss_function=softmax_loss_function)
File "/home/tensorflow/Downloads/NMT-jp-ch-master/seq2seq.py", line 628, in model_with_buckets
decoder_inputs[:bucket[1]])
File "/home/tensorflow/Downloads/NMT-jp-ch-master/seq2seq_model.py", line 184, in <lambda>
lambda x, y: seq2seq_f(x, y, False),
File "/home/tensorflow/Downloads/NMT-jp-ch-master/seq2seq_model.py", line 148, in seq2seq_f
dtype=dtype)
File "/home/tensorflow/Downloads/NMT-jp-ch-master/seq2seq.py", line 432, in embedding_attention_seq2seq
inputs=encoder_inputs
File "/home/tensorflow/anaconda3/envs/tf/lib/python3.4/site-packages/tensorflow/python/ops/rnn.py", line 652, in bidirectional_dynamic_rnn
time_major=time_major, scope=fw_scope)
File "/home/tensorflow/anaconda3/envs/tf/lib/python3.4/site-packages/tensorflow/python/ops/rnn.py", line 789, in dynamic_rnn
for input_ in flat_input)
File "/home/tensorflow/anaconda3/envs/tf/lib/python3.4/site-packages/tensorflow/python/ops/rnn.py", line 789, in <genexpr>
for input_ in flat_input)
File "/home/tensorflow/anaconda3/envs/tf/lib/python3.4/site-packages/tensorflow/python/ops/array_ops.py", line 1280, in transpose
ret = gen_array_ops.transpose(a, perm, name=name)
File "/home/tensorflow/anaconda3/envs/tf/lib/python3.4/site-packages/tensorflow/python/ops/gen_array_ops.py", line 3656, in transpose
result = _op_def_lib.apply_op("Transpose", x=x, perm=perm, name=name)
File "/home/tensorflow/anaconda3/envs/tf/lib/python3.4/site-packages/tensorflow/python/framework/op_def_library.py", line 759, in apply_op
op_def=op_def)
File "/home/tensorflow/anaconda3/envs/tf/lib/python3.4/site-packages/tensorflow/python/framework/ops.py", line 2242, in create_op
set_shapes_for_outputs(ret)
File "/home/tensorflow/anaconda3/envs/tf/lib/python3.4/site-packages/tensorflow/python/framework/ops.py", line 1617, in set_shapes_for_outputs
shapes = shape_func(op)
File "/home/tensorflow/anaconda3/envs/tf/lib/python3.4/site-packages/tensorflow/python/framework/ops.py", line 1568, in call_with_requiring
return call_cpp_shape_fn(op, require_shape_fn=True)
File "/home/tensorflow/anaconda3/envs/tf/lib/python3.4/site-packages/tensorflow/python/framework/common_shapes.py", line 610, in call_cpp_shape_fn
debug_python_shape_fn, require_shape_fn)
File "/home/tensorflow/anaconda3/envs/tf/lib/python3.4/site-packages/tensorflow/python/framework/common_shapes.py", line 675, in _call_cpp_shape_fn_impl
raise ValueError(err.message)
ValueError: Dimension must be 1 but is 3 for 'model_with_buckets/embedding_attention_seq2seq/BiRNN/FW/transpose' (op: 'Transpose') with input shapes: [?], [3].
I use Python 3.4 and TensorFlow v0.12.
How do I create a proper bidirectional RNN encoder with https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/rnn/python/ops/rnn.py in a seq2seq model?
Thank you in advance.
The problem was solved by changing the reshape to
top_states = [array_ops.reshape(e, [-1, 1, cell.output_size * 2])
              for e in encoder_outputs]
Yes, the reshape should use cell.output_size * 2.
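For context, a short sketch of why the factor of 2 is needed (assuming the forward and backward cells have the same output size): the bidirectional encoder concatenates the two directions along the feature axis, so every encoder output is twice as wide as a single cell's output, and the attention states have to be reshaped to match.
# each element of encoder_outputs is concat(fw_output, bw_output) on the last axis,
# i.e. it has shape [batch, 2 * cell.output_size]
top_states = [array_ops.reshape(e, [-1, 1, cell.output_size * 2])
              for e in encoder_outputs]
# TF 0.12 concat signature: concat(concat_dim, values)
attention_states = array_ops.concat(1, top_states)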