Cannot take the length of Shape with unknown rank - python

I have a neural network built from a tf.data data generator and a tf.keras model, as follows (a simplified version, because the full code would be too long):
dataset = ...
A tf.data.Dataset object whose next_x method calls get_next on the x_train iterator, and whose next_y method calls get_next on the y_train iterator. Each label is a (1, 67) array in one-hot form.
Layers:
input_tensor = tf.keras.layers.Input(shape=(240, 240, 3))  # dims of x
output = tf.keras.layers.Flatten()(input_tensor)
prediction = tf.keras.layers.Dense(67, activation='softmax')(output)  # 67 is the number of classes
Model:
model = tf.keras.models.Model(inputs=input_tensor, outputs=prediction)
model.compile(optimizer=tf.train.AdamOptimizer(), loss=tf.losses.softmax_cross_entropy, metrics=['accuracy'])
model.fit_generator(gen(dataset.next_x(), dataset.next_y()), steps_per_epoch=100)
gen is defined like this:
def gen(x, y):
    while True:
        yield (x, y)
My problem is that when I try to run it, I get an error in the model.fit part:
ValueError: Cannot take the length of Shape with unknown rank.
Any ideas are appreciated!

Could you post a longer stack trace? I think your problem might be related to this recent TensorFlow issue:
https://github.com/tensorflow/tensorflow/issues/24520
There's also a simple PR that fixes it (not yet merged). Maybe try it out yourself?
EDIT
Here is the PR:
Open tensorflow/python/keras/engine/training_utils.py and replace the following (at line 232 at the moment):

if (x.shape is not None
    and len(x.shape) == 1

with this:

if tensor_util.is_tensor(x):
    x_shape_ndims = x.shape.ndims if x.shape is not None else None
else:
    x_shape_ndims = len(x.shape)

if (x_shape_ndims == 1
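
For background on why the patch helps: for a tensor of unknown rank, x.shape is a TensorShape whose ndims is None, and calling len() on it raises exactly this ValueError. A small illustration (a sketch, assuming TF 1.x graph mode):

import tensorflow as tf

# shape=None means the rank is unknown at graph-construction time
x = tf.placeholder(tf.float32, shape=None)
print(x.shape.ndims)  # None
len(x.shape)          # raises: ValueError: Cannot take the length of Shape with unknown rank.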

I found out what was wrong: I actually have to run the next batch in a tf.Session before yielding it.
Here is how it works (I don't write the rest of the code, since it stays the same):
model.fit_generator(gen(), steps_per_epoch=100)

def gen():
    with tf.Session() as sess:
        next_x = dataset.next_x()
        next_y = dataset.next_y()
        while True:
            x_batch = sess.run(next_x)
            y_batch = sess.run(next_y)
            yield x_batch, y_batch
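
For context, the question doesn't show how next_x and next_y are implemented. Below is a minimal sketch of one plausible TF 1.x wrapper consistent with this fix; the class name, constructor, and use of one-shot iterators are my assumptions, not code from the question.

import tensorflow as tf

# Hypothetical sketch only -- the real wrapper isn't shown in the question.
# Assumes x_train and y_train are tf.data.Dataset objects (TF 1.x style).
class DatasetWrapper:
    def __init__(self, x_train, y_train):
        self._x_iter = x_train.make_one_shot_iterator()
        self._y_iter = y_train.make_one_shot_iterator()

    def next_x(self):
        # Returns a tensor, not data: it must be evaluated via sess.run(...)
        return self._x_iter.get_next()

    def next_y(self):
        return self._y_iter.get_next()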

For the issue "Cannot take the length of Shape with unknown rank":
Thanks to the answer above, I solved it by adding output_shapes to from_generator, according to this issue comment.
In my case, I was using Dataset.from_generator for dataset pipeline.
Before:
Dataset.from_generator(_generator_factory,
                       output_types=(tf.float32, tf.int8))
Working code for me:
Dataset.from_generator(_generator_factory,
                       output_types=(tf.float32, tf.int8),
                       output_shapes=(
                           tf.TensorShape([2, 224, 224, 3]),
                           tf.TensorShape([1,])
                       ))
The official tf.data guide from TensorFlow also notes:
...
The output_shapes argument is not required but is highly recommended, as many TensorFlow operations do not support tensors with unknown rank. If the length of a particular axis is unknown or variable, set it as None in the output_shapes.
...
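
Following that guidance, here is a sketch of how a variable-length axis could be declared. For illustration only, I'm assuming the first axis of the image tensor varies per sample; _generator_factory is the factory from the snippet above.

import tensorflow as tf

# Sketch: mark the unknown/variable axis as None, per the guide.
dataset = tf.data.Dataset.from_generator(
    _generator_factory,
    output_types=(tf.float32, tf.int8),
    output_shapes=(
        tf.TensorShape([None, 224, 224, 3]),  # variable-length first axis
        tf.TensorShape([1,]),
    ))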

Related

Keras, "Failed to find data adapter that can handle input: <class 'function'>, <class 'NoneType'>" in batch training

I am trying to batch-train my model, as my dataset is quite large. However, when calling
autoencoder_train = autoencoder.fit(my_training_batch_generator,
                                    steps_per_epoch=steps_per_epoch,
                                    epochs=nb_epoch,
                                    verbose=1,
                                    validation_data=my_testing_batch_generator,
                                    validation_steps=validation_steps)
I get the following error:
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/data_adapter.py in select_data_adapter(x, y)
962 "Failed to find data adapter that can handle "
963 "input: {}, {}".format(
--> 964 _type_name(x), _type_name(y)))
965 elif len(adapter_cls) > 1:
966 raise RuntimeError(
ValueError: Failed to find data adapter that can handle input: <class 'function'>, <class 'NoneType'>
The functions my_training_batch_generator and my_testing_batch_generator are defined identically as:
def my_training_batch_generator(Train_df, batch_size, steps):
    idx = 1
    while True:
        yield load_train_data(Train_df, idx - 1, batch_size)  # yields data
        if idx < steps:
            idx += 1
        else:
            idx = 1

dataDir = "/..."

def load_train_data(Train_df, idx, batch_size):
    i = 0  # fill x starting at index 0 (starting at 1 would overrun the array)
    x = np.zeros([batch_size, 100, 100, 100, 3])
    for n in range(idx * batch_size, idx * batch_size + batch_size):
        data = loadmat(Train_df + 'volume' + str(n))
        x[i] = np.array(data['tensor'])
        i = i + 1
    return (np.asarray(x), np.asarray(x))
so I am quite sure that the generator function passes numpy arrays to the autoencoder; hence I don't understand why the data adapter can't handle the input. I am new to batch training, and the tutorial I followed (here) was meant for a classification task, while I am using it for image-to-image regression via an autoencoder. Any help would be very much appreciated!
I can't reproduce the problem, so I will share what I do for generator training.
First, I suggest you print the output of the generator outside the training loop and check that its shape matches the input to your model (see the sanity-check sketch after the code below).
Second, you are passing a function object to the fit method. I don't know if that syntax will ever work (and in fact Keras complains about the "function" type).
Hoping this could be useful, here is what works for me (batch size of 1, tf 2.0):
def generate_data():
    i = -1
    while True:
        i += 1
        if i == len(x_train): i = 0
        # print(x_train[i], y_train[i])
        # print(x_train[i].shape, y_train[i].shape)
        yield x_train[i], y_train[i]

def generate_val():
    i = -1
    while True:
        i += 1
        if i == len(x_test): i = 0
        # print(x_test[i], y_test[i])
        # print(x_test[i].shape, y_test[i].shape)
        yield x_test[i], y_test[i]

# ... model definition and so on ...

history = model.fit(generate_data(), steps_per_epoch=len(x_train), epochs=100,
                    callbacks=[callback], class_weight={0: 4, 1: 1},
                    validation_data=generate_val(), validation_steps=len(x_test))
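
As mentioned above, a quick sanity check can be run outside the training loop; a minimal sketch (assuming x_train and y_train from the snippet above):

import numpy as np

# Pull one batch from the generator and confirm its shape matches the model input.
gen = generate_data()
x0, y0 = next(gen)
print(np.asarray(x0).shape, np.asarray(y0).shape)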
There were two issues:
1. Most importantly, I forgot to instantiate the generator objects before defining the model, as follows:
my_training_batch_generator = batch_generator(Train_df, 256, steps_per_epoch)
my_testing_batch_generator = batch_generator(Test_df, 256, validation_steps)
2. This is also why I had ended up with two generator and load functions (one for training and one for testing), and why I got the error about the class "function": I was passing a function to the autoencoder rather than a generator object.
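
Putting it together, a sketch of the corrected flow (reusing the names from this question; the batch size of 256 and the step counts come from the answer above):

# Instantiate generator objects first, then pass the objects (not the functions) to fit().
my_training_batch_generator = batch_generator(Train_df, 256, steps_per_epoch)
my_testing_batch_generator = batch_generator(Test_df, 256, validation_steps)

autoencoder_train = autoencoder.fit(my_training_batch_generator,
                                    steps_per_epoch=steps_per_epoch,
                                    epochs=nb_epoch,
                                    verbose=1,
                                    validation_data=my_testing_batch_generator,
                                    validation_steps=validation_steps)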

Keras custom data generator giving dimension errors with multi input and multi output( functional api model)

I have written a generator for Keras. Before returning X, y from __getitem__ I have double-checked the shapes of the X's and y's and they are alright, but the generator keeps giving dimension-mismatch errors and warnings.
(Colab Code to reproduce: https://colab.research.google.com/drive/1bSJm44MMDCWDU8IrG2GXKBvXNHCuY70G?usp=sharing)
My training and validation generators are pretty much the same:
class ValidGenerator(Sequence):
    def __init__(self, df, batch_size=64, num_classes=None, shuffle=True):
        self.batch_size = batch_size
        self.df = df
        self.indices = self.df.index.tolist()
        self.num_classes = num_classes
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        return int(len(self.indices) // self.batch_size)

    def __getitem__(self, index):
        index = self.index[index * self.batch_size:(index + 1) * self.batch_size]
        batch = [self.indices[k] for k in index]
        X, y = self.__get_data(batch)
        return X, y

    def on_epoch_end(self):
        self.index = np.arange(len(self.indices))
        if self.shuffle == True:
            np.random.shuffle(self.index)

    def __get_data(self, batch):
        # some logic is written here
        # that prepares 3 X features and 3 y outputs
        X = [input_array_1, input_array_2, input_array_3]
        y = [out_1, out_2, out_3]
        return X, y
I return a tuple (X, y) with 3 input features and 3 output features each, so the shape of X is (3, 32, 10, 1).
I am using the functional API to build the model (I have things like concatenation and multiple inputs/outputs, which isn't possible with Sequential), with the following structure.
When I try to fit the model with the generator using the following code
train_datagen = TrainGenerator(df=train_df, batch_size=32, num_classes=None, shuffle=True)
valid_datagen = ValidGenerator(df=train_df, batch_size=32, num_classes=None, shuffle=True)
model.fit(train_datagen, epochs=2,verbose=1,callbacks=[checkpoint,es])
I get these warnings and errors, which don't go away:
Epoch 1/2
WARNING:tensorflow:Model was constructed with shape (None, 10) for input Tensor("input_1:0", shape=(None, 10), dtype=float32), but it was called on an input with incompatible shape (None, None, None).
WARNING:tensorflow:Model was constructed with shape (None, 10) for input Tensor("input_2:0", shape=(None, 10), dtype=float32), but it was called on an input with incompatible shape (None, None, None).
WARNING:tensorflow:Model was constructed with shape (None, 10) for input Tensor("input_3:0", shape=(None, 10), dtype=float32), but it was called on an input with incompatible shape (None, None, None).
...
...
call
return super(RNN, self).call(inputs, **kwargs)
/home/eduardo/.virtualenvs/kgpu3/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py:975
call
input_spec.assert_input_compatibility(self.input_spec, inputs,
/home/eduardo/.virtualenvs/kgpu3/lib/python3.8/site-packages/tensorflow/python/keras/engine/input_spec.py:176
assert_input_compatibility
raise ValueError('Input ' + str(input_index) + ' of layer ' +
ValueError: Input 0 of layer lstm is incompatible with the layer: expected ndim=3, found ndim=4. Full shape received: [None, None, None, 88]
I have rechecked the whole code, and it isn't possible to have an input like (None, None, None) as in the warning or the error; my input dimension is (3, 32, 10, 1).
Update
I have also tried to write a plain Python generator function and got exactly the same error.
My generator function
def generate_arrays_from_file(batchsize, df):
    # print(bat)
    inputs = []
    targets = []
    batchcount = 0
    while True:
        df3 = df.loc[np.arange(batchcount * batchsize, (batchcount * batchsize) + batchsize)]
        # some preprocessing
        X = [input_array_1, input_array_2, input_array_3]
        y = [out_1, out_2, out_3]
        yield X, y
        batchcount = batchcount + 1
It seems like something is wrong internally with Keras (maybe due to the fact that I am using the functional API).
Update 2
I also tried outputting tuples
X = (input1_X, input2_X, input3_X)
y = (output1_y, output2_y, output3_y)
and named inputs/outputs, but neither works:
X = {"input_1": input1_X, "input_2": input2_X, "input_3": input3_X}
y = {"output_1": output1_y, "output_2": output2_y, "output_3": output3_y}
Note about the problem formulation:
Changing the individual X features to shape (32, 10) instead of (32, 10, 1) might get rid of this error, but that is not what I want: it changes my problem (I would no longer have 10 time steps with one feature each).
Keras uses None for dynamic dimensions.
As you can see in the model.summary() output, the model expects shape (None, 10) for each of your inputs, which is two-dimensional. With the batch dimension, you should feed three-dimensional data to the model, but you are feeding four-dimensional data.
I would guess that Keras doesn't split your input list into three inputs. Try changing your inputs to a tuple:
X = (input_array_1,input_array_2,input_array_3)
In order to resolve this error:
ValueError: Input 0 of layer lstm is incompatible with the layer: expected ndim=3, found ndim=4. Full shape received: [None, None, None, 88]
TrainGenerator should be changed in the following way.
Current code:
input1_X = np.array(df3['input1_X'].to_list()).reshape(dlen, pad_len, 1)
input2_X = np.array(df3['input2_X'].to_list()).reshape(dlen, pad_len, 1)
input3_X = np.array(df3['input3_X'].to_list()).reshape(dlen, pad_len, 1)
Should be changed to:
input1_X = np.array(df3['input1_X'].to_list()).reshape(dlen, pad_len)
input2_X = np.array(df3['input2_X'].to_list()).reshape(dlen, pad_len)
input3_X = np.array(df3['input3_X'].to_list()).reshape(dlen, pad_len)
The reason is that each of the 3 Inputs expects a 2-dimensional array, but the generator provides a 3-dimensional one. The expected shape is (batch_size, 10).
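
To make the shape mismatch concrete, a small sketch (the dlen and pad_len values are assumptions matching the question's batch size of 32 and 10 time steps):

import numpy as np

dlen, pad_len = 32, 10
ok = np.zeros((dlen, pad_len))      # (32, 10)   -> 2-D, what each Input(shape=(10,)) expects
bad = np.zeros((dlen, pad_len, 1))  # (32, 10, 1) -> 3-D, triggers the ndim error
print(ok.ndim, bad.ndim)            # 2 3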
I had a similar issue with a custom generator that just had to pass a numpy array of size 10 as input and one single output.
To solve the problem I had to transform the shapes of the two vectors passed to the neural network like this:
def slides_generator(integer_list):
    # stuff happens
    x = np_ts[np_index:np_index + 10]  # numpy array
    y = np_ts[np_index + 10]           # numpy array
    yield tf.convert_to_tensor(x)[np.newaxis, ...], tf.convert_to_tensor(y)[np.newaxis, ...]

doge_gen = slides_generator(integer_list)  # next(doge_gen)
Basically, you need to pass the two arrays with shape (None, size); in my case these were (None, 10) and (None, 1), and to achieve this I just passed two reshaped tensors. You need the None dimension as the batch size.
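
For illustration, a self-contained sketch of what the [np.newaxis, ...] indexing above does (the array contents are arbitrary):

import numpy as np
import tensorflow as tf

x = np.arange(10, dtype=np.float32)                  # shape (10,)
x_batched = tf.convert_to_tensor(x)[np.newaxis, ...]
print(x_batched.shape)                               # (1, 10): batch dimension prepended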

Keras: Trying to model.predict() gives "ValueError: Tensor's shape is not compatible with supplied shape"

I'm following the TensorFlow Keras tutorial for text generation. The training part works perfectly, but when I try to predict the next token, I get an error.
Here's all the important code:
Making the vocabulary and dataset.
vocab = sorted(set(text))
char2index = {c: i for i, c in enumerate(vocab)}
index2char = np.array(vocab)
chars_to_int = np.array([char2index[c] for c in text])

char_dataset = tf.data.Dataset.from_tensor_slices(chars_to_int)
sequences = char_dataset.batch(seq_length + 1, drop_remainder=True)

def split_input_and_target(sequence):
    input_ = sequence[:-1]
    target_ = sequence[1:]
    return input_, target_

dataset = sequences.map(split_input_and_target)
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
Building the model
(the important part here is that BATCH_SIZE = 64):
model = tf.keras.Sequential()
model.add(tf.keras.layers.Embedding(len(vocab), EMBEDDING_DIM,
                                    batch_input_shape=[BATCH_SIZE, None]))
# here are a few more layers
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam")
model.fit(dataset, epochs=EPOCHS)
Actually trying to generate text (this part was copied almost directly from the tutorial after I started getting desperate):
num_tokens = 100
seed = "some text"
input_eval = [char2index[c] for c in seed]
input_eval = tf.expand_dims(input_eval, 0)
text_generated = []
model.reset_states()

for i in range(num_tokens):
    predictions = model(input_eval)
    predictions = tf.squeeze(predictions, 0)
    # more stuff
Then, I first get a warning:
WARNING:tensorflow:Model was constructed with shape (64, None) for input Tensor("embedding_14_input:0", shape=(64, None), dtype=float32), but it was called on an input with incompatible shape (1, 9).
Then it gives me an error:
---->3 predictions = model(input_eval)
...
ValueError: Tensor's shape (9, 64, 256) is not compatible with supplied shape [9, 1, 256]
The second number, 64, is my batch size. If I change BATCH_SIZE to 1, everything works and all is fine, but this is obviously not the solution I am hoping for.
(I somehow managed to miss a step in the tutorial despite reading it several times over the past few hours.)
Here's the relevant passage:
To keep this prediction step simple, use a batch size of 1.
Because of the way the RNN state is passed from timestep to timestep, the model only accepts a fixed batch size once built.
To run the model with a different batch_size, we need to rebuild the model and restore the weights from the checkpoint.
tf.train.latest_checkpoint(checkpoint_dir)
model = build_model(vocab_size, embedding_dim, rnn_units, batch_size=1)
model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))
model.build(tf.TensorShape([1, None]))
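
For reference, a sketch of a build_model along the lines of the tutorial (the question elides its middle layers, so the GRU layer here is an assumption):

def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
    # The batch size is baked into the input shape, which is why the model
    # must be rebuilt (with batch_size=1) before single-sample prediction.
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(vocab_size, embedding_dim,
                                  batch_input_shape=[batch_size, None]),
        tf.keras.layers.GRU(rnn_units,
                            return_sequences=True,
                            stateful=True,
                            recurrent_initializer='glorot_uniform'),
        tf.keras.layers.Dense(vocab_size)
    ])
    return model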
I hope my silly mistake will help somebody to remember to reload the model in the future!

Variable batch_size in call function

I am trying to implement an attention network with TensorFlow 2. For every image, I want to take only some glimpses, i.e. small parts of the image. For this I have implemented a subclass of tensorflow.keras.models.Model; here is a snippet of it.
class RecurrentAttentionModel(models.Model):
    # ...

    def call(self, inputs):
        l = tf.random.uniform((40, 2), minval=0, maxval=1)
        for _ in range(0, self.glimpses):
            glimpse = tf.image.extract_glimpse(inputs, size=(self.retina_size, self.retina_size),
                                               offsets=l, centered=False, normalized=True)
            # some other code...
            # update l to take a glimpse somewhere else
        return result
Now, the code above works and trains perfectly, but my issue is that I have the hardcoded 40 in it: the batch_size I defined in my dataset. I am not able to read/get the batch_size in the call method, since the variable inputs is of the form Tensor("input_1_77:0", shape=(None, 250, 500, 1), dtype=float32), where the None for the batch_size seems to be expected behavior.
When I just initialize l with the following code (without the batch_size):
l = tf.random.uniform((2,), minval=0, maxval=1)
it throws this error
ValueError: Shape must be rank 2 but is rank 1 for 'recurrent_attention_model_86/ExtractGlimpse' (op: 'ExtractGlimpse') with input shapes: [?,250,500,1], [2], [2]
which I totally understand, but I have no idea how to set the initial values according to the batch size.
You can extract the batch size dimension dynamically by using tf.shape:
l = tf.random.uniform(tf.stack([tf.shape(inputs)[0], 2]), minval=0, maxval=1)
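
A quick runnable check of this idea (the input tensor here is a stand-in with the shapes from the question):

import tensorflow as tf

inputs = tf.zeros((40, 250, 500, 1))  # stand-in batch; shape[0] may be None in graph mode
batch_size = tf.shape(inputs)[0]      # resolved at run time, unlike inputs.shape[0]
l = tf.random.uniform(tf.stack([batch_size, 2]), minval=0, maxval=1)
print(l.shape)                        # (40, 2)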

How to change dimension of input during TensorFlow import_graph_def

My scenario:
Define an RNN model structure and train it using an input with fixed batch size and sequence length.
Freeze the model (i.e. converting all trainable variables into constants) producing a GraphDef containing everything one needs to use the model at test-time (via tf.graph_util.convert_variables_to_constants).
Import the GraphDef via tf.import_graph_def and replace the input using the input_map argument. The new input needs to have arbitrary batch size and sequence length.
The problem: all of the above works until I pass an input to the test-time graph that uses a batch size or sequence length differing from the original sizes used at training time. At that point I get an error like this:
InvalidArgumentError (see above for traceback): ConcatOp : Dimensions of inputs should match: shape[0] = [1,5] vs. shape[1] = [2,7]
[[Node: import/rnn/while/basic_rnn_cell/basic_rnn_cell_1/concat = ConcatV2[N=2, T=DT_FLOAT, Tidx=DT_INT32, _device="/job:localhost/replica:0/task:0/cpu:0"](import/rnn/while/TensorArrayReadV3, import/rnn/while/Identity_2, import/rnn/while/basic_rnn_cell/basic_rnn_cell_1/concat/axis)]]
To illustrate and reproduce the problem, please consider the following minimal examples.
v1: a graph is created with arbitrary batch size and sequence length. This works fine, but unfortunately I must use a fixed batch size and sequence length at training time and an arbitrary batch size and sequence length at test time, so I can't use this simple approach.
v2a: we simulate creating the training-time graph with fixed batch size (2) and sequence length (3) and freeze the graph.
v2ba: we demonstrate that loading the frozen model unchanged still produces the same results.
v2bb: we demonstrate that loading the frozen model with a replaced input that still uses a fixed batch size and sequence length produces the same results.
v2bc: we demonstrate that loading the frozen model with a replaced input that uses an arbitrary batch size and sequence length still produces the same results, as long as the input is shaped according to the original batch size and sequence length. It works with data1 but fails with data2, the only difference being that the batch size of the former is 2 and the batch size of the latter is 1.
Is it possible to change an RNN graph via the input_map argument to tf.import_graph_def such that the input no longer has a fixed batch size and sequence length?
The following code works with TensorFlow 1.1 RC2 and may work with TensorFlow 1.0.
import numpy
import tensorflow as tf
from tensorflow import graph_util as tf_graph_util
from tensorflow.contrib import rnn as tfc_rnn


def v1(data):
    with tf.Graph().as_default():
        tf.set_random_seed(1)
        x = tf.placeholder(tf.float32, shape=(None, None, 5))
        _, s = tf.nn.dynamic_rnn(tfc_rnn.BasicRNNCell(7), x, dtype=tf.float32)

        with tf.Session() as session:
            session.run(tf.global_variables_initializer())
            print session.run(s, feed_dict={x: data})


def v2a():
    with tf.Graph().as_default():
        tf.set_random_seed(1)
        x = tf.placeholder(tf.float32, shape=(2, 3, 5), name="x")
        _, s = tf.nn.dynamic_rnn(tfc_rnn.BasicRNNCell(7), x, dtype=tf.float32)

        with tf.Session() as session:
            session.run(tf.global_variables_initializer())
            return tf_graph_util.convert_variables_to_constants(
                session, session.graph_def, [s.op.name]), s.name


def v2ba((graph_def, s_name), data):
    with tf.Graph().as_default():
        x, s = tf.import_graph_def(graph_def,
                                   return_elements=["x:0", s_name])

        with tf.Session() as session:
            print '2ba', session.run(s, feed_dict={x: data})


def v2bb((graph_def, s_name), data):
    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32, shape=(2, 3, 5))
        [s] = tf.import_graph_def(graph_def, input_map={"x:0": x},
                                  return_elements=[s_name])

        with tf.Session() as session:
            print '2bb', session.run(s, feed_dict={x: data})


def v2bc((graph_def, s_name), data):
    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32, shape=(None, None, 5))
        [s] = tf.import_graph_def(graph_def, input_map={"x:0": x},
                                  return_elements=[s_name])

        with tf.Session() as session:
            print '2bc', session.run(s, feed_dict={x: data})


def main():
    data1 = numpy.random.random_sample((2, 3, 5))
    data2 = numpy.random.random_sample((1, 3, 5))

    v1(data1)

    model = v2a()
    v2ba(model, data1)
    v2bb(model, data1)
    v2bc(model, data1)
    v2bc(model, data2)


if __name__ == "__main__":
    main()
This is a bug in TensorFlow that has been open for a while: you cannot reliably replace a placeholder that has a defined shape with another one whose shape is (partially) undefined.
You will find a related issue filed here, which apparently did not get much attention.
