Multi-class Object Detection and Classification using Fast R-CNN - Python

I'm trying to build a model (VGG-16) that uses Fast R-CNN for object detection.
In short, I want to find an object in an image and put a bounding box where the object is.
I've already tried multiple ways of getting this to work, but I keep getting errors, most of them related to the RoiPoolingLayer and the loss functions.
Can you guys guide me on what I'm doing wrong?
This is my code at the moment:
import pickle
import numpy
import tensorflow
from keras import Input, Model
from keras.initializers.initializers_v1 import RandomNormal
from keras.layers import Flatten, TimeDistributed, Dense, Dropout
from sklearn.preprocessing import LabelBinarizer
from tensorflow.keras.optimizers import Adam
from tensorflow.python.keras.regularizers import l2
from data import get_data, get_train_data
from rcnn.config import Config
import tensorflow as tf
from tensorflow.keras.layers import Layer


class RoiPoolingConv(Layer):
    def __init__(self, pool_size, **kwargs):
        self.pool_size = pool_size
        super(RoiPoolingConv, self).__init__(**kwargs)

    def build(self, input_shape):
        self.nb_channels = input_shape[0][3]
        super(RoiPoolingConv, self).build(input_shape)

    def compute_output_shape(self, input_shape):
        return None, None, self.pool_size, self.pool_size, self.nb_channels

    def crop_and_resize(self, image, boxes):
        box_ind = tf.range(tf.shape(boxes)[0])
        box_ind = tf.reshape(box_ind, (-1, 1))
        box_ind = tf.tile(box_ind, [1, tf.shape(boxes)[1]])
        boxes = tf.keras.backend.cast(
            tf.reshape(boxes, (-1, 4)), "float32"
        )
        box_ind = tf.reshape(box_ind, (1, -1))[0]
        result = tf.image.crop_and_resize(image, boxes, box_ind, [self.pool_size, self.pool_size])
        result = tf.reshape(result, (tf.shape(image)[0], -1, self.pool_size, self.pool_size, self.nb_channels))
        return result

    def call(self, x, mask=None):
        assert (len(x) == 2)
        img = x[0]
        rois = x[1]
        print(x)
        print(img)
        print(rois)
        x1 = rois[:, 0]
        y1 = rois[:, 1]
        x2 = rois[:, 2]
        y2 = rois[:, 3]
        boxes = tf.stack([y1, x1, y2, x2], axis=-1)
        print(boxes)
        rs = self.crop_and_resize(img, boxes)
        print(rs)
        return rs

    def get_config(self):
        config = {'pool_size': self.pool_size}
        base_config = super(RoiPoolingConv, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
PROPERTIES = Config()


def prepare_model(
        model_path="model\\FastRCNN.h5"
):
    roi_input = Input(shape=(None, 4), name="input_2")
    model_cnn = tensorflow.keras.applications.VGG16(
        include_top=True,
        weights='imagenet'
    )
    model_cnn.trainable = True
    x = model_cnn.layers[17].output
    x = RoiPoolingConv(7)([x, roi_input])
    x = TimeDistributed(Flatten())(x)
    softmaxhead = Dense(4096, activation='relu', kernel_initializer=RandomNormal(stddev=0.01), kernel_regularizer=l2(0.0005), bias_regularizer=l2(0.0005))(x)
    softmaxhead = Dropout(0.5)(softmaxhead)
    softmaxhead = Dense(4096, activation='relu', kernel_initializer=RandomNormal(stddev=0.01), kernel_regularizer=l2(0.0005), bias_regularizer=l2(0.0005))(softmaxhead)
    softmaxhead = Dropout(0.5)(softmaxhead)
    softmaxhead = Dense(20, activation='softmax', kernel_initializer='zero', name='class_label')(softmaxhead)
    bboxhead = Dense(128, activation='relu')(x)
    bboxhead = Dense(64, activation='relu')(bboxhead)
    bboxhead = Dense(32, activation='relu')(bboxhead)
    bboxhead = Dense(4, activation='sigmoid', name='bounding_box')(bboxhead)
    model_final = Model(inputs=[model_cnn.input, roi_input], outputs=(bboxhead, softmaxhead))
    opt = Adam(learning_rate=0.0001)
    losses = {
        "class_label": PROPERTIES.CLASS_LABEL_LOSSES,
        "bounding_box": PROPERTIES.BOUNDING_BOX_LOSSES
    }
    lossWeights = {
        "class_label": PROPERTIES.LOSS_WEIGHTS,
        "bounding_box": PROPERTIES.LOSS_WEIGHTS
    }
    model_final.compile(
        loss=losses,
        optimizer=opt,
        metrics=["accuracy"],
        loss_weights=lossWeights
    )
    tensorflow.keras.utils.plot_model(
        model_final,
        "model.png",
        show_shapes=True,
        show_layer_names=False,
        rankdir='TB'
    )
    model_final.save(model_path)
    return model_final
def train_RCNN_VGG(path):
    # get voc data
    all_data, classes_count, class_mapping = get_data(path)
    tr_images, tr_labels_rois, tr_bboxes_rois, tr_bboxes_gt = get_train_data(all_data)
    # val_images, val_labels, val_bboxes = get_validation_data(all_data)
    # delete unnecessary data
    del classes_count
    del class_mapping
    del all_data
    # convert to numpy array
    tr_images = numpy.array(tr_images, dtype="float32")
    tr_bboxes_rois = numpy.array(tr_bboxes_rois, dtype="float32")
    tr_bboxes_gt = numpy.array(tr_bboxes_gt, dtype="float32")
    tr_labels_rois = numpy.array(tr_labels_rois)
    print(tr_images.shape)
    print(tr_bboxes_rois.shape)
    print(tr_bboxes_gt.shape)
    print(tr_labels_rois.shape)
    # same for validation data
    # val_images = numpy.array(val_images, dtype="float32")
    # val_bboxes = numpy.array(val_bboxes, dtype="float32")
    # val_labels = numpy.array(val_labels)
    # use label binarizer for signing which class/label is for image
    labelBinarizer = LabelBinarizer()
    tr_labels_rois = labelBinarizer.fit_transform(tr_labels_rois)
    # val_labels = labelBinarizer.fit_transform(val_labels)
    classes = len(labelBinarizer.classes_)
    # load model, provide number of classes
    # model_vgg = load_model_or_construct(classes)
    model_vgg = prepare_model()
    # define a dictionary to set the loss methods
    losses = {
        "class_label": PROPERTIES.CLASS_LABEL_LOSSES,
        "bounding_box": PROPERTIES.BOUNDING_BOX_LOSSES
    }
    # define a dictionary that specifies the weights per loss
    lossWeights = {
        "class_label": PROPERTIES.LOSS_WEIGHTS,
        "bounding_box": PROPERTIES.LOSS_WEIGHTS
    }
    # initialize the optimizer, compile the model, and show the model
    opt = Adam(learning_rate=PROPERTIES.LEARNING_RATE)
    model_vgg.compile(loss=losses, optimizer=opt, metrics=["accuracy"], loss_weights=lossWeights)
    # construct a dictionary for our target training outputs, for our target testing
    trainTargets = {
        "class_label": tr_labels_rois,
        "bounding_box": tr_bboxes_gt
    }
    # validationTargets = {
    #     "class_label": val_labels,
    #     "bounding_box": val_bboxes
    # }
    # train the network for bounding box regression and class label
    H = model_vgg.fit(
        [tr_images, tr_bboxes_rois], trainTargets,
        # validation_data=(val_images, validationTargets),
        batch_size=PROPERTIES.BATCH_SIZE,
        epochs=PROPERTIES.EPOCHS,
        verbose=PROPERTIES.VERBOSE)
    # save model, print summary
    model_vgg.save(PROPERTIES.RCNN_MODEL_NAME, save_format=PROPERTIES.RCNN_MODEL_FORMAT)
    model_vgg.summary()
    # save binarizer
    f = open(PROPERTIES.BINARIZER_NAME, "wb")
    f.write(pickle.dumps(labelBinarizer))
    f.close()


if __name__ == '__main__':
    # load rcnn
    train_RCNN_VGG(PROPERTIES.DATASET_PATH)
I'm creating the RoiPooling layer and the VGG-16 architecture, loading pre-trained weights, and making my own output layers: I have 20 classes (based on the VOC 2012 data), which is why the first output has 20 units, and the second has 4 for the bounding box coordinates.
In the train method you can see I'm printing the shapes of the data I'm delivering. They are:
(1048, 224, 224, 3)
(1048, 4)
(1048, 4)
(1048,)
The first is 1048 images of 224x224 RGB.
The second is 1048 ROI coordinates prepared for 224x224.
The third is 1048 ground-truth bboxes.
The fourth is 1048 labels; after binarization each looks like [[0, 0, 0, 0, 0, 0, ... 1, 0, 0], [0, ...]] (19 zeros and a single 1 for the correct label).
I based this on: https://www.pyimagesearch.com/2020/10/12/multi-class-object-detection-and-bounding-box-regression-with-keras-tensorflow-and-deep-learning/
Currently I have this error:
Traceback (most recent call last):
File "C:\Users\Karol\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\Karol\anaconda3\lib\site-packages\tensorflow\python\framework\func_graph.py", line 1129, in autograph_handler
raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:
File "C:\Users\Karol\anaconda3\lib\site-packages\keras\engine\training.py", line 878, in train_function *
return step_function(self, iterator)
File "C:\Users\Karol\anaconda3\lib\site-packages\keras\engine\training.py", line 867, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\Users\Karol\anaconda3\lib\site-packages\keras\engine\training.py", line 860, in run_step **
outputs = model.train_step(data)
File "C:\Users\Karol\anaconda3\lib\site-packages\keras\engine\training.py", line 809, in train_step
loss = self.compiled_loss(
File "C:\Users\Karol\anaconda3\lib\site-packages\keras\engine\compile_utils.py", line 201, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "C:\Users\Karol\anaconda3\lib\site-packages\keras\losses.py", line 141, in __call__
losses = call_fn(y_true, y_pred)
File "C:\Users\Karol\anaconda3\lib\site-packages\keras\losses.py", line 245, in call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "C:\Users\Karol\anaconda3\lib\site-packages\keras\losses.py", line 1664, in categorical_crossentropy
return backend.categorical_crossentropy(
File "C:\Users\Karol\anaconda3\lib\site-packages\keras\backend.py", line 4994, in categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
ValueError: Shapes (None, 20) and (None, None, 20) are incompatible
python-BaseException
So, my question is: what am I missing? Is my data preprocessing incorrect? I'm trying to teach my model to recognize 20 classes and to point out where in the image the object probably is, but I guess I must be delivering the data incorrectly.
Just to make something clear: I'm using categorical cross-entropy for "class_label" and mean average precision for "bounding_box".
Maybe I'm just using the wrong loss functions?
Please help.

Try using the loss tf.keras.losses.SparseCategoricalCrossentropy (which takes integer labels) instead, or make sure your labels really are in one-hot format if you stay with categorical cross-entropy, for the reasons pointed out here:
Getting a ValueError in tensorflow saying that my shapes are incompatible
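For context, the classification head above sits inside a TimeDistributed wrapper, so the model predicts one distribution per ROI and its output has shape (batch, num_rois, 20), while the fitted targets have shape (batch, 20) — exactly the mismatch the error reports. A minimal sketch of reshaping the targets so the shapes line up (an assumption, not from the original answer: it takes the shapes printed in the question to mean one ROI per image):

import numpy as np

# tr_labels_rois: (1048, 20) one-hot labels; tr_bboxes_rois: (1048, 4) ROIs
num_rois = 1  # the printed shapes suggest a single ROI per image

# give the ROIs the (batch, num_rois, 4) layout that Input(shape=(None, 4)) expects
tr_bboxes_rois = tr_bboxes_rois.reshape(-1, num_rois, 4)

# tile the one-hot labels across the ROI axis: (1048, num_rois, 20)
tr_labels_rois = np.repeat(tr_labels_rois[:, None, :], num_rois, axis=1)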

The explanation of the RoiPoolingLayer says that the shapes of the inputs must be:
[(batch_size, pooled_height, pooled_width, n_channels) for the feature map,
and (batch_size, num_rois, 4) for the regions of interest],
but in your work you did not add the batch_size dimension.
Try this:
model_cnn.trainable = True
x = model_cnn.layers[17].output
x = np.expand_dims(x, axis=0)
x = RoiPoolingConv(7)([x, roi_input])
x = TimeDistributed(Flatten())(x)
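Note that np.expand_dims operates on NumPy arrays, while x here is a symbolic Keras tensor; inside the model graph the equivalent would be a graph op. A sketch of the same idea with tf.expand_dims (assuming the extra axis is really what RoiPoolingConv expects):

import tensorflow as tf
from tensorflow.keras.layers import Lambda

x = model_cnn.layers[17].output
# add the extra leading axis inside the graph rather than with NumPy
x = Lambda(lambda t: tf.expand_dims(t, axis=0))(x)
x = RoiPoolingConv(7)([x, roi_input])
x = TimeDistributed(Flatten())(x)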

"Got OperatorNotAllowedInGraphError: iterating over tf.Tensor" while not obviously iterating over tensor

OS: Manjaro Linux x64
CUDA: 11.0.3
TF/Keras: 2.4(.1)
Py: 3.8
Hello,
I'm trying to build some kind of W-VAE-GAN. I keep running into that very same error over and over again, and I already had that problem with Keras 2.3, though interestingly NOT with TF/K 2.2. Unfortunately I need to use Keras 2.4 because I'm supposed to run my code on our university server with these exact TF/K and CUDA versions. At this point I'm just trying to make sure my code works as intended.
The error I get is the following, with the exact lines commented in my code:
...
Epoch 1/20
Traceback (most recent call last):
File "/home/peer/Programmierkram/BA/Metal_GAN/wvaegan.py", line 282, in <module>
gan.fit(gen_trans, batch_size=batch_size, epochs=epochs, callbacks=[callback])
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py", line 1100, in fit
tmp_logs = self.train_function(iterator)
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py", line 828, in __call__
result = self._call(*args, **kwds)
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py", line 871, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py", line 725, in _initialize
self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 2969, in _get_concrete_function_internal_garbage_collected
graph_function, _ = self._maybe_define_function(args, kwargs)
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 3361, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 3196, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py", line 990, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py", line 634, in wrapped_fn
out = weak_wrapped_fn().__wrapped__(*args, **kwds)
File "/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py", line 977, in wrapper
raise e.ag_error_metadata.to_exception(e)
tensorflow.python.framework.errors_impl.OperatorNotAllowedInGraphError: in user code:
/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:805 train_function *
return step_function(self, iterator)
/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:795 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:1259 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2730 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:3417 _call_for_each_replica
return fn(*args, **kwargs)
/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:788 run_step **
outputs = model.train_step(data)
/home/peer/Programmierkram/BA/Metal_GAN/wvaegan.py:190 train_step
z_mean, z_log_var, z = self.encoder(clip_img) # <------ WHY NO ERROR HERE?
/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:505 __iter__
self._disallow_iteration()
/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:498 _disallow_iteration
self._disallow_when_autograph_enabled("iterating over `tf.Tensor`")
/home/peer/Programmierkram/BA/Metal_GAN/venv/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:474 _disallow_when_autograph_enabled
raise errors.OperatorNotAllowedInGraphError(
OperatorNotAllowedInGraphError: iterating over `tf.Tensor` is not allowed: AutoGraph did convert this function. This might indicate you are trying to use an unsupported feature.
What I do not understand is how this error comes about in the first place, as I successfully executed the VAE part using TF 2.2 without any error like this, and, more importantly, there is no iteration over any tensor obvious to me. Even if I comment out the for-loop in my train_step, the same error occurs a few lines later in the same context. I have also tried decorating train_step() with @tf.function, yet nothing changed.
The code I used is the following:
import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow.keras.backend as K
from keras.preprocessing.image import ImageDataGenerator
import itertools
import scipy.io
import matplotlib.pyplot as plt
import matplotlib.image as PIL

runOnGPU = 0

if runOnGPU == 1:
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
        except RuntimeError as e:
            print(e)
else:
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

path_clipped_train = os.path.join('./sinograms/clip')
path_transparent_train = os.path.join('./sinograms/transparent')

img_width, img_height = 512, 512
bottleneck = 1024 * 2
filters = (1024, 512, 256, 64)
filter_size = (3, 3, 3, 3)
batch_size = 4
epochs = 20
dsc_steps = 1
gp_w = 10.0
beta_v = 2
learning_rate = 125e-5
latent_dim = 2
input_shape = (1, img_width, img_height, 1)

dataset_gen1 = ImageDataGenerator(rescale=1 / 255, dtype="float32")
dataset_gen2 = ImageDataGenerator(rescale=1 / 255, dtype="float32")

gen_trans = dataset_gen1.flow_from_directory(path_transparent_train,
                                             target_size=(img_width, img_height),
                                             color_mode='grayscale',
                                             classes=[''],
                                             class_mode=None,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             )
gen_clip = dataset_gen2.flow_from_directory(path_clipped_train,
                                            target_size=(img_width, img_height),
                                            color_mode='grayscale',
                                            classes=[''],
                                            class_mode=None,
                                            batch_size=batch_size,
                                            shuffle=False,
                                            )
class Sampling(layers.Layer):
    def call(self, inputs):
        z_mean, z_log_var = inputs
        batch = tf.shape(z_mean)[0]
        dim = tf.shape(z_mean)[1]
        epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
        return z_mean + tf.exp(0.5 * z_log_var) * epsilon


def get_encoder():
    encoder_inputs = keras.Input(shape=input_shape[1:], name="encoder_input")
    enc = encoder_inputs
    for (numFilters, szFilters) in zip(filters, filter_size):
        enc = layers.Conv2D(numFilters, szFilters, activation='relu', strides=2, padding='same')(enc)
        enc = layers.BatchNormalization()(enc)
        enc = layers.Dropout(0.2)(enc)
    conv_shape = K.int_shape(enc)[1:]
    enc = layers.Flatten()(enc)
    enc = layers.Dense(bottleneck, activation='relu', name="bottleneck")(enc)
    enc = layers.BatchNormalization()(enc)
    z_mean = layers.Dense(latent_dim, name="z_mean")(enc)
    z_log_var = layers.Dense(latent_dim, name="z_log_var")(enc)
    latent_z = Sampling()([z_mean, z_log_var])
    encoder_model = keras.models.Model(encoder_inputs, latent_z, name="encoder")
    return encoder_model, conv_shape


enc_model, conv_shape = get_encoder()
enc_model.summary()


def get_decoder():
    latent_input = keras.Input(shape=(latent_dim,))
    dec = layers.Dense(conv_shape[0] * conv_shape[1] * conv_shape[2], activation='relu')(latent_input)
    dec = layers.Reshape(conv_shape)(dec)
    for (numFilters, szFilters) in zip(reversed(filters), reversed(filter_size)):
        dec = layers.Conv2DTranspose(numFilters, szFilters, activation='relu', strides=2, padding='same')(dec)
        dec = layers.BatchNormalization()(dec)
        dec = layers.Dropout(0.2)(dec)
    decoder_outputs = layers.Conv2DTranspose(1, 3, activation='relu', padding='same')(dec)
    decoder_model = keras.models.Model(latent_input, decoder_outputs, name="decoder")
    return decoder_model


dec_model = get_decoder()
dec_model.summary()


def get_discriminator():
    dscr_input = keras.Input(shape=input_shape[1:])
    dscr = dscr_input
    for numFilters in filters:
        dscr = layers.Conv2D(numFilters, kernel_size=5, activation='relu', strides=2, padding='same')(dscr)
    dscr = layers.Flatten()(dscr)
    dscr = layers.Dense(1, activation="relu", name="dsc_end")(dscr)
    discriminator_model = keras.models.Model(dscr_input, dscr, name="discriminator")
    return discriminator_model


dsc_model = get_discriminator()
dsc_model.summary()
class GAN(keras.Model):
    def __init__(self,
                 discriminator,
                 encoder,
                 decoder,
                 latent_dim,
                 dsc_steps=dsc_steps,
                 gp_w=gp_w,
                 ):
        super(GAN, self).__init__()
        self.discriminator = discriminator
        self.encoder = encoder
        self.decoder = decoder
        self.latent_dim = latent_dim
        self.dsc_steps = dsc_steps
        self.gp_w = gp_w

    def compile(self,
                dsc_optimizer, enc_optimizer, dec_optimizer,
                dsc_loss_fn, enc_loss_fn, dec_loss_fn):
        super(GAN, self).compile()
        self.dsc_optimizer = dsc_optimizer
        self.enc_optimizer = enc_optimizer
        self.dec_optimizer = dec_optimizer
        self.dsc_loss_fn = dsc_loss_fn
        self.enc_loss_fn = enc_loss_fn
        self.dec_loss_fn = dec_loss_fn

    def call(self, data):
        ds = self.discriminator(data)
        e = self.encoder(data)
        d = self.decoder(e)

    def gradient_penalty(self, batch_size, ref_img, gen_img):
        alpha = tf.random.normal([batch_size, 1, 1, 1], 0.0, 1.0)
        diff = gen_img - ref_img
        interpolated = ref_img + alpha * diff
        with tf.GradientTape() as gp_tape:
            gp_tape.watch(interpolated)
            pred = self.discriminator(interpolated, training=True)
        grads = gp_tape.gradient(pred, [interpolated])[0]
        norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
        gp = tf.reduce_mean((norm - 1.0) ** 2)
        return gp

    # @tf.function  # doesn't make any difference if decorating with that
    def train_step(self, data):
        trans_img = data
        clip_img = data
        batch_size = tf.shape(trans_img)[:1]
        for i in range(self.dsc_steps):
            with tf.GradientTape() as tape:
                z_mean, z_log_var, z = self.encoder(clip_img)  # <------ ERROR HERE
                gen_img = self.decoder(z)
                gen_logits = self.discriminator(gen_img)
                ref_logits = self.discriminator(trans_img)
                dsc_cost = self.dsc_loss_fn(ref_img=ref_logits, gen_img=gen_logits)
                gp = self.gradient_penalty(batch_size, trans_img, gen_img)
                dsc_loss = dsc_cost + gp * self.gp_w
            dsc_gradient = tape.gradient(dsc_loss, self.discriminator.trainable_variables)
            self.dsc_optimizer.apply_gradients(zip(dsc_gradient, self.discriminator.trainable_variables))
        with tf.GradientTape() as tape:
            z_mean, z_log_var, z = self.encoder(clip_img)  # <------ ERROR ALSO HERE IF dsc_steps = 0
            gen_img = self.decoder(z)
            gen_img_logits = self.discriminator(gen_img)
            dec_loss = self.dec_loss_fn(gen_img_logits)
            kl_loss = self.kl_loss(z_mean, z_log_var)
        enc_gradient = tape.gradient(kl_loss, self.encoder.trainable_variables)
        self.enc_optimizer.apply_gradients(zip(enc_gradient, self.encoder.trainable_variables))
        dec_gradient = tape.gradient(dec_loss, self.decoder.trainable_variables)
        self.dec_optimizer.apply_gradients(zip(dec_gradient, self.decoder.trainable_variables))
        return {"dsc_loss": dsc_loss, "KL-Loss": kl_loss, "dec_loss": dec_loss}


class GANMonitor(keras.callbacks.Callback):
    def __init__(self, num_img=6, latent_dim=latent_dim):
        self.num_img = num_img
        self.latent_dim = latent_dim

    def on_epoch_end(self, epoch, logs=None):
        generated_images = self.model.decoder()
        generated_images = (generated_images * 127.5) + 127.5
        for i in range(self.num_img):
            img = generated_images[i].numpy()
            img = keras.preprocessing.image.array_to_img(img)
            img.save("generated_img_{i}_{epoch}.png".format(i=i, epoch=epoch))


encoder_optimizer = keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5, beta_2=0.9)
decoder_optimizer = keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5, beta_2=0.9)
discriminator_optimizer = keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5, beta_2=0.9)


def discriminator_loss(real_img, fake_img):
    real_loss = tf.reduce_mean(real_img)
    fake_loss = tf.reduce_mean(fake_img)
    return fake_loss - real_loss


def generator_loss(fake_img):
    return -tf.reduce_mean(fake_img)


def kl_loss(z_mean, z_log_var):
    kl_loss = -0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))
    kl_loss = tf.reduce_mean(tf.reduce_sum(kl_loss, axis=1))
    return beta_v * kl_loss


def reconstruction_loss(data, reconstruction):
    rec_loss = tf.reduce_mean(
        tf.reduce_sum(keras.losses.mse(data, reconstruction), axis=(1, 2))
    )
    return rec_loss


callback = GANMonitor(num_img=3, latent_dim=latent_dim)

gan = GAN(
    discriminator=dsc_model,
    encoder=enc_model,
    decoder=dec_model,
    latent_dim=latent_dim,
    dsc_steps=dsc_steps,
)

gan.compile(
    dsc_optimizer=discriminator_optimizer,
    enc_optimizer=encoder_optimizer,
    dec_optimizer=decoder_optimizer,
    dsc_loss_fn=discriminator_loss,
    enc_loss_fn=kl_loss,
    dec_loss_fn=generator_loss,
)

gan.fit(gen_trans, batch_size=batch_size, epochs=epochs, callbacks=[callback])
I'd be very thankful for any help, because I haven't been able to find or work out any solution to this. I've read that in TF 2.5 some errors like this shouldn't occur anymore, but using TF 2.5 is not an option.
I found the problem.
In the original Keras example for a VAE, the encoder ends in three layers, namely z_mean, z_log_var and latent_z. While it was possible to access all terminal layers of a model in TF 2.2, as I did in my train_step with
z_mean, z_log_var, z = encoder(data)
only (latent_)z is returned here, as defined in the encoder model initialization.
By defining the model with
encoder_model = keras.models.Model(encoder_inputs, ([z_mean, z_log_var, latent_z]), name="encoder")
i.e. with a list of output layers, all z* are accessible.
I assume that unpacking multiple variables from a single-output model, as in
x1, x2, x3 = model(data)
makes Python try to iterate over the single returned tensor, which is the only explanation for an iteration over a tensor I can think of.
However, reading code might be useful; I'll try to remember that.
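To illustrate (a minimal sketch with hypothetical layer sizes, reusing the Sampling layer from the code above): a single-output model returns one tensor, so tuple-unpacking its result triggers the disallowed iteration, while a model built with a list of outputs returns a list that unpacks cleanly:

import tensorflow as tf
from tensorflow import keras

inp = keras.Input(shape=(8,))
z_mean = keras.layers.Dense(2, name="z_mean")(inp)
z_log_var = keras.layers.Dense(2, name="z_log_var")(inp)
latent_z = Sampling()([z_mean, z_log_var])

# single output: model(x) is one tensor; `m, v, z = enc_single(x)` iterates over it
enc_single = keras.models.Model(inp, latent_z)

# list of outputs: model(x) is a list of three tensors, safe to unpack
enc_multi = keras.models.Model(inp, [z_mean, z_log_var, latent_z])
m, v, z = enc_multi(tf.zeros((4, 8)))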

model(features) does not return EagerTensor

I have built a model that I can only train with a custom loss, which I am trying to debug.
For this I have this simple loop:
for (mel_specs, pred_inp), labels in train_dataset:
    enc_predictions = model((mel_specs, pred_inp))  # <--- Returns a Tensor, not an EagerTensor
    input_lengths = get_padded_length(mel_specs[:, :, 0])
    label_lengths = get_padded_length(labels)
    print(enc_predictions)
    loss_value = rnnt_loss(enc_predictions, labels, input_lengths, label_lengths)
    print(loss_value)
The model is just:
model = tf.keras.Model(
    inputs=[mel_specs, pred_inp],
    outputs=[outputs]
)
The problem is that model((mel_specs, pred_inp)) just gives me a regular Tensor but not an EagerTensor, and I don't understand why. mel_specs and pred_inp are EagerTensors coming from train_dataset, which is a tf.data.Dataset.
What am I missing here?
Environment
$ pip freeze | grep tensorflow
tensorflow==2.2.0
tensorflow-addons==0.10.0
tensorflow-datasets==3.1.0
tensorflow-estimator==2.2.0
tensorflow-metadata==0.22.2
warprnnt-tensorflow==0.1
Update: MVCE
I was able to boil it down to the encoder part of the model. If I run this it will fail and print:
Calling model(x) didn't return EagerTensor
Traceback (most recent call last):
...
return loss_value, tape.gradient(loss_value, model.trainable_variables)
File "/home/sfalk/miniconda3/envs/asr2/lib/python3.8/site-packages/tensorflow/python/eager/backprop.py", line 1042, in gradient
flat_grad = imperative_grad.imperative_grad(
File "/home/sfalk/miniconda3/envs/asr2/lib/python3.8/site-packages/tensorflow/python/eager/imperative_grad.py", line 71, in imperative_grad
return pywrap_tfe.TFE_Py_TapeGradient(
File "/home/sfalk/miniconda3/envs/asr2/lib/python3.8/site-packages/tensorflow/python/eager/backprop.py", line 157, in _gradient_function
return grad_fn(mock_op, *out_grads)
File "/home/sfalk/miniconda3/envs/asr2/lib/python3.8/site-packages/tensorflow/python/ops/math_grad.py", line 252, in _MeanGrad
sum_grad = _SumGrad(op, grad)[0]
File "/home/sfalk/miniconda3/envs/asr2/lib/python3.8/site-packages/tensorflow/python/ops/math_grad.py", line 211, in _SumGrad
output_shape_kept_dims = math_ops.reduced_shape(input_shape,
File "/home/sfalk/miniconda3/envs/asr2/lib/python3.8/site-packages/tensorflow/python/ops/math_ops.py", line 3735, in reduced_shape
input_shape = input_shape.numpy()
AttributeError: 'Tensor' object has no attribute 'numpy'
The code:
import numpy as np
import tensorflow as tf
from tensorflow.python.framework.ops import EagerTensor


class TimeReduction(tf.keras.layers.Layer):
    def __init__(self,
                 reduction_factor,
                 batch_size=None,
                 **kwargs):
        super(TimeReduction, self).__init__(**kwargs)
        self.reduction_factor = reduction_factor
        self.batch_size = batch_size

    def call(self, inputs):
        input_shape = tf.shape(inputs)
        batch_size = self.batch_size
        if batch_size is None:
            batch_size = input_shape[0]
        max_time = input_shape[1]
        num_units = inputs.get_shape().as_list()[-1]
        outputs = inputs
        paddings = [[0, 0], [0, tf.math.floormod(max_time, self.reduction_factor)], [0, 0]]
        outputs = tf.pad(outputs, paddings)
        return tf.reshape(outputs, (batch_size, -1, num_units * self.reduction_factor))


def make_encoder_model(
        input_shape: tuple,
        out_dim: int,
        num_layers: int,
        d_model: int,
        proj_size,
        initializer=None,
        dtype=tf.float32,
        stateful: bool = False,
        dropout=0.5,
        reduction_index=1,
        reduction_factor=2,
):
    def lstm_cell():
        return tf.compat.v1.nn.rnn_cell.LSTMCell(
            d_model,
            num_proj=proj_size,
            initializer=initializer,
            dtype=dtype
        )

    batch_size = None if not stateful else 1

    inputs = tf.keras.Input(
        shape=input_shape,
        batch_size=batch_size,
        dtype=tf.float32
    )

    x = tf.keras.layers.BatchNormalization()(inputs)

    for i in range(num_layers):
        rnn_layer = tf.keras.layers.RNN(lstm_cell(), return_sequences=True, stateful=stateful)
        x = rnn_layer(x)
        x = tf.keras.layers.Dropout(dropout)(x)
        x = tf.keras.layers.LayerNormalization(dtype=dtype)(x)
        if i == reduction_index:
            x = TimeReduction(reduction_factor, batch_size=batch_size)(x)

    outputs = tf.keras.layers.Dense(out_dim)(x)

    return tf.keras.Model(
        inputs=[inputs],
        outputs=[outputs],
        name='encoder'
    )


def gradient(model, loss, inputs, y_true):
    y_true = tf.transpose(y_true, perm=(0, 2, 1))
    with tf.GradientTape() as tape:
        y_pred = model(inputs, training=True)
        loss_value = loss(y_true=y_true, y_pred=y_pred)
    return loss_value, tape.gradient(loss_value, model.trainable_variables)


def main():
    X, Y = [
        np.random.rand(100, 512),
        np.random.rand(100, 512)
    ], [[[0] * 50], [[1] * 50]]

    # assert len(X) == len(Y)

    encoder_model = make_encoder_model(
        input_shape=(None, 512),
        out_dim=1,
        num_layers=2,
        d_model=10,
        proj_size=23,
        dropout=0.5,
        reduction_index=1,
        reduction_factor=2
    )

    enc_dataset = tf.data.Dataset.from_generator(
        lambda: zip(X, Y),
        output_types=(tf.float32, tf.int32),
        output_shapes=([None, 512], [None, None]),
    ).batch(2)

    loss = tf.keras.losses.MeanSquaredError()

    for x, y in enc_dataset:
        from_predict = encoder_model.predict(x)
        from_call = encoder_model(x)
        if not isinstance(from_predict, np.ndarray):
            print("Calling model.predict(x) didn't return np.ndarray")
        if not isinstance(from_call, EagerTensor):
            print("Calling model(x) didn't return EagerTensor")
        loss_value, gradients = gradient(encoder_model, loss, x, y)
        print(loss_value)
        print(gradients)

    print('All done.')


if __name__ == '__main__':
    main()
Why do you use the LSTM cell from compat.v1? I would imagine this leads to compatibility issues.
Most importantly, those "pure TensorFlow" RNN cells are not made to be used with the Keras RNN layer anyway -- they were used with tf.nn.dynamic_rnn, for example, which is now deprecated and also found only in the compat.v1 module.
I would recommend that you simply use tf.keras.layers.LSTM directly, as it's much faster anyway -- it allows for the use of highly optimized GPU kernels. Alternatively, you can replace the compat.v1 LSTMCell with a tf.keras.layers.LSTMCell and put that into the RNN.
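A sketch of what that swap could look like inside make_encoder_model from the question (note that tf.keras.layers.LSTM has no num_proj argument, so the cell's projection would need a separate Dense layer):

for i in range(num_layers):
    x = tf.keras.layers.LSTM(d_model, return_sequences=True, stateful=stateful)(x)
    x = tf.keras.layers.Dropout(dropout)(x)
    x = tf.keras.layers.LayerNormalization(dtype=dtype)(x)
    if i == reduction_index:
        x = TimeReduction(reduction_factor, batch_size=batch_size)(x)

# or, keeping a cell-based setup:
# cell = tf.keras.layers.LSTMCell(d_model)
# x = tf.keras.layers.RNN(cell, return_sequences=True, stateful=stateful)(x)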

Research Question (Help): RuntimeError: Input type (torch.FloatTensor) and weight type (torch.cuda.FloatTensor) should be the same

No idea what's happening in my case:
Error message:
Traceback (most recent call last):
File "plot_parametric_pytorch.py", line 127, in <module>
ops = opfun(X_train[smpl])
File "plot_parametric_pytorch.py", line 81, in <lambda>
opfun = lambda X: model.forward(Variable(torch.from_numpy(X)))
File "/mnt_home/klee/LBSBGenGapSharpnessResearch/vgg.py", line 43, in forward
x = self.features(x).to(device)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/torch/nn/modules/container.py", line 100, in forward
input = module(input)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/torch/nn/modules/conv.py", line 349, in forward
return self._conv_forward(input, self.weight)
File "/home/klee/anaconda3/envs/sharpenv/lib/python3.7/site-packages/torch/nn/modules/conv.py", line 346, in _conv_forward
self.padding, self.dilation, self.groups)
RuntimeError: Input type (torch.FloatTensor) and weight type (torch.cuda.FloatTensor) should be the same
Source code:
[import statements]
cudnn.benchmark = True

(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = X_train.astype('float32')
X_train = np.transpose(X_train, axes=(0, 3, 1, 2))
X_test = X_test.astype('float32')
X_test = np.transpose(X_test, axes=(0, 3, 1, 2))
X_train /= 255
X_test /= 255

device = torch.device('cuda:0')

# This is where you can load any model of your choice.
# I stole PyTorch Vision's VGG network and modified it to work on CIFAR-10.
# You can take this line out and add any other network and the code
# should run just fine.
model = vgg.vgg11_bn()
model.to(device)

# Forward pass
opfun = lambda X: model.forward(Variable(torch.from_numpy(X)))  # <------

# Forward pass through the network given the input
predsfun = lambda op: np.argmax(op.data.numpy(), 1)

# Do the forward pass, then compute the accuracy
accfun = lambda op, y: np.mean(np.equal(predsfun(op), y.squeeze())) * 100

# Initial point
x0 = deepcopy(model.state_dict())

# Number of epochs to train for
# Choose a large value since LB training needs higher values
# Changed from 150 to 30
nb_epochs = 30
batch_range = [25, 40, 50, 64, 80, 128, 256, 512, 625, 1024, 1250, 1750, 2048, 2500, 3125, 4096, 5000]

# parametric plot (i.e., don't train the network)
hotstart = False

if not hotstart:
    for batch_size in batch_range:
        optimizer = torch.optim.Adam(model.parameters())
        model.load_state_dict(x0)
        model.to(device)
        average_loss_over_epoch = '-'
        print('Optimizing the network with batch size %d' % batch_size)
        np.random.seed(1337)  # So that both networks see same sequence of batches
        for e in range(nb_epochs):
            model.eval()
            print('Epoch:', e, ' of ', nb_epochs, 'Average loss:', average_loss_over_epoch)
            average_loss_over_epoch = 0
            # Checkpoint the model every epoch
            torch.save(model.state_dict(), "./models/30EpochC3ExperimentBatchSize" + str(batch_size) + ".pth")
            array = np.random.permutation(range(X_train.shape[0]))
            slices = X_train.shape[0] // batch_size
            beginning = 0
            end = 1
            # Training loop!
            for _ in range(slices):
                start_index = batch_size * beginning
                end_index = batch_size * end
                smpl = array[start_index:end_index]
                model.train()
                optimizer.zero_grad()
                ops = opfun(X_train[smpl])  # <-----
                tgts = Variable(torch.from_numpy(y_train[smpl]).long().squeeze())
                loss_fn = F.nll_loss(ops, tgts)
                average_loss_over_epoch += loss_fn.data.numpy() / fractions_of_dataset
                loss_fn.backward()
                optimizer.step()
                beginning += 1
                end += 1
And here is where the model is constructed (and the forward method) [vgg.py]
import torch
import torch.nn as nn
F = nn.functional
import torch.utils.model_zoo as model_zoo
import math

__all__ = [
    'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
    'vgg19_bn', 'vgg19',
]

model_urls = {
    'vgg11': 'https://s3.amazonaws.com/pytorch/models/vgg11-fb7e83b2.pth',
    'vgg13': 'https://s3.amazonaws.com/pytorch/models/vgg13-58758d87.pth',
    'vgg16': 'https://s3.amazonaws.com/pytorch/models/vgg16-82412952.pth',
    'vgg19': 'https://s3.amazonaws.com/pytorch/models/vgg19-341d7465.pth',
}


class VGG(nn.Module):
    def __init__(self, features):
        super(VGG, self).__init__()
        self.features = features.cuda()
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(512, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Linear(4096, 10),
        )
        self._initialize_weights()

    def forward(self, x):
        device = torch.device('cuda:0')
        x.cuda()
        x.to(device)
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return F.log_softmax(x)
My full source code is here https://github.com/kristyelee/LBSBGenGapSharpnessResearch/blob/master/plot_parametric_pytorch.py
https://github.com/kristyelee/LBSBGenGapSharpnessResearch/blob/master/vgg.py
I read this for some information: RuntimeError: Input type (torch.FloatTensor) and weight type (torch.cuda.FloatTensor) should be the same, but I'm not sure how to put the input tensors on the GPU. I kind of tried to do this with x.cuda(), though I'm not sure if this is right or not. I really want to put my model on the GPU to train faster (because currently I'm just using the CPU, which is slow).
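For what it's worth: torch.Tensor.cuda() and .to(device) return a new tensor rather than moving the tensor in place, so the result has to be assigned back. A minimal sketch of the idea, using the names from the code above:

# move the input batch to the model's device before the forward pass
device = torch.device('cuda:0')
opfun = lambda X: model(torch.from_numpy(X).to(device))

# inside VGG.forward the same applies: `x.cuda()` alone does nothing;
# it would have to be `x = x.to(device)` (or dropped entirely, since
# the input is already on the GPU at that point)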

How to read multiple .mat files (which are too large to fit in memory) using tensorflow dataset

I have around 550K samples, each sample being 200x50x1. The size of this dataset is around 57GB.
I want to train a network on this set but I am having trouble reading it.
batch_size = 8

def _read_py_function(filename, labels_slice):
    with h5py.File(filename, 'r') as f:
        data_slice = np.asarray(f['feats'])
        print(data_slice.shape)
    return data_slice, labels_slice

placeholder_files = tf.placeholder(tf.string, [None])
placeholder_labels = tf.placeholder(tf.int32, [None])

dataset = tf.data.Dataset.from_tensor_slices((placeholder_files, placeholder_labels))
dataset = dataset.map(
    lambda filename, label: tuple(tf.py_func(
        _read_py_function, [filename, label], [tf.uint8, tf.int32])))
dataset = dataset.shuffle(buffer_size=50000)
dataset = dataset.batch(batch_size)

iterator = tf.data.Iterator.from_structure(dataset.output_types, dataset.output_shapes)
data_X, data_y = iterator.get_next()
data_y = tf.cast(data_y, tf.int32)

net = conv_layer(inputs=data_X, num_outputs=8, kernel_size=3, stride=2, scope='rcl_0')
net = pool_layer(inputs=net, kernel_size=2, scope='pl_0')
net = dropout_layer(inputs=net, scope='dl_0')
net = flatten_layer(inputs=net, scope='flatten_0')
net = dense_layer(inputs=net, num_outputs=256, scope='dense_0')
net = dense_layer(inputs=net, num_outputs=64, scope='dense_1')
out = dense_layer(inputs=net, num_outputs=10, scope='dense_2')
And I run the session using:
sess.run(train_iterator, feed_dict={placeholder_files: filenames, placeholder_labels: ytrain})
try:
    while True:
        _, loss, acc = sess.run([train_op, loss_op, accuracy_op])
        train_loss += loss
        train_accuracy += acc
except tf.errors.OutOfRangeError:
    pass
But I am getting the following error even before running the session:
Traceback (most recent call last):
File "SFCC-trial-134.py", line 297, in <module>
net = rcnn_layer(inputs=data_X,num_outputs=8, kernel_size=3, stride=2, scope='rcl_0')
File "SFCC-trial-134.py", line 123, in rcnn_layer
reuse=False)
File "SFCC-trial-134.py", line 109, in conv_layer
reuse = reuse
File "/home/priyam.jain/tensorflow-gpu-python3/lib/python3.5/site-packages/tensorflow/contrib/framework/python/ops/arg_scope.py", line 183, in func_with_args
return func(*args, **current_args)
File "/home/priyam.jain/tensorflow-gpu-python3/lib/python3.5/site-packages/tensorflow/contrib/layers/python/layers/layers.py", line 1154, in convolution2d
conv_dims=2)
File "/home/priyam.jain/tensorflow-gpu-python3/lib/python3.5/site-packages/tensorflow/contrib/framework/python/ops/arg_scope.py", line 183, in func_with_args
return func(*args, **current_args)
File "/home/priyam.jain/tensorflow-gpu-python3/lib/python3.5/site-packages/tensorflow/contrib/layers/python/layers/layers.py", line 1025, in convolution
(conv_dims + 2, input_rank))
TypeError: %d format: a number is required, not NoneType
I thought about using TFRecords but had a hard time creating those; I couldn't find a good post where I could learn to create them for my kind of dataset.
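For reference, a minimal sketch of writing such a TFRecord file under the question's TF 1.x API (all_feats and all_labels are hypothetical in-memory iterables; for a 57 GB set you would shard across several files):

import numpy as np
import tensorflow as tf

def serialize_sample(feats, label):
    # feats: a (200, 50, 1) float array; label: an int
    feature = {
        'feats': tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[feats.astype(np.float32).tobytes()])),
        'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(label)])),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature)).SerializeToString()

with tf.python_io.TFRecordWriter('train.tfrecord') as writer:
    for feats, label in zip(all_feats, all_labels):  # hypothetical iterables
        writer.write(serialize_sample(feats, label))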
conv_layer is defined as follows:
def conv_layer(inputs, num_outputs, kernel_size, stride, normalizer_fn=None, activation_fn=nn.relu, trainable=True, scope='noname', reuse=False):
    net = slim.conv2d(inputs=inputs,
                      num_outputs=num_outputs,
                      kernel_size=kernel_size,
                      stride=stride,
                      normalizer_fn=normalizer_fn,
                      activation_fn=activation_fn,
                      trainable=trainable,
                      scope=scope,
                      reuse=reuse
                      )
    return net
Do not pass tf.py_func inside your map function. You can read the image file by passing the function name directly inside your map function. I am posting only the relevant parts of the code.
def _read_py_function(filename, label):
    return tf.zeros((224, 224, 3), dtype=tf.float32), tf.ones((1,), dtype=tf.int32)

dataset = dataset.map(lambda filename, label: _read_py_function(filename, label))
Another change: your iterator will expect floating-point input, so you will have to change your tf.uint8 output type to float.
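For example (a sketch in the question's TF 1.x style, with a hypothetical _read_and_cast helper), the cast can happen inside the map so every element the iterator yields is already floating point:

def _read_and_cast(filename, label):
    # wrap the read, then cast for the downstream conv layers
    data, label = _read_py_function(filename, label)
    return tf.cast(data, tf.float32), tf.cast(label, tf.int32)

dataset = dataset.map(_read_and_cast)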

TensorFlow Training CNN on Custom Images

All the TensorFlow tutorials do a great job; however, they all use preprocessed, downloadable datasets that work out of the box. Their tutorial on MNIST is the perfect example.
For a school project, four others and I have been assigned to train a CNN on supplied data in the form of PNG images. It's just a directory with 150 images. The labels are contained in the image file names.
The way the code sits now, we are getting an error, which I will include below.
We followed the MNIST code found here: https://github.com/tensorflow/tensorflow/blob/r1.3/tensorflow/examples/tutorials/layers/cnn_mnist.py
So we are fairly certain our problem is in how we have processed the image data.
We have been trying to get this to work for roughly 3 days (many errors that we have worked through; this is just the latest).
Any help or feedback would be greatly appreciated!
Also, if anybody has questions about this, please comment.
import os
import tensorflow as tf
import numpy as np
# from PIL import Image


# a function
def cnn_model_fn(features, labels, mode):
    """Model function for CNN."""
    # Input Layer
    input_layer = tf.reshape(features['x'], [-1, 128, 128, 3])
    # Convolutional Layer #1
    conv_1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=64,
        kernel_size=[7, 7],
        strides=2,
        padding="same",
        activation=tf.nn.relu)
    conv_2 = tf.layers.conv2d(
        inputs=conv_1,
        filters=128,
        kernel_size=[5, 5],
        padding="same",
        strides=2,
        activation=tf.nn.relu)
    max_pool_1 = tf.layers.max_pooling2d(
        inputs=conv_2,
        pool_size=3,
        strides=1
    )
    conv_3 = tf.layers.conv2d(
        inputs=max_pool_1,
        filters=96,
        kernel_size=[3, 3],
        activation=tf.nn.relu
    )
    max_pool_2 = tf.layers.max_pooling2d(
        inputs=conv_3,
        pool_size=2,
        strides=1
    )
    dropout_1 = tf.layers.dropout(
        inputs=max_pool_2,
        rate=0.5
    )
    fully_connected_1 = tf.contrib.layers.fully_connected(
        inputs=dropout_1,
        num_outputs=1024,
    )
    dropout_2 = tf.layers.dropout(
        inputs=fully_connected_1,
        rate=0.5
    )
    fully_connected_2 = tf.contrib.layers.fully_connected(
        inputs=dropout_2,
        num_outputs=1024,
    )
    fully_connected_3 = tf.contrib.layers.fully_connected(
        inputs=fully_connected_2,
        num_outputs=15,
    )
    softmax_layer = tf.contrib.layers.softmax(
        logits=fully_connected_3
    )


# ---------------------------------- MAIN ----------------------------------
def getLabels():
    imagelabels_arr = []
    image_files = os.listdir("../assets/CNN-Data/")
    for image in image_files:
        imagelabels_arr.append(image.split('.')[len(image.split('.')) - 2])
    return imagelabels_arr


def getTrainImages():
    filenames = []
    image_files = os.listdir("../assets/CNN-Data/")
    for image in image_files:
        filenames.append(image)
    filename_queue = tf.train.string_input_producer(filenames)
    reader = tf.WholeFileReader()
    filename, content = reader.read(filename_queue)
    image = tf.image.decode_png(content, channels=3)
    images = np.asarray(image)
    image = tf.cast(image, tf.float64)
    resize_image = tf.image.resize_images(image, (128, 128))
    # image_batch = tf.train.batch([resize_image], batch_size=10)
    print(resize_image)
    return resize_image


with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="./test")
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'x': np.array(getTrainImages())},
        y=np.array(getLabels()),
        batch_size=10,
        num_epochs=None,
        shuffle=True)
    classifier.train(
        input_fn=train_input_fn,
        steps=20,
    )
The Error:
Traceback (most recent call last):
File "CNN.py", line 134, in <module>
steps=20,
File "C:\Users\Tyler\Desktop\tensorFlowPratice\flowenv\lib\site-packages\tensorflow\python\estimator\estimator.py", line 241, in train
loss = self._train_model(input_fn=input_fn, hooks=hooks)
File "C:\Users\Tyler\Desktop\tensorFlowPratice\flowenv\lib\site-packages\tensorflow\python\estimator\estimator.py", line 628, in _train_model
input_fn, model_fn_lib.ModeKeys.TRAIN)
File "C:\Users\Tyler\Desktop\tensorFlowPratice\flowenv\lib\site-packages\tensorflow\python\estimator\estimator.py", line 499, in _get_features_and_labels_from_input_fn
result = self._call_input_fn(input_fn, mode)
File "C:\Users\Tyler\Desktop\tensorFlowPratice\flowenv\lib\site-packages\tensorflow\python\estimator\estimator.py", line 585, in _call_input_fn
return input_fn(**kwargs)
File "C:\Users\Tyler\Desktop\tensorFlowPratice\flowenv\lib\site-packages\tensorflow\python\estimator\inputs\numpy_io.py", line 109, in input_fn
if len(set(v.shape[0] for v in ordered_dict_x.values())) != 1:
File "C:\Users\Tyler\Desktop\tensorFlowPratice\flowenv\lib\site-packages\tensorflow\python\estimator\inputs\numpy_io.py", line 109, in <genexpr>
if len(set(v.shape[0] for v in ordered_dict_x.values())) != 1:
IndexError: tuple index out of range
The classifier.train function expects numpy arrays, not tensors. Hence you need to convert example_batch and label_batch by evaluating them with a session, not by wrapping them with the np.array() function. (Explanation)
sess.run(tf.initialize_all_variables())
tf.train.start_queue_runners(sess)

classifier = tf.estimator.Estimator(
    model_fn=cnn_model_fn, model_dir="./test")

train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': getTrainImages().eval()},
    y=getLabels().eval(),
    batch_size=10,
    num_epochs=None,
    shuffle=True)

classifier.train(
    input_fn=train_input_fn,
    steps=20,
)
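For clarity, a minimal standalone sketch of the tensor-to-NumPy conversion this answer relies on (t is a hypothetical tensor; .eval() needs a default session, and queue-backed tensors additionally need the queue runners started as above):

import tensorflow as tf

t = tf.constant([[1.0, 2.0]])
with tf.Session() as sess:
    arr = sess.run(t)   # returns a numpy.ndarray
    same = t.eval()     # equivalent inside the default session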
I recommend applying tools on top of TensorFlow. You might consider coding it with roNNie, Theano, Keras, Torch, or Caffe.
