Tensorflow input function with batch size and shuffle - python

I am trying to build a TensorFlow input function with tf.train.batch(). I have separate dataframes for training, evaluation and prediction, so input_fn should take the dataframe and batch size as arguments. The dataframes contain both continuous and categorical columns.
Revised code:
COLUMNS = ['atemp', 'holiday', 'humidity', 'season', 'temp', 'weather', 'windspeed', 'workingday', 'hour', 'weekday', 'month', 'label']
CONTINUOUS_COLUMNS = ['atemp', 'humidity', 'temp', 'windspeed',]
CATEGORICAL_COLUMNS = ['holiday', 'season', 'weather',
                       'workingday', 'weekday', 'month', 'hour']
LEARNING_RATE = 0.1
LABEL_COLUMN = 'label'
batch_size = 128
data_set = pd.read_excel('bike_str.xlsx')
# Split the data into a training set, an eval set and a pred set.
train_set = data_set[:9500]
eval_set = data_set[9500:10800]
pred_set = data_set[10800:]
## Eval and Prediction labels:
eval_label = eval_set['label']
pred_label = pred_set['label']
Input_fn:
def batch_input_fn(df, batch_size):
    def input_fn():
        """Input builder function."""
        # Creates a dictionary mapping from each continuous feature column name (k) to
        # the values of that column stored in a constant Tensor.
        continuous_cols = {k: tf.constant(df[k].values) for k in CONTINUOUS_COLUMNS}
        # Creates a dictionary mapping from each categorical feature column name (k)
        # to the values of that column stored in a tf.SparseTensor.
        categorical_cols = {
            k: tf.SparseTensor(
                indices=[[i, 0] for i in range(df[k].size)],
                values=df[k].values,
                dense_shape=[df[k].size, 1])
            for k in CATEGORICAL_COLUMNS}
        # Merges the two dictionaries into one.
        x = dict(continuous_cols)
        x.update(categorical_cols)
        # Converts the label column into a constant Tensor.
        y = tf.constant(df[LABEL_COLUMN].values)
        # Returns the feature columns and the label.
        sliced_input = tf.train.slice_input_producer([x, y], shuffle=shuffle)
        # So i'm trying to shuffle data for train and not shuffle for eval and pred
        return tf.train.batch(sliced_input, batch_size=batch_size, num_threads=3)
    return input_fn
## Continuous base columns.
atemp = tf.contrib.layers.real_valued_column('atemp')
humidity = tf.contrib.layers.real_valued_column('humidity')
temp = tf.contrib.layers.real_valued_column('temp')
windspeed = tf.contrib.layers.real_valued_column('windspeed')
## Categoric base columns:
### To define a feature column for a categorical feature, we can create a SparseColumn
holiday = tf.contrib.layers.sparse_column_with_keys(column_name="holiday", keys=["no", "yes"])
season = tf.contrib.layers.sparse_column_with_keys(column_name="season", keys=["spring", "summer", "fall","winter"])
feat_dnn = [atemp_b, humidity_b, windspeed_b, temp_b,
            tf.contrib.layers.embedding_column(holiday, dimension=1)]

dnnregressor = tf.contrib.learn.DNNRegressor(
    feature_columns=feat_dnn,
    hidden_units=[512, 256, 512],
    optimizer=tf.train.FtrlOptimizer(
        learning_rate=0.250, l1_regularization_strength=0.8, l2_regularization_strength=0.8),
    activation_fn=tf.nn.relu, dropout=0.08)
dnnregressor.fit(input_fn= lambda: batch_input_fn(train_set, batch_size, shuffle = True), steps=1000 )
When batch_input_fn is called this way, the error is:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-8-9c356159093d> in <module>()
----> 1 dnnregressor.fit(input_fn= lambda: batch_input_fn(train_set, batch_size), steps=15000 )
C:\Python\Anaconda\lib\site-packages\tensorflow\python\util\deprecation.py in new_func(*args, **kwargs)
287 'in a future version' if date is None else ('after %s' % date),
288 instructions)
--> 289 return func(*args, **kwargs)
290 return tf_decorator.make_decorator(func, new_func, 'deprecated',
291 _add_deprecated_arg_notice_to_docstring(
C:\Python\Anaconda\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\estimator.py in fit(self, x, y, input_fn, steps, batch_size, monitors, max_steps)
453 hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
454
--> 455 loss = self._train_model(input_fn=input_fn, hooks=hooks)
456 logging.info('Loss for final step: %s.', loss)
457 return self
C:\Python\Anaconda\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\estimator.py in _train_model(self, input_fn, hooks)
951 random_seed.set_random_seed(self._config.tf_random_seed)
952 global_step = contrib_framework.create_global_step(g)
--> 953 features, labels = input_fn()
954 self._check_inputs(features, labels)
955 model_fn_ops = self._get_train_ops(features, labels)
TypeError: 'function' object is not iterable
The following code seems to work, but there the tensors are plain arrays rather than a dict of feature columns:
def batched_input_fn(dataset_x, dataset_y, batch_size):
    def _input_fn():
        all_x = tf.constant(dataset_x, shape=dataset_x.shape, dtype=tf.float32)
        all_y = tf.constant(dataset_y, shape=dataset_y.shape, dtype=tf.float32)
        sliced_input = tf.train.slice_input_producer([all_x, all_y])
        return tf.train.batch(sliced_input, batch_size=batch_size)
    return _input_fn
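For reference, the "'function' object is not iterable" error points at the call site rather than the tensors: batch_input_fn(train_set, batch_size) already returns the inner input_fn, so the lambda hands the estimator a function that itself returns a function. A minimal sketch of one likely fix (shuffle made an explicit argument and the returned closure passed directly; note that tf.train.slice_input_producer cannot slice a dict/SparseTensor pair, so queue-based batching is left out of this sketch):

def batch_input_fn(df, batch_size, shuffle):
    def input_fn():
        continuous_cols = {k: tf.constant(df[k].values) for k in CONTINUOUS_COLUMNS}
        categorical_cols = {
            k: tf.SparseTensor(
                indices=[[i, 0] for i in range(df[k].size)],
                values=df[k].values,
                dense_shape=[df[k].size, 1])
            for k in CATEGORICAL_COLUMNS}
        features = dict(continuous_cols)
        features.update(categorical_cols)
        labels = tf.constant(df[LABEL_COLUMN].values)
        # Sketch only: the full feature dict and labels are returned directly;
        # batch_size and shuffle are not applied here.
        return features, labels
    return input_fn

# batch_input_fn(...) already returns a callable, so it can be passed as input_fn directly:
dnnregressor.fit(input_fn=batch_input_fn(train_set, batch_size, shuffle=True), steps=1000)
# equivalently: input_fn=lambda: batch_input_fn(train_set, batch_size, shuffle=True)()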


InternalError: Dst tensor is not initialized when doing KFold Cross Validation in TensorFlow

I am trying to get the mean absolute error (MAE) for each split of the data using 5-fold (KFold) cross validation. I have built a custom model using Xception that takes an X-ray hand image as input and outputs the estimated age in months. When I run the for loop over kf.split(X_train) in the code below (under the cv_mae part), I get output for the first CV run. However, after the first CV run, I get the following error:
640/640 [==============================] - 86s 114ms/step - loss: 0.3346 - mae_months: 17.8703
---------------------------------------------------------------------------
InternalError Traceback (most recent call last)
Input In [15], in <cell line: 3>()
3 for train_index, val_index in kf.split(X_train):
4 model.fit(X_train[train_index], y_train[train_index], batch_size = 10)
----> 5 pred = model.predict(X_train[val_index], batch_size = 2)
6 err = mean_absolute_error(y_train[val_index], pred)
7 cv_mae.append(err)
File ~\anaconda3\lib\site-packages\keras\wrappers\scikit_learn.py:364, in KerasRegressor.predict(self, x, **kwargs)
350 """Returns predictions for the given test data.
351
352 Args:
(...)
361 Predictions.
362 """
363 kwargs = self.filter_sk_params(Sequential.predict, kwargs)
--> 364 return np.squeeze(self.model.predict(x, **kwargs))
File ~\anaconda3\lib\site-packages\keras\utils\traceback_utils.py:67, in filter_traceback.<locals>.error_handler(*args, **kwargs)
65 except Exception as e: # pylint: disable=broad-except
66 filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67 raise e.with_traceback(filtered_tb) from None
68 finally:
69 del filtered_tb
File ~\anaconda3\lib\site-packages\tensorflow\python\framework\constant_op.py:102, in convert_to_eager_tensor(value, ctx, dtype)
100 dtype = dtypes.as_dtype(dtype).as_datatype_enum
101 ctx.ensure_initialized()
--> 102 return ops.EagerTensor(value, ctx.device_name, dtype)
InternalError: Failed copying input tensor from /job:localhost/replica:0/task:0/device:CPU:0 to /job:localhost/replica:0/task:0/device:GPU:0 in order to run _EagerConst: Dst tensor is not initialized.
The error seems to appear every time the loop reaches model.predict(), since the error message points to:
----> 5 pred = model.predict(X_train[val_index], batch_size = 2)
Code:
# Checking the GPU availability
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
#---------------------------------------------------------------------------------
# Root path for the image files:
root = 'P:/BoneDataset/0-Dataset/ba-trainset/'
age_df = pd.read_csv(os.path.join(root, 'ba-training-dataset.csv'))
# Converting 'male' column to have male and female instead of true and false:
age_df['gender'] = age_df['male'].map(lambda x: 'male' if x else 'female')
# Checking for the path existance
age_df['path'] = age_df['id'].map(lambda x: os.path.join(root, 'ba-trainset', '{}.png'.format(x)))
age_df['exists'] = age_df['path'].map(os.path.exists)
print(age_df['exists'].sum(), 'images found of total of', age_df.shape[0], 'images.')
#---------------------------------------------------------------------------------
# Oldest children age in the dataset:
print('Maximum age: ' + str(age_df['boneage'].max()) + ' months')
# Youngest children age in the dataset:
print('Minimum age: ' + str(age_df['boneage'].min()) + ' months')
# Mean of children age in the dataset:
boneage_mean = age_df['boneage'].mean()
print('Mean BA: ' + str(boneage_mean))
# Median of children age in the dataset:
print('Median BA: ' + str(age_df['boneage'].median()))
# Standard deviation of children age in the dataset:
boneage_div = age_df['boneage'].std()
# Normalizing features (models perform better) to have Zero Mean, and
# Unified Standard Deviation using Z-score for training:
age_df['boneage_zscore'] = age_df['boneage'].map(lambda x: (x-boneage_mean)/boneage_div)
#---------------------------------------------------------------------------------
# Trimming data size to 10000 from 12000
age_df['boneage_category'] = pd.cut(age_df['boneage'], 10)
new_age_df = age_df.groupby(['boneage_category', 'male']).apply(lambda x: x.sample(500, replace = True)).reset_index(drop = True)
print('New Data Size:', new_age_df.shape[0], 'Old Size:', age_df.shape[0])
#---------------------------------------------------------------------------------
train_df, valid_df = train_test_split(new_age_df, test_size = 0.20, stratify = new_age_df['boneage_category'])
#---------------------------------------------------------------------------------
## Image preprocessing:
from keras.preprocessing.image import ImageDataGenerator
from keras.applications.vgg16 import preprocess_input
from keras.applications.imagenet_utils import preprocess_input
IMG_SIZE = (224, 224)
core_idg = ImageDataGenerator(samplewise_center = True,
                              samplewise_std_normalization = True,
                              height_shift_range = 0.05,
                              width_shift_range = 0.05,
                              rotation_range = 10,
                              fill_mode = 'nearest',
                              rescale = 1. / 255,
                              preprocessing_function = preprocess_input)
#---------------------------------------------------------------------------------
def flow_from_dataframe(img_data_gen, in_df, path_col, y_col, **dflow_args):
    base_dir = os.path.dirname(in_df[path_col].values[0])
    print('## Ignore next message from keras, values are replaced anyways')
    df_gen = img_data_gen.flow_from_directory(base_dir, class_mode = 'sparse', **dflow_args)
    df_gen.filenames = in_df[path_col].values
    # Added df_gen.filepaths.extend because the filepaths is an empty list.
    # Hence added the image paths to filepaths.
    df_gen.filepaths.extend(df_gen.filenames)
    df_gen.classes = np.stack(in_df[y_col].values)
    df_gen.samples = in_df.shape[0]
    df_gen.n = in_df.shape[0]
    df_gen._set_index_array()
    df_gen.directory = ''  # since we have the full path
    print('Reinserting dataframe: {} images'.format(in_df.shape[0]))
    return df_gen
#---------------------------------------------------------------------------------
# Data Generators:
train_gen = flow_from_dataframe(core_idg, train_df,
                                path_col = 'path',
                                y_col = 'boneage_zscore',
                                target_size = IMG_SIZE,
                                color_mode = 'rgb',
                                batch_size = len(train_df),
                                shuffle = True)
X_train, y_train = next(train_gen)
def boneage_model():
    base_model = Xception(input_shape = X_train.shape[1:], include_top = False, weights = 'imagenet')
    base_model.trainable = True

    model = Sequential()
    model.add(base_model)
    model.add(GlobalMaxPooling2D())
    model.add(Flatten())
    model.add(Dense(16, activation = 'relu'))
    model.add(Dense(1, activation = 'linear'))

    def mae_months(in_gt, in_pred):
        return mean_absolute_error(boneage_div * in_gt, boneage_div * in_pred)

    # Compile model
    adam = Adam(learning_rate = 0.0005)
    model.compile(loss = 'mse', optimizer = adam, metrics = [mae_months])
    return model
#---------------------------------------------------------------------------------
# KFold
n_splits = 5
kf = KFold(n_splits = n_splits, shuffle = True, random_state = 42)
# create model
model = KerasRegressor(build_fn = boneage_model)
#---------------------------------------------------------------------------------
#### THIS IS WHERE THE ERROR STARTS
cv_mae = []
for train_index, val_index in kf.split(X_train):
    model.fit(X_train[train_index], y_train[train_index], batch_size = 16)
    pred = model.predict(X_train[val_index], batch_size = 2)
    err = mean_absolute_error(y_train[val_index], pred)
    cv_mae.append(err)
cv_mae
Note: train_df contains 8000 X-ray hand images.
According to the post here, the suggestion was to trim batch_size down, which is why batch_size = 2 is used in model.predict() above. However, it still prints the same error message. Please help!
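For what it's worth, "Dst tensor is not initialized" while copying a constant to the GPU usually signals exhausted GPU memory rather than a batch-size problem: every call to KerasRegressor.fit invokes build_fn and builds a fresh Xception on the GPU while the previous fold's model is still resident. A minimal sketch, assuming that is the cause here, which releases the old graph before each fold:

import gc
import tensorflow as tf

cv_mae = []
for train_index, val_index in kf.split(X_train):
    # Drop the model left over from the previous fold before KerasRegressor
    # builds a new one, so only one copy of Xception sits on the GPU at a time.
    tf.keras.backend.clear_session()
    gc.collect()

    model.fit(X_train[train_index], y_train[train_index], batch_size = 16)
    pred = model.predict(X_train[val_index], batch_size = 2)
    cv_mae.append(mean_absolute_error(y_train[val_index], pred))

If memory is still tight after that, loading fewer than all 8000 images at once (batch_size = len(train_df) in train_gen) would be the next thing to look at.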

How to predict time series values with Keras, using predictions as inputs

I have built an LSTM model in Keras that works well on my test and validation sets; however, when I try to use it to predict values where my two target variables are NaN, the model cannot make predictions, and I am worried the modeling process will have been for nothing. I will put my code here; I have mostly followed the guide on the TensorFlow website. This is my first TensorFlow model, so I apologize if this is simple, but I cannot seem to predict any values, and I do not know how to format the data so it can be predicted the way it was for validation and then feed those predictions back in as inputs to compute farther back in time, since I only have 3 years of training data but 40 years that need to be backcasted.
I believe the problem is that right now my data has the shape (batch size, inputs) and I need it in the shape (batch size, time steps looking back, inputs), but I cannot figure out how to do this.
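For illustration, a rough sketch of the reshape being described: stack the 24 most recent rows of already-scaled feature data into a window of shape (1, n_steps, n_features) before calling predict. The feature_values array below is a placeholder, not taken from the post:

import numpy as np

n_steps, n_features = 24, 10
# Placeholder for however the scaled (rows, 10) feature matrix is obtained.
feature_values = np.random.rand(100, n_features).astype(np.float32)

window = feature_values[-n_steps:].reshape(1, n_steps, n_features)  # (batch, time steps, inputs)
pred = lstm_model.predict(window)   # shape (1, 24, 2) because the last LSTM returns sequences
next_step = pred[0, -1, :]          # predicted Return and Supply for the newest time step

The predicted pair could then be written back into the feature matrix and the window slid one step, which is the feed-predictions-back-as-inputs loop mentioned above.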
Creating data:
df.rename(columns = {'Unnamed: 0':'date'}, inplace = True)
true_test = df[pd.isnull(df['Return'])]
df=df[pd.isnull(df['Return'])==False]
Window Generator Class:
class WindowGenerator():
    def __init__(self, input_width, label_width, shift,
                 train_df=train_df, val_df=val_df, test_df=test_df,
                 label_columns=None):
        # Store the raw data.
        self.train_df = train_df
        self.val_df = val_df
        self.test_df = test_df

        # Work out the label column indices.
        self.label_columns = label_columns
        if label_columns is not None:
            self.label_columns_indices = {name: i for i, name in
                                          enumerate(label_columns)}
        self.column_indices = {name: i for i, name in
                               enumerate(train_df.columns)}

        # Work out the window parameters.
        self.input_width = input_width
        self.label_width = label_width
        self.shift = shift
        self.total_window_size = input_width + shift
        self.input_slice = slice(0, input_width)
        self.input_indices = np.arange(self.total_window_size)[self.input_slice]
        self.label_start = self.total_window_size - self.label_width
        self.labels_slice = slice(self.label_start, None)
        self.label_indices = np.arange(self.total_window_size)[self.labels_slice]

    def __repr__(self):
        return '\n'.join([
            f'Total window size: {self.total_window_size}',
            f'Input indices: {self.input_indices}',
            f'Label indices: {self.label_indices}',
            f'Label column name(s): {self.label_columns}'])

    def split_window(self, features):
        inputs = features[:, self.input_slice, :]
        labels = features[:, self.labels_slice, :]
        if self.label_columns is not None:
            labels = tf.stack(
                [labels[:, :, self.column_indices[name]] for name in self.label_columns],
                axis=-1)
        # Slicing doesn't preserve static shape information, so set the shapes
        # manually. This way the `tf.data.Datasets` are easier to inspect.
        inputs.set_shape([None, self.input_width, None])
        labels.set_shape([None, self.label_width, None])
        return inputs, labels
def make_dataset(self, data):
    data = np.array(data, dtype=np.float32)
    ds = tf.keras.utils.timeseries_dataset_from_array(
        data=data,
        targets=None,
        sequence_length=self.total_window_size,
        sequence_stride=1,
        shuffle=True,
        batch_size=32,)
    ds = ds.map(self.split_window)
    return ds

WindowGenerator.make_dataset = make_dataset
def plot(self, model=None, plot_col=['Return', 'Supply'], max_subplots=3):
    if isinstance(plot_col, list):
        self.plot(model=model, plot_col='Return')
        self.plot(model=model, plot_col='Supply')
        return None
    inputs, labels = self.example
    plt.figure(figsize=(12, 8), dpi=300)
    plot_col_index = self.column_indices[plot_col]
    max_n = min(max_subplots, len(inputs))
    for n in range(max_n):
        print("somthing plz \n\n\n\n")
        plt.subplot(max_n, 1, n+1)
        plt.ylabel(f'{plot_col} [normed]')
        plt.plot(self.input_indices, inputs[n, :, plot_col_index],
                 label='Inputs', marker='.', zorder=-10)
        if self.label_columns:
            label_col_index = self.label_columns_indices.get(plot_col, None)
        else:
            label_col_index = plot_col_index
        if label_col_index is None:
            continue
        plt.scatter(self.label_indices, labels[n, :, label_col_index],
                    edgecolors='k', label='Labels', c='#2ca02c', s=64)
        if model is not None:
            predictions = model(inputs)
            if plot_col == 'Return':
                plt.scatter(self.label_indices, predictions[n, :, 0],
                            marker='X', edgecolors='k', label='Predictions',
                            c='#ff7f0e', s=64)
            elif plot_col == 'Supply':
                plt.scatter(self.label_indices, predictions[n, :, 1],
                            marker='X', edgecolors='k', label='Predictions',
                            c='#ff7f0e', s=64)
            else:
                plt.scatter(self.label_indices, predictions[n, :, label_col_index],
                            marker='X', edgecolors='k', label='Predictions',
                            c='#ff7f0e', s=64)
        if n == 0:
            plt.legend()
    plt.xlabel('Time [h]')

WindowGenerator.plot = plot
Compile and Fit Function:
def compile_and_fit(model, window, patience=2):
    early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                      patience=patience,
                                                      mode='min')
    model.compile(loss=tf.keras.losses.MeanAbsoluteError(),
                  optimizer=tf.keras.optimizers.Adam(),
                  metrics=[tf.keras.metrics.MeanAbsoluteError(), coeff_determination])
    history = model.fit(window.train, epochs=MAX_EPOCHS,
                        validation_data=window.val,
                        callbacks=[early_stopping])
    return history
Fitting Model:
wide_window = WindowGenerator(
    input_width=24, label_width=24, shift=1,
    label_columns=['Return', 'Supply'])

## RNN: LSTM
n_steps = 24
n_features = 10
lstm_model = tf.keras.models.Sequential([
    # Shape [batch, time, features] => [batch, time, lstm_units]
    tf.keras.layers.LSTM(32, return_sequences=True, input_shape=(n_steps, n_features)),
    tf.keras.layers.LSTM(16, return_sequences=True),
    tf.keras.layers.LSTM(4, return_sequences=True),
    # Shape => [batch, time, features]
    tf.keras.layers.Dense(units=2)
])

val_performance['LSTM_SR'] = lstm_model.evaluate(wide_window.val)
performance['LSTM_SR'] = lstm_model.evaluate(wide_window.test, verbose=0)
Trying to test:
## Tried no expansion, axis = 1, and axis = 0, this one seems the closest
true_test2 = np.expand_dims(true_test, axis=1)
lstm_model.predict(true_test2, verbose = 2)
Error from above call:
ValueError Traceback (most recent call last)
path in <cell line:
5>()
----> 545 lstm_model.predict(true_test2, verbose = 2)
File ~/lib/python3.9/site-
packages/keras/utils/traceback_utils.py:67, in filter_traceback.
<locals>.error_handler(*args, **kwargs)
65 except Exception as e: # pylint: disable=broad-except
66 filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67 raise e.with_traceback(filtered_tb) from None
68 finally:
69 del filtered_tb
File ~/lib/python3.9/site-
packages/keras/engine/input_spec.py:264, in assert_input_compatibility(input_spec,
inputs, layer_name)
262 if spec_dim is not None and dim is not None:
263 if spec_dim != dim:
--> 264 raise ValueError(f'Input {input_index} of layer "{layer_name}" is '
265 'incompatible with the layer: '
266 f'expected shape={spec.shape}, '
267 f'found shape={display_shape(x.shape)}')
ValueError: Input 0 of layer "sequential_21" is incompatible with the layer: expected
shape=(None, 24, 10), found shape=(32, 1, 10)
Error with Just true_test:
ValueError: Input 0 of layer "sequential_23" is incompatible with
the layer: expected shape=(None, 24, 10), found shape=(32, 10)
Try removing input_shape from the 2nd and 3rd layers of your model; input_shape should be used only in the first layer of your model.
lstm_model = tf.keras.models.Sequential([
    # Shape [batch, time, features] => [batch, time, lstm_units]
    tf.keras.layers.LSTM(32, return_sequences=True, input_shape=(n_steps, n_features)),
    tf.keras.layers.LSTM(16, return_sequences=True),
    tf.keras.layers.LSTM(4),
    # Shape => [batch, time, features]
    tf.keras.layers.Dense(units=2)
])

How to turn sequences into batches in tensorflow?

I am working with the SketchRNN dataset.
I would like to do the following:
Parse examples in TFRecord to get labels.
Filter rows with specific labels. There are 345 labels, and I would like to keep only data with label number below 10.
Batch multiple observations for training.
Below is my failed attempt:
from pathlib import Path
quickdraw_dir = Path("/home/long/.keras/./datasets/quickdraw/quickdraw_tutorial_dataset_v1.tar.gz").parent
train_files = sorted([str(path) for path in quickdraw_dir.glob("training.tfrecord-*")])
eval_files = sorted([str(path) for path in quickdraw_dir.glob("eval.tfrecord-*")])
def parse_single_example(data):
    feature_descriptions = {
        "ink": tf.io.VarLenFeature(dtype=tf.float32),
        "shape": tf.io.FixedLenFeature([2], dtype=tf.int64),
        "class_index": tf.io.FixedLenFeature([1], dtype=tf.int64)
    }
    example = tf.io.parse_single_example(data, feature_descriptions)
    flat_sketch = tf.sparse.to_dense(example["ink"])
    sketch = tf.reshape(flat_sketch, shape=[-1, 3])
    length = example["shape"][0]
    label = example["class_index"][0]
    return sketch, length, label

def filter_cond(dataset):
    return dataset.filter(lambda x, y, z: tf.math.less(z, 10))
def get_dataset(filepaths, batch_size=32, shuffle_buffer_size=None, n_parse_threads=5, n_read_threads=5, cache=False):
    dataset = tf.data.TFRecordDataset(filepaths, num_parallel_reads=n_read_threads)
    if cache:
        dataset.cache()
    if shuffle_buffer_size:
        dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.map(parse_single_example, num_parallel_calls=n_parse_threads)
    dataset = dataset.apply(filter_cond)
    dataset = dataset.batch(batch_size=batch_size)
    return dataset.prefetch(1)

train_dataset = get_dataset(train_files, shuffle_buffer_size=10000)
train_dataset.take(1)
Below is the error:
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
Input In [31], in <cell line: 1>()
----> 1 for sketches, lengths, labels in train_dataset.take(1):
2 print("sketches =", sketches)
3 print("sketches shape=", sketches.shape)
File ~/anaconda3/envs/experiment/lib/python3.9/site-packages/tensorflow/python/data/ops/iterator_ops.py:800, in OwnedIterator.__next__(self)
798 def __next__(self):
799 try:
--> 800 return self._next_internal()
801 except errors.OutOfRangeError:
802 raise StopIteration
File ~/anaconda3/envs/experiment/lib/python3.9/site-packages/tensorflow/python/data/ops/iterator_ops.py:783, in OwnedIterator._next_internal(self)
780 # TODO(b/77291417): This runs in sync mode as iterators use an error status
781 # to communicate that there is no more data to iterate over.
782 with context.execution_mode(context.SYNC):
--> 783 ret = gen_dataset_ops.iterator_get_next(
784 self._iterator_resource,
785 output_types=self._flat_output_types,
786 output_shapes=self._flat_output_shapes)
788 try:
789 # Fast path for the case `self._structure` is not a nested structure.
790 return self._element_spec._from_compatible_tensor_list(ret) # pylint: disable=protected-access
File ~/anaconda3/envs/experiment/lib/python3.9/site-packages/tensorflow/python/ops/gen_dataset_ops.py:2845, in iterator_get_next(iterator, output_types, output_shapes, name)
2843 return _result
2844 except _core._NotOkStatusException as e:
-> 2845 _ops.raise_from_not_ok_status(e, name)
2846 except _core._FallbackException:
2847 pass
File ~/anaconda3/envs/experiment/lib/python3.9/site-packages/tensorflow/python/framework/ops.py:7107, in raise_from_not_ok_status(e, name)
7105 def raise_from_not_ok_status(e, name):
7106 e.message += (" name: " + name if name is not None else "")
-> 7107 raise core._status_to_exception(e) from None
InvalidArgumentError: Cannot batch tensors with different shapes in component 0. First element had shape [55,3] and element 1 had shape [42,3]. [Op:IteratorGetNext]
From what I understand, this is because TensorFlow cannot batch together sequence data of variable length.
The code below works for parsing and batching, but with it I am unable to filter my data (to reduce the number of classes to predict):
def parse_example(data_batch):
    feature_descriptions = {
        "ink": tf.io.VarLenFeature(dtype=tf.float32),
        "shape": tf.io.FixedLenFeature([2], dtype=tf.int64),
        "class_index": tf.io.FixedLenFeature([1], dtype=tf.int64)
    }
    examples = tf.io.parse_example(data_batch, feature_descriptions)
    flat_sketches = tf.sparse.to_dense(examples["ink"])
    sketches = tf.reshape(flat_sketches, shape=[tf.size(data_batch), -1, 3])
    lengths = examples["shape"][:, 0]
    labels = examples["class_index"][:, 0]
    return sketches, lengths, labels

def get_dataset(filepaths, batch_size=32, shuffle_buffer_size=None, n_parse_threads=5, n_read_threads=5, cache=False):
    dataset = tf.data.TFRecordDataset(filepaths, num_parallel_reads=n_read_threads)
    if cache:
        dataset.cache()
    if shuffle_buffer_size:
        dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size=batch_size)
    dataset = dataset.map(parse_example, num_parallel_calls=n_parse_threads)
    # dataset = dataset.apply(filter_cond)
    return dataset.prefetch(1)
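One way to keep the per-example parsing (and therefore the filter) while still batching variable-length sketches is to filter before batching and then pad each batch, for example with padded_batch. A sketch along those lines, reusing parse_single_example from the first attempt (not tested against the actual TFRecords):

def get_dataset(filepaths, batch_size=32, shuffle_buffer_size=None,
                n_parse_threads=5, n_read_threads=5, cache=False):
    dataset = tf.data.TFRecordDataset(filepaths, num_parallel_reads=n_read_threads)
    if cache:
        dataset = dataset.cache()
    if shuffle_buffer_size:
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.map(parse_single_example, num_parallel_calls=n_parse_threads)
    # Filter on the label while elements are still individual examples.
    dataset = dataset.filter(lambda sketch, length, label: tf.math.less(label, 10))
    # Pad every sketch to the longest one in its batch so they can be stacked.
    dataset = dataset.padded_batch(batch_size,
                                   padded_shapes=([None, 3], [], []))
    return dataset.prefetch(1)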

Train Pytorch Autoencoder with custom dataset

I am new to PyTorch. I was able to build an autoencoder model and train it using the MNIST dataset.
However, I need to train the model on a custom dataset.
I am getting the error 'ToTensor' object is not iterable when I try to train with the custom dataset.
Below is the code of my dataset class
class AutoEncoderDataSet(Dataset):
    def __init__(self, in_dir, transform):
        self._transforms = transform
        self.img_paths = []
        files = os.listdir(in_dir)
        for file in files:
            self.img_paths.append(os.path.join(in_dir, file))

    def __getitem__(self, index):
        img, img_trans = Image.open(self.img_paths[index]), Image.open(self.img_paths[index])
        x, y = transform(img), transform(img_trans)
        return x, y

    def __len__(self):
        return len(self.img_paths)
Here is how I am generating the dataloader
transform = transforms.Compose([torchvision.transforms.ToTensor()])
train_dataset = AutoEncoderDataSet('./datasets/train/', transform)
batch_size = 512
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
When I try to train using the data generated with the custom dataset class, I am getting the error mentioned above.
Below is code for training the model
epochs = 2048
for epoch in range(epochs):
    loss = 0
    for batch_features, _ in train_loader:
        # reshape mini-batch data to [N, 784] matrix
        # load it to the active device
        batch_features = batch_features.view(-1, 250*250).to(device)
        # reset the gradients back to zero
        # PyTorch accumulates gradients on subsequent backward passes
        optimizer.zero_grad()
        # compute encoder output
        outputs = model(batch_features)
        # compute training reconstruction loss
        train_loss = criterion(outputs, batch_features)
        # compute accumulated gradients
        train_loss.backward()
        # perform parameter update based on current gradients
        optimizer.step()
        # add the mini-batch training loss to epoch loss
        loss += train_loss.item()
    # compute the epoch training loss
    loss = loss / len(train_loader)
    # display the epoch training loss
    print("epoch : {}/{}, recon loss = {:.8f}".format(epoch + 1, epochs, loss))
And this is the error I am getting
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_11164/1462449221.py in <module>
3 for epoch in range(epochs):
4 loss = 0
----> 5 for batch_features, _ in test_loader:
6 # reshape mini-batch data to [N, 784] matrix
7 # load it to the active device
~\AppData\Local\Programs\Python\Python38\lib\site-packages\torch\utils\data\dataloader.py in __next__(self)
519 if self._sampler_iter is None:
520 self._reset()
--> 521 data = self._next_data()
522 self._num_yielded += 1
523 if self._dataset_kind == _DatasetKind.Iterable and \
~\AppData\Local\Programs\Python\Python38\lib\site-packages\torch\utils\data\dataloader.py in _next_data(self)
559 def _next_data(self):
560 index = self._next_index() # may raise StopIteration
--> 561 data = self._dataset_fetcher.fetch(index) # may raise StopIteration
562 if self._pin_memory:
563 data = _utils.pin_memory.pin_memory(data)
~\AppData\Local\Programs\Python\Python38\lib\site-packages\torch\utils\data\_utils\fetch.py in fetch(self, possibly_batched_index)
47 def fetch(self, possibly_batched_index):
48 if self.auto_collation:
---> 49 data = [self.dataset[idx] for idx in possibly_batched_index]
50 else:
51 data = self.dataset[possibly_batched_index]
~\AppData\Local\Programs\Python\Python38\lib\site-packages\torch\utils\data\_utils\fetch.py in <listcomp>(.0)
47 def fetch(self, possibly_batched_index):
48 if self.auto_collation:
---> 49 data = [self.dataset[idx] for idx in possibly_batched_index]
50 else:
51 data = self.dataset[possibly_batched_index]
~\AppData\Local\Programs\Python\Python38\lib\site-packages\torchvision\datasets\folder.py in __getitem__(self, index)
232 sample = self.loader(path)
233 if self.transform is not None:
--> 234 sample = self.transform(sample)
235 if self.target_transform is not None:
236 target = self.target_transform(target)
~\AppData\Local\Programs\Python\Python38\lib\site-packages\torchvision\transforms\transforms.py in __call__(self, img)
58
59 def __call__(self, img):
---> 60 for t in self.transforms:
61 img = t(img)
62 return img
TypeError: 'ToTensor' object is not iterable
Any suggestions would be greatly appreciated.
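A note on the traceback rather than a definitive answer: the failure happens inside torchvision's Compose.__call__ while iterating self.transforms, the loop iterates test_loader rather than train_loader, and the frames go through torchvision/datasets/folder.py. That suggests some other dataset in the session (the one behind test_loader) was given a Compose built around a bare ToTensor() instead of a list. A minimal sketch of the difference, with the dataset path hypothetical:

import torch
import torchvision
from torchvision import transforms

# Raises "'ToTensor' object is not iterable" the first time it is applied,
# because Compose expects a list of transforms to iterate over.
bad = transforms.Compose(transforms.ToTensor())

# Works: the transform(s) are wrapped in a list.
good = transforms.Compose([transforms.ToTensor()])

# The failing loader in the traceback sits on an ImageFolder-style dataset,
# so it needs the list form as well (the path below is hypothetical).
test_dataset = torchvision.datasets.ImageFolder('./datasets/test/', transform=good)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=512, shuffle=False)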

There is an error at the local variable 'batch_index'

I am trying to run CNN code that uses the Deep Learning and Beamforming dataset.
During the process I hit an error at the local variable 'batch_index'. According to information on the Internet, the problem is in Keras,
specifically at the line 'if batch_index == len(batches) - 1: # Last batch.'
UnboundLocalError: local variable 'batch_index' referenced before assignment
I tried to declare 'batch_index' as a global variable, but that produces another error.
"""Part of the training engine related to plain array data (e.g. Numpy).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy.sparse import issparse
from .training_utils import batch_shuffle
from .training_utils import check_num_samples
from .training_utils import make_batches
from .training_utils import should_run_validation
from .. import backend as K
from .. import callbacks as cbks
from ..utils.generic_utils import Progbar
from ..utils.generic_utils import slice_arrays
from ..utils.generic_utils import to_list
from ..utils.generic_utils import unpack_singleton
def fit_loop(model, fit_function, fit_inputs,
out_labels=None,
batch_size=None,
epochs=100,
verbose=1,
callbacks=None,
val_function=None,
val_inputs=None,
shuffle=True,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_freq=1):
"""Abstract fit function for `fit_function(fit_inputs)`.
Assumes that fit_function returns a list, labeled by out_labels.
# Arguments
model: Keras model instance.
fit_function: Keras function returning a list of tensors
fit_inputs: List of tensors to be fed to `fit_function`
out_labels: List of strings, display names of
the outputs of `fit_function`
batch_size: Integer batch size or None if unknown.
epochs: Number of times to iterate over the data
verbose: Verbosity mode, 0, 1 or 2
callbacks: List of callbacks to be called during training and validation
(if `val_function` and `val_inputs` are not `None`).
val_function: Keras function to call for validation
val_inputs: List of tensors to be fed to `val_function`
shuffle: Whether to shuffle the data at the beginning of each epoch
initial_epoch: Epoch at which to start training
(useful for resuming a previous training run)
steps_per_epoch: Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. Ignored with the default value of `None`.
validation_steps: Number of steps to run validation for
(only if doing validation from data tensors).
Ignored with the default value of `None`.
validation_freq: Only relevant if validation data is provided. Integer
or list/tuple/set. If an integer, specifies how many training
epochs to run before a new validation run is performed, e.g.
validation_freq=2` runs validation every 2 epochs. If a list,
tuple, or set, specifies the epochs on which to run validation,
e.g. `validation_freq=[1, 2, 10]` runs validation at the end
of the 1st, 2nd, and 10th epochs.
# Returns
`History` object.
"""
do_validation = False
if val_function and val_inputs:
do_validation = True
if (verbose and fit_inputs and
hasattr(fit_inputs[0], 'shape') and hasattr(val_inputs[0], 'shape')):
print('Train on %d samples, validate on %d samples' %
(fit_inputs[0].shape[0], val_inputs[0].shape[0]))
if validation_steps:
do_validation = True
if steps_per_epoch is None:
raise ValueError('Can only use `validation_steps` '
'when doing step-wise '
'training, i.e. `steps_per_epoch` '
'must be set.')
elif do_validation:
if steps_per_epoch:
raise ValueError('Must specify `validation_steps` '
'to perform validation '
'when doing step-wise training.')
num_train_samples = check_num_samples(fit_inputs,
batch_size=batch_size,
steps=steps_per_epoch,
steps_name='steps_per_epoch')
if num_train_samples is not None:
index_array = np.arange(num_train_samples)
model.history = cbks.History()
_callbacks = [cbks.BaseLogger(stateful_metrics=model.metrics_names[1:])]
if verbose:
if steps_per_epoch is not None:
count_mode = 'steps'
else:
count_mode = 'samples'
_callbacks.append(
cbks.ProgbarLogger(count_mode, stateful_metrics=model.metrics_names[1:]))
_callbacks += (callbacks or []) + [model.history]
callbacks = cbks.CallbackList(_callbacks)
out_labels = out_labels or []
# it's possible to callback a different model than itself
# (used by Sequential models)
callback_model = model._get_callback_model()
callback_metrics = list(model.metrics_names)
if do_validation:
callback_metrics += ['val_' + n for n in model.metrics_names]
callbacks.set_model(callback_model)
callbacks.set_params({
'batch_size': batch_size,
'epochs': epochs,
'steps': steps_per_epoch,
'samples': num_train_samples,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks._call_begin_hook('train')
callbacks.model.stop_training = False
for cbk in callbacks:
cbk.validation_data = val_inputs
# To prevent a slowdown,
# we find beforehand the arrays that need conversion.
feed = (model._feed_inputs +
model._feed_targets +
model._feed_sample_weights)
indices_for_conversion_to_dense = []
for i in range(len(feed)):
if issparse(fit_inputs[i]) and not K.is_sparse(feed[i]):
indices_for_conversion_to_dense.append(i)
for epoch in range(initial_epoch, epochs):
model.reset_metrics()
callbacks.on_epoch_begin(epoch)
epoch_logs = {}
if steps_per_epoch is not None:
for step_index in range(steps_per_epoch):
batch_logs = {'batch': step_index, 'size': 1}
callbacks._call_batch_hook('train', 'begin', step_index, batch_logs)
outs = fit_function(fit_inputs)
outs = to_list(outs)
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks._call_batch_hook('train', 'end', step_index, batch_logs)
if callback_model.stop_training:
break
if do_validation and should_run_validation(validation_freq, epoch):
val_outs = test_loop(model, val_function, val_inputs,
steps=validation_steps,
callbacks=callbacks,
verbose=0)
val_outs = to_list(val_outs)
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
else:
if shuffle == 'batch':
index_array = batch_shuffle(index_array, batch_size)
elif shuffle:
np.random.shuffle(index_array)
batches = make_batches(num_train_samples, batch_size)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
if isinstance(fit_inputs[-1], int):
# Do not slice the training phase flag.
ins_batch = slice_arrays(
fit_inputs[:-1], batch_ids) + [fit_inputs[-1]]
else:
ins_batch = slice_arrays(fit_inputs, batch_ids)
except TypeError:
raise TypeError('TypeError while preparing batch. '
'If using HDF5 input data, '
'pass shuffle="batch".')
batch_logs = {'batch': batch_index, 'size': len(batch_ids)}
callbacks._call_batch_hook('train', 'begin', batch_index, batch_logs)
for i in indices_for_conversion_to_dense:
ins_batch[i] = ins_batch[i].toarray()
outs = fit_function(ins_batch)
outs = to_list(outs)
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks._call_batch_hook('train', 'end', batch_index, batch_logs)
if callbacks.model.stop_training:
break
if batch_index == len(batches) - 1: # Last batch.
if do_validation and should_run_validation(validation_freq, epoch):
val_outs = test_loop(model, val_function, val_inputs,
batch_size=batch_size,
callbacks=callbacks,
verbose=0)
val_outs = to_list(val_outs)
# Same labels assumed.
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
if callbacks.model.stop_training:
break
callbacks._call_end_hook('train')
return model.history
def predict_loop(model, f, ins,
batch_size=32,
verbose=0,
steps=None,
callbacks=None):
"""Abstract method to loop over some data in batches.
# Arguments
model: Keras model instance.
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
steps: Total number of steps (batches of samples)
before declaring `predict_loop` finished.
Ignored with the default value of `None`.
callbacks: List of callbacks or an instance of
`keras.callbacks.CallbackList` to be called during prediction.
# Returns
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
"""
num_samples = check_num_samples(ins,
batch_size=batch_size,
steps=steps,
steps_name='steps')
# Check if callbacks have not been already configured
if not isinstance(callbacks, cbks.CallbackList):
callbacks = cbks.CallbackList(callbacks)
callback_model = model._get_callback_model()
callbacks.set_model(callback_model)
callback_params = {
'batch_size': batch_size,
'steps': steps,
'samples': num_samples,
'verbose': verbose,
}
callbacks.set_params(callback_params)
if verbose == 1:
if steps is not None:
progbar = Progbar(target=steps)
else:
progbar = Progbar(target=num_samples)
indices_for_conversion_to_dense = []
for i in range(len(model._feed_inputs)):
if issparse(ins[i]) and not K.is_sparse(model._feed_inputs[i]):
indices_for_conversion_to_dense.append(i)
callbacks.model.stop_training = False
callbacks._call_begin_hook('predict')
if steps is not None:
# Step-based predictions.
# Since we do not know how many samples
# we will see, we cannot pre-allocate
# the returned Numpy arrays.
# Instead, we store one array per batch seen
# and concatenate them upon returning.
unconcatenated_outs = []
for step in range(steps):
batch_logs = {'batch': step, 'size': 1}
callbacks._call_batch_hook('predict', 'begin', step, batch_logs)
batch_outs = f(ins)
batch_outs = to_list(batch_outs)
if step == 0:
for batch_out in batch_outs:
unconcatenated_outs.append([])
for i, batch_out in enumerate(batch_outs):
unconcatenated_outs[i].append(batch_out)
batch_logs['outputs'] = batch_outs
callbacks._call_batch_hook('predict', 'end', step, batch_logs)
if verbose == 1:
progbar.update(step + 1)
callbacks.on_predict_end()
if len(unconcatenated_outs) == 1:
return np.concatenate(unconcatenated_outs[0], axis=0)
return [np.concatenate(unconcatenated_outs[i], axis=0)
for i in range(len(unconcatenated_outs))]
else:
# Sample-based predictions.
outs = []
batches = make_batches(num_samples, batch_size)
index_array = np.arange(num_samples)
#global batch_index
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if ins and isinstance(ins[-1], int):
# Do not slice the training phase flag.
ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_arrays(ins, batch_ids)
for i in indices_for_conversion_to_dense:
ins_batch[i] = ins_batch[i].toarray()
batch_logs = {'batch': batch_index, 'size': len(batch_ids)}
callbacks._call_batch_hook('predict', 'begin', batch_index, batch_logs)
batch_outs = f(ins_batch)
batch_outs = to_list(batch_outs)
if batch_index == 0:
# Pre-allocate the results arrays.
for batch_out in batch_outs:
shape = (num_samples,) + batch_out.shape[1:]
outs.append(np.zeros(shape, dtype=batch_out.dtype))
for i, batch_out in enumerate(batch_outs):
outs[i][batch_start:batch_end] = batch_out
batch_logs['outputs'] = batch_outs
callbacks._call_batch_hook('predict', 'end', batch_index, batch_logs)
if verbose == 1:
progbar.update(batch_end)
callbacks._call_end_hook('predict')
return unpack_singleton(outs)
def test_loop(model, f, ins,
batch_size=None,
verbose=0,
steps=None,
callbacks=None):
"""Abstract method to loop over some data in batches.
# Arguments
model: Keras model instance.
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size or `None`.
verbose: verbosity mode.
steps: Total number of steps (batches of samples)
before declaring predictions finished.
Ignored with the default value of `None`.
callbacks: List of callbacks or an instance of
`keras.callbacks.CallbackList` to be called during evaluation.
# Returns
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
model.reset_metrics()
num_samples = check_num_samples(ins,
batch_size=batch_size,
steps=steps,
steps_name='steps')
# Check if callbacks have not been already configured
if not isinstance(callbacks, cbks.CallbackList):
callbacks = cbks.CallbackList(callbacks)
callback_model = model._get_callback_model()
callbacks.set_model(callback_model)
callback_metrics = list(model.metrics_names)
callback_params = {
'batch_size': batch_size,
'steps': steps,
'samples': num_samples,
'verbose': verbose,
'metrics': callback_metrics,
}
callbacks.set_params(callback_params)
outs = []
if verbose == 1:
if steps is not None:
progbar = Progbar(target=steps)
else:
progbar = Progbar(target=num_samples)
# To prevent a slowdown,
# we find beforehand the arrays that need conversion.
feed = (model._feed_inputs +
model._feed_targets +
model._feed_sample_weights)
indices_for_conversion_to_dense = []
for i in range(len(feed)):
if issparse(ins[i]) and not K.is_sparse(feed[i]):
indices_for_conversion_to_dense.append(i)
callbacks.model.stop_training = False
callbacks._call_begin_hook('test')
if steps is not None:
for step in range(steps):
batch_logs = {'batch': step, 'size': 1}
callbacks._call_batch_hook('test', 'begin', step, batch_logs)
batch_outs = f(ins)
if isinstance(batch_outs, list):
if step == 0:
outs.extend([0.] * len(batch_outs))
for i, batch_out in enumerate(batch_outs):
if i == 0: # Index 0 == `Loss`
outs[i] = float(batch_out)
else:
outs[i] += float(batch_out)
else:
if step == 0:
outs.append(0.)
outs[0] += float(batch_outs)
for l, o in zip(model.metrics_names, batch_outs):
batch_logs[l] = o
callbacks._call_batch_hook('test', 'end', step, batch_logs)
if verbose == 1:
progbar.update(step + 1)
outs[0] /= steps # Index 0 == `Loss`
else:
batches = make_batches(num_samples, batch_size)
index_array = np.arange(num_samples)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if isinstance(ins[-1], int):
# Do not slice the training phase flag.
ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_arrays(ins, batch_ids)
for i in indices_for_conversion_to_dense:
ins_batch[i] = ins_batch[i].toarray()
batch_logs = {'batch': batch_index, 'size': len(batch_ids)}
callbacks._call_batch_hook('test', 'begin', batch_index, batch_logs)
batch_outs = f(ins_batch)
if isinstance(batch_outs, list):
if batch_index == 0:
outs.extend([0.] * len(batch_outs))
for i, batch_out in enumerate(batch_outs):
if i == 0: # Index 0 == `Loss`
outs[i] += float(batch_out) * len(batch_ids)
else:
outs[i] = float(batch_out)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += float(batch_outs) * len(batch_ids)
for l, o in zip(model.metrics_names, batch_outs):
batch_logs[l] = float(o)
callbacks._call_batch_hook('test', 'end', batch_index, batch_logs)
if verbose == 1:
progbar.update(batch_end)
outs[0] /= num_samples # Index 0 == `Loss`
callbacks._call_end_hook('test')
return unpack_singleton(outs)
I am using Windows, Python 3.6.8, Keras 2.3.1 and TensorFlow 2.0.0.
I have searched the net but cannot work out how to deal with this.
Double-check your indentation: you are getting this error because you are referencing the variable batch_index outside of the loop in which it was defined, so it is out of scope.
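For context, the same UnboundLocalError appears whenever the line that references batch_index can run without the for loop body ever having executed, whether because of shifted indentation or because batches came back empty (for example when no training samples are found). A minimal reproduction of the pattern:

def fit_loop_sketch():
    batches = []                             # e.g. make_batches(0, batch_size) when there are no samples
    for batch_index, bounds in enumerate(batches):
        pass                                 # body never runs, so batch_index is never assigned
    if batch_index == len(batches) - 1:      # UnboundLocalError: referenced before assignment
        print('Last batch.')

fit_loop_sketch()

If that is what is happening here, the thing to check is why num_train_samples ends up empty (i.e. the arrays actually reaching fit), rather than batch_index itself.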
