How to separate a TensorFlow dataset object into features and labels - python

My goal is to feed a Keras autoencoder model only the (batches of) features from a tf.data.Dataset object.
I'm loading the dataset, formatting the images, and creating batches like this:
# load dataset
(raw_train, raw_validation, raw_test), metadata = tfds.load(
    'cats_vs_dogs',
    split=[
        tfds.Split.TRAIN.subsplit(tfds.percent[:80]),
        tfds.Split.TRAIN.subsplit(tfds.percent[80:90]),
        tfds.Split.TRAIN.subsplit(tfds.percent[90:])],
    with_info=True,
    as_supervised=True,
)

# normalize and resize images
IMG_SIZE = 160

def format_example(image, label):
    image = tf.cast(image, tf.float32)
    image = image / 255.0
    image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
    return image, label

train = raw_train.map(format_example)
validation = raw_validation.map(format_example)
test = raw_test.map(format_example)

# create batches
SHUFFLE_BUFFER_SIZE = 1000
BATCH_SIZE = 32
train_batches = train.shuffle(SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE)
validation_batches = validation.batch(BATCH_SIZE)
test_batches = test.batch(BATCH_SIZE)
At this point I would like to separate the batches into features and labels, something like this:
train_x_batches, train_y_batches = train_batches
But I get this error:
ValueError                                Traceback (most recent call last)
----> 1 train_x_batches, train_y_batches = train_batches
ValueError: too many values to unpack (expected 2)

I had the same problem and solved it like this:
train_x_batches = np.concatenate([x for x, y in train_batches], axis=0)
train_y_batches = np.concatenate([y for x, y in train_batches], axis=0)
And you can get back to your class labels using:
train_batches.class_names
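Since the dataset here was loaded with tfds.load(..., with_info=True), the class names can also be recovered from the returned metadata object; a minimal sketch using the metadata variable from the question:
class_names = metadata.features['label'].names  # e.g. ['cat', 'dog'] for cats_vs_dogs
print(class_names)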

If you need only the features for your autoencoder, you can slice them off via map (the dataset yields (image, label) pairs, so the mapped function receives two arguments):
train_x_batches = train_batches.map(lambda x, y: x)
Of course, you can do the same thing for your labels:
train_y_batches = train_batches.map(lambda x, y: y)
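If the goal is to train the autoencoder directly on these batches, one option is to map the supervised (image, label) pairs to (image, image) pairs, since an autoencoder uses its input as its own target. A minimal sketch, assuming a compiled Keras model named autoencoder (a hypothetical name, not from the original post):
ae_batches = train_batches.map(lambda x, y: (x, x))            # input == target
ae_val_batches = validation_batches.map(lambda x, y: (x, x))
autoencoder.fit(ae_batches, validation_data=ae_val_batches, epochs=10)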

Related

MNIST PyTorch dataset shape of images

I am training a neural network with PyTorch, and I would like to understand the MNIST dataset better.
The dataloader looks like this:
batch_size = 128

transform = transforms.Compose([
    transforms.Resize((28, 28)),
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,)),
])

train_dataset = datasets.MNIST('./data', transform=transform, download=True)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

test_dataset = datasets.MNIST('./data', transform=transform, download=True, train=False)
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
However, when I train on my own dataset there are problems loading the data. What I know is that the MNIST dataset for PyTorch has images of shape (1, 28, 28), i.e. grayscale images. I want to know how they are stored. Are they png, jpg, jpeg or npy files?
The MNIST dataset class is based on this code. If you would like to use your own dataset, you should write a custom dataset class that reads your data according to its properties, such as image size, number of channels, labels, etc.
For instance, something like this example:
import os

import numpy as np
import pandas as pd
import scipy.io as scipyIO
import torch
from torch.utils.data import Dataset


class CustomImageDataset(Dataset):
    def __init__(self, annotations_file, img_dir, transform=None, target_transform=None):
        self.img_labels = pd.read_csv(annotations_file)
        self.img_dir = img_dir
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        return len(self.img_labels)

    def __getitem__(self, idx):
        img_path = os.path.join(self.img_dir, self.img_labels.iloc[idx, 0])
        image = scipyIO.loadmat(img_path).get('rawData')
        image = image.astype(np.float64)
        h, w = image.shape
        image = torch.from_numpy(image).reshape(1, h, w)
        image = image.float()
        ua = self.img_labels.iloc[idx, 1]  # 1: ua value
        us = self.img_labels.iloc[idx, 2]  # 2: us value
        g = self.img_labels.iloc[idx, 3]   # 3: g value
        gt = torch.tensor([ua, us, g])
        gt = gt.float()
        if self.transform:
            image = self.transform(image)
        if self.target_transform:
            gt = self.target_transform(gt)
        return image, gt
(above example is based on this repository)
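A minimal usage sketch for the class above; the CSV and image directory paths are hypothetical placeholders, not from the original answer:
from torch.utils.data import DataLoader

dataset = CustomImageDataset(annotations_file='labels.csv', img_dir='data/images/')
loader = DataLoader(dataset, batch_size=32, shuffle=True)

images, targets = next(iter(loader))
print(images.shape)   # e.g. torch.Size([32, 1, H, W])
print(targets.shape)  # torch.Size([32, 3]) -> (ua, us, g) per image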

How to resize image tensors

The following is my code, where I open every image with PIL and then turn it into a PyTorch tensor:
transform = transforms.Compose([transforms.PILToTensor()])

# choose the training and test datasets
train_data = os.listdir('data/training/')
testing_data = os.listdir('data/testing/')
train_tensors = []
test_tensors = []

for train_image in train_data:
    img = Image.open('data/training/' + train_image)
    train_tensors.append(transform(img))

for test_image in testing_data:
    img = Image.open('data/testing/' + test_image)
    test_tensors.append(transform(img))

# Print out some stats about the training and test data
print('Train data, number of images: ', len(train_data))
print('Test data, number of images: ', len(testing_data))

batch_size = 20
train_loader = DataLoader(train_tensors, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_tensors, batch_size=batch_size, shuffle=True)

# specify the image classes
classes = ['checked', 'unchecked', 'other']

# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
However, I am getting this error:
RuntimeError: stack expects each tensor to be equal size, but got [4, 66, 268] at entry 0 and [4, 88, 160] at entry 1
This is because my images are not resized before the PIL -> tensor conversion. What is the correct way to resize the images?
Try using ImageFolder from torchvision, and since your images have different sizes, you can use CenterCrop or RandomResizedCrop depending on your task. Check the full list of transforms.
Here is an example:
train_dir = "data/training/"

train_dataset = datasets.ImageFolder(
    train_dir,
    transforms.Compose([
        transforms.RandomResizedCrop(img_size),  # image size: int or tuple
        # Add more transforms here
        transforms.ToTensor(),  # convert to tensor at the end
    ]))

train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
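If you would rather keep the whole image instead of cropping it, a plain Resize transform also gives every tensor the same size. A minimal sketch; the 224x224 target size is an arbitrary choice, not from the original answer:
transform = transforms.Compose([
    transforms.Resize((224, 224)),  # force a fixed height and width
    transforms.ToTensor(),
])
train_dataset = datasets.ImageFolder(train_dir, transform)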

How to create a train, test & validation split of a tf.data.Dataset in TF 2.1.0

The following code is copied from:
https://www.tensorflow.org/tutorials/load_data/images
The code aims to create a dataset of images downloaded from the web and stored in folders according to their classes; please refer to the link above for the full context!
list_ds = tf.data.Dataset.list_files(str(data_dir/'*/*'))

for f in list_ds.take(5):
    print(f.numpy())

def get_label(file_path):
    # convert the path to a list of path components
    parts = tf.strings.split(file_path, os.path.sep)
    # The second to last is the class-directory
    return parts[-2] == CLASS_NAMES

def decode_img(img):
    # convert the compressed string to a 3D uint8 tensor
    img = tf.image.decode_jpeg(img, channels=3)
    # Use `convert_image_dtype` to convert to floats in the [0,1] range.
    img = tf.image.convert_image_dtype(img, tf.float32)
    # resize the image to the desired size.
    return tf.image.resize(img, [IMG_WIDTH, IMG_HEIGHT])

def process_path(file_path):
    label = get_label(file_path)
    # load the raw data from the file as a string
    img = tf.io.read_file(file_path)
    img = decode_img(img)
    return img, label

# Set `num_parallel_calls` so multiple images are loaded/processed in parallel.
labeled_ds = list_ds.map(process_path, num_parallel_calls=AUTOTUNE)

for image, label in labeled_ds.take(1):
    print("Image shape: ", image.numpy().shape)
    print("Label: ", label.numpy())

def prepare_for_training(ds, cache=True, shuffle_buffer_size=1000):
    # This is a small dataset, only load it once, and keep it in memory.
    # use `.cache(filename)` to cache preprocessing work for datasets that don't
    # fit in memory.
    if cache:
        if isinstance(cache, str):
            ds = ds.cache(cache)
        else:
            ds = ds.cache()
    ds = ds.shuffle(buffer_size=shuffle_buffer_size)
    # Repeat forever
    ds = ds.repeat()
    ds = ds.batch(BATCH_SIZE)
    # `prefetch` lets the dataset fetch batches in the background while the model
    # is training.
    ds = ds.prefetch(buffer_size=AUTOTUNE)
    return ds

train_ds = prepare_for_training(labeled_ds)
We are finally left with train_ds, which is a PrefetchDataset object and contains the entire dataset of images and labels!
How do I split train_ds into train, test & validation sets to feed into a model?
After the ds.repeat() call the dataset is infinite, and splitting an infinite dataset doesn't work very well. Therefore you should split it before the prepare_for_training() call, like this:
labeled_ds = list_ds.map(process_path, num_parallel_calls=AUTOTUNE)
labeled_ds = labeled_ds.shuffle(10000).batch(BATCH_SIZE)
# Size of dataset
n = sum(1 for _ in labeled_ds)
n_train = int(n * 0.8)
n_valid = int(n * 0.1)
n_test = n - n_train - n_valid
train_ds = labeled_ds.take(n_train)
valid_ds = labeled_ds.skip(n_train).take(n_valid)
test_ds = labeled_ds.skip(n_train + n_valid).take(n_test)
The line n = sum(1 for _ in labeled_ds) iterates through the dataset once to get its size (counted in batches, since the dataset was batched first); the dataset is then split three ways into 80%/10%/10%.
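One caveat worth noting, added here as an assumption rather than part of the answer above: shuffle() reshuffles the dataset on every iteration by default, so examples can drift between the take/skip splits across epochs. Fixing a seed and disabling reshuffling keeps the three splits disjoint; a minimal sketch:
# Shuffle once in a fixed order so take()/skip() always see the same split
labeled_ds = labeled_ds.shuffle(10000, seed=42, reshuffle_each_iteration=False).batch(BATCH_SIZE)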

How can I properly get my Dataset to create?

I have the following code:
imagepaths = tf.convert_to_tensor(imagepaths, dtype=tf.string)
labels = tf.convert_to_tensor(labels, dtype=tf.int32)
# Build a TF Queue, shuffle data
image, label = tf.data.Dataset.from_tensor_slices((imagepaths, labels))
and am getting the following error:
image, label = tf.data.Dataset.from_tensor_slices((imagepaths, labels))
ValueError: too many values to unpack (expected 2)
Shouldn't Dataset.from_tensor_slices see this as the length of the tensor, not the number of inputs? How can I fix this issue or combine the data tensors into the same variable more effectively?
Just for reference:
There are 1800 imagepaths and 1800 labels corresponding to each other. And to be clear, the imagepaths are paths to the files where the jpg images are located. My goal after this is to shuffle the dataset and build the neural network model.
That code is right here:
# Read images from disk
image = tf.read_file(image)
image = tf.image.decode_jpeg(image, channels=CHANNELS)
# Resize images to a common size
image = tf.image.resize_images(image, [IMG_HEIGHT, IMG_WIDTH])
# Normalize
image = image * 1.0 / 127.5 - 1.0
# Create batches
X, Y = tf.train.batch([image, label], batch_size=batch_size,
                      capacity=batch_size * 8,
                      num_threads=4)
Try this:
def transform(entry):
    img = entry[0]
    lbl = entry[1]
    return img, lbl

raw_data = list(zip(imagepaths, labels))
dataset = tf.data.Dataset.from_tensor_slices(raw_data)
dataset = dataset.map(transform)
And if you want to have a look at your dataset, you can do it like this:
for e in dataset.take(1):
    print(e)
You can chain multiple map functions, and after that use shuffle and batch on your dataset to prepare it for training ;) See the sketch below.
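A minimal sketch of what that could look like, assuming the dataset yields (image_path, label) pairs as above; the decoding step and parameter values are illustrative, not from the original answer:
def load_image(path, lbl):
    # Read and decode the jpg file, then scale to a fixed size in [0, 1]
    img = tf.io.read_file(path)
    img = tf.image.decode_jpeg(img, channels=3)
    img = tf.image.resize(img, [IMG_HEIGHT, IMG_WIDTH]) / 255.0
    return img, lbl

dataset = dataset.map(load_image)
dataset = dataset.shuffle(1000).batch(batch_size)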

Keras image generator keeps giving a different number of labels

I am trying to make a simple fine-tuned ResNet50 model using the Market1501 dataset and Keras.
The dataset contains images (12000 or so) and 751 labels that I want to use (0-750). I can't fit the data in memory in a single go, so I have to use an image generator for this.
So my base model is like this:
base_model = ResNet50(weights='imagenet', include_top=False,input_tensor=Input(shape=(224,224,3)))
x = base_model.output
x = Flatten(name="flatten")(x)
x = Dropout(0.5)(x)
x = Dense(750, activation='softmax', name='fc8',kernel_initializer=RandomNormal(mean=0.0, stddev=0.001))(x)
model = Model(input=base_model.input, output=x)
And my image generator is like this:
def image_generator(image_array, batch_size):
    # Define data generator arguments
    datagen_args = dict(rotation_range=20,
                        width_shift_range=0.2,
                        height_shift_range=0.2,
                        shear_range=0.1,
                        zoom_range=0.1,
                        horizontal_flip=True)
    # Create different data generators for each image
    # This gives each image a unique transformation which will make it harder for the network
    datagen = ImageDataGenerator(**datagen_args)
    while True:
        number_of_images = len(image_array)
        indices = np.random.permutation(np.arange(number_of_images))
        num_batches = number_of_images // batch_size
        for bid in range(num_batches):
            # loop once per batch
            images = []
            labels = []
            batch_indices = indices[bid * batch_size: (bid + 1) * batch_size]
            for i in batch_indices:
                img, lbl = image_array[i]
                # Process images
                img = image.load_img(os.path.join(TRAIN, img), target_size=[224, 224])
                img = image.img_to_array(img)
                #img = np.expand_dims(img, axis=0)
                img = preprocess_input(img)
                img = datagen.random_transform(img)
                images.append(img)
                labels.append(lbl)
            yield np.array(images), to_categorical(labels)
And I use it like this:
batch_size = 64
NUM_EPOCHS = 40
train_gen = image_generator(image_array, batch_size)
num_train_steps = len(image_array)
The issue is it gives me this error:
Error when checking target: expected fc8 to have shape (751,) but got array with shape (742,)
And the bigger issue is that the second number keeps changing, so I know it's something with the image generator not including every label in each iteration.
EDIT
How the data is generated:
There is an external list with the image and the label, like this:
['0002_451_03.jpg', '0']
img001.jpg, 0
img002.jpg, 0
...
img1500.jpg, 750
This is read in and loaded into an array. The label is the number after the image name.
Change
batch_indices = indices[bid * batch_size: (bid + 1) * batch_size]
to
batch_indices = indices[bid * batch_size: min((bid + 1) * batch_size, number_of_images)]
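A related point, added here as an assumption rather than part of the original answer: the changing second dimension can also come from to_categorical inferring the number of classes from whichever labels happen to appear in a batch. Passing the class count explicitly pins the output shape to (batch_size, 751); a minimal sketch:
from tensorflow.keras.utils import to_categorical

# Inside image_generator, the final line would become:
#     yield np.array(images), to_categorical(labels, num_classes=751)
one_hot = to_categorical([0, 750], num_classes=751)
print(one_hot.shape)  # (2, 751) regardless of which labels appear in the batch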
