Get len(dataset) = 0 in object detection problem - python

I'm solving the problem of object detection on a fruit dataset: https://yadi.sk/d/UPwQB7OZrB48qQ.
I was given the code for my dataset class:
import glob
import os

import cv2
import torch
import xmltodict
from torch.utils.data import Dataset

class2tag = {"apple": 1, "orange": 2, "banana": 3}

class FruitDataset(Dataset):
    def __init__(self, data_dir, transform=None):
        self.images = []
        self.annotations = []
        self.transform = transform
        for annotation in glob.glob(data_dir + "/*xml"):
            image_fname = os.path.splitext(annotation)[0] + ".jpg"
            self.images.append(cv2.cvtColor(cv2.imread(image_fname), cv2.COLOR_BGR2RGB))
            with open(annotation) as f:
                annotation_dict = xmltodict.parse(f.read())
            bboxes = []
            labels = []
            objects = annotation_dict["annotation"]["object"]
            if not isinstance(objects, list):
                objects = [objects]
            for obj in objects:
                bndbox = obj["bndbox"]
                bbox = [bndbox["xmin"], bndbox["ymin"], bndbox["xmax"], bndbox["ymax"]]
                bbox = list(map(int, bbox))
                bboxes.append(torch.tensor(bbox))
                labels.append(class2tag[obj["name"]])
            self.annotations.append(
                {"boxes": torch.stack(bboxes).float(), "labels": torch.tensor(labels)}
            )

    def __getitem__(self, i):
        if self.transform:
            # the following code is correct if you use albumentations
            # if you use torchvision transforms you have to modify it =)
            res = self.transform(
                image=self.images[i],
                bboxes=self.annotations[i]["boxes"],
                labels=self.annotations[i]["labels"],
            )
            return res["image"], {
                "boxes": torch.tensor(res["bboxes"]),
                "labels": torch.tensor(res["labels"]),
            }
        else:
            return self.images[i], self.annotations[i]

    def __len__(self):
        return len(self.images)
I'm doing my project in Google Colab, so I've mounted Google Drive and unzipped the archive.
from google.colab import drive
drive.mount('/content/drive')
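The unzip step might look something like this (a sketch; the archive paths on Drive below are placeholders, not the actual ones):
import zipfile

# hypothetical archive locations; adjust to wherever the zips actually live
for archive in ["/content/drive/MyDrive/train_zip.zip", "/content/drive/MyDrive/test_zip.zip"]:
    with zipfile.ZipFile(archive) as zf:
        zf.extractall(".")  # should produce ./train_zip/... and ./test_zip/...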
Then I did some augmentations using albumentations:
import albumentations as A

train_transform = A.Compose([
    A.Flip(p=0.25),
    A.RGBShift(p=0.2),
], bbox_params=A.BboxParams(format='coco'))
val_transform = A.Compose([], bbox_params=A.BboxParams(format='coco'))

train_dataset = FruitDataset("./train_zip/train", transform=train_transform)
val_dataset = FruitDataset("./test_zip/test", transform=val_transform)
However, when I run len(train_dataset), I get the value 0, and I can't work out why the dataset size is 0 or where the problem lies.
I would be very grateful for any advice.
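A quick sanity check (a minimal sketch, assuming the Colab working directory) is to print what the glob pattern actually matches; if the list is empty, the data_dir path or the unzip location is the problem rather than the dataset class:
import glob, os

data_dir = "./train_zip/train"
print(os.getcwd())                        # where Colab is actually running
print(os.path.isdir(data_dir))            # does the directory exist at all?
print(glob.glob(data_dir + "/*xml")[:5])  # an empty list here explains len(dataset) == 0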

Related

Writing custom datasets and dataloaders with PyTorch

Good afternoon!
I have questions about the following tutorial:
https://pytorch.org/tutorials/beginner/data_loading_tutorial.html
I have a similar dataset (images + landmarks). I’ve built the custom dataloader following the tutorial and checked the types of dataloader components (torch.float64 for both images and landmarks).
Then I applied the dataloader to the classification model with this training class:
class Trainer():
    def __init__(self, criterion=None, optimizer=None, schedular=None):
        self.criterion = criterion
        self.optimizer = optimizer
        self.schedular = schedular

    def train_batch_loop(self, model, train_dataloader):
        train_loss = 0.0
        train_acc = 0.0
        for images, landmarks, labels in train_dataloader:
            images = images.to(device)
            landmarks = landmarks.to(device)
            labels = labels.to(device)
            self.optimizer.zero_grad()
I won't elaborate further, because the training crashes at images = images.to(device) with the following error: AttributeError: 'str' object has no attribute 'to'
I don’t understand where this string is coming from if all the dataloader components are torch.float64.
I went back to check the initial data: in the tutorial, the landmarks are summarized in a pandas dataframe with landmark values as int64 and image name as “object”.
In my summary dataframe image name is an “object” as well and landmarks are numpy.float64. Again, no strings anywhere…
Appreciate any advice - what else should I check in addition to dtypes?
There are 30 cats and 48 landmarks for each image
The dataset is defined as follows:
class FaceLandmarksDataset(Dataset):
    def __init__(self, data_frame, root_dir, transform=None):
        self.data_frame = data_frame
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.data_frame)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img_name = os.path.join(self.root_dir,
                                self.data_frame.iloc[idx, 2])
        image = io.imread(img_name)
        landmarks = self.data_frame.iloc[idx, 3:]
        landmarks = np.array([landmarks])
        landmarks = landmarks.astype('float').reshape(-1, 2)
        labels = self.data_frame.iloc[idx, 1].reshape(1)
        sample = {'image': image, 'landmarks': landmarks, 'labels': labels}
        if self.transform:
            sample = self.transform(sample)
        return sample
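Note where a string can come from here: __getitem__ returns a dict, and PyTorch's default collate function preserves the dict structure per batch, so a loop like for images, landmarks, labels in train_dataloader unpacks the dict's string keys rather than the tensors. A minimal illustration of that Python behavior:
sample = {'image': 1, 'landmarks': 2, 'labels': 3}
images, landmarks, labels = sample  # unpacking a dict iterates its KEYS
print(images)  # 'image' -- a str, hence "'str' object has no attribute 'to'"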
Hi, I made some changes based on your dataset. Please adjust where you see fit; since I don't have your data I cannot test this, but based on my understanding this should work:
import os
from typing import Dict

import numpy as np
import torch
from skimage import io
from torch.utils.data import Dataset

class FaceLandmarksDataset(Dataset):
    def __init__(self, data_frame, root_dir, transform=None):
        self.data_frame = data_frame
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.data_frame)

    def __getitem__(self, idx) -> Dict[str, torch.Tensor]:
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img_name = os.path.join(self.root_dir, self.data_frame.iloc[idx]['image_name'])
        image = io.imread(img_name)
        landmarks = self.data_frame.iloc[idx, 3:].tolist()
        landmarks = np.array(landmarks)
        landmarks = landmarks.astype('float32').reshape(-1, 2)
        labels = self.data_frame.iloc[idx]['label']
        # You're transforming your image only, therefore pass in the array
        if self.transform:
            image = self.transform(image)
        # Create the dictionary after finishing all transforms -> becomes a torch tensor when collated
        sample = {
            'image': image,
            'landmarks': landmarks,
            'labels': labels
        }
        return sample
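If the dataset keeps returning a dict like this, the batch is best consumed by key instead of tuple unpacking (a sketch of the training-loop side, under that assumption):
for batch in train_dataloader:
    images = batch['image'].to(device)
    landmarks = batch['landmarks'].to(device)
    labels = batch['labels'].to(device)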

Training stuck at Epoch 3 PyTorch

I am training a custom Encoder-Decoder network, but the training gets stuck at Epoch 3. Nothing happens for about 2 hours. I will share the Dataset class and the DataLoader object. The versions of CUDA and the GPU can be seen in the pic below.
Training stuck here:
nvidia-smi output looks like this:
The __getitem__ method of the dataset class looks like this:
def __init__(self,
             images_dir,
             annots_dir,
             train=True,
             img_size=(512, 1536),
             stride=4,
             model='custom',
             transforms=None):
    """
    :param root: dataset directory
    :param filenames: filenames inside the root directory
    :param labels: Object Detection Labels
    """
    super(CustomDataset).__init__()
    self.images_dir = images_dir
    self.annots_dir = annots_dir
    self.train = train
    self.image_size = img_size
    self.stride = stride
    self.transforms = transforms
    self.model = model
    # Load the image and annotation files from the dataset
    # self.image_files, self.annot_files = self._load_image_and_annot_files()
    self.image_files = [os.path.join(self.images_dir, idx) for idx in os.listdir(self.images_dir)]
    self.annot_files = [os.path.join(self.annots_dir, idx) for idx in os.listdir(self.annots_dir)]

def __getitem__(self, index):
    """
    :param index: index...0 to N
    :return: tensor_image and tensor_label
    """
    # Image filename from _load_image_files()
    # Load image with _read_matrix() and label
    curr_image_filename = self.image_files[index]
    curr_annot_filename = self.annot_files[index]
    np_image = self._read_matrix(raw_img=curr_image_filename)
    np_image_normalized = np.squeeze(self._normalize_raw_img(np_image))
    # label = self.labels[index]
    boxes, classes, depths, tgts = self._load_annotations(curr_annot_filename)
    # Normalize bounding boxes: range [0, 1]
    targets_normalized = self._normalize_bbox(np_image_normalized, tgts)
    # image and the corresponding label should be a tensor
    torch_image = torch.from_numpy(np_image).reshape(1, 512, 1536).float()  # dtype: torch.float32
    torch_boxes = torch.from_numpy(boxes).type(torch.FloatTensor)
    torch_depths = torch.from_numpy(depths)
    if self.model == 'fasterrcnn':
        # For FasterRCNN: as COCO format
        area = (torch_boxes[:, 3] - torch_boxes[:, 1]) * (torch_boxes[:, 2] - torch_boxes[:, 0])
        iscrowd = torch.zeros((boxes.shape[0],), dtype=torch.int64)
        image_id = torch.Tensor([index])
        torch_classes = torch.from_numpy(classes)
        target = {'boxes': torch_boxes, 'labels': torch_classes.long(),
                  'area': area, 'iscrowd': iscrowd, 'image_id': image_id}
        return torch_image, target
    elif self.model == 'custom':
        if self.train:
            if self.transforms:
                try:
                    tr = self.transforms()
                    transform_image, transform_boxes, labels = tr.__call__(np_image, tgts, tgts[:, :4], tgts[:, 4:])
                    transform_targets = np.hstack((np.array(transform_boxes), labels))
                    gt_tensor = gt_creator(img_size=self.image_size,
                                           stride=self.stride,
                                           num_classes=8,
                                           label_lists=transform_targets)
                    return torch.from_numpy(transform_image).float(), gt_tensor
                except IndexError:
                    pass
            else:
                gt_tensor = gt_creator(img_size=self.image_size,
                                       stride=self.stride,
                                       num_classes=8,
                                       label_lists=targets_normalized)
                return torch_image, gt_tensor
        else:
            return torch_image, targets_normalized
And in the train.py script the DataLoader object is:
train_loader = torch.utils.data.DataLoader(dataset=dataset,
                                           shuffle=True,
                                           batch_size=1,
                                           num_workers=0,
                                           collate_fn=detection_collate,
                                           pin_memory=True)
Why does the training get stuck? Is there an issue with the __getitem__ method? Or the DataLoader?
Thank You.
This happens because torch doesn't restart your dataset; if your data runs out, it stops and waits for more input, so the cycling has to be done manually.
I used something along the lines of:
from itertools import cycle

class Dataloader():
    # init and whatever
    def __iter__(self):
        return cycle(self.get_sample())  # get_sample is your current __getitem__
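As a sketch of the same idea applied to the standard train_loader defined in the question, itertools.cycle can also wrap the loader directly; note that cycle caches the elements it has seen, so it replays the first pass's batches:
from itertools import cycle

batches = cycle(train_loader)  # never raises StopIteration
for step in range(num_steps):  # num_steps is up to you; a cycled loader has no natural end
    images, targets = next(batches)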

How to construct batches that return an equal number of images per class

I am working on an image retrieval project. To make the model more fair, I want to construct batches that return:
5 images per class, and
75 images per batch
I have 300 classes in total in my dataset, so obviously only 15 classes' images can be contained in each batch. The data is balanced, meaning there is an equal number of images per class. I am using PyTorch.
I have created a PyTorch dataset, and I want to add the above functionality to my ImageFolderLoader class, whose code I added below.
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]

def is_image_file(filename):
    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)

def find_classes(dir):
    classes = os.listdir(dir)
    classes.sort()
    class_to_idx = {classes[i]: i for i in range(len(classes))}
    classes = [clss.split('.')[1] for clss in classes]
    return classes, class_to_idx

def make_dataset(dir, class_to_idx):
    images = []
    for target in os.listdir(dir):
        d = os.path.join(dir, target)
        if not os.path.isdir(d):
            continue
        for filename in os.listdir(d):
            if is_image_file(filename):
                path = '{0}/{1}'.format(target, filename)
                item = (path, class_to_idx[target])
                images.append(item)
    return images

def default_loader(path):
    return Image.open(path).convert('RGB')

class ImageFolderLoader(Dataset):
    def __init__(self, root, transform=None, loader=default_loader):
        classes, class_to_idx = find_classes(root)
        imgs = make_dataset(root, class_to_idx)
        self.root = root
        self.imgs = imgs
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.transform = transform
        self.loader = loader

    def __getitem__(self, index):
        path, target = self.imgs[index]
        img = self.loader(os.path.join(self.root, path))
        if self.transform is not None:
            img = self.transform(img)
        return img, target

    def __len__(self):
        return len(self.imgs)
If there is a way to do this, please let me know.
Edit: for anyone who wants to see the solution, I added it below after solving the problem.
I solved the problem by passing a batch_sampler to the DataLoader module. For this I used the pytorch-balanced-sampler git project, which allows awesome customization of batch samplers; you should visit the repo.
My custom dataset:
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]

def is_image_file(filename):
    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)

def find_classes(dir):
    classes = os.listdir(dir)
    classes.sort()
    class_to_idx = {classes[i]: i for i in range(len(classes))}
    classes = [clss.split('.')[1] for clss in classes]
    return classes, class_to_idx

def make_dataset(dir, class_to_idx):
    images = []
    for target in os.listdir(dir):
        d = os.path.join(dir, target)
        if not os.path.isdir(d):
            continue
        for filename in os.listdir(d):
            if is_image_file(filename):
                path = '{0}/{1}'.format(target, filename)
                item = (path, class_to_idx[target])
                images.append(item)
    data_dict = {}
    for item in images:
        cls = item[1]
        if cls not in data_dict.keys():
            data_dict[cls] = [item]
        else:
            data_dict[cls].append(item)
    return images, data_dict

def default_loader(path):
    return Image.open(path).convert('RGB')

class ImageFolderLoader(Dataset):
    def __init__(self, root, transform=None, loader=default_loader):
        classes, class_to_idx = find_classes(root)
        imgs, instance_labels = make_dataset(root, class_to_idx)
        self.instance_labels = instance_labels
        self.root = root
        self.imgs = imgs
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.transform = transform
        self.loader = loader

    def __getitem__(self, index):
        path, target = self.imgs[index]
        img = self.loader(os.path.join(self.root, path))
        if self.transform is not None:
            img = self.transform(img)
        return img, target

    def __len__(self):
        return len(self.imgs)
Then I used the SamplerFactory class from the pytorch-balanced-sampler project; you need to visit the repository to understand the parameters:
train_data = ImageFolderLoader(root=TRAIN_PATH, transform=transform)
batch_sampler = SamplerFactory().get(
    class_idxs=my_list,
    batch_size=75,
    n_batches=146,
    alpha=1,
    kind="fixed"
)
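The snippet stops before the sampler is attached to a loader; the hookup would look something like this (a sketch; my_list is assumed to be the per-class index lists, e.g. built from train_data.instance_labels):
train_loader = torch.utils.data.DataLoader(
    train_data,
    batch_sampler=batch_sampler,  # batch_sampler replaces batch_size/shuffle/sampler
)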
There are a few open-ended questions in how this is implemented. For instance, do you want each class to be equally represented regardless of that class's actual frequency? Note that this may give better performance on minority classes at the expense of performance on majority classes.
Also, do you want each example to be used at most once per epoch, or at least once per epoch?
In any case, this will likely be difficult to accomplish with the standard __getitem__ method, because it returns an example with no regard for the other examples returned in the same batch. You'll likely need to define a custom dataloader object to ensure good data distribution and usage properties, which is a bit unfortunate because PyTorch's dataloader and dataset objects work together quite nicely and efficiently for most simple use cases. Perhaps someone else has a solution that uses these objects.
Here's a solution that uses random sampling with replacement after each batch, so there's no guarantee that every example will be used. Also, it uses looping so you could probably do better with parallelization.
import os
import random

import torch
from torch.utils.data import Dataset

class ImageFolderLoader(Dataset):
    def __init__(self, root, transform=None, loader=default_loader):
        classes, class_to_idx = find_classes(root)
        imgs = make_dataset(root, class_to_idx)
        # currently, imgs items are of the form (path, class)
        data_dict = {}
        for item in imgs:
            cls = item[1]
            if cls not in data_dict.keys():
                data_dict[cls] = [item]
            else:
                data_dict[cls].append(item)
        # each class is the key for a list of all items belonging to that class
        self.data_dict = data_dict
        self.root = root
        self.imgs = imgs
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.transform = transform
        self.loader = loader

    def get_batch(self):
        img_batch = []
        label_batch = []
        classes = random.sample(range(300), 15)  # pick 15 of the 300 classes
        for cls in classes:
            class_data = self.data_dict[cls]
            selection = random.sample(range(len(class_data)), 5)  # 5 images per class
            for idx in selection:
                img = self.loader(os.path.join(self.root, class_data[idx][0]))
                if self.transform is not None:
                    img = self.transform(img)
                img_batch.append(img)
                label_batch.append(cls)
        img_batch = torch.stack(img_batch)
        label_batch = torch.tensor(label_batch)  # labels are ints, so tensor() rather than stack()
        return img_batch, label_batch

    def __len__(self):
        return len(self.imgs)
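Since get_batch bypasses __getitem__, this dataset is driven directly rather than through a DataLoader (a usage sketch; n_batches is up to you):
dataset = ImageFolderLoader(root=TRAIN_PATH, transform=transform)
for step in range(n_batches):
    images, labels = dataset.get_batch()  # 15 classes x 5 images = 75 per batch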

How to create a custom data loader in Pytorch?

I have a file containing paths to images I would like to load into Pytorch, while utilizing the built-in dataloader features (multiprocess loading pipeline, data augmentations, and so on).
def create_links():
    config = ConfigProvider.config()  # added: config was otherwise undefined here
    data_dir = "/myfolder"
    full_path_list = []
    assert os.path.isdir(data_dir)
    for _, _, filenames in os.walk(data_dir):
        for filename in filenames:
            full_path_list.append(os.path.join(data_dir, filename))
    with open(config.data.links_file, 'w+') as links_file:
        for full_path in full_path_list:
            links_file.write(f"{full_path}\n")

def read_links_file_to_list():
    config = ConfigProvider.config()
    links_file_path = config.data.links_file
    if not os.path.isfile(links_file_path):
        raise RuntimeError("did you forget to create a file with links to images? Try using 'create_links()'")
    with open(links_file_path, 'r') as links_file:
        return links_file.readlines()
So I have a list of files (or a generator, or whatever works), file_list = read_links_file_to_list().
How can I build a Pytorch dataloader around it, and how would I use it?
What you want is a Custom Dataset. The __getitem__ method is where you would apply transforms such as data-augmentation etc. To give you an idea of what it looks like in practice you can take a look at this Custom Dataset I wrote the other day:
class GTSR43Dataset(Dataset):
    """German Traffic Sign Recognition dataset."""
    def __init__(self, root_dir, train_file, transform=None):
        self.root_dir = root_dir
        self.train_file_path = train_file
        self.label_df = pd.read_csv(os.path.join(self.root_dir, self.train_file_path))
        self.transform = transform
        self.classes = list(self.label_df['ClassId'].unique())

    def __getitem__(self, idx):
        """Return (image, target) after resize and preprocessing."""
        img = os.path.join(self.root_dir, self.label_df.iloc[idx, 7])
        X = Image.open(img)
        y = self.class_to_index(self.label_df.iloc[idx, 6])
        if self.transform:
            X = self.transform(X)
        return X, y

    def class_to_index(self, class_name):
        """Returns the index of a given class."""
        return self.classes.index(class_name)

    def index_to_class(self, class_index):
        """Returns the class of a given index."""
        return self.classes[class_index]

    def get_class_count(self):
        """Return a list of label occurrences."""
        cls_count = dict(self.label_df.ClassId.value_counts())
        # cls_percent = list(map(lambda x: (1 - x / sum(cls_count)), cls_count))
        return cls_count

    def __len__(self):
        """Returns the length of the dataset."""
        return len(self.label_df)
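To tie this back to the question, a usage sketch (the paths and transform below are placeholders): build the dataset from the CSV of file paths, then wrap it in a DataLoader to get the multiprocess loading pipeline for free:
from torch.utils.data import DataLoader
from torchvision import transforms

transform = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
])
dataset = GTSR43Dataset(root_dir='./GTSRB', train_file='Train.csv', transform=transform)
loader = DataLoader(dataset, batch_size=64, shuffle=True, num_workers=4)

for X, y in loader:
    ...  # training step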

Error preprocessing the input data when using Tensorflow Dataset API

I have images of shape [64, 512, 5] stored in *.npy files, which I convert into *.tfrecords files.
I have verified that the reading of said records corresponds correctly with what is present in the *.npy files. However, when I perform some operation in the parser, like adding 1 to each pixel of the image, the result is not the expected one. The result should be 64*512*5 = 163840, but it is 163839.99980013957 (and not always the same value).
I have tried different operations, like tf.subtract, but the results are the same.
Could someone tell me what is wrong?
import re
import ast
import sys, select
import random as rn
from glob import glob
from tqdm import tqdm
from datetime import datetime
from configparser import SafeConfigParser
import numpy as np
import numpy.ma as ma
import scipy.misc
import os.path
from os import mkdir, stat
from os.path import exists, dirname, abspath
from os.path import join as dir_join
import tensorflow as tf
''' File hierarchy
'''
_code_dir = dirname(abspath(__file__))
_python_dir = dirname(_code_dir)
_model_dir = dirname(_python_dir)
_project_dir = dirname(_model_dir)
_ml_dir = dirname(_project_dir)
_srv_dir = dirname(_ml_dir)
_root_datasets_dir = dir_join(_srv_dir,'machine_learning','data_sets/ssd_prepared')
_config_dir = dir_join(_python_dir, 'config')
'''Data sets directories
'''
THIS_DATA_SET_DIR = 'Sph_50m' #WARNING: Global variable also used in helper.py
_data_dir = dir_join(_root_datasets_dir, THIS_DATA_SET_DIR)
_data_set_dir = dir_join(_data_dir,'ImageSet')
_data_npy_dir = dir_join(_data_dir,'data')
_data_tfRecord_dir = dir_join(_data_dir,'tfRecord')
''' Configuration parser
'''
cfg_parser = SafeConfigParser()
cfg_parser.read(dir_join(_config_dir,'cfg_model.ini'))
''' Private variables
'''
_batch_size = cfg_parser.getint(section='train', option='batch_size')
_max_epoch = cfg_parser.getint(section='train', option='max_epoch')
_standarize = cfg_parser.getboolean(section='train', option='standarize_input')
_input_shape = ast.literal_eval(cfg_parser.get(section='data_shape', option='input_shape'))
_label_channel = cfg_parser.getint(section='data_shape', option='label_channel')
_track_channel = cfg_parser.getint(section='data_shape', option='track_channel')
_mask_channel = cfg_parser.getint(section='data_shape', option='mask_channel')
_data_train = cfg_parser.get(section='data_set', option='data_train')
_data_val = cfg_parser.get(section='data_set', option='data_val')
_data_test = cfg_parser.get(section='data_set', option='data_test')
def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value.reshape(-1)))

def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def _floats_feature(value):
    return tf.train.Feature(float_list=tf.train.FloatList(value=value.reshape(-1)))
def numpy_to_TFRecord():
    if not exists(_data_tfRecord_dir): mkdir(_data_tfRecord_dir)
    for dataset in [_data_train, _data_val, _data_test]:
        tfRecord_folder = dir_join(_data_tfRecord_dir, dataset)
        if not exists(tfRecord_folder): mkdir(tfRecord_folder)
        # Retrieve list of files
        projections_dir = []
        file_ = open(dir_join(_data_set_dir, dataset + '.txt'), 'r')
        for x in file_.readlines():
            file_nat = x.strip() + '.npy'
            filename = dir_join(_data_npy_dir, file_nat)
            assert exists(filename), "{} doesn't exist".format(filename)
            projections_dir.append(filename)
        file_.close()
        totaltfRecordSize = 0
        numFile = 0
        for projection_dir in tqdm(projections_dir, ncols=100, desc='TFRecord {}'.format(dataset)):
            scanName = projection_dir.split('/')[-1].split('.')[0]
            if totaltfRecordSize > 100*(10**6) or totaltfRecordSize == 0:
                # address to save the TFRecords file
                train_filename = dir_join(tfRecord_folder, \
                                          str(numFile) + '_' + dataset + '.tfrecords')
                # open the TFRecords file
                writer = tf.python_io.TFRecordWriter(train_filename)
                numFile += 1
                totaltfRecordSize = 0
            # Load the image
            projection = np.load(projection_dir)
            image = projection[:, :, :_label_channel]
            label = projection[:, :, _label_channel].astype(int)
            mask = projection[:, :, _mask_channel].astype(int)
            track = projection[:, :, _track_channel].astype(int)
            # Create a feature
            feature = {'image': _floats_feature(image),
                       'label': _int64_feature(label),
                       'mask': _int64_feature(mask),
                       'track': _int64_feature(track),
                       'scanName': _bytes_feature(tf.compat.as_bytes(scanName))}
            # Create an example protocol buffer
            example = tf.train.Example(features=tf.train.Features(feature=feature))
            # Serialize to string and write on the file
            writer.write(example.SerializeToString())
            fileSize = stat(train_filename).st_size
            totaltfRecordSize += fileSize
        writer.close()
        sys.stdout.flush()
def readTFRecord():
    # Transforms a scalar string `example_proto` into a pair of a scalar string and
    # a scalar integer, representing an image and its label, respectively.
    image_dim = _input_shape[0] * _input_shape[1] * _label_channel
    label_dim = _input_shape[0] * _input_shape[1]
    mean = np.load(dir_join(_data_dir, 'mean.npy'))
    std = np.load(dir_join(_data_dir, 'std.npy'))
    mean_tf = tf.convert_to_tensor(mean, dtype=tf.float32, name='mean')
    std_tf = tf.convert_to_tensor(std, dtype=tf.float32, name='std')
    with tf.variable_scope('TFRecord'):
        def _parse_function(example_proto):
            with tf.variable_scope('parser'):
                features = {'image': tf.FixedLenFeature([image_dim], tf.float32),
                            'label': tf.FixedLenFeature([label_dim], tf.int64),
                            'mask': tf.FixedLenFeature([label_dim], tf.int64),
                            'track': tf.FixedLenFeature([label_dim], tf.int64),
                            'scanName': tf.FixedLenFeature([], tf.string)}
                parsed_features = tf.parse_single_example(example_proto, features)
                # Reshape image data into the original shape
                image = tf.reshape(parsed_features['image'], [_input_shape[0], _input_shape[1], _label_channel], name='image')
                label = tf.reshape(parsed_features['label'], _input_shape, name='lable_reshape')
                mask = tf.reshape(parsed_features['mask'], _input_shape, name='mask_reshape')
                track = tf.reshape(parsed_features['track'], _input_shape, name='track_reshape')
                scanName = parsed_features['scanName']
                image = image + tf.constant(1., dtype=tf.float32)
                return image, label, mask, track, scanName
        training_filenames = glob(dir_join(_data_tfRecord_dir, _data_train, '*.tfrecords'))
        validation_filenames = glob(dir_join(_data_tfRecord_dir, _data_val, '*.tfrecords'))
        filenames = tf.placeholder(tf.string, shape=[None], name='filenames')
        dataset = tf.data.TFRecordDataset(filenames)
        dataset = dataset.map(_parse_function, num_parallel_calls=20)  # Parse the record into tensors.
        dataset = dataset.shuffle(buffer_size=10000)
        dataset = dataset.batch(_batch_size, drop_remainder=True)
        dataset = dataset.prefetch(buffer_size=10)
        iterator = dataset.make_initializable_iterator()
        next = iterator.get_next()
        sess = tf.Session()
        while True:
            sess.run(iterator.initializer, feed_dict={filenames: training_filenames})
            try:
                img, _, _, _, scanX = sess.run(next)
                for i, scan in enumerate(scanX):
                    print(scan.decode("utf-8"))
                    projection = np.load(dir_join(_data_npy_dir, scan.decode("utf-8") + '.npy'))
                    imagenp = projection[:, :, :_label_channel]
                    if np.abs(np.sum(img[i, ...] - imagenp)) > 0.:
                        print(np.sum(img[i, ...] - imagenp))
            except tf.errors.OutOfRangeError:
                break
        return training_filenames, validation_filenames, filenames, iterator

if __name__ == '__main__':
    numpy_to_TFRecord()
    readTFRecord()
The test I'm doing in the previous code is to convert the *.npy files to *.tfrecords. Then I compare the *.tfrecords with the *.npy; the value should be 0 if both images are identical.
img, _, _, _, scanX = sess.run(next)
for i, scan in enumerate(scanX):
    print(scan.decode("utf-8"))
    projection = np.load(dir_join(_data_npy_dir, scan.decode("utf-8") + '.npy'))
    imagenp = projection[:, :, :_label_channel]
    print(np.sum(img[i, ...] - imagenp))
If the data is not preprocessed, these images are the same; however, if we perform some kind of transformation, the results do not match what was expected. In this case we are adding 1 to each pixel of the image, so the total difference should be 64 * 512 * 5.
image = image + tf.constant(1., dtype=tf.float32)
I would like to solve this error, since so far I have not been able to reproduce with the TensorFlow Dataset API the results my neural network obtained using feed_dict, and this is the only point where I can observe a difference in the input data.
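For what it's worth, tf.train.FloatList (and tf.float32 parsing) stores 32-bit floats, while the *.npy data is float64, so a discrepancy of this size is consistent with float32 rounding alone. A minimal sketch that reproduces a difference of the same order, with no TensorFlow involved:
import numpy as np

img64 = np.random.default_rng(0).random((64, 512, 5))  # float64, like the *.npy data
img32 = img64.astype(np.float32)                       # what comes back from the TFRecord

diff = np.sum((img32 + 1.0) - img64)  # would be exactly 163840 in float64 throughout
print(diff)                           # e.g. 163839.9997... due to float32 rounding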
