How to label images when importing them into Python

I have a set of pictures in a folder, labelled dog, cat, truck, airplane and car. Once I import them into Python I want to assign them numeric labels. The following code shows how I can extract pictures from the folder and label them for one class, but how can I do it for multiple classes? For example 1 for 'dog', 2 for 'cat', 3 for 'truck', 4 for 'airplane' and 5 for 'car'.
Test_dir = "C:/Users/Instructor/Dropbox/Data Science/2.Temp_WORDFILES/test"
image_width = 32
image_height = 32
def read_images(directory, resize_to=(128, 128)):
"""This function extracts images from given
directory"""
files = glob.glob(directory + "/*.jpg")
images = []
labels = []
for f in tqdm.tqdm_notebook(files):
im = Image.open(f)
im = im.resize(resize_to)
im = np.array(im) / 255.0
im = im.astype("float32")
images.append(im)
label = 1 if "dog" in f.lower() else 0
labels.append(label)
return np.array(images), np.array(labels)
X, y = read_images(directory=Test_dir, resize_to=(IM_WIDTH, IM_HEIGHT))

Test_dir = "C:/Users/Instructor/Dropbox/Data Science/2.Temp_WORD FILES/test"
image_width = 32
image_height = 32
def read_images(directory, resize_to=(128, 128)):
"""This function extracts images from given
directory"""
files = glob.glob(directory + "/*.jpg")
images = []
labels = []
switch_values = {'dog':1,'cat':2, 'truck':3, 'airplane':4 ,'car':5}
for f in tqdm.tqdm_notebook(files):
im = Image.open(f)
im = im.resize(resize_to)
im = np.array(im) / 255.0
im = im.astype("float32")
images.append(im)
label = switch_values.get(f.lower())
labels.append(label)
return np.array(images), np.array(labels)
X, y = read_images(directory=Test_dir, resize_to=(IM_WIDTH, IM_HEIGHT))

Define a dictionary that maps each class name to a label, then look the label up from the file name:
animal_to_label = {'dog': 1, 'cat': 2, 'truck': 3, 'airplane': 4, 'car': 5}

import glob
import os
import numpy as np
import tqdm
from PIL import Image

Test_dir = "C:/Users/Instructor/Dropbox/Data Science/2.Temp_WORDFILES/test"
image_width = 32
image_height = 32

def read_images(directory, resize_to=(128, 128)):
    """This function extracts images from the given directory."""
    files = glob.glob(directory + "/*.jpg")
    images = []
    labels = []
    for f in tqdm.tqdm_notebook(files):
        im = Image.open(f)
        im = im.resize(resize_to)
        im = np.array(im) / 255.0
        im = im.astype("float32")
        images.append(im)
        # File names are assumed to start with the class name (e.g. dog.001.jpg);
        # os.path.basename also handles Windows backslash separators.
        name = os.path.splitext(os.path.basename(f))[0].split(".")[0]
        label = animal_to_label[name.lower()]
        labels.append(label)
    return np.array(images), np.array(labels)

X, y = read_images(directory=Test_dir, resize_to=(image_width, image_height))
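A quick sanity check of the name-to-label mapping, using hypothetical file names (assuming files are named like dog.001.jpg, as the split on "." implies):

import os

animal_to_label = {'dog': 1, 'cat': 2, 'truck': 3, 'airplane': 4, 'car': 5}
for f in ["C:/data/test/dog.001.jpg", "C:/data/test/truck.042.jpg"]:
    name = os.path.splitext(os.path.basename(f))[0].split(".")[0]
    print(name, "->", animal_to_label[name.lower()])
# dog -> 1
# truck -> 3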


Get the image name of a PyTorch dataset

I am using a custom dataset for image segmentation. While visualizing some of the images and masks I found an error. The problem for me now is how to find the name of the image. The code I use for the PyTorch dataset creation is:
import numpy as np
import skimage.io
import torch
from torch.utils.data import Dataset

class SegmentationDataset(Dataset):
    def __init__(self, df, augmentations):
        self.df = df
        self.augmentations = augmentations

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.iloc[idx]
        image_path = DATA_DIR + row.images
        mask_path = DATA_DIR + row.masks
        image = skimage.io.imread(image_path)
        mask = skimage.io.imread(mask_path)
        mask = np.expand_dims(mask, axis=-1)
        if self.augmentations:
            data = self.augmentations(image=image, mask=mask)
            image = data['image']
            mask = data['mask']
        image = np.transpose(image, (2, 0, 1)).astype(np.float32)
        mask = np.transpose(mask, (2, 0, 1)).astype(np.float32)
        image = torch.Tensor(image) / 255.0
        mask = torch.round(torch.Tensor(mask) / 255.0)
        return image, mask

trainset = SegmentationDataset(train_df, get_train_augs())
validset = SegmentationDataset(valid_df, get_valid_augs())
When I then display one specific sample, I see that the mask is not available/wrong:
idx = 9
print('Drawn sample ID:', idx)
image, mask = validset[idx]
show_image(image, mask)
How do I now get the image name for idx = 9?
I'd imagine you could print out one of the following under the line image = skimage.io.imread(image_path); it should help lead you to your answer:
print(row)
print(row.images)
print(image_path)
To get the file name after you have the fully qualified path above:
my_str = '/my/data/path/images/wallpaper.jpg'
print(my_str.rsplit('/', 1))   # ['/my/data/path/images', 'wallpaper.jpg']
result = my_str.rsplit('/', 1)[1]
print(result)                  # 'wallpaper.jpg'
with_slash = '/' + result
print(with_slash)              # '/wallpaper.jpg'
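As a minimal sketch of a more direct lookup (assuming the valid_df dataframe and DATA_DIR from the question are in scope): the row that __getitem__ reads for a given index already holds the path, so you can fetch the name without loading the sample.

import os

idx = 9
row = valid_df.iloc[idx]             # the same row __getitem__(9) uses
image_path = DATA_DIR + row.images   # full path passed to skimage.io.imread
print(os.path.basename(image_path))  # just the file name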

I am converting annotated data (JSON) to COCO format for the Tensorflow Object Detection API; this is the error I have been facing for the last two days.

import os
import argparse
import json
from labelme import utils
import numpy as np
import glob
import PIL.Image
import PIL.ImageDraw


class labelme2coco(object):
    def __init__(self, labelme_json=["new_images\Train"],
                 save_json_path="new_images\Train_Coco/coco.json"):
        """
        :param labelme_json: the list of all labelme json file paths
        :param save_json_path: the path to save new json
        """
        self.labelme_json = labelme_json
        self.save_json_path = save_json_path
        self.images = []
        self.categories = []
        self.annotations = []
        self.label = []
        self.annID = 1
        self.height = 0
        self.width = 0
        self.save_json()

    def data_transfer(self):
        for num, json_file in enumerate(self.labelme_json):
            with open(json_file, "r") as fp:
                data = json.load(fp)
                self.images.append(self.image(data, num))
                for shapes in data["shapes"]:
                    label = shapes["label"].split("_")
                    if label not in self.label:
                        self.label.append(label)
                    points = shapes["points"]
                    self.annotations.append(self.annotation(points, label, num))
                    self.annID += 1

        # Sort all text labels so they are in the same order across data splits.
        self.label.sort()
        for label in self.label:
            self.categories.append(self.category(label))
        for annotation in self.annotations:
            annotation["category_id"] = self.getcatid(annotation["category_id"])

    def image(self, data, num):
        image = {}
        img = utils.img_b64_to_arr(data["imageData"])
        height, width = img.shape[:2]
        img = None
        image["height"] = height
        image["width"] = width
        image["id"] = num
        image["file_name"] = data["imagePath"].split("/")[-1]
        self.height = height
        self.width = width
        return image

    def category(self, label):
        category = {}
        category["supercategory"] = label[0]
        category["id"] = len(self.categories)
        category["name"] = label[0]
        return category

    def annotation(self, points, label, num):
        annotation = {}
        contour = np.array(points)
        x = contour[:, 0]
        y = contour[:, 1]
        area = 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
        annotation["segmentation"] = [list(np.asarray(points).flatten())]
        annotation["iscrowd"] = 0
        annotation["area"] = area
        annotation["image_id"] = num
        annotation["bbox"] = list(map(float, self.getbbox(points)))
        annotation["category_id"] = label[0]  # self.getcatid(label)
        annotation["id"] = self.annID
        return annotation

    def getcatid(self, label):
        for category in self.categories:
            if label == category["name"]:
                return category["id"]
        print("label: {} not in categories: {}.".format(label, self.categories))
        exit()
        return -1

    def getbbox(self, points):
        polygons = points
        mask = self.polygons_to_mask([self.height, self.width], polygons)
        return self.mask2box(mask)

    def mask2box(self, mask):
        index = np.argwhere(mask == 1)
        rows = index[:, 0]
        clos = index[:, 1]
        left_top_r = np.min(rows)  # y
        left_top_c = np.min(clos)  # x
        right_bottom_r = np.max(rows)
        right_bottom_c = np.max(clos)
        return [
            left_top_c,
            left_top_r,
            right_bottom_c - left_top_c,
            right_bottom_r - left_top_r,
        ]

    def polygons_to_mask(self, img_shape, polygons):
        mask = np.zeros(img_shape, dtype=np.uint8)
        mask = PIL.Image.fromarray(mask)
        xy = list(map(tuple, polygons))
        PIL.ImageDraw.Draw(mask).polygon(xy=xy, outline=1, fill=1)
        mask = np.array(mask, dtype=bool)
        return mask

    def data2coco(self):
        data_coco = {}
        data_coco["images"] = self.images
        data_coco["categories"] = self.categories
        data_coco["annotations"] = self.annotations
        return data_coco

    def save_json(self):
        print("save coco json")
        self.data_transfer()
        self.data_coco = self.data2coco()
        print(self.save_json_path)
        os.makedirs(
            os.path.dirname(os.path.abspath(self.save_json_path)), exist_ok=True
        )
        json.dump(self.data_coco, open(self.save_json_path, "w"), indent=4)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="labelme annotation to coco data json file."
    )
    parser.add_argument(
        labelme_images="new_images\Train",
        help="Directory to labelme images and annotation json files.",
        type=str,
    )
    parser.add_argument(
        "--output", help="Output json file path.", default="new_images\Train_Coco/coco.json"
    )
    args = parser.parse_args()
    labelme_json = glob.glob(os.path.join(args.labelme_images, "*.json"))
    labelme2coco(labelme_json, args.output)
The error I am getting is a TypeError that points into the argparse library.
The tutorial I am following is "Train a Mask R-CNN model with the Tensorflow Object Detection API".
Try the correction below. parser.add_argument takes the argument's name as a positional string ("labelme_images"), not as a keyword argument, which is what triggers the TypeError inside argparse:
import argparse

parser = argparse.ArgumentParser(
    description="labelme annotation to coco data json file."
)
parser.add_argument(
    "labelme_images",
    help="Directory to labelme images and annotation json files.",
    type=str,
)
parser.add_argument(
    "--output", help="Output json file path.", default="new_images\Train_Coco/coco.json"
)
args = parser.parse_args()
labelme_json = glob.glob(os.path.join(args.labelme_images, "*.json"))
labelme2coco(labelme_json, args.output)
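A minimal sketch of why the original call fails: add_argument expects the argument's name as a positional string, so an arbitrary keyword like labelme_images=... raises a TypeError from inside argparse. parse_args can also be exercised directly with a test list:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("labelme_images", type=str)  # correct: name as a positional string
# parser.add_argument(labelme_images="new_images/Train")  # TypeError inside argparse

args = parser.parse_args(["new_images/Train"])   # simulate a command-line call
print(args.labelme_images)                       # new_images/Train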

Image Mask to apply image filters

I want to apply variance, Gabor and entropy filters to some images, but the images have blank areas where I don't want to apply the filters. I tried to use np.ma.array, but it returns this error: "'MaskedArray' object is not callable".
This is the code:
import glob
import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage
from skimage import io

def bandas_img(image, array1, array2):
    imagenRGB = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    return cv2.inRange(imagenRGB, array1, array2)

def rescale_by_width(image, target_width, method=cv2.INTER_LANCZOS4):
    h = int(round(target_width * image.shape[0] / image.shape[1]))
    return cv2.resize(image, (target_width, h), interpolation=method)

# Resize images by width
target_width = 400
# Mask out null values
mask_image = True

hue = 20
sat = 57
value = 116
toleranciaH = 150
toleranciaS = 150
toleranciaV = 150
lower = np.array([hue - toleranciaH, sat - toleranciaS, value - toleranciaV])
upper = np.array([hue + toleranciaH, sat + toleranciaS, value + toleranciaV])

# Working directory where the image files are
# (careful: the slashes go /, not \ as in D:/OMAN/BHI TEXTURES/U-2)
os.chdir("C:/Users/Mariano/Documents/3 - Visual studio code/Prueba filtrar mascara/filtrada")
file_extension = '.png'  # check extension
all_filenames = [i for i in glob.glob(f"*{file_extension}")]

for f in all_filenames:
    image = cv2.imread(f, 1)
    # Resized image
    resized1 = rescale_by_width(image, target_width)
    # Set f value (image name)
    f = f.replace(".png", "")
    # Save image
    plt.imsave(f + "_resized.png", resized1)
    # Create mask for null values
    if mask_image == True:
        mask = bandas_img(resized1, lower, upper)
        cv2.imwrite(f + "_mask.png", mask)
        resized2 = io.imread(f + "_resized.png", as_gray=True)
        resized3 = resized2.copy()
        # First try
        resized3[mask == 0] = np.nan
        resized3[mask != 0] = resized2[mask != 0]
        # Second try
        mask1 = (resized3 == np.nan)
        resized_Mask = np.ma.array(resized3, mask=mask1)
        # Variance
        k = 6
        img_mean = ndimage.uniform_filter(resized_Mask, (k, k))
        img_sqr_mean = ndimage.uniform_filter(resized_Mask**2, (k, k))
        img_var = img_sqr_mean - img_mean**2
        img_var[mask == 0] = 1
        plt.imsave(f + "_varianza.png", img_var)

Join extracted/split patches to reconstruct an image

I used the code below to extract patches from an image.
Extraction code:
import os
import glob
from PIL import Image

Image.MAX_IMAGE_PIXELS = None  # to avoid image size warning

imgdir = "/path/to/image/folder"
# Note: recursive globbing needs a path separator before the ** pattern.
filelist = [f for f in glob.glob(imgdir + "/**/*.png", recursive=True)]
savedir = "/path/to/image/folder/output"

start_pos = start_x, start_y = (0, 0)
cropped_image_size = w, h = (256, 256)

for file in filelist:
    img = Image.open(file)
    width, height = img.size
    frame_num = 1
    for col_i in range(0, width, w):
        for row_i in range(0, height, h):
            crop = img.crop((col_i, row_i, col_i + w, row_i + h))
            name = os.path.basename(file)
            name = os.path.splitext(name)[0]
            save_to = os.path.join(savedir, name + "_{:03}.png")
            crop.save(save_to.format(frame_num))
            frame_num += 1
Now I want to reconstruct the image from all the patches extracted before. I've tried two different approaches.
My data is 120x256x256x3 extracted patches; there are 120 patches to fit into a 3840x2048 shape:
import os
import glob
import cv2
import numpy as np

patches = []
for directory_path in glob.glob('D:\join_exemplo'):
    for img_path in glob.glob(os.path.join(directory_path, "*.png")):
        img = cv2.imread(img_path, 1)
        patches.append(img)
input_patches = np.array(patches)
First I tried reconstruct_from_patches_2d from sklearn.feature_extraction.image, but got a black image:
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
from PIL import Image

reconstruct = reconstruct_from_patches_2d(input_patches, input_image)
reconstruct = reconstruct.astype(np.uint8)
Image.fromarray(reconstruct, 'RGB').save(r'D:\join_exemplo\re\re3.png')
I also tried the code below, but got a grayscale tone pattern image:
input_image = (3840, 2048, 3)
reconstructed_arr = np.zeros(shape=(3840, 2048, 3))

step = 256
for x in range(img.shape[0]):
    for y in range(img.shape[1]):
        x_pos, y_pos = x * step, y * step
        reconstructed_arr[x_pos:x_pos + 512, y_pos:y_pos + 512] = img[x, y, 0, ...]

(input_image == reconstructed_arr).all()  # True
cv2.imwrite(r'D:\join_exemplo\re\re.png', reconstructed_arr)
Can someone see what's wrong? Sorry about my bad English.
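A minimal sketch of a manual reassembly that mirrors the extraction loop's ordering (outer loop over columns, inner over rows), assuming input_patches from the snippet above is loaded in the same order the patches were saved; since the file names carry zero-padded _001 suffixes, loading with sorted(glob.glob(...)) makes that order deterministic:

import numpy as np

patch = 256
width, height = 3840, 2048
cols, rows = width // patch, height // patch  # 15 * 8 = 120 patches

# input_patches: shape (120, 256, 256, 3), in saved order (see note above)
reconstructed = np.zeros((height, width, 3), dtype=np.uint8)
i = 0
for col in range(cols):        # matches the extraction loop: columns first...
    for row in range(rows):    # ...then rows within each column
        x, y = col * patch, row * patch
        reconstructed[y:y + patch, x:x + patch] = input_patches[i]
        i += 1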

Why do we have to resize the array when reading the image?

import os
import cv2
import numpy as np

IMG_SIZE = 50
training_data = []

def create_training_data():
    for c in categories:
        path = os.path.join(dir, c)
        class_num = categories.index(c)  # dog = 0, cat = 1
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                training_data.append([new_array, class_num])
            except Exception as e:
                pass  # some photos are broken

create_training_data()

# Build X and y from the collected samples before reshaping
X = []
y = []
for features, label in training_data:
    X.append(features)
    y.append(label)

X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
y = np.array(y)
Hi guys! I was studying convolutional neural networks on YouTube and was wondering why I have to resize the image array ( new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) ) and reshape the X array ( X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1) )?
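Not a full answer, but a minimal sketch of the mechanics: np.array over same-shape images stacks them into one 4-D tensor of (batch, height, width, channels), which is what a Keras-style CNN input layer expects; images of differing sizes would not stack this way.

import numpy as np

IMG_SIZE = 50
a = np.zeros((IMG_SIZE, IMG_SIZE))  # two grayscale images, already resized
b = np.ones((IMG_SIZE, IMG_SIZE))

X = np.array([a, b]).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
print(X.shape)  # (2, 50, 50, 1) -> (batch, height, width, channels)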
