Scaling pixels between -1 and 1 using cv2.normalize() - python

def preprocess(self):
    # Import image
    pic1 = self.path
    raw_image = cv2.imread(pic1)
    #cv2.imshow('Raw image', raw_image)
    #cv2.waitKey(0)
    # Resize image
    dim = (320, 180)
    resized = cv2.resize(raw_image, dim)
    #cv2.imshow('Resized Image', resized)
    #cv2.waitKey(0)
    # Scale image
    scaled = cv2.normalize(resized, None, alpha=-1, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
    #cv2.imshow('Scaled Image', scaled)
    #cv2.waitKey(0)
    return scaled
I'm trying to scale the pixel values of "raw_image" to within the range -1 to 1 as part of the pre-processing for identifying an object using machine learning. Essentially, a camera takes a picture, and the image is resized and scaled to the same size as the images in the dataset used for training and validation. The model generated with model.fit() then runs inference on that image to detect what the object actually is.
The question here is: "Is this scaling function correct for putting the pixel values in the range of -1 to 1?" The image appears SUPER dark when I use cv2.imshow and I'm afraid the model isn't recognizing it properly.
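A quick way to check the scaling, and a likely explanation for the dark preview, is the minimal sketch below (the file path is illustrative). cv2.imshow treats floating-point images as lying in [0, 1], so everything at or below 0, which is half of a [-1, 1] image, renders as black.
import cv2

raw_image = cv2.imread('example.jpg')  # hypothetical path
resized = cv2.resize(raw_image, (320, 180))
scaled = cv2.normalize(resized, None, alpha=-1, beta=1,
                       norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
# Confirm the value range really spans -1 to 1
print(scaled.min(), scaled.max())  # expected: -1.0 1.0
# Remap to [0, 1] purely for display; the scaled array itself stays in [-1, 1]
cv2.imshow('Scaled (remapped for display)', (scaled + 1.0) / 2.0)
cv2.waitKey(0)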

Related

How to treat different size images in ML pipeline

I'm here asking a general question about image processing applied to a machine learning pipeline. In this post, I will refer to ML as every algorithm that is not deep learning (therefore it doesn't use a neural network).
I'm developing a classifier to catalog .png images of different clothes. I have labels (for each image I know the category), so it's a supervised learning problem.
My objective is to use PCA to reduce the problem's dimensionality and then use bag of visual words to perform the classification. I'm using python for this project.
The problem is that each photo has a different size and a different ratio between width and height (therefore I can't only resize them because I wouldn't have a unique height value for each image).
My (inelegant) solution is to fix the width at 200 px and then pad each image with rows of zeros (each image becomes a NumPy array of maximum_h rows, with each row w pixels long).
Here the script:
# help function to convert images into arrays
def get_image(image_path: str, resize=True, w=300):
    """
    :param image_path: string, path of the image
    :param resize: boolean, if True the image is resized. Default: True
    :param w: integer, specify the width of the resized image
    :return: numpy array of the greyscale version of the image
    """
    try:
        image = Image.open(image_path).convert("L")
        if resize:
            wpercent = (w / float(image.size[0]))
            hsize = int((float(image.size[1]) * float(wpercent)))
            image = image.resize((w, hsize), Image.ANTIALIAS)
        # pixel_values = np.array(image.getdata())
        return image
    except:
        # AI19/04442.png corrupted
        # AI18/02971.png corrupted
        # print(image_path)
        return None
def extract_images(paths: list, categories: list, w: int, maximum_h: int):
    A = np.zeros([len(paths), w * maximum_h])
    y = []
    counter = 0
    for image_path, label in tqdm(zip(paths, categories)):
        im = get_image(image_path, w=w)
        if im:
            # adapt images to fit
            h, w = np.array(im).shape
            delta_h = maximum_h - h
            zeros_ = np.zeros((delta_h, w), dtype=int)
            im = np.concatenate((im, zeros_), axis=0)
            A[counter, :] = im.reshape(1, -1)
            y.append(label)
            counter += 1
        else:
            continue
    return (A, y)
The problem here is that the classifier performs badly (around 20%) because I add a significant amount of zeros to each image, which increases the dimensionality but doesn't add information.
Looking at the biggest eigenvectors of the PCA algorithm, I see that a lot of information is concentrated in these "padding" areas (which confirms my impression).
Is there a better way to handle different size images in python?
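One common alternative (not from the original post; a sketch under the same PIL/NumPy assumptions) is to letterbox every image to a fixed square, so the padding is symmetric and every sample has exactly the same dimensionality:
from PIL import Image
import numpy as np

def to_fixed_square(image_path: str, side: int = 200):
    """Aspect-preserving resize followed by symmetric zero padding to side x side."""
    image = Image.open(image_path).convert("L")
    scale = side / max(image.size)                   # fit the longest edge
    new_size = (max(1, int(image.size[0] * scale)),
                max(1, int(image.size[1] * scale)))
    arr = np.array(image.resize(new_size))
    canvas = np.zeros((side, side), dtype=np.uint8)  # square black canvas
    top = (side - arr.shape[0]) // 2
    left = (side - arr.shape[1]) // 2
    canvas[top:top + arr.shape[0], left:left + arr.shape[1]] = arr
    return canvas.reshape(-1)                        # flat vector, ready for PCA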

Image pre-processing with dataframe

I have about 1000 images listed in a csv file. I have already managed to load those images into my Python program by doing the following:
df = pd.read_csv("./Testes_small.csv")
# Creates the dataframe
training_set = pd.DataFrame({'Images': training_imgs, 'Labels': training_labels})
train_dataGen = ImageDataGenerator(rescale=1./255)
train_generator = train_dataGen.flow_from_dataframe(dataframe=training_set, directory="",
                                                    x_col="Images", y_col="Labels",
                                                    class_mode="categorical",
                                                    target_size=(224, 224), batch_size=32)
## Steps to plot the images
imgs, labels = next(train_generator)
for i in range(batch_size):  # range from 0 to 31
    image = imgs[i]
    plt.imshow(image)
    plt.show()
So now I have the train_generator variable of type python.keras.preprocessing.image.DataframeIterator. Each batch has shape (32, 224, 224, 3).
In the ImageDataGenerator I want to plug in my own preprocessing function to resize the images. I want to do this because some of my images are rectangular and lose their aspect ratio when resized.
For example, here is an image before (upper image) and after (lower image) resizing:
Clearly the second image loses its shape.
I found this function (it's the answer to a previous thread):
def resize_image(self, image: Image, length: int) -> Image:
    """
    Resize an image to a square. Can make an image bigger to make it fit or smaller if it doesn't fit. It also crops
    part of the image.
    :param self:
    :param image: Image to resize.
    :param length: Width and height of the output image.
    :return: Return the resized image.
    """
    """
    Resizing strategy :
     1) We resize the smallest side to the desired dimension (e.g. 1080)
     2) We crop the other side so as to make it fit with the same length as the smallest side (e.g. 1080)
    """
    if image.size[0] < image.size[1]:
        # The image is in portrait mode. Height is bigger than width.
        # This makes the width fit the LENGTH in pixels while conserving the ratio.
        resized_image = image.resize((length, int(image.size[1] * (length / image.size[0]))))
        # Amount of pixels to lose in total on the height of the image.
        required_loss = (resized_image.size[1] - length)
        # Crop the height of the image so as to keep the center part.
        resized_image = resized_image.crop(
            box=(0, required_loss / 2, length, resized_image.size[1] - required_loss / 2))
        # We now have a length*length pixels image.
        return resized_image
    else:
        # This image is in landscape mode or already squared. The width is bigger than the height.
        # This makes the height fit the LENGTH in pixels while conserving the ratio.
        resized_image = image.resize((int(image.size[0] * (length / image.size[1])), length))
        # Amount of pixels to lose in total on the width of the image.
        required_loss = resized_image.size[0] - length
        # Crop the width of the image so as to keep 1080 pixels of the center part.
        resized_image = resized_image.crop(
            box=(required_loss / 2, 0, resized_image.size[0] - required_loss / 2, length))
        # We now have a length*length pixels image.
        return resized_image
I'm trying to insert it like this: img_datagen = ImageDataGenerator(rescale=1./255, preprocessing_function=resize_image), but it doesn't work because I'm not passing it an image. Do you have any ideas on how I can do this?
Check the documentation for providing custom functions to the ImageDataGenerator. It says, and I quote:
"preprocessing_function: function that will be applied on each input. The function will run after the image is resized and augmented. The function should take one argument: one image (Numpy tensor with rank 3), and should output a Numpy tensor with the same shape."
From the above documentation we can note the following:
When will this function be executed:
- after the image is resized.
- after any data augmentation which has to be done.
Function argument requirements:
- only one argument.
- this argument is for only one numpy image.
- the image should be a numpy tensor of rank 3.
Function output requirements:
- one output image.
- should be the same shape as the input.
This last point is really important for your question. Since your function resizes the image, its output will not be the same shape as the input, so you cannot use it this way directly.
One alternative is to resize your dataset before passing it to the ImageDataGenerator, as in the sketch below.
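A minimal sketch of that alternative, assuming the images live in a folder on disk, the resize_image function from above is available as a plain function, and the folder names are illustrative:
import os
from PIL import Image

SRC_DIR = "images_raw"      # hypothetical input folder
DST_DIR = "images_resized"  # hypothetical output folder
os.makedirs(DST_DIR, exist_ok=True)

for name in os.listdir(SRC_DIR):
    img = Image.open(os.path.join(SRC_DIR, name))
    # resize_image never uses its self argument, so None is fine here
    img = resize_image(None, img, 224)
    img.save(os.path.join(DST_DIR, name))

# Point flow_from_dataframe at the resized files afterwards; since every image
# is already 224x224, target_size=(224, 224) no longer distorts the aspect ratio.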

Resizing non-uniform images with precise face location

I work at a studio that does school photos and we are trying to make a script to eliminate the job of cropping each photo to a template. The photos we work with are fairly uniform but they vary in resolution and head position a bit. I took up the mantle of trying to write the script with my fairly limited Python knowledge and through a lot of trial and error and online resources I think I have got most of the way there.
At the moment I am trying to figure out the best way to crop the image from the NumPy array with the head where I want it, and I just can't find a good flexible solution. The head needs to be positioned slightly differently for pose 1 and pose 2, so it needs to be easy to change on the fly (I'll probably implement some sort of simple GUI to input stuff like that, but for now I can just change the code).
I also need to be able to change the output resolution of the photo so they are all uniform (2000x2500). Anyone have any ideas?
At the moment this is my current code, it just saves the detected face square:
import cv2
import os.path
import glob

# Cascade path
cascPath = 'haarcascade_frontalface_default.xml'
# Create the haar cascade
faceCascade = cv2.CascadeClassifier(cascPath)
# Check for output folder and create it if it's not there
if not os.path.exists('output'):
    os.makedirs('output')
# Read Images
images = glob.glob('*.jpg')
for c, i in enumerate(images):
    image = cv2.imread(i, 1)
    # Convert to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Find face(s) using cascade
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,    # size of groups
        minNeighbors=5,     # How many groups around are detected as face for it to be valid
        minSize=(500, 500)  # Min size in pixels for face
    )
    # Outputs number of faces found in image
    print('Found {0} faces!'.format(len(faces)))
    # Places a rectangle on face
    for (x, y, w, h) in faces:
        imgCrop = image[y:y+h, x:x+w]
    if len(faces) > 0:
        # Saves Images to output folder with OG name
        cv2.imwrite('output/' + i, imgCrop)
I can crop using it like this:
# Crop Padding
left = 300
right = 300
top = 400
bottom = 1000
for (x, y, w, h) in faces:
    imgCrop = image[y-top:y+h+bottom, x-left:x+w+right]
but that outputs pretty random resolutions, and the result changes based on the image resolution.
TL;DR
To set a new resolution, you can use cv2.resize. There may be some pixel loss, so choose an appropriate interpolation method.
The newly resized image may be in BGR format, so you may need to convert it to RGB format.
crop = cv2.resize(src=crop, dsize=(2000, 2500), interpolation=cv2.INTER_LANCZOS4)
crop = cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)  # Make sure the cropped image is in RGB format
cv2.imwrite("image-1.png", crop)
Suggestion:
One approach is using Python's face_recognition library.
The approach uses two sample images for training.
It then predicts the faces in the next image based on the training images.
For instance, the following are the training images:
We want to predict the faces in the below image:
When we get the facial encodings of the training images and apply to the next image:
import face_recognition
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw

# Load a sample picture and learn how to recognize it.
first_image = face_recognition.load_image_file("images/ex.jpg")
first_face_encoding = face_recognition.face_encodings(first_image)[0]
# Load a second sample picture and learn how to recognize it.
second_image = face_recognition.load_image_file("images/index.jpg")
sec_face_encoding = face_recognition.face_encodings(second_image)[0]
# Create arrays of known face encodings and their names
known_face_encodings = [
    first_face_encoding,
    sec_face_encoding
]
print('Learned encoding for', len(known_face_encodings), 'images.')
# Load an image with an unknown face
unknown_image = face_recognition.load_image_file("images/babes.jpg")
# Find all the faces and face encodings in the unknown image
face_locations = face_recognition.face_locations(unknown_image)
face_encodings = face_recognition.face_encodings(unknown_image, face_locations)
# Convert the image to a PIL-format image so that we can draw on top of it with the Pillow library
# See http://pillow.readthedocs.io/ for more about PIL/Pillow
pil_image = Image.fromarray(unknown_image)
# Create a Pillow ImageDraw Draw instance to draw with
draw = ImageDraw.Draw(pil_image)
# Loop through each face found in the unknown image
for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
    matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
    face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
    best_match_index = np.argmin(face_distances)
    draw.rectangle(((left, top), (right, bottom)), outline=(0, 0, 255), width=5)
# Remove the drawing library from memory as per the Pillow docs
del draw
# Display the resulting image
plt.imshow(pil_image)
plt.show()
The output will be:
The above is my suggestion. When you create a new resolution with the current image, there will be a pixel loss. Therefore you need to use an interpolation method.
For instance: after finding the face locations, select the coordinates in the original image.
# Add after draw.rectangle function.
crop = unknown_image[top:bottom, left:right]
Set the new resolution with the size 2000 x 2500 and interpolation with cv2.INTER_LANCZOS4.
Possible Question: Why cv2.INTER_LANCZOS4?
Of course, you can select whatever you like, but in this post cv2.INTER_LANCZOS4 was suggested.
crop = cv2.resize(src=crop, dsize=(2000, 2500), interpolation=cv2.INTER_LANCZOS4)
Save the image:
crop = cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)  # Make sure the cropped image is in RGB format
cv2.imwrite("image-1.png", crop)
The outputs are around 4.3 MB, therefore I can't display them here.
From the final result, we clearly see and identify faces. The library precisely finds the faces in the image.
Here is what you can do:
Either use training images from your own set, or use the example above.
Apply the face-recognition function to each image, use the detected face locations, and save the results to a directory, as in the sketch below.
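A minimal sketch of that loop, assuming the photos sit in a folder and the output size matches the 2000 x 2500 template mentioned above (folder names are illustrative):
import glob
import os
import cv2
import face_recognition

os.makedirs('output', exist_ok=True)
for n, path in enumerate(glob.glob('photos/*.jpg')):  # hypothetical input folder
    image = face_recognition.load_image_file(path)    # loaded as RGB
    for top, right, bottom, left in face_recognition.face_locations(image):
        crop = image[top:bottom, left:right]
        crop = cv2.resize(crop, dsize=(2000, 2500), interpolation=cv2.INTER_LANCZOS4)
        crop = cv2.cvtColor(crop, cv2.COLOR_RGB2BGR)   # OpenCV writes BGR to disk
        cv2.imwrite('output/face-{0}.png'.format(n), crop)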
Here is how I got it to crop how I wanted; this is added right below the "Outputs number of faces" print:
# Get the face position and output the values into variables, might not be needed but I did it
for (x, y, w, h) in faces:
    xdis = x
    ydis = y
    w = w
    h = h
# Get scale value by dividing the wanted head height by the detected head height
ws = 600 / w
hs = 600 / h
# Scale image to get the head to the right size, uses bilinear interpolation by default
scale = cv2.resize(image, (0, 0), fx=hs, fy=ws)
# Calculate head position for the given values
sxdis = int(xdis * ws)  # applying scale to x distance and turning it into an integer
sydis = int(ydis * hs)  # applying scale to y distance and turning it into an integer
sycent = sydis + 300    # adding half the head height to get the center
ystart = sycent - 700   # subtract where you want the head center to be in pixels, this is for the vertical
yend = ystart + 2500    # add whatever you want the vertical resolution to be
xcent = sxdis + 300     # adding half the head height to get the center
xstart = xcent - 1000   # subtract where you want the head center to be in pixels, this is for the horizontal
xend = xstart + 2000    # add whatever you want the horizontal resolution to be
# Crop the image
cropped = scale[ystart:yend, xstart:xend]
It's a mess, but it works exactly how I wanted it to.
I ended up going with OpenCV instead of switching to face_recognition because of speed, but I might switch over if I can get multithreading to work in face_recognition.

Resample DICOM Images to Size and Spacing and align to same Origin

I have a set of 4 DICOM CT volumes which I am reading with the SimpleITK ImageSeriesReader. Two of the images represent the CT of the patient before and after the surgery. The other two images are binary segmentation masks segmented on the former 2 CT images. The segmentations are an ROI of their source CT.
All the 4 CT images, have different Size, Spacing, Origin and Direction. I have tried applying this GitHub gist https://gist.github.com/zivy/79d7ee0490faee1156c1277a78e4a4c4 to resize my images to 512x512x512 and Spacing 1x1x1. However, it doesn't place the images at the correct location. The segmented structure is always placed in the center of the CT image, instead of the correct location, as you can see from the pictures.
This my "raw" DICOM Image with its tumor segmentation (orange blob).
This is after the "resizing" algorithm and writing to disk (same image as before, just the tumor is colored green blob because inconsistency):
Code used for resampling all 4 DICOM Volumes to the same dimensions:
def resize_resample_images(images):
    """ Resize all the images to the same dimensions, spacing and origin.
        Usage: newImage = resize_image(source_img_plan, source_img_validation, ROI(ablation/tumor)_mask)
        1. translate to same origin
        2. largest number of slices and interpolate the others.
        3. same resolution 1x1x1 mm3 - resample
        4. (physical space)
        Slice Thickness (0018,0050)
        ImagePositionPatient (0020,0032)
        ImageOrientationPatient (0020,0037)
        PixelSpacing (0028,0030)
        Frame Of Reference UID (0020,0052)
    """
    # %% Define tuple to store the images
    tuple_resized_imgs = collections.namedtuple('tuple_resized_imgs',
                                                ['img_plan',
                                                 'img_validation',
                                                 'ablation_mask',
                                                 'tumor_mask'])
    # %% Create Reference image with zero origin, identity direction cosine matrix and isotropic dimension
    dimension = images.img_plan.GetDimension()
    reference_direction = np.identity(dimension).flatten()
    reference_size = [512] * dimension
    reference_origin = np.zeros(dimension)
    data = [images.img_plan, images.img_validation, images.ablation_mask, images.tumor_mask]
    reference_spacing = np.ones(dimension)  # resize to isotropic size
    reference_image = sitk.Image(reference_size, images.img_plan.GetPixelIDValue())
    reference_image.SetOrigin(reference_origin)
    reference_image.SetSpacing(reference_spacing)
    reference_image.SetDirection(reference_direction)
    reference_center = np.array(
        reference_image.TransformContinuousIndexToPhysicalPoint(np.array(reference_image.GetSize()) / 2.0))
    # %% Paste the GT segmentation masks before transformation
    tumor_mask_paste = (paste_roi_image(images.img_plan, images.tumor_mask))
    ablation_mask_paste = (paste_roi_image(images.img_validation, images.ablation_mask))
    images.tumor_mask = tumor_mask_paste
    images.ablation_mask = ablation_mask_paste
    # %% Apply transforms
    data_resized = []
    for idx, img in enumerate(data):
        transform = sitk.AffineTransform(dimension)  # use affine transform with 3 dimensions
        transform.SetMatrix(img.GetDirection())      # set the cosine direction matrix
        # TODO: check translation when computing the segmentations
        transform.SetTranslation(np.array(img.GetOrigin()) - reference_origin)  # set the translation.
        # Modify the transformation to align the centers of the original and reference image instead of their origins.
        centering_transform = sitk.TranslationTransform(dimension)
        img_center = np.array(img.TransformContinuousIndexToPhysicalPoint(np.array(img.GetSize()) / 2.0))
        centering_transform.SetOffset(np.array(transform.GetInverse().TransformPoint(img_center) - reference_center))
        centered_transform = sitk.Transform(transform)
        centered_transform.AddTransform(centering_transform)
        # Using the linear interpolator as these are intensity images, if there is a need to resample a ground truth
        # segmentation then the segmentation image should be resampled using the NearestNeighbor interpolator so that
        # no new labels are introduced.
        if idx == 1 or idx == 2:  # temporary solution to resample the GT image with NearestNeighbour
            resampled_img = sitk.Resample(img, reference_image, centered_transform, sitk.sitkNearestNeighbor, 0.0)
        else:
            resampled_img = sitk.Resample(img, reference_image, centered_transform, sitk.sitkLinear, 0.0)
        # append to list
        data_resized.append(resampled_img)
    # assuming the order stays the same, reassign back to tuple
    resized_imgs = tuple_resized_imgs(img_plan=data_resized[0],
                                      img_validation=data_resized[1],
                                      ablation_mask=data_resized[2],
                                      tumor_mask=data_resized[3])
    return resized_imgs
Code for "pasting" the ROI segmentations images into a correct size. Might be redundant.:
def paste_roi_image(image_source, image_roi):
    """ Resize ROI binary mask to size, dimension, origin of its source/original img.
        Usage: newImage = paste_roi_image(source_img_plan, roi_mask)
    """
    newSize = image_source.GetSize()
    newOrigin = image_source.GetOrigin()
    newSpacing = image_roi.GetSpacing()
    newDirection = image_roi.GetDirection()
    if image_source.GetSpacing() != image_roi.GetSpacing():
        print('the spacing of the source and derived mask differ')
    # re-cast the pixel type of the roi mask
    pixelID = image_source.GetPixelID()
    caster = sitk.CastImageFilter()
    caster.SetOutputPixelType(pixelID)
    image_roi = caster.Execute(image_roi)
    # black 3D image
    outputImage = sitk.Image(newSize, image_source.GetPixelIDValue())
    outputImage.SetOrigin(newOrigin)
    outputImage.SetSpacing(newSpacing)
    outputImage.SetDirection(newDirection)
    # transform from physical point to index the origin of the ROI image
    # img_center = np.array(img.TransformContinuousIndexToPhysicalPoint(np.array(img.GetSize()) / 2.0))
    destinationIndex = outputImage.TransformPhysicalPointToIndex(image_roi.GetOrigin())
    # paste the roi mask into the re-sized image
    pasted_img = sitk.Paste(outputImage, image_roi, image_roi.GetSize(), destinationIndex=destinationIndex)
    return pasted_img
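As an aside, a possibly simpler way to bring an ROI mask onto its source CT grid (a sketch, not from the original code; it relies on both images sharing the same physical space) is to resample the mask onto the source image with an identity transform and nearest-neighbour interpolation:
import SimpleITK as sitk

def roi_to_source_grid(image_source, image_roi):
    # Resample the ROI mask onto the geometry (size, spacing, origin, direction)
    # of its source CT; the identity transform keeps physical coordinates intact,
    # nearest-neighbour avoids introducing new label values.
    return sitk.Resample(image_roi, image_source, sitk.Transform(),
                         sitk.sitkNearestNeighbor, 0)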

resizing images based on distance between 2 objects in the image

I have multiple images, and each image contains 2 objects, but the camera capturing those images has been moving forward and backward. I want to resize all the images so that the distance between the 2 objects is the same in all of them.
Can this be done in Python and OpenCV?
Currently I'm trying to get the ratio between the distances of 2 images and then resize, but I can't get the right answer.
Code:
resize_factor = default_dist_bet_objects / current_image_distance
res = cv2.resize(image, None, fx=resize_factor, fy=resize_factor,
                 interpolation=cv2.INTER_CUBIC)
Then I either crop or paste onto another image so that all the images end up the same size (300, 400).
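A minimal sketch of that idea, assuming the two objects can be segmented with a simple threshold (the threshold value and target distance are illustrative, and the contour unpacking assumes OpenCV 4.x):
import cv2
import numpy as np

def normalize_object_distance(image, target_dist=200.0):
    """Rescale an image so the distance between its two objects equals target_dist pixels."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)           # illustrative threshold
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:2]   # the two largest blobs
    centers = []
    for c in contours:
        m = cv2.moments(c)
        centers.append((m['m10'] / m['m00'], m['m01'] / m['m00']))       # blob centroid
    current_dist = np.hypot(centers[0][0] - centers[1][0],
                            centers[0][1] - centers[1][1])
    factor = target_dist / current_dist
    return cv2.resize(image, None, fx=factor, fy=factor,
                      interpolation=cv2.INTER_CUBIC)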
