Optimal way to resize an image with OpenCV Python

I want to resize an image by a percentage and keep it as close to the original as possible, with minimal noise and distortion. The resize could be up or down, e.g. scaling to 5% or 500% of the original size (these values are only examples).
This is what I tried. I need the resized image to change as little as possible, since I'll be comparing it with other images:
import cv2

def resizing(main, percentage):
    main = cv2.imread(main)
    height = int(main.shape[0] * percentage)
    width = int(main.shape[1] * percentage)
    dim = (width, height)
    final_im = cv2.resize(main, dim, interpolation=cv2.INTER_AREA)
    cv2.imwrite("C:\\Users\\me\\nature.jpg", final_im)

You can use this form of cv2.resize:
cv2.resize(image, None, fx=scale_x, fy=scale_y)
fx is the scale factor along the width (x axis) and fy is the scale factor along the height (y axis); both accept an int or a float.
The second argument (dsize) can be None or (0, 0).
Example:
img = cv2.resize(oriimg, None, fx=0.5, fy=0.5)
Note:
0.5 means the image is scaled to 50% of its original size.
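For the percentage-based resize in the question, a minimal sketch could look like the following; the convention that the percentage is given as e.g. 5 or 500 (rather than a fraction), the file name, and the choice of interpolation are assumptions:

import cv2

def resize_by_percentage(path, percentage):
    # percentage given as e.g. 5 or 500, so convert to a scale factor
    img = cv2.imread(path)
    scale = percentage / 100.0
    # INTER_AREA is generally preferred for shrinking, INTER_CUBIC for enlarging
    interp = cv2.INTER_AREA if scale < 1.0 else cv2.INTER_CUBIC
    return cv2.resize(img, None, fx=scale, fy=scale, interpolation=interp)

resized = resize_by_percentage("nature.jpg", 50)  # 50% of the original size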

I think you're trying to resize and maintain aspect ratio. Here's a function to upscale or downscale an image based on a percentage.
Original image example
Resized image to 0.5 (50%)
Resized image to 1.3 (130%)
import cv2

# Resizes an image and maintains aspect ratio
def maintain_aspect_ratio_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    # Grab the image size and initialize dimensions
    dim = None
    (h, w) = image.shape[:2]

    # Return original image if no need to resize
    if width is None and height is None:
        return image

    # We are resizing height if width is none
    if width is None:
        # Calculate the ratio of the height and construct the dimensions
        r = height / float(h)
        dim = (int(w * r), height)
    # We are resizing width if height is none
    else:
        # Calculate the ratio of the width and construct the dimensions
        r = width / float(w)
        dim = (width, int(h * r))

    # Return the resized image
    return cv2.resize(image, dim, interpolation=inter)

if __name__ == '__main__':
    image = cv2.imread('1.png')
    cv2.imshow('image', image)

    resize_ratio = 1.2
    resized = maintain_aspect_ratio_resize(image, width=int(image.shape[1] * resize_ratio))

    cv2.imshow('resized', resized)
    cv2.imwrite('resized.png', resized)
    cv2.waitKey(0)

Related

How to remove image background using Canny

So I have this image with a busy background and I want to isolate the device inside it:
I wrote a basic Canny script to get a highlight of the device:
import cv2
# Read the original image
img = cv2.imread('antigen.png')
# Convert to grayscale
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Blur the image for better edge detection
img_blur = cv2.GaussianBlur(img_gray, (3,3), 0)
# Canny Edge Detection
edges = cv2.Canny(image=img_blur, threshold1=100, threshold2=200)
# Display Canny Edge Detection Image
cv2.imshow('Canny Edge Detection', edges)
cv2.waitKey(0)
cv2.destroyAllWindows()
How can I use the outline from the Canny image to cut the device out of the original picture's background?
You can use cv2.matchTemplate to find the best-matching ROI via template matching.
import cv2
import numpy as np

# Resizes an image and maintains aspect ratio
def maintain_aspect_ratio_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    # Grab the image size and initialize dimensions
    dim = None
    (h, w) = image.shape[:2]

    # Return original image if no need to resize
    if width is None and height is None:
        return image

    # We are resizing height if width is none
    if width is None:
        # Calculate the ratio of the height and construct the dimensions
        r = height / float(h)
        dim = (int(w * r), height)
    # We are resizing width if height is none
    else:
        # Calculate the ratio of the width and construct the dimensions
        r = width / float(w)
        dim = (width, int(h * r))

    # Return the resized image
    return cv2.resize(image, dim, interpolation=inter)

# Load template, convert to grayscale, perform canny edge detection
template = cv2.imread('template.jpg')
template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
template = cv2.Canny(template, 50, 200)
(tH, tW) = template.shape[:2]
cv2.imshow("template", template)

# Load original image, convert to grayscale
original_image = cv2.imread('1AWH8.jpg')
(img_height, img_width) = original_image.shape[:2]
final = original_image.copy()
gray = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
found = None

# Dynamically rescale image for better template matching
for scale in np.linspace(0.2, 1.0, 20)[::-1]:
    # Resize image to scale and keep track of ratio
    resized = maintain_aspect_ratio_resize(gray, width=int(gray.shape[1] * scale))
    r = gray.shape[1] / float(resized.shape[1])

    # Stop if template image size is larger than resized image
    if resized.shape[0] < tH or resized.shape[1] < tW:
        break

    # Detect edges in resized image and apply template matching
    canny = cv2.Canny(resized, 50, 200)
    detected = cv2.matchTemplate(canny, template, cv2.TM_CCOEFF)
    (_, max_val, _, max_loc) = cv2.minMaxLoc(detected)

    # Higher correlation means better match
    if found is None or max_val > found[0]:
        found = (max_val, max_loc, r)

# Compute coordinates of bounding box
(_, max_loc, r) = found
(start_x, start_y) = (int(max_loc[0] * r), int(max_loc[1] * r))
(end_x, end_y) = (int((max_loc[0] + tW) * r), int((max_loc[1] + tH) * r))

# Draw bounding box on ROI to remove
cv2.rectangle(original_image, (start_x, start_y), (end_x, end_y), (0,255,0), 2)
cv2.imshow('detected', original_image)

# Erase unwanted ROI (fill ROI with white)
cv2.rectangle(final, (start_x, start_y), (end_x, end_y), (255,255,255), -1)
cv2.imwrite('final.jpg', final)
cv2.waitKey(0)

How to remove specific tag/sticker/object from images using OpenCV?

I have hundreds of images of jewelry products. Some of them have a "best-seller" tag on them. The position of the tag differs from image to image. I want to iterate over all images and, if an image has this tag, remove it. The resulting image should render the background over the removed object's pixels.
Example of an image with Tag/sticker/object:
Tag/sticker/object to remove:
import numpy as np
import cv2

img = cv2.imread('./images/001.jpg')
sticker = cv2.imread('./images/tag.png', 1)
diff_im = cv2.absdiff(img, sticker)
I want the resulted image to be like this:
Here's a method using a modified scale-invariant template matching approach. The overall strategy:
Load template, convert to grayscale, perform canny edge detection
Load original image, convert to grayscale
Continuously rescale image, apply template matching using edges, and keep track of the correlation coefficient (higher value means better match)
Find coordinates of best fit bounding box then erase unwanted ROI
To begin, we load the template and perform Canny edge detection. Applying template matching on edges instead of the raw image removes color variation differences and gives a more robust result. Extracting edges from the template image:
Next we continuously scale down the image and apply template matching to the resized image. I maintain the aspect ratio with each resize using an old answer. Here's a visualization of the strategy:
The reason we resize the image is that standard template matching with cv2.matchTemplate is not robust and may give false positives if the dimensions of the template and the image do not match. To overcome this dimension issue, we use this modified approach:
Continuously resize the input image at various smaller scales
Apply template matching using cv2.matchTemplate and keep track of the largest correlation coefficient
The ratio/scale with the largest correlation coefficient will have the best matched ROI
Once the ROI is obtained, we can "delete" the logo by filling in the rectangle with white using
cv2.rectangle(final, (start_x, start_y), (end_x, end_y), (255,255,255), -1)
Detected -> Removed
import cv2
import numpy as np

# Resizes an image and maintains aspect ratio
def maintain_aspect_ratio_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    # Grab the image size and initialize dimensions
    dim = None
    (h, w) = image.shape[:2]

    # Return original image if no need to resize
    if width is None and height is None:
        return image

    # We are resizing height if width is none
    if width is None:
        # Calculate the ratio of the height and construct the dimensions
        r = height / float(h)
        dim = (int(w * r), height)
    # We are resizing width if height is none
    else:
        # Calculate the ratio of the width and construct the dimensions
        r = width / float(w)
        dim = (width, int(h * r))

    # Return the resized image
    return cv2.resize(image, dim, interpolation=inter)

# Load template, convert to grayscale, perform canny edge detection
template = cv2.imread('template.png')
template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
template = cv2.Canny(template, 50, 200)
(tH, tW) = template.shape[:2]
cv2.imshow("template", template)

# Load original image, convert to grayscale
original_image = cv2.imread('1.png')
final = original_image.copy()
gray = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
found = None

# Dynamically rescale image for better template matching
for scale in np.linspace(0.2, 1.0, 20)[::-1]:
    # Resize image to scale and keep track of ratio
    resized = maintain_aspect_ratio_resize(gray, width=int(gray.shape[1] * scale))
    r = gray.shape[1] / float(resized.shape[1])

    # Stop if template image size is larger than resized image
    if resized.shape[0] < tH or resized.shape[1] < tW:
        break

    # Detect edges in resized image and apply template matching
    canny = cv2.Canny(resized, 50, 200)
    detected = cv2.matchTemplate(canny, template, cv2.TM_CCOEFF)
    (_, max_val, _, max_loc) = cv2.minMaxLoc(detected)

    # Uncomment this section for visualization
    '''
    clone = np.dstack([canny, canny, canny])
    cv2.rectangle(clone, (max_loc[0], max_loc[1]), (max_loc[0] + tW, max_loc[1] + tH), (0,255,0), 2)
    cv2.imshow('visualize', clone)
    cv2.waitKey(0)
    '''

    # Keep track of correlation value
    # Higher correlation means better match
    if found is None or max_val > found[0]:
        found = (max_val, max_loc, r)

# Compute coordinates of bounding box
(_, max_loc, r) = found
(start_x, start_y) = (int(max_loc[0] * r), int(max_loc[1] * r))
(end_x, end_y) = (int((max_loc[0] + tW) * r), int((max_loc[1] + tH) * r))

# Draw bounding box on ROI to remove
cv2.rectangle(original_image, (start_x, start_y), (end_x, end_y), (0,255,0), 2)
cv2.imshow('detected', original_image)

# Erase unwanted ROI (fill ROI with white)
cv2.rectangle(final, (start_x, start_y), (end_x, end_y), (255,255,255), -1)
cv2.imshow('final', final)
cv2.waitKey(0)
Use cv.matchTemplate.
An example is provided in the documentation.
After finding the object, just draw the rectangle with a negative thickness to have it filled in white.
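A minimal sketch of that idea, assuming the tag appears at roughly the same scale as the template; the file names and the confidence threshold are placeholders, not part of the original answer:

import cv2

img = cv2.imread('product.jpg')      # hypothetical product image
template = cv2.imread('tag.png')     # hypothetical tag template
tH, tW = template.shape[:2]

# Find the location where the template matches best
result = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
_, max_val, _, max_loc = cv2.minMaxLoc(result)

# Fill the matched ROI with white; thickness=-1 fills the rectangle
if max_val > 0.8:  # assumed confidence threshold
    top_left = max_loc
    bottom_right = (max_loc[0] + tW, max_loc[1] + tH)
    cv2.rectangle(img, top_left, bottom_right, (255, 255, 255), -1)

cv2.imwrite('result.jpg', img)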

Get size and position of rotated rectangle after using cv2.resize

I have an image with e.g. Width=999 and Height=767. I know the contour of my ROI and have used rect=cv2.minAreaRect() to get the CenterPosition, Width, Height and Angle of the rotated rectangle around it. Now I need to resize the image to the size of another image with e.g. Width=4096 and Height=2160. So far I am using cv2.resize() to do so.
My problem is that after resizing, the boxPoints of my rectangle are of course located somewhere else, and the data in rect (CenterPosition, Width, Height and Angle of the rotated rectangle around the now-resized ROI) is not updated and therefore wrong. I have tried different workarounds, but haven't found a solution yet.
Here is my code:
import numpy as np
import cv2
#Create black image
img = np.zeros((767, 999, 3), np.uint8)
#Turn ROI to white
cv2.fillConvexPoly(img, np.array(ROI_contour), (255, 255, 255))
#Get Width, Height, and Angle of rectangle around ROI
rect = cv2.minAreaRect(np.array(ROI_contour))
#Draw rotated rectangle in red
box = cv2.boxPoints(rect)
box = np.int0(box)
cv2.drawContours(img,[box], 0, (0,0,255), 1)
#Resize img to new size
img_resized = cv2.resize(img, (4096, 2160), interpolation=cv2.INTER_AREA)
Here is how img e.g. could look like:
img with ROI in white before resizing - CenterPosition, Width, Height and Angle of ROI is known by rect.
How can I get new Width, Height and Angle of the resized ROI?
This is a simple unitary method.
In your example, h_old = 767, w_old = 999; h_new = 2160, w_new = 4096.
h_ratio = h_new / h_old ≈ 2.82, w_ratio = w_new / w_old ≈ 4.10
Say the center position, width and height of the rectangle found in the old image are (old_center_x, old_center_y), old_rect_width and old_rect_height, respectively.
Then the new values become:
(old_center_x * w_ratio, old_center_y * h_ratio), old_rect_width * w_ratio and old_rect_height * h_ratio, respectively.
Note that the aspect ratios of the two images differ (old_aspect_ratio = 999/767 ≈ 1.30, new_aspect_ratio = 4096/2160 ≈ 1.90), so the scaling is non-uniform: width and height must each be scaled by their own ratio as above, and the rectangle's angle will in general change as well rather than stay fixed.
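A minimal sketch of that scaling for the non-uniform case, using a hypothetical rect value; recomputing the rotated rectangle from the scaled corner points (rather than trying to scale the angle directly) is an assumption about a robust way to obtain the new angle:

import numpy as np
import cv2

h_old, w_old = 767, 999
h_new, w_new = 2160, 4096
w_ratio, h_ratio = w_new / w_old, h_new / h_old

# Hypothetical rect as returned by cv2.minAreaRect:
# ((center_x, center_y), (width, height), angle)
rect = ((450.0, 300.0), (200.0, 80.0), 30.0)

# Scale the corner points, then fit a new rotated rectangle to them
box = cv2.boxPoints(rect)
box_scaled = box * np.array([w_ratio, h_ratio], dtype=np.float32)
rect_new = cv2.minAreaRect(box_scaled.astype(np.float32))

print(rect_new)  # ((new_cx, new_cy), (new_w, new_h), new_angle)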

TypeError: dst is not a numpy array, neither a scalar - resize jpg python

I'm trying to resize an image for further perspective treatments and I'm getting this error.
File "/home/passerin/Documents/tesis/Project/test/scanner2/scan2.py", line 79, in resize
resized = cv2.resize(img, (height, width), interpolation)
TypeError: dst is not a numpy array, neither a scalar
Here's the code:
# load an image
flat_object=cv2.imread("/home/passerin/Documents/tesis/Project/test/scanner2/images/personal-foto-5.png")
# resize the image
flat_object_resized = resize(flat_object, height=600)
# make a copy
flat_object_resized_copy = flat_object.copy()
#convert to HSV color scheme
flat_object_resized_hsv = cv2.cvtColor(flat_object_resized_copy, cv2.COLOR_BGR2HSV)
# split HSV into three channels
hue, saturation, value = cv2.split(flat_object_resized_hsv)
This is where the error shows up:
def resize(img, width=None, height=None, interpolation=cv2.INTER_AREA):
    global ratio
    w, h, _ = img.shape
    if width is None and height is None:
        return img
    elif width is None:
        ratio = height / h
        width = int(w * ratio)
        # print(width)
        resized = cv2.resize(img, (height, width), interpolation)
        return resized
    else:
        ratio = width / w
        height = int(h * ratio)
        # print(height)
        resized = cv2.resize(img, (height, width), interpolation)
        return resized
You are calling cv2.resize with positional arguments, but according to the argument specification you left some out, so it thinks your third positional argument is for the dst parameter. Even though parameters are optional, if you don't supply them by keyword, the function expects them in the order given in the argspec. Try calling it with keyword arguments:
cv2.resize(src = img, dsize = (height, width), interpolation = interpolation)
or just
cv2.resize(img, (height, width), interpolation = interpolation)
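Applied to the resize helper from the question, a minimal corrected sketch might look like this; note that swapping the dimensions back is an extra assumed fix beyond the keyword-argument change, since cv2.resize expects dsize as (width, height) while img.shape is (height, width, channels):

import cv2

def resize(img, width=None, height=None, interpolation=cv2.INTER_AREA):
    # img.shape is (height, width, channels)
    h, w = img.shape[:2]
    if width is None and height is None:
        return img
    if width is None:
        ratio = height / h
        width = int(w * ratio)
    else:
        ratio = width / w
        height = int(h * ratio)
    # dsize is (width, height); pass interpolation by keyword
    return cv2.resize(img, (width, height), interpolation=interpolation)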

How do I make rectangular image squared using OpenCV and Python?

I have all sorts of images of rectangular shape. I need to modify them to a uniform square shape (different sizes are OK).
For that I have to layer each one on top of a larger square shape.
The background is black.
I figured it out to the point where I need to layer the two images:
import cv2
import numpy as np

if 1:
    img = cv2.imread(in_img)
    # get size
    height, width, channels = img.shape
    print(in_img, height, width, channels)
    # Create a black image
    x = height if height > width else width
    y = height if height > width else width
    square = np.zeros((x, y, 3), np.uint8)
    cv2.imshow("original", img)
    cv2.imshow("black square", square)
    cv2.waitKey(0)
How do I stack them on top of each other so original image is centered vertically and horizontally on top of black shape?
I figured it out. You need to "broadcast into shape":
square[(y-height)/2:y-(y-height)/2, (x-width)/2:x-(x-width)/2] = img
Final draft:
import cv2
import numpy as np

if 1:
    img = cv2.imread(in_img)
    # get size
    height, width, channels = img.shape
    print(in_img, height, width, channels)
    # Create a black image
    x = height if height > width else width
    y = height if height > width else width
    square = np.zeros((x, y, 3), np.uint8)
    #
    # This does the job
    #
    square[int((y-height)/2):int(y-(y-height)/2), int((x-width)/2):int(x-(x-width)/2)] = img
    cv2.imwrite(out_img, square)
    cv2.imshow("original", img)
    cv2.imshow("black square", square)
    cv2.waitKey(0)
You can use numpy.vstack to stack your images vertically and numpy.hstack to stack your images horizontally.
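For instance, a minimal sketch of the same centering done with numpy.vstack and numpy.hstack; the padding arithmetic and the file name are assumptions for illustration:

import cv2
import numpy as np

img = cv2.imread('input.jpg')   # hypothetical input image
h, w = img.shape[:2]
side = max(h, w)

# Black padding strips for each border
top = np.zeros(((side - h) // 2, w, 3), np.uint8)
bottom = np.zeros((side - h - top.shape[0], w, 3), np.uint8)
column = np.vstack([top, img, bottom])     # pad vertically first

left = np.zeros((side, (side - w) // 2, 3), np.uint8)
right = np.zeros((side, side - w - left.shape[1], 3), np.uint8)
square = np.hstack([left, column, right])  # then pad horizontally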
