How to rotate a bounding box in OpenCV and crop it (Python)

I have the coordinates of a rectangle (XMIN, YMIN, XMAX & YMAX) on a particular image. I wish to rotate the rectangle by a particular angle and then crop it from the image.
How do I do that?
For example, in this image I have the output bounding box appearing on the left side (plotted using XMIN, YMIN, XMAX & YMAX). I want to rotate it as in the image on the right side and then crop it.
Can someone show how to get this output with sample code, or point me to a link with an explanation?

Here is one way in Python/OpenCV.
Read the input
Convert to HSV
Do color thresholding on the green box
Get the outer contour
Print the bounding box
Rotate the image by 10 deg clockwise
Convert that image to HSV
Do color thresholding on the rotated green box
Get the outer contour
Create a black image with the white filled contour
Get the white pixel coordinates
Get the minAreaRect from the coordinates
Get the vertices of the rotated rectangle
Draw the rotated rectangle outline on the rotated image
Input:
import cv2
import numpy as np
from scipy import ndimage
# load image
img = cv2.imread("berry.png")
# convert to hsv
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# threshold using inRange for green
range1 = (20,200,170)
range2 = (80,255,255)
thresh = cv2.inRange(hsv,range1,range2)
# get bounding box coordinates from the one outer contour
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
x,y,w,h = cv2.boundingRect(contours[0])
print("bounding_box(x,y,w,h):",x,y,w,h)
# rotate image by 10 degrees clockwise
rotated = ndimage.rotate(img, -10, cval=255)
# convert rotated to hsv
hsv_rotated = cv2.cvtColor(rotated, cv2.COLOR_BGR2HSV)
# threshold using inRange for green
range1 = (20,200,170)
range2 = (80,255,255)
thresh_rotated = cv2.inRange(hsv_rotated,range1,range2)
# get bounding box coordinates from the one outer contour
contours = cv2.findContours(thresh_rotated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
# draw white filled contour on black background
mask = np.zeros_like(thresh_rotated)
cv2.drawContours(mask, [contours[0]], -1, (255), -1)
# get coordinates of white pixels in mask
coords = np.column_stack(np.where(mask.transpose() > 0))
# get rotated rectangle
rotrect = cv2.minAreaRect(coords)
# rotated rectangle box points
box = np.int0(cv2.boxPoints(rotrect))
print("rotate_box_corners:\n",box)
# draw rotated rectangle on rotated image
result = rotated.copy()
cv2.polylines(result, [box], True, (0,0,255), 1)
# write result to disk
cv2.imwrite("berry_thresh.png", thresh)
cv2.imwrite("berry_rotated.png", rotated)
cv2.imwrite("berry_thresh_rotated.png", thresh_rotated)
cv2.imwrite("berry_mask.png", mask)
cv2.imwrite("berry_rotated_box.png", result)
# display results
cv2.imshow("THRESH", thresh)
cv2.imshow("ROTATED", rotated)
cv2.imshow("THRESH_ROT", thresh_rotated)
cv2.imshow("MASK", mask)
cv2.imshow("RESULT", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold of green lines in input:
Rotated input:
Threshold of green lines in rotated image:
Filled threshold:
Result showing rotated rectangle on rotated image:
Input Bounding Box:
bounding_box(x,y,w,h): 12 13 212 124
Output Vertices:
rotate_box_corners:
[[222 172]
[ 14 136]
[ 35 14]
[243 51]]
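The original question also asks for the crop. Here is a minimal sketch of one way to do that (not part of the answer above); it assumes the rotated image, rotrect and box computed in the code above, and the corner ordering from cv2.boxPoints may need checking for your own image:
# Minimal sketch: crop the rotated green box out of the rotated image
# by warping its 4 corners to an upright rectangle.
w, h = int(rotrect[1][0]), int(rotrect[1][1])                    # rectangle size from minAreaRect
src_pts = box.astype(np.float32)                                 # 4 corners in the rotated image
dst_pts = np.float32([[0, h-1], [0, 0], [w-1, 0], [w-1, h-1]])   # corresponding upright corners
M = cv2.getPerspectiveTransform(src_pts, dst_pts)
crop = cv2.warpPerspective(rotated, M, (w, h))
cv2.imwrite("berry_crop.png", crop)                              # hypothetical output filename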

You can use a rotation matrix to rotate both the images and the bounding boxes.
Steps:
Generate a rotation matrix
Use OpenCV warpAffine to rotate the image
Rotate the 4 corners of the bounding box using the same rotation matrix
Read about getRotationMatrix2D and warpAffine
import cv2
import numpy as np
import matplotlib.pyplot as plt

img_path = '' #The image path
bb = [] #XMIN,YMIN,XMAX,YMAX
angle = 3 #Rotation angle in degrees, +ve is counter-clockwise
bb = np.array(((bb[0],bb[1]),(bb[2],bb[1]),(bb[2],bb[3]),(bb[0],bb[3]))) #Get all 4 corners of the box
img = cv2.imread(img_path) #Read the image
center = (img.shape[1]//2,img.shape[0]//2) #Center of the image as (x,y), i.e. (width/2, height/2)
rotMat = cv2.getRotationMatrix2D(center,angle,1.0) #Get the rotation matrix, its shape is 2x3
img_rotated = cv2.warpAffine(img,rotMat,img.shape[1::-1]) #Rotate the image
bb_rotated = np.vstack((bb.T,np.array((1,1,1,1)))) #Convert the points to [x,y,1] form so they can be multiplied by rotMat
bb_rotated = np.dot(rotMat,bb_rotated).T #Apply the rotation and get the points back in shape (4,2)
#Plot the original image and bb
plt.imshow(img)
plt.plot(
    np.append(bb[:,0],bb[0,0]),
    np.append(bb[:,1],bb[0,1])
)
plt.show()
#Plot the rotated image and bb
plt.imshow(img_rotated)
plt.plot(
    np.append(bb_rotated[:,0],bb_rotated[0,0]),
    np.append(bb_rotated[:,1],bb_rotated[0,1])
)
plt.show()
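If you also need the crop (the part of the question the code above does not cover), here is a minimal sketch under the same assumptions; it reuses img_rotated and bb_rotated from above and clips the rotated corners before taking their axis-aligned bounds:
#Minimal sketch (assumes img_rotated and bb_rotated from the code above)
#Clip the rotated corners to the image and crop their axis-aligned bounds
xs = np.clip(bb_rotated[:,0], 0, img_rotated.shape[1]-1)
ys = np.clip(bb_rotated[:,1], 0, img_rotated.shape[0]-1)
xmin, ymin = int(np.floor(xs.min())), int(np.floor(ys.min()))
xmax, ymax = int(np.ceil(xs.max())), int(np.ceil(ys.max()))
crop = img_rotated[ymin:ymax, xmin:xmax]
cv2.imwrite('rotated_bb_crop.png', crop) #Hypothetical output filename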

Related

How to find roi_corners based on lower and upper pink color range so it can be blurred in python with opencv

I am using hardcoded values in roi_corners to blur the number plate of a car, which is marked in pink. I want to find roi_corners by detecting the lower and upper pink color range, so that the plate can be blurred automatically by locating the pink color. The code below works fine; I only need help finding roi_corners programmatically based on the lower and upper pink range. The pink color range is provided below:
lower_color = np.array([158, 127, 0])
upper_color = np.array([179, 255, 255])
Here is the code I am using:
import cv2 as cv
import numpy as np
# Here I define the list of vertices of an example polygon ROI:
roi_corners = np.array([[(34,188),(30,214),(80,227),(82,200)]],dtype = np.int32)
print ('print roi_corners ')
print (roi_corners)
print (type (roi_corners)) # <class 'numpy.ndarray'>
# Read the original Image:
image = cv.imread('image_new.jpeg')
# create a blurred copy of the entire image:
blurred_image = cv.GaussianBlur(image,(43, 43), 30)
# create a mask for the ROI and fill in the ROI with (255,255,255) color :
mask = np.zeros(image.shape, dtype=np.uint8)
channel_count = image.shape[2]
ignore_mask_color = (255,)*channel_count
cv.fillPoly(mask, roi_corners, ignore_mask_color)
# create a mask for everywhere in the original image except the ROI, (hence mask_inverse) :
mask_inverse = np.ones(mask.shape).astype(np.uint8)*255 - mask
# combine all the masks and above images in the following way :
final_image = cv.bitwise_and(blurred_image, mask) + cv.bitwise_and(image, mask_inverse)
cv.imshow("image", image)
cv.imshow("final_image", final_image)
cv.waitKey()
cv.destroyAllWindows()
Here is one way to get the bounds of the pink on the license plate in Python OpenCV.
- Read the input
- Threshold on the pink
- Apply morphology to clean it up
- Get the contour
- Get the rotated rectangle corners from the contour
- Draw the rotated rectangle on the input image
- Save the results
Input:
import cv2
import numpy as np
# read image
img = cv2.imread("pink_license.jpg")
# get color bounds for the pink marks (thresholding directly in BGR here)
lower = (130,0,220) # lower bound for each BGR channel
upper = (170,255,255) # upper bound for each BGR channel
# create the mask by thresholding in the color range
thresh = cv2.inRange(img, lower, upper)
# apply morphology
kernel = np.ones((3,3), np.uint8)
morph = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = np.ones((7,7), np.uint8)
morph = cv2.morphologyEx(morph, cv2.MORPH_DILATE, kernel)
# get contour
contours = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
cntr = contours[0]
# get rotated rectangle from contour
rot_rect = cv2.minAreaRect(cntr)
box = cv2.boxPoints(rot_rect)
box = np.int0(box)
print(box)
# draw rotated rectangle on copy of img
rot_bbox = img.copy()
cv2.drawContours(rot_bbox,[box],0,(0,255,0),1)
# write images with the green rotated bounding box to disk
cv2.imwrite("pink_license_thresh.jpg", thresh)
cv2.imwrite("pink_license_morph.jpg", morph)
cv2.imwrite("pink_license_rot_rect.png", rot_bbox)
# display it
cv2.imshow("THRESHOLD", thresh)
cv2.imshow("MORPH", morph)
cv2.imshow("BBOX", rot_bbox)
cv2.waitKey(0)
Threshold image:
Morphology cleaned image:
Green Rotated Rectangle on input:
Corner Coordinates:
[[ 74 212]
[ 39 209]
[ 40 197]
[ 75 200]]
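To connect this back to the question, the detected corners can be used directly as roi_corners in the asker's blur code. A minimal sketch under the same assumptions (it reuses img and box from the answer above; the output filename is made up):
# Minimal sketch (assumes img and box from the code above)
roi_corners = box.reshape(1, -1, 2).astype(np.int32)  # shape (1,4,2), as cv2.fillPoly expects
blurred_image = cv2.GaussianBlur(img, (43, 43), 30)
mask = np.zeros(img.shape, dtype=np.uint8)
cv2.fillPoly(mask, roi_corners, (255,)*img.shape[2])
final_image = np.where(mask > 0, blurred_image, img)  # blur only inside the detected polygon
cv2.imwrite("pink_license_blurred.png", final_image)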

How to detect patches in binary images using OpenCV Python

I want to detect all the patches in the image. I have attached the code used to detect them:
import cv2
import numpy as np
import matplotlib.pyplot as plt
image=cv2.imread("bw2.jpg",0)
# convert to RGB
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# create a binary thresholded image
_, binary = cv2.threshold(gray, 0, 500, cv2.THRESH_BINARY_INV)
# show it
plt.imshow(gray, cmap="gray")
plt.show()
# find the contours from the thresholded image
contours, hierarchy = cv2.findContours(gray, cv2.RETR_TREE,
                                        cv2.CHAIN_APPROX_SIMPLE)
print("contours:", contours)
# draw all contours
for c in contours:
    if cv2.contourArea(c) < 3000:
        continue
    (x, y, w, h) = cv2.boundingRect(c)
    #cv2.rectangle(image, (x,y), (x+w,y+h), (0, 255, 0), 2)
    ## BEGIN - draw rotated rectangle
    rect = cv2.minAreaRect(c)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    cv2.drawContours(image,[box],0,(255,51,255),2)
# show the image with the drawn contours
plt.imshow(image)
#plt.imshow(im3)
cv2.imwrite("detectImg2.png",image)
plt.show()
I get the output image shown here.
I want to detect all of them. Can anyone tell me how to achieve this? I am new to image processing.
Here is how I would extract and rotate each blob in your image using Python OpenCV.
Read the input
Convert to gray
Threshold
Apply morphology open and close to clean small spots
Get all the external contours
Loop over each contour and do the following:
Draw the contour on a copy of the input image
Get the rotated rectangle of the contour and extract its center, dimensions and rotation angle
Get the corners of the rotated rectangle
Draw the rotated rectangle on another copy of the input
Correct the rotation angle for image unrotation
Generate a mask image with the filled rotated rectangle
Apply the mask image to the morphology-cleaned image to remove other nearby white regions
Get the affine warp matrix using the center and corrected rotation angle
Unrotate the masked image using warpAffine
Get the contour of the one blob in the unrotated image
Get the contour's bounding box
Crop the masked image (or alternately crop the input image)
Save the cropped image
Exit the loop
Save the contour and rotrect images
Input:
import cv2
import numpy as np
image = cv2.imread("bw2.jpg")
hh, ww = image.shape[:2]
# convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# create a binary thresholded image
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
# apply morphology
kernel = np.ones((7,7), np.uint8)
clean = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = np.ones((13,13), np.uint8)
clean = cv2.morphologyEx(clean, cv2.MORPH_CLOSE, kernel)
# get external contours
contours = cv2.findContours(clean, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
contour_img = image.copy()
rotrect_img = image.copy()
i = 1
for c in contours:
    # draw contour on input
    cv2.drawContours(contour_img,[c],0,(0,0,255),2)
    # get rotated rectangle from contour
    # get its dimensions
    # get angle relative to horizontal from rotated rectangle
    rotrect = cv2.minAreaRect(c)
    (center), (width,height), angle = rotrect
    box = cv2.boxPoints(rotrect)
    boxpts = np.int0(box)
    # draw rotated rectangle on copy of image
    cv2.drawContours(rotrect_img,[boxpts],0,(0,255,0),2)
    # from https://www.pyimagesearch.com/2017/02/20/text-skew-correction-opencv-python/
    # the `cv2.minAreaRect` function returns values in the
    # range [-90, 0); as the rectangle rotates clockwise the
    # returned angle tends to 0 -- in this special case we
    # need to add 90 degrees to the angle
    if angle < -45:
        angle = -(90 + angle)
    # otherwise, check width vs height
    else:
        if width > height:
            angle = -(90 + angle)
        else:
            angle = -angle
    # negate the angle for deskewing
    neg_angle = -angle
    # draw mask as filled rotated rectangle on black background the size of the input
    mask = np.zeros_like(clean)
    cv2.drawContours(mask,[boxpts],0,255,-1)
    # apply mask to cleaned image
    blob_img = cv2.bitwise_and(clean, mask)
    # Get rotation matrix
    #center = (width // 2, height // 2)
    M = cv2.getRotationMatrix2D(center, neg_angle, scale=1.0)
    #print('m: ',M)
    # deskew (unrotate) the rotated rectangle
    deskewed = cv2.warpAffine(blob_img, M, (ww, hh), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
    # threshold it again
    deskewed = cv2.threshold(deskewed, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
    # get bounding box of contour of deskewed rectangle
    cntrs = cv2.findContours(deskewed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cntrs = cntrs[0] if len(cntrs) == 2 else cntrs[1]
    cntr = cntrs[0]
    x,y,w,h = cv2.boundingRect(cntr)
    # crop to white region
    crop = deskewed[y:y+h, x:x+w]
    # alternately crop the input
    #crop = image[y:y+h, x:x+w]
    # save deskewed image
    cv2.imwrite("bw2_deskewed_{0}.png".format(i),crop)
    print("")
    i = i + 1
# save contour and rot rect images
cv2.imwrite("bw2_contours.png",contour_img)
cv2.imwrite("bw2_rotrects.png",rotrect_img)
# display results
cv2.imshow("thresh", thresh)
cv2.imshow("clean", clean)
cv2.imshow("contours", contour_img)
cv2.imshow("rectangles", rotrect_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Contour image:
Rotated rectangles images:
First 3 unrotated images:
Affine warp rotation angles:
13.916877746582031
-42.87890625
18.8118896484375
-44.333797454833984
-38.65980911254883
-37.25965881347656
8.806793212890625
14.931419372558594
-37.405357360839844
-34.99202346801758
35.537681579589844
-35.350345611572266
-42.3245735168457
50.12316131591797
-42.969085693359375
52.750038146972656
45.0
Your code is correct for detecting those patches; there is only a minor mistake here:
if cv2.contourArea(c) < 3000:
    continue
Reduce 3000 to 100 or lower, because this condition tells the loop to skip any contour whose area is below 3000.
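To choose a sensible cutoff, one option (a sketch of my own, not from either answer) is to print every contour area first and then filter with a lower threshold such as the suggested 100:
# Minimal sketch (assumes `contours` from the question's code above)
areas = sorted(cv2.contourArea(c) for c in contours)
print("contour areas:", areas)  # inspect these, then pick a cutoff below the smallest real patch
min_area = 100                  # value suggested in the answer above
kept = [c for c in contours if cv2.contourArea(c) >= min_area]
print("kept", len(kept), "of", len(contours), "contours")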

Is there any way to extract distorted rectangle/square from the image?

This is the image, and I want to fill the edges of this rectangle or square so that I can crop it using contours. What I have done so far: I used a Canny edge detector to find edges and then bitwise_or, which fills the rectangle a little but not completely. What can I do to fill this rectangle, or is there a way to crop it directly?
import cv2
import numpy as np

image = cv2.imread('C:/Users/hp/Desktop/segmentation/test3.jpeg')
img3 = img2 = image.copy()
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
img3 = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
lower = np.array([155,25,0])
upper = np.array([179,255,255])
mask = cv2.inRange(image, lower, upper)
edges = cv2.Canny(mask, 1, 255, apertureSize=7)
result = cv2.bitwise_or(edges, mask)
Here is one way to extract the bounds of the rectangle's white pixels in Python/OpenCV.
Read the input
Convert to gray
Threshold
Do Canny edge detection
Get Hough line segments and draw as white on black background
Get the bounds of the white pixels
Crop the input to the bounds
Input:
import cv2
import numpy as np
# load image and convert to grayscale
img = cv2.imread('rect_lines.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# threshold
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]
# apply canny edge detection
edges = cv2.Canny(thresh, 100, 200)
# get hough line segments
threshold = 100
minLineLength = 50
maxLineGap = 20
lines = cv2.HoughLinesP(thresh, 1, np.pi/360, threshold, minLineLength=minLineLength, maxLineGap=maxLineGap)
# draw lines
linear = np.zeros_like(thresh)
for [line] in lines:
    #print(line)
    x1 = line[0]
    y1 = line[1]
    x2 = line[2]
    y2 = line[3]
    cv2.line(linear, (x1,y1), (x2,y2), (255), 1)
# get bounds of white pixels
white = np.where(linear==255)
xmin, ymin, xmax, ymax = np.min(white[1]), np.min(white[0]), np.max(white[1]), np.max(white[0])
#print(xmin,xmax,ymin,ymax)
# draw bounding box on input
bounds = img.copy()
cv2.rectangle(bounds, (xmin,ymin), (xmax,ymax), (0,0,255))
# crop the image at the bounds
crop = img[ymin:ymax, xmin:xmax]
# save results
cv2.imwrite('rect_lines_edges.png', edges)
cv2.imwrite('rect_lines_hough.png', linear)
cv2.imwrite('rect_lines_bounds.png', bounds)
cv2.imwrite('rect_lines_crop.png', crop)
# display results
cv2.imshow("thresh", thresh)
cv2.imshow("edges", edges)
cv2.imshow("lines", linear)
cv2.imshow("bounds", bounds)
cv2.imshow("crop", crop)
cv2.waitKey(0)
cv2.destroyAllWindows()
Canny edges:
Hough line segments:
Bounding box on input:
Cropped image:
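If you need the tilted rectangle itself rather than its axis-aligned bounds, a minimal sketch (my addition, reusing linear and img from the code above) applies cv2.minAreaRect to the white Hough-line pixels, the same idea as in the first answer on this page:
# Minimal sketch (assumes `linear` and `img` from the code above)
coords = np.column_stack(np.where(linear.transpose() > 0)).astype(np.int32)  # (x,y) points of the drawn lines
rotrect = cv2.minAreaRect(coords)                           # tilted bounding rectangle
boxpts = np.int32(cv2.boxPoints(rotrect))
tilted = img.copy()
cv2.polylines(tilted, [boxpts], True, (0,0,255), 1)         # draw the tilted rectangle
cv2.imwrite('rect_lines_rotrect.png', tilted)               # hypothetical output filename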

Extract marked areas in image by same sized surrounding rectangles

I want to extract one or more areas of an image which are marked with an arbitrary shape (the shape edges always have the same color - red). The marked areas all have about the same size (not exactly), and I would like the cut-out rectangles to all have the same output size.
Can you give me a hint on how to do this? I guess one could use OpenCV for this task in Python, but I'm not familiar with it, so thanks for your help!
Edit: added an example image; the goal is to extract the red areas with rectangles of the same size.
Here is one way to do that in Python/OpenCV.
Read the gray input with red shapes drawn
Threshold on the red color of the shapes
Apply morphology close to ensure the shapes are continuous outlines with no gaps
Get the external contours and their bounding boxes
Compute the center of each bounding box and save it in a list, along with the maximum width and height over all the bounding boxes.
For each center, crop the input image to the maximum width and height and save the result
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('monet_shapes.png')
# threshold on red regions
lowerBound = np.array([0, 0, 150])
upperBound = np.array([100, 100, 255])
thresh = cv2.inRange(img, lowerBound, upperBound)
# apply morphology to ensure regions are continuous outlines and no gaps
kernel = np.ones((9,9), np.uint8)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# get external contours
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
wmax = 0
hmax = 0
# get bounding boxes and max width and max height from all boxes and centers
centers = []
for cntr in contours:
    # get bounding boxes
    x,y,w,h = cv2.boundingRect(cntr)
    cx = x + w // 2
    cy = y + h // 2
    cent = [cx,cy]
    centers.append(cent)
    if w > wmax:
        wmax = w
    if h > hmax:
        hmax = h
print(wmax,hmax)
# show thresh and result
cv2.imshow("thresh", thresh)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save threshold
cv2.imwrite("monet_shapes_thresh.png",thresh)
# crop bounding boxes of size maxw, maxh about centers and save
i = 1
for cent in centers:
    cx = cent[0]
    cy = cent[1]
    box = img[cy-hmax//2:cy+hmax//2, cx-wmax//2:cx+wmax//2]
    cv2.imwrite("blackbox_result_{0}.png".format(i),box)
    i = i + 1
Threshold image:
Resulting 5 cropped regions:
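One caveat worth noting (my addition, not part of the answer above): if a marked area lies close to the image border, the fixed-size window can run outside the image and that crop comes out smaller than wmax x hmax. A minimal sketch that shifts the window back inside the image so every crop keeps the same size:
# Minimal sketch (assumes img, centers, wmax, hmax from the code above)
hh, ww = img.shape[:2]
i = 1
for cx, cy in centers:
    x0 = min(max(cx - wmax//2, 0), ww - wmax)  # shift the window to stay inside the image
    y0 = min(max(cy - hmax//2, 0), hh - hmax)
    box = img[y0:y0+hmax, x0:x0+wmax]          # always hmax x wmax pixels
    cv2.imwrite("blackbox_clamped_{0}.png".format(i), box)  # hypothetical output filename
    i = i + 1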

How to use OpenCV to crop an image based on a certain criteria?

I would like to crop the images like the one below using python's OpenCV library. The area of interest is inside the squiggly lines on the top and bottom, and the lines on the side. The problem is that every image is slightly different. This means that I need some automated way of cropping for the area of interest. I guess the top and the sides would be easy since you could just crop it by 10 pixels or so. But how can I crop out the bottom half of the image where the line is not straight? I have included this example image. The image that follows highlights in pink the area of the image that I am interested in keeping.
Here is one way using Python/OpenCV.
Read input
Get center point (assume it is inside the desired region)
Convert image to grayscale
Floodfill the gray image and set background to black
Get the largest contour and its bounding box
Draw the largest contour as filled on black background as mask
Apply the mask to the input image
Crop the masked input image
Input:
import cv2
import numpy as np
# load image and get dimensions
img = cv2.imread("odd_region.png")
hh, ww, cc = img.shape
# compute center of image (as integer)
wc = ww//2
hc = hh//2
# create grayscale copy of input as basis of mask
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# create zeros mask 2 pixels larger in each dimension
zeros = np.zeros([hh + 2, ww + 2], np.uint8)
# do floodfill at center of image as seed point
ffimg = cv2.floodFill(gray, zeros, (wc,hc), (255), (0), (0), flags=8)[1]
# set rest of ffimg to black
ffimg[ffimg!=255] = 0
# get contours, find largest and its bounding box
contours = cv2.findContours(ffimg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
area_thresh = 0
for cntr in contours:
    area = cv2.contourArea(cntr)
    if area > area_thresh:
        area_thresh = area
        outer_contour = cntr
x,y,w,h = cv2.boundingRect(outer_contour)
# draw the filled contour on a black image
mask = np.full([hh,ww,cc], (0,0,0), np.uint8)
cv2.drawContours(mask,[outer_contour],0,(255,255,255),thickness=cv2.FILLED)
# mask the input
masked_img = img.copy()
masked_img[mask == 0] = 0
#masked_img[mask != 0] = img[mask != 0]
# crop the bounding box region of the masked img
result = masked_img[y:y+h, x:x+w]
# draw the contour outline on a copy of result
result_outline = result.copy()
cv2.drawContours(result_outline,[outer_contour],0,(0,0,255),thickness=1,offset=(-x,-y))
# display it
cv2.imshow("img", img)
cv2.imshow("ffimg", ffimg)
cv2.imshow("mask", mask)
cv2.imshow("masked_img", masked_img)
cv2.imshow("result", result)
cv2.imshow("result_outline", result_outline)
cv2.waitKey(0)
cv2.destroyAllWindows()
# write result to disk
cv2.imwrite("odd_region_cropped.png", result)
cv2.imwrite("odd_region_cropped_outline.png", result_outline)
Result:
Result With Contour Drawn:
