OpenCV copy irregular contour region to another image after contour rotation - python

I am working with an image with distorted/rotated texts. I need to rotate these text blobs back to the horizontal level (0 degrees) before I can run OCR on them. I managed to fix the rotation issue but now I need to find a way to copy the contents of the original contour to the rotated matrix.
Here are a few things I've done to extract and fix the rotation issue:
Find contour
Heavy dilation and remove non-text lines
Find the contour angle and do angle correction in the polar space.
I have tried using affine transformation to rotate the rectangle text blobs but it ended up cropping out some of the texts because some of the text blobs are irregular. Result here
Blue dots in the contours are centroids, and the numbers are contour angles. How can I copy the content of the unrotated contours, rotate them, and copy them into a new image?
Code
def getContourCenter(contour):
    M = cv2.moments(contour)
    if M["m00"] != 0:
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
    else:
        return 0, 0
    return int(cx), int(cy)
def rotateContour(contour, center: tuple, angle: float):

    def cart2pol(x, y):
        theta = np.arctan2(y, x)
        rho = np.hypot(x, y)
        return theta, rho

    def pol2cart(theta, rho):
        x = rho * np.cos(theta)
        y = rho * np.sin(theta)
        return x, y

    # Translate the contour so that the center becomes the origin
    norm = contour - [center[0], center[1]]

    # Convert the points to polar coordinates, add the rotation, and convert back to Cartesian coordinates
    coordinates = norm[:, 0, :]
    xs, ys = coordinates[:, 0], coordinates[:, 1]
    thetas, rhos = cart2pol(xs, ys)

    thetas = np.rad2deg(thetas)
    thetas = (thetas + angle) % 360
    thetas = np.deg2rad(thetas)

    # Convert the new polar coordinates back to Cartesian coordinates
    xs, ys = pol2cart(thetas, rhos)
    norm[:, 0, 0] = xs
    norm[:, 0, 1] = ys

    rotated = norm + [center[0], center[1]]
    rotated = rotated.astype(np.int32)
    return rotated
def straightenText(image, vis):
    # create a new mat
    mask = 0 * np.ones([image.shape[0], image.shape[1], 3], dtype=np.uint8)

    # invert pixel index arrangement and dilate aggressively
    dilate = cv2.dilate(~image, ImageUtils.box(33, 1))

    # find contours
    _, contours, hierarchy = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    for contour in contours:
        [x, y, w, h] = cv2.boundingRect(contour)
        if w > h:
            # find contour angle and centers
            (x, y), (w, h), angle = cv2.minAreaRect(contour)
            cx, cy = getContourCenter(contour)

            # fix angle returned
            if w < h:
                angle = 90 + angle

            # fix contour angle
            rotatedContour = rotateContour(contour, (cx, cy), 0 - angle)

            cv2.drawContours(vis, contour, -1, (0, 255, 0), 2)
            cv2.drawContours(mask, rotatedContour, -1, (255, 0, 0), 2)
            cv2.circle(vis, (cx, cy), 2, (0, 0, 255), 2, 8)  # centroid
            cv2.putText(vis, str(round(angle, 2)), (cx, cy), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, 0), 2)

Here is one way, which is the simplest way I can think of to do it in Python/OpenCV, though it may not be optimal in speed.
Create a white empty image for the desired output (so we have black text on a white background in case you need to do OCR).
Get the rotated bounding rectangle of your contour in the input.
Get the normal bounding rectangle of your contour in the output.
Get the 4 bounding box corners for each.
Compute an affine transform matrix between the two sets of 4 corner points.
Warp the (whole) input image to the same size (non-optimal).
Use the output bounding box dimensions and upper-left corner with numpy slicing to transfer the region in the warped image to the same region in the white output image.
Repeat for each text contour, using the resulting image in place of the original white image as the new destination image.
So here is a simulation to show you how.
Source Text Image:
Source Text Image with Red Rotated Rectangle:
Desired Bounding Rectangle in White Destination Image:
Text Transferred To White Image into Desired Rectangle Region:
Code:
import cv2
import numpy as np
# Read source text image.
src = cv2.imread('text_on_white.png')
hs, ws, cs = src.shape
# Read the same text image with the red rotated bounding box drawn (used only for visualization)
src2 = cv2.imread('text2_on_white.png')
# create white destination image
dst = np.full((hs,ws,cs), (255,255,255), dtype=np.uint8)
# define coordinates of bounding box in src
src_pts = np.float32([[51,123], [298,102], [300,135], [54,157]])
# size and placement of text in dst is (i.e. bounding box):
xd = 50
yd = 200
wd = 249
hd = 123
dst_pts = np.float32([[50,200], [298,200], [298,234], [50,234]])
# get rigid affine transform (no skew)
# use estimateRigidTransform rather than getAffineTransform so can use all 4 points
matrix = cv2.estimateRigidTransform(src_pts, dst_pts, 0)
# warp the source image
src_warped = cv2.warpAffine(src, matrix, (ws,hs), flags=cv2.INTER_AREA, borderValue=(255,255,255))
# do numpy slicing on warped source and place in white destination
dst[yd:yd+hd, xd:xd+wd] = src_warped[yd:yd+hd, xd:xd+wd]
# show results
cv2.imshow('SRC', src)
cv2.imshow('SRC2', src2)
cv2.imshow('SRC_WARPED', src_warped)
cv2.imshow('DST', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save results
cv2.imwrite('text_on_white_transferred.png', dst)
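Note: cv2.estimateRigidTransform is no longer available in OpenCV 4.x. If you are on a newer version, cv2.estimateAffinePartial2D is the usual replacement; a minimal substitution with the same src_pts and dst_pts as above would be:
# OpenCV 4.x replacement: returns the 2x3 affine matrix plus an inlier mask
matrix, inliers = cv2.estimateAffinePartial2D(src_pts, dst_pts)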

To extract ONLY the content of a single contour, and not its larger bounding box, you can create a mask by drawing a filled contour and then applying that to the original image. In your case you would need something like this:
# prepare the target image
resX, resY = image.shape[1], image.shape[0]
target = np.zeros((resY, resX, 3), dtype=np.uint8)
target.fill(255)  # make it entirely white

# find the contours
allContours, hierarchy = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

# then perform rotation, etc, per contour
for contour in allContours:
    # create empty mask
    mask = np.zeros((resY, resX, 1), dtype=np.uint8)

    # draw the contour filled into the mask
    cv2.drawContours(mask, [contour], -1, (255), thickness=cv2.FILLED)

    # copy the relevant part into a new image
    # (you might want to use bounding box here for more efficiency)
    single = cv2.bitwise_and(image, image, mask=mask)

    # then apply your rotation operations both on the mask and the result
    single = doContourSpecificOperation(single)
    mask = doContourSpecificOperation(mask)

    # then, put the result into your target image (which was originally white)
    target = cv2.bitwise_and(target, single, mask=mask)

Related

How to detect circle defects?

Is there any way to tell if a circle has such defects? Roundness does not work. Or is there a way to eliminate them?
from math import pi

perimeter = cv2.arcLength(cnts[0], True)
area = cv2.contourArea(cnts[0])
roundness = 4 * pi * area / (perimeter * perimeter)
print("Roundness:", roundness)
The "roundness" measure is sensitive to a precise estimate of the perimeter. What cv2.arcLength() does is add the lengths of each of the polygon edges, which severely overestimates the length of outlines. I think this is the main reason that this measure hasn't worked for you. With a better perimeter estimator you would get useful results.
An alternative measure that might be more useful is "circularity", defined as the coefficient of variation of the radius. In short, you compute the distance of each polygon vertex (i.e. outline point) to the centroid, then determine the coefficient of variation of these distances (== std / mean).
I wrote a quick Python script to compute this starting from an OpenCV contour:
import cv2
import numpy as np

# read in OP's example image, making sure we ignore the red arrow
img = cv2.imread('jGssp.png')[:, :, 1]
_, img = cv2.threshold(img, 127, 255, 0)

# get the contour of the shape
contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contour = contours[0][:, 0, :]

# add the first point as the last, to close it
contour = np.concatenate((contour, contour[0, None, :]))

# compute centroid
def cross_product(v1, v2):
    """2D cross product."""
    return v1[0] * v2[1] - v1[1] * v2[0]

sum = 0.0
xsum = 0.0
ysum = 0.0
for ii in range(1, contour.shape[0]):
    v = cross_product(contour[ii - 1, :], contour[ii, :])
    sum += v
    xsum += (contour[ii - 1, 0] + contour[ii, 0]) * v
    ysum += (contour[ii - 1, 1] + contour[ii, 1]) * v
centroid = np.array([xsum, ysum]) / (3 * sum)

# Compute coefficient of variation of distances to centroid (== circularity)
d = np.sqrt(np.sum((contour - centroid) ** 2, axis=1))
circularity = np.std(d) / np.mean(d)
This makes me think of a similar problem that I had. You could compute the signature of the shape. The signature can be defined as: for each pixel on the border of the shape, the distance between this pixel and the center of the shape.
For a perfect circle, the distance from the border to the center should be constant (in an ideal continuous world). When defects are visible on the edge of the circle (either dents or excesses), the ideal constant line turns into a wiggly curve, with large variations at the defects.
It's fairly easy to detect those variations with an FFT, for example, which lets you quantify how significant the defects are.
You can extend this solution to any given shape. If your ideal shape is a square, the signature will give you some kind of sinusoidal curve. Defects will appear on the curve in the same way and can be detected with the same logic as for a circle.
I can't give you a code example, as the project was for a company, but the idea is still there.
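For readers who want to try the idea, here is a minimal sketch of the signature-plus-FFT approach described above (an illustration under assumptions, not the original project code). The filename and the scoring formula are placeholders:
import cv2
import numpy as np

# assumes a binary image (uint8, 0/255) containing one shape; the filename is hypothetical
mask = cv2.imread('shape_mask.png', cv2.IMREAD_GRAYSCALE)
cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
pts = cnts[0][:, 0, :].astype(np.float64)

# centroid from image moments
M = cv2.moments(cnts[0])
cx, cy = M['m10'] / M['m00'], M['m01'] / M['m00']

# signature: distance of each border pixel to the centroid
signature = np.sqrt((pts[:, 0] - cx) ** 2 + (pts[:, 1] - cy) ** 2)

# FFT of the zero-mean signature; a clean circle leaves very little energy here
spectrum = np.abs(np.fft.rfft(signature - signature.mean()))

# crude defect score: spectral energy relative to the mean radius (near 0 for a clean circle)
defect_score = spectrum.sum() / (signature.mean() * len(signature))
print("defect score:", defect_score)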
Here is one way to do that in Python/OpenCV.
Read the input
Threshold on white (to remove the red arrow)
Apply Hough Circle
Draw the circle on the thresholded image for comparison
Draw a white filled circle on black background from the circle parameters.
Get the difference between the thresholded image and the drawn circle image
Apply morphology open to remove the ring from the irregular boundary of the original circle
Count the number of white pixels in the previous image as the amount of defect
Input:
import cv2
import numpy as np
# Read image
img = cv2.imread('circle_defect.png')
hh, ww = img.shape[:2]
# threshold on white to remove red arrow
lower = (255,255,255)
upper = (255,255,255)
thresh = cv2.inRange(img, lower, upper)
# get Hough circles
min_dist = int(ww/5)
circles = cv2.HoughCircles(thresh, cv2.HOUGH_GRADIENT, 1, minDist=min_dist, param1=150, param2=15, minRadius=0, maxRadius=0)
print(circles)
# draw circles on input thresh (without red arrow)
circle_img = thresh.copy()
circle_img = cv2.merge([circle_img,circle_img,circle_img])
for circle in circles[0]:
    # draw the circle outline on the copy of the thresholded image
    (x, y, r) = circle
    x = int(x)
    y = int(y)
    r = int(r)
    cv2.circle(circle_img, (x, y), r, (0, 0, 255), 1)
# draw filled circle on black background
circle_filled = np.zeros_like(thresh)
cv2.circle(circle_filled, (x,y), r, 255, -1)
# get difference between the thresh image and the circle_filled image
diff = cv2.absdiff(thresh, circle_filled)
# apply morphology to remove ring
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
result = cv2.morphologyEx(diff, cv2.MORPH_OPEN, kernel)
# count non-zero pixels
defect_count = np.count_nonzero(result)
print("defect count:", defect_count)
# save results
cv2.imwrite('circle_defect_thresh.jpg', thresh)
cv2.imwrite('circle_defect_circle.jpg', circle_img)
cv2.imwrite('circle_defect_circle_diff.jpg', diff)
cv2.imwrite('circle_defect_detected.png', result)
# show images
cv2.imshow('thresh', thresh)
cv2.imshow('circle_filled', circle_filled)
cv2.imshow('diff', diff)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Input without Red Arrow:
Red Circle Drawn on Input:
Circle from HoughCircle:
Difference:
Difference Cleaned Up:
Textual Result:
defect count: 500

OpenCV: using Canny and Shi-Tomasi to detect round corners of a playing card

I want to do some planar rectification, to convert from left to right:
I have the code to do the correction, but I need the 4 corner coords.
I'm using the following code to find them:
import cv2
image = cv2.imread('input.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
canny = cv2.Canny(gray, 120, 255, 1)
corners = cv2.goodFeaturesToTrack(canny,4,0.5,50)
for corner in corners:
    x, y = corner.ravel()
    cv2.circle(image, (x, y), 5, (36, 255, 12), -1)
cv2.imshow("result", image)
cv2.waitKey()
It reads the image, and transforms it to grayscale + canny
But the resultant corners (found by cv2.goodFeaturesToTrack) aren't the desired ones:
I need the external corners of the card, any clue to achieve it?
Thanks
This is the input.png:
Update: Added four point perspective transform.
I have skipped the perspective transform discussion, as the question is about finding the right corners.
You can skip the loop by getting the contour with the maximum area and then processing it. Some blurring may help further. Press the Esc key to step to the next image output.
Another useful method: how to find corner points of a shape in an image in OpenCV?
Output Images
Code
"""
Task: Detect card corners and fix perspective
"""
import cv2
import numpy as np
img = cv2.imread('resources/KSuVq.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray,127,255,0)
cv2.imshow('Thresholded original',thresh)
cv2.waitKey(0)
## Get contours
contours,h = cv2.findContours(thresh,cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
## only draw contour that have big areas
imx = img.shape[0]
imy = img.shape[1]
lp_area = (imx * imy) / 10
#################################################################
# Four point perspective transform
# https://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/
#################################################################
def order_points(pts):
    # initialize a list of coordinates that will be ordered
    # such that the first entry in the list is the top-left,
    # the second entry is the top-right, the third is the
    # bottom-right, and the fourth is the bottom-left
    rect = np.zeros((4, 2), dtype = "float32")

    # the top-left point will have the smallest sum, whereas
    # the bottom-right point will have the largest sum
    s = pts.sum(axis = 1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]

    # now, compute the difference between the points, the
    # top-right point will have the smallest difference,
    # whereas the bottom-left will have the largest difference
    diff = np.diff(pts, axis = 1)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]

    # return the ordered coordinates
    return rect
def four_point_transform(image, pts):
    # obtain a consistent order of the points and unpack them
    # individually
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    # compute the width of the new image, which will be the
    # maximum distance between bottom-right and bottom-left
    # x-coordinates or the top-right and top-left x-coordinates
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))

    # compute the height of the new image, which will be the
    # maximum distance between the top-right and bottom-right
    # y-coordinates or the top-left and bottom-left y-coordinates
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))

    # now that we have the dimensions of the new image, construct
    # the set of destination points to obtain a "birds eye view",
    # (i.e. top-down view) of the image, again specifying points
    # in the top-left, top-right, bottom-right, and bottom-left
    # order
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype = "float32")

    # compute the perspective transform matrix and then apply it
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))

    # return the warped image
    return warped
#################################################################
## Get only rectangles given exceeding area
for cnt in contours:
    approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
    ## calculate number of vertices
    #print(len(approx))

    if len(approx) == 4 and cv2.contourArea(cnt) > lp_area:
        print("rectangle")

        tmp_img = img.copy()
        cv2.drawContours(tmp_img, [cnt], 0, (0, 255, 255), 6)
        cv2.imshow('Contour Borders', tmp_img)
        cv2.waitKey(0)

        tmp_img = img.copy()
        cv2.drawContours(tmp_img, [cnt], 0, (255, 0, 255), -1)
        cv2.imshow('Contour Filled', tmp_img)
        cv2.waitKey(0)

        # Make a hull around the contour and draw it on the original image
        tmp_img = img.copy()
        mask = np.zeros((img.shape[:2]), np.uint8)
        hull = cv2.convexHull(cnt)
        cv2.drawContours(mask, [hull], 0, (255, 255, 255), -1)
        cv2.imshow('Convex Hull Mask', mask)
        cv2.waitKey(0)

        # Draw minimum area rectangle
        tmp_img = img.copy()
        rect = cv2.minAreaRect(cnt)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(tmp_img, [box], 0, (0, 0, 255), 2)
        cv2.imshow('Minimum Area Rectangle', tmp_img)
        cv2.waitKey(0)

        # Draw bounding rectangle
        tmp_img = img.copy()
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(tmp_img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imshow('Bounding Rectangle', tmp_img)
        cv2.waitKey(0)

        # Bounding Rectangle and Minimum Area Rectangle
        tmp_img = img.copy()
        rect = cv2.minAreaRect(cnt)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(tmp_img, [box], 0, (0, 0, 255), 2)
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(tmp_img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imshow('Bounding Rectangle', tmp_img)
        cv2.waitKey(0)

        # determine the most extreme points along the contour
        # https://www.pyimagesearch.com/2016/04/11/finding-extreme-points-in-contours-with-opencv/
        tmp_img = img.copy()
        extLeft = tuple(cnt[cnt[:, :, 0].argmin()][0])
        extRight = tuple(cnt[cnt[:, :, 0].argmax()][0])
        extTop = tuple(cnt[cnt[:, :, 1].argmin()][0])
        extBot = tuple(cnt[cnt[:, :, 1].argmax()][0])
        cv2.drawContours(tmp_img, [cnt], -1, (0, 255, 255), 2)
        cv2.circle(tmp_img, extLeft, 8, (0, 0, 255), -1)
        cv2.circle(tmp_img, extRight, 8, (0, 255, 0), -1)
        cv2.circle(tmp_img, extTop, 8, (255, 0, 0), -1)
        cv2.circle(tmp_img, extBot, 8, (255, 255, 0), -1)
        print("Corner Points: ", extLeft, extRight, extTop, extBot)
        cv2.imshow('img contour drawn', tmp_img)
        cv2.waitKey(0)
        #cv2.destroyAllWindows()

        ## Perspective Transform
        tmp_img = img.copy()
        pts = np.array([extLeft, extRight, extTop, extBot])
        warped = four_point_transform(tmp_img, pts)
        cv2.imshow("Warped", warped)
        cv2.waitKey(0)

cv2.destroyAllWindows()
References
https://docs.opencv.org/4.5.0/dd/d49/tutorial_py_contour_features.html
https://www.pyimagesearch.com/2016/04/11/finding-extreme-points-in-contours-with-opencv/
https://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/
Canny is a tool for edge detection, and if correctly tuned it does what it says on the tin.
Once you get the edges, you must define what a corner is. For instance, is it a sharp turn in an edge?
You'd like to use the function cv2.goodFeaturesToTrack, which is supposed to be a corner detection tool, but once again, what is a corner? It uses the Shi-Tomasi algorithm to find the N "best" corners in an image, which is just a threshold and some minimum distance between points.
In the end, it is almost guaranteed not to give you exactly the four corners you want. You should try these alternatives and stick with the best option:
try to get more corners and geometrically determine the four "outermost" ones (see the sketch after this list).
combine your method with some other transformation or object matching. For instance, if you are looking for a rectangular-ish image, try to match it against a template, compute the transform matrix, and resolve the edges after the transformation.
use a different edge detection method, or a combination of methods.
Note that a card doesn't have sharp corners like a piece of paper, so you'll end up cropping the card or skewing it if you use any "corner" on the rounded edges, or if you try to locate an edge outside the actual "white" of the card to avoid the skew (try to inscribe the card into a sharp-edged rectangle). Note that Canny is not effective in this case.
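As a rough sketch of the first alternative above (my own illustration with arbitrary parameter choices, reusing the question's input.png): ask goodFeaturesToTrack for many candidate corners, then pick the four outermost ones with the same sum/difference trick used by order_points in the other answer.
import cv2
import numpy as np

img = cv2.imread('input.png')                      # the card image from the question
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
canny = cv2.Canny(gray, 120, 255, 1)

# ask for many candidates instead of exactly 4 (counts and thresholds are arbitrary)
candidates = cv2.goodFeaturesToTrack(canny, 50, 0.01, 10)
pts = candidates[:, 0, :]

# smallest/largest x + y give top-left/bottom-right; smallest/largest y - x give top-right/bottom-left
s = pts.sum(axis=1)
d = np.diff(pts, axis=1).ravel()
corners = np.array([pts[np.argmin(s)],    # top-left
                    pts[np.argmin(d)],    # top-right
                    pts[np.argmax(s)],    # bottom-right
                    pts[np.argmax(d)]])   # bottom-left
print(corners)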
Here is one way to find the corners in Python OpenCV. Note this is more complicated, since the green dots on the input complicate the issue and they likely would not be in the actual input image. One could simply threshold on the green dots using cv2.inRange() to find them, but I will assume this is not really what you want.
- Read the input
- Convert to gray
- Threshold
- Get the largest contour and draw it on the input
- Reduce the number of vertices in the contour as a polygon and draw the polygon on the input.
- The polygon has 5 vertices and two are virtually the same. Normally, one would get 4 vertices if the green dots were not there. So draw a white filled polygon on a black background.
- Get the corners from the white polygon on the black background and draw these vertices
- Save the results
Input:
import cv2
import numpy as np
import time
# load image
img = cv2.imread("hello.png")
# convert to gray
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# threshold
thresh = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)[1]
# get the largest contour
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
big_contour = max(contours, key=cv2.contourArea)
peri = cv2.arcLength(big_contour, True)
# draw contour on input in red
result = img.copy()
result2 = np.zeros_like(img)
cv2.drawContours(result, [big_contour], 0, (0,0,255), 1)
cv2.drawContours(result2, [big_contour], 0, (0,0,255), 1)
# reduce to fewer vertices on polygon
poly = cv2.approxPolyDP(big_contour, 0.1 * peri, False)
# draw polygon on input in green
cv2.polylines(result, [poly], False, (0,255,0), 1)
cv2.polylines(result2, [poly], False, (0,255,0), 1)
# list polygon points
print("Polygon Points:")
for p in poly:
    px = p[0][0]
    py = p[0][1]
    print(px, py)
print('')
# draw white filled polygon on black background
result3 = np.zeros_like(thresh)
cv2.fillPoly(result3,[poly],255)
# get corners
corners = cv2.goodFeaturesToTrack(result3,4,0.01,50,useHarrisDetector=True,k=0.04)
# print corner coords and draw circles
result3 = cv2.merge([result3,result3,result3])
print("Corners:")
for c in corners:
    x, y = c.ravel()
    print(int(x), int(y))
    cv2.circle(result3, (int(x), int(y)), 3, (0, 0, 255), -1)
# save result
cv2.imwrite("hello_contours.png", result)
cv2.imwrite("hello_polygon.png", result2)
cv2.imwrite("hello_corners.png", result3)
# display it
cv2.imshow("thresh", thresh)
cv2.imshow("result", result)
cv2.imshow("result2", result2)
cv2.imshow("result3", result3)
cv2.waitKey(0)
Contours and Polygon on input image:
Contours and Polygon on black background:
Polygon Vertices:
227 69
41 149
114 284
307 167
228 70
Note the first and last vertices are within one pixel of each other
Corners on white polygon on black background:
Corner Vertices:
306 167
42 149
114 283
227 69

Finding length of each line segment passing through centroid, and how to constrict the line till the outer contour

Input Image
Processed Image
import numpy as np
import cv2
img = cv2.imread('Image(i).png', 0)
ret, img =cv2.threshold(img, 128, 255, cv2.THRESH_BINARY)
img_bw = img<=120
img_bw =img_bw.astype('uint8')
#Fit the ellipses
contours0, hierarchy = cv2.findContours( img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
outer_ellipse = [cv2.approxPolyDP(contours0[0], 0.1, True)]
inner_ellipse = [cv2.approxPolyDP(contours0[0], 0.1, True)]
ref = np.zeros_like(img_bw)
out=img.copy()
h, w = img.shape[:2]
vis = np.zeros((h, w, 3), np.uint8)
cv2.drawContours( vis, outer_ellipse, -1, (255,0,0), 1)
cv2.drawContours( vis, inner_ellipse, -1, (0,0,255), 1)
##Extract contour of ellipses
cnt_outer = np.vstack(outer_ellipse).squeeze()
cnt_inner = np.vstack(inner_ellipse).squeeze()
#Determine centroid
M = cv2.moments(cnt_inner)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
print (cx, cy)
#Draw full segment lines
#cv2.line(vis,(cx,0),(cx,w),(150,0,0),1)
width = img.shape[1]
height = img.shape[0]
N = 20
for i in range(N):
    tmp = np.zeros_like(img_bw)
    theta = i * (360 / N)
    theta *= np.pi / 180.0
    cv2.line(tmp, (cx, cy),
             (int(cx - np.cos(theta) * w),
              int(cy + np.sin(theta) * h)), (150, 0, 0), 1)
    (row, col) = np.nonzero(np.logical_and(tmp, ref))
    #cv2.line(out, (cx, cy), (col,row),(255,0,0), 1)
# Show the image
cv2.imshow('Output', out)
cv2.waitKey(0)
cv2.destroyAllWindows()
As seen in the processed image, the lines passing through the centroid are not stopped at the outer contour; they pass through it.
I want the lines to stop at the outer contour so that I can measure the distance from the centroid to the outer contour.
The first image is the input image and the second image shows the line segments passing through the centroid.
Here's a possible approach:
draw your outer contour filled with black on a white background
You now have a black ellipse. Then, without actually drawing anything:
use skimage.draw.line to get the list of points along all your radii
use Numpy argmax() to get first white pixel along radii
Here is the code:
#!/usr/bin/env python3
import cv2
import math
from skimage.draw import line
import numpy as np
# Load image as greyscale
img = cv2.imread('ellipses.png', cv2.IMREAD_GRAYSCALE)
_, img = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY)
h, w = img.shape
#Fit the ellipses
contours, hierarchy = cv2.findContours( img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
outer_ellipse = [cv2.approxPolyDP(contours[0], 0.1, True)]
# Draw outer contour filled with black on white background
vis = np.zeros_like(img) + 255
cnt = cv2.drawContours(vis, outer_ellipse, -1, 0, -1)
# Centroid by existing method
cx, cy = 365, 335
maxThickness = 0
# Take 10 points along top
for x in range(0, w, int(w/10)):
    # ... and bottom
    for y in 0, h-1:
        # Get y and x of all pixels between centroid and top and bottom edge
        yy, xx = line(cy, cx, y, x)
        firstWhiteIndex = np.argmax(vis[yy, xx])
        fx, fy = xx[firstWhiteIndex], yy[firstWhiteIndex]
        # Get length of this radial line
        length = np.sqrt((cx-fx)**2 + (cy-fy)**2)
        # Remember if longer than all others so far seen
        if length > maxThickness:
            maxThickness = length
            fxMax, fyMax = fx, fy

# Take 10 points down left side
for y in range(0, h, int(h/10)):
    # ... and right
    for x in 0, w-1:
        # Get y and x of all pixels between centroid and left and right edge
        yy, xx = line(cy, cx, y, x)
        firstWhiteIndex = np.argmax(vis[yy, xx])
        fx, fy = xx[firstWhiteIndex], yy[firstWhiteIndex]
        # Get length of this radial line
        length = np.sqrt((cx-fx)**2 + (cy-fy)**2)
        # Remember if longer than all others so far seen
        if length > maxThickness:
            maxThickness = length
            fxMax, fyMax = fx, fy
print(f'Max thickness: {maxThickness}')
# Draw thickest radius in mid-grey
cv2.line(img, (cx,cy), (fxMax, fyMax), 128, 5)
cv2.imwrite('result.png', img)
I have an approach that is not the best, but it is what I can think of right now.
While drawing lines in the above image, modify the code and do the following:
Before the for loop, draw a binary image of the same size containing only the outer contour circle. Save this image for later use.
Now in the for loop, draw each line in a separate blank binary image. You will then have two images: the first containing only the outer circle, and the second containing only the line.
Now perform a bitwise_and operation on these 2 images.
Now you will get white pixels only at the point of intersection of the line and the outer circle.
Now find the coordinates of the white pixels in the resulting image, and you will have the coordinates of the point of intersection.
Obviously this is not the most efficient way, but it is real time. Also, keep in mind that the outer circle width should be at least 2 in the image and the lines should be of width 1. You may get more than one point of intersection in some cases; take any one of them. The difference between them will only be 1-2 pixels, which can be neglected.
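A minimal sketch of this intersection idea (my own illustration, reusing the variables from the question's code: img_bw, outer_ellipse, cx, cy; the line count N and the thicknesses are just illustrative choices):
import cv2
import numpy as np

# assumes: img_bw (binary image), outer_ellipse (outer contour), cx, cy (centroid)
h, w = img_bw.shape[:2]

# 1. outer contour drawn alone, with a thickness of at least 2
ring = np.zeros((h, w), dtype=np.uint8)
cv2.drawContours(ring, outer_ellipse, -1, 255, 2)

N = 20
for i in range(N):
    theta = np.deg2rad(i * (360 / N))

    # 2. each radial line drawn alone, width 1
    ray = np.zeros((h, w), dtype=np.uint8)
    end = (int(cx - np.cos(theta) * w), int(cy + np.sin(theta) * h))
    cv2.line(ray, (cx, cy), end, 255, 1)

    # 3. bitwise AND leaves only the intersection pixels
    hits = cv2.bitwise_and(ring, ray)
    ys, xs = np.nonzero(hits)
    if len(xs):
        # 4. take any one intersection point and measure centroid-to-contour distance
        fx, fy = xs[0], ys[0]
        length = np.hypot(cx - fx, cy - fy)
        print(f"angle {i * (360 / N):5.1f} deg -> length {length:.1f}")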

How to center the content/object of a binary image in python?

I have code that computes the orientation of a figure. Based on this orientation, the figure is then rotated until it is straightened out. This all works fine. What I am struggling with is getting the center of the rotated figure to the center of the whole image, so that the center point of the figure matches the center point of the whole image.
Input image:
code:
import cv2
import numpy as np
import matplotlib.pyplot as plt
path = "inputImage.png"
image=cv2.imread(path)
gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
thresh=cv2.threshold(gray,0,255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
contours,hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
cnt1 = contours[0]
cnt=cv2.convexHull(contours[0])
angle = cv2.minAreaRect(cnt)[-1]
print("Actual angle is:"+str(angle))
rect = cv2.minAreaRect(cnt)
p=np.array(rect[1])
if p[0] < p[1]:
    print("Angle along the longer side:" + str(rect[-1] + 180))
    act_angle = rect[-1] + 180
else:
    print("Angle along the longer side:" + str(rect[-1] + 90))
    act_angle = rect[-1] + 90

#act_angle gives the angle of the minAreaRect with the vertical
if act_angle < 90:
    angle = (90 + angle)
    print("angle less than -45")
# otherwise, just take the inverse of the angle to make
# it positive
else:
    angle = act_angle - 180
    print("greater than 90")
# rotate the image to deskew it
(h, w) = image.shape[:2]
print(h,w)
center = (w // 2, h // 2)
print(center)
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(image, M, (w, h),flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
plt.imshow(rotated)
cv2.imwrite("rotated.png", rotated)
With output:
As you can see, the white figure is placed slightly to the left; I want it to be perfectly centered.
Does anyone know how this can be done?
EDIT: I have tried #joe's suggestion and subtracted the centroid coordinates from the center of the image (obtained by dividing the width and height of the picture by 2). From this I got an offset, which has to be applied to the array that describes the image. But I don't know how to add the offset to the array. How would this work with the x and y coordinates?
The code:
img = cv2.imread("inputImage")
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray_image,127,255,0)
height, width = gray_image.shape
print(img.shape)
wi=(width/2)
he=(height/2)
print(wi,he)
M = cv2.moments(thresh)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
offsetX = (wi-cX)
offsetY = (he-cY)
print(offsetX,offsetY)
print(cX,cY)
Here is one way in Python/OpenCV.
Get the bounding box for the white region from the contours. Compute the offset for the recentered region. Use numpy slicing to copy that to the center of a black background the size of the input.
Input:
import cv2
import numpy as np
# read image as grayscale
img = cv2.imread('white_shape.png', cv2.IMREAD_GRAYSCALE)  # IMREAD_GRAYSCALE (not a cvtColor code) so img is single channel
# get shape
hh, ww = img.shape
# get contours (presumably just one around the nonzero pixels)
contours = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
for cntr in contours:
    x, y, w, h = cv2.boundingRect(cntr)

# recenter
startx = (ww - w)//2
starty = (hh - h)//2
result = np.zeros_like(img)
result[starty:starty+h, startx:startx+w] = img[y:y+h, x:x+w]
# view result
cv2.imshow("RESULT", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save recentered image
cv2.imwrite('white_shape_centered.png',result)
One approach is to obtain the bounding box coordinates of the binary object then crop the ROI using Numpy slicing. From here we calculate the new shifted coordinates then paste the ROI onto a new blank mask.
Code
import cv2
import numpy as np
# Load image as grayscale and obtain bounding box coordinates
image = cv2.imread('1.png', 0)
height, width = image.shape
x,y,w,h = cv2.boundingRect(image)
# Create new blank image and shift ROI to new coordinates
mask = np.zeros(image.shape, dtype=np.uint8)
ROI = image[y:y+h, x:x+w]
x = width//2 - ROI.shape[1]//2   # ROI.shape[1] is the ROI width
y = height//2 - ROI.shape[0]//2  # ROI.shape[0] is the ROI height
mask[y:y+h, x:x+w] = ROI
cv2.imshow('ROI', ROI)
cv2.imshow('mask', mask)
cv2.waitKey()
#NawinNarain, from the point onwards where you found the relative shifts w.r.t. the centroid of the image, it is very straightforward - you want to build an affine matrix with these translations and apply cv2.warpAffine() to your image. That's it.
T = np.float32([[1, 0, shift_x], [0, 1, shift_y]])
We then use warpAffine() to transform the image using the matrix, T
centered_image = cv2.warpAffine(image, T, (orig_width, orig_height))
This will transform your image so that the centroid is at the center. Hope this helps. The complete center image function will look like this:
def center_image(image):
    height, width = image.shape
    print(image.shape)
    wi = (width / 2)
    he = (height / 2)
    print(wi, he)

    ret, thresh = cv2.threshold(image, 95, 255, 0)

    M = cv2.moments(thresh)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])

    offsetX = (wi - cX)
    offsetY = (he - cY)
    T = np.float32([[1, 0, offsetX], [0, 1, offsetY]])
    centered_image = cv2.warpAffine(image, T, (width, height))

    return centered_image

How to find upper left most corner of my Contour Bounding Box in Python OpenCV

What I'm doing: I have a robotic arm and I want to find x,y coordinates for objects on a piece of paper.
I am able to find a contour of a sheet of paper and get its dimensions (h,w). I want the coordinates of my upper left corner so when I place objects onto my piece of paper I can get image coordinates relative to that point. From there I'll convert those pixel coordinates to cm and I'll be able to return x,y coordinates to my robotic arm.
Problem: I find the center of my contour and I thought the upper left corner would then be the...
center x coordinate - (width/2), center y coordinate - (height/2)
Picture of the contour box I'm getting.
Picture of the contour with my box that should be around the upper left corner of my contour.
However, I get a coordinate out of the bounds of my piece of paper. Is there an easier way to find my upper left coordinates?
code
class Boundary(object):
    def __init__(self, image):
        self.frame = image
        self.DefineBounds()

    def DefineBounds(self):
        # convert the image to grayscale, blur it, and detect edges
        # other options are four point detection, white color detection to search for the board?
        gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)
        edged = cv2.Canny(gray, 35, 125)

        # find the contours in the edged image and keep the largest one;
        # we'll assume that this is our piece of paper in the image
        # (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        th, contours, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        c = max(contours, key=cv2.contourArea)

        # compute the bounding box of the paper region and return it
        cv2.drawContours(self.frame, c, -1, (0, 255, 0), 3)
        cv2.imshow("B and W", edged)
        cv2.imshow("capture", self.frame)
        cv2.waitKey(0)

        # minAreaRect returns (center (x,y), (width, height), angle of rotation)
        # width = approx 338 (x-direction)
        # height = 288.6 (y-direction)
        self.CenterBoundBox = cv2.minAreaRect(c)[0]
        print("Center location of bounding box is {}".format(self.CenterBoundBox))
        CxBBox = cv2.minAreaRect(c)[0][1]
        CyBBox = cv2.minAreaRect(c)[0][0]

        # prints picture resolution
        self.OGImageHeight, self.OGImageWidth = self.frame.shape[:2]
        #print("OG width {} and height {}".format(self.OGImageWidth, self.OGImageHeight))
        print(cv2.minAreaRect(c))

        BboxWidth = cv2.minAreaRect(c)[1][1]
        BboxHeight = cv2.minAreaRect(c)[1][0]
        self.Px2CmWidth = BboxWidth / 21.5  # 1cm = x many pixels
        self.Px2CmHeight = BboxHeight / 18  # 1cm = x many pixels

        print("Bbox dimensions {} x {}".format(BboxHeight, BboxWidth))
        print("Conversion values Px2Cm width {}, Px2Cm height {}".format(self.Px2CmWidth, self.Px2CmHeight))

        self.TopLeftCoords = (abs(CxBBox - BboxWidth / 2), abs(CyBBox - BboxHeight / 2))
        x = int(round(self.TopLeftCoords[0]))
        y = int(round(self.TopLeftCoords[1]))
        print("X AND Y COORDINATES")
        print(x)
        print(y)
        cv2.rectangle(self.frame, (x, y), (x + 10, y + 10), (0, 255, 0), 3)
        print(self.TopLeftCoords)

        cv2.imshow("BOX", self.frame)
        cv2.waitKey(0)
Finds a rotated rectangle of the minimum area enclosing the input 2D point set.
From: OpenCV docs
So the reason for your problem is obvious: your contour has a slight slant, so the minimum rectangle which encloses the whole contour will be out of bounds on the lower side.
Since contours just holds a vector of points (talking about the C++ interface here), it should be easy to find the upper left corner by searching the largest contour for the point with the smallest x and y values (remember that in image coordinates, y increases downward).
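As a small illustrative sketch (not part of the original answer), assuming c is the largest contour found in the question's code, the contour point with the smallest x + y sum is the one closest to the image's upper-left corner:
import numpy as np

# assumes c is the largest contour, shape (N, 1, 2), as returned by cv2.findContours
pts = c[:, 0, :]                               # (N, 2) array of (x, y) points
upper_left = pts[np.argmin(pts.sum(axis=1))]   # smallest x + y, i.e. closest to the image origin
print("Upper-left corner of the paper:", tuple(upper_left))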
