Angle of a line using OpenCV - python

I have these two images:
Vertical white line on a black background
Horizontal white line on a black background
I used the following code to get the angle of the line:
import numpy as np
import cv2
x = cv2.imread('ver.png')
cv_image = cv2.cvtColor(x, cv2.COLOR_RGB2GRAY)
ret, thresh = cv2.threshold(cv_image,70,255,cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
recta = cv2.minAreaRect(contours[0])
center_x, center_y, angle = recta
print (angle)
But the printed angle value is the same for both images, namely -90.
It is mentioned in the documentation that cv2.minAreaRect() returns the following:
( top-left corner(x,y), (width, height), angle of rotation )
but for me, it only returns (center-x, center-y, angle)
By the way: I want to write code for a line-follower drone, so I need to know the angle of the detected line in order to adjust the drone accordingly.
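For reference, cv2.minAreaRect() actually returns a nested tuple ((center_x, center_y), (width, height), angle), so the three-way unpack above puts the whole center tuple into the first name and the whole size tuple into the second. A minimal sketch of unpacking it explicitly, reusing the preprocessing from the question:
import cv2

x = cv2.imread('ver.png')
cv_image = cv2.cvtColor(x, cv2.COLOR_RGB2GRAY)
ret, thresh = cv2.threshold(cv_image, 70, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# minAreaRect returns ((cx, cy), (w, h), angle), not three scalars
(center_x, center_y), (width, height), angle = cv2.minAreaRect(contours[0])
print(center_x, center_y, width, height, angle)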

minAreaRect prints -90 for both because it defines the rectangle differently for those lines (you can swap the width and height and end up with the same rectangle). If you need something that can distinguish between them then you can take the rectangle corners and find the longer side. You can use that line to calculate an angle.
The following code will distinguish between the two (0 degrees for horizontal, -89.999999 degrees for vertical). It should be bound between [-90, 90] degrees (relative to the bottom of the screen).
import numpy as np
import cv2
import math
# 2d distance
def dist2D(one, two):
    dx = one[0] - two[0];
    dy = one[1] - two[1];
    return math.sqrt(dx*dx + dy*dy);

# angle between three points (the last point is the middle)
def angle3P(p1, p2, p3):
    # get distances
    a = dist2D(p3, p1);
    b = dist2D(p3, p2);
    c = dist2D(p1, p2);
    # calculate angle // assume a and b are nonzero
    # (law of cosines)
    numer = c**2 - a**2 - b**2;
    denom = -2 * a * b;
    if denom == 0:
        denom = 0.000001;
    rads = math.acos(numer / denom);
    degs = math.degrees(rads);
    # check if past 90 degrees
    return degs;
# get the rotated box
x = cv2.imread('horizontal.png')
cv_image = cv2.cvtColor(x, cv2.COLOR_RGB2GRAY)
ret, thresh = cv2.threshold(cv_image,70,255,cv2.THRESH_BINARY)
_, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
recta = cv2.minAreaRect(contours[0])
(center_x, center_y), (width, height), angle = recta
# get the corners
box = cv2.boxPoints(recta)
box = np.int0(box)
# choose the first point
root = box[0];
# find the longer side
end = None;
one = box[-1];
two = box[1];
if dist2D(one, root) > dist2D(two, root):
    end = one;
else:
    end = two;
# take the left-most point
left_point = None;
right_point = None;
if end[0] < root[0]:
    left_point = end;
    right_point = root;
else:
    left_point = root;
    right_point = end;
# calculate the angle [-90, 90]
offshoot = [left_point[0] + 100, left_point[1]];
angle = angle3P(right_point, offshoot, left_point);
if left_point[1] > right_point[1]:
    angle = -angle;
print(angle);
Edit:
Woops, I got my orientation mixed up. I edited the code, now it should be from [-90, 90] degrees.


How to rotate an image to get not-null pixels?

In the image I linked below, I need to get all the yellow/green pixels in this rotated rectangle and get rid of the blue background, so that the rectangle's axes are aligned with the x and y axes.
I'm using numpy but don't have a clue what I should do.
I uploaded the array in this drive in case anyone would like to work with the actual array
Thanks for the help in advance.
I used the same image as user2640045, but a different approach.
import numpy as np
import cv2
# load and convert image to grayscale
img = cv2.imread('image.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# binarize image
threshold, binarized_img = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# find the largest contour
contours, hierarchy = cv2.findContours(binarized_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
c = max(contours, key = cv2.contourArea)
# get size of the rotated rectangle
center, size, angle = cv2.minAreaRect(c)
# get size of the image
h, w, *_ = img.shape
# create a rotation matrix and rotate the image
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated_img = cv2.warpAffine(img, M, (w, h))
# crop the image
pad_x = int((w - size[0]) / 2)
pad_y = int((h - size[1]) / 2)
cropped_img = rotated_img[pad_y : pad_y + int(size[1]), pad_x : pad_x + int(size[0]), :]
Result:
I realize there is an allow_pickle=False option in numpy's load method, but I didn't feel comfortable unpickling/using data from the internet, so I used the small image. After removing the coordinate system and other decorations I had:
I define two helper methods: one to rotate the image, taken from another Stack Overflow thread (see the link in the code below), and one to get a mask that is one at a specified color and zero otherwise.
import numpy as np
import matplotlib.pyplot as plt
import sympy
import cv2
import functools
color = arr[150,50]
def similar_to_boundary_color(arr, color=tuple(color)):
    mask = functools.reduce(np.logical_and, [np.isclose(arr[:,:,i], color[i]) for i in range(4)])
    return mask

#https://stackoverflow.com/a/9042907/2640045
def rotate_image(image, angle):
    image_center = tuple(np.array(image.shape[1::-1]) / 2)
    rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
    result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
    return result
Next I calculate the angle to rotate by. I do that by finding the lowest pixel at width 50 and 300. I picked those since they are far enough from the boundary not to be affected by missing corners etc.
i,j = np.where(~similar_to_boundary_color(arr))
slope = (max(i[j == 50])-max(i[j == 300]))/(50-300)
angle = np.arctan(slope)
arr = rotate_image(arr, np.rad2deg(angle))
plt.imshow(arr)
One way of doing the cropping is the following. You calculate the midpoint in height and width. Then you take two slices around the mid, say 20 pixels wide in one direction and extending to the mid in the other one. The biggest/smallest index where the pixel is white/background colored is a reasonable point to cut.
i,j = np.where(~(~similar_to_boundary_color(arr) & ~similar_to_boundary_color(arr, (0,0,0,0))))
imid, jmid = np.array(arr.shape)[:2]/2
imin = max(i[(i < imid) & (jmid - 10 < j) & (j < jmid + 10)])
imax = min(i[(i > imid) & (jmid - 10 < j) & (j < jmid + 10)])
jmax = min(j[(j > jmid) & (imid - 10 < i) & (i < imid + 10)])
jmin = max(j[(j < jmid) & (imid - 10 < i) & (i < imid + 10)])
arr = arr[imin:imax,jmin:jmax]
plt.imshow(arr)
and the result is:

find angle between major axis of ellipse and x-axis of coordinate (help me implement method from paper)

So I am trying to implement a method from this paper. I am stuck at the part where I have to find the angle between the major axis of the lesion’s best-fit ellipse and the x-axis of the coordinate system.
Here is the sample image:
Here is what I got so far:
Is it possible to find that angle? And after the angle has been found, I have to flip the RoI along the x-axis by that angle.
UPDATE ----------
Google drive link to Roi Image: RoI image
Implementing method step by step based on the paper.
First, I should recenter the RoI to the center of the image coordinate system. In the paper, they centered the RoI using its centroid. I managed to do it based on code I found in this answer. The result is fine if my RoI is small and not touching the image border, but if I have a large image the result is really bad. So I ended up centering the RoI using boundingRect. Here is the result of centering:
Code for centering RoI:
import math
import cv2
import numpy as np
import matplotlib.pyplot as plt
# read image
cont_img = cv2.imread(r"C:\Users\Pandu\Desktop\IMD064_lesion.bmp", 0)
cont_rgb = cv2.cvtColor(cont_img, cv2.COLOR_GRAY2RGB)
# fit ellipse and find ellipse properties
hh, ww = cont_img.shape
contours, hierarchy = cv2.findContours(cont_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
ellipse = cv2.fitEllipse(contours[0])
(xc, yc), (d1, d2), angle = ellipse
# centering by centroid
half_width = int(ww/2)
half_height = int(hh/2)
offset_x = (half_width-xc)
offset_y = (half_height-yc)
T = np.float32([[1, 0, offset_x], [0, 1, offset_y]])
centered_by_centroid = cv2.warpAffine(cont_img.copy(), T, (ww, hh))
plt.imshow(centered_by_centroid, cmap=plt.cm.gray)
# centering by boundingRect
# This centered RoI is (L)
x, y, w, h = cv2.boundingRect(contours[0])
startx = (ww - w)//2
starty = (hh - h)//2
centered_by_boundingRect = np.zeros_like(cont_img)
centered_by_boundingRect[starty:starty+h, startx:startx+w] = cont_img[y:y+h, x:x+w]
plt.imshow(centered_by_boundingRect, cmap=plt.cm.gray)
Second, after centering the RoI, I should find the orientation angle, rotate the RoI based on that angle, and then flip it, using code from this answer (is this the correct way to rotate the RoI?):
# find ellipse properties of centered RoI
contours, hierarchy = cv2.findContours(centered_by_boundingRect, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
ellipse = cv2.fitEllipse(contours[0])
(xc, yc), (d1, d2), angle = ellipse
roi_centroid = (xc, yc)
rot_angle = 90 - angle
if rot_angle < 0:
    rot_angle += 180
# This rotated RoI is (Lx)
M = cv2.getRotationMatrix2D(roi_centroid, -rot_angle, 1.0)
rot_im = cv2.warpAffine(centered_by_boundingRect, M, (ww, hh))
plt.imshow(rot_im, cmap=plt.cm.gray)
# (Ly)
# passing 0 to flip() should flip the image around the x-axis, but I get the same result as the paper
res_flip_y = cv2.flip(rot_im.copy(), 0)
plt.imshow(res_flip_y , cmap=plt.cm.gray)
# (L) (xor) (Lx)
res_x_xor = cv2.bitwise_xor(centered_by_boundingRect, rot_im)
plt.imshow(res_x_xor, cmap=plt.cm.gray)
# (L) (xor) (Ly)
res_y_xor = cv2.bitwise_xor(centered_by_boundingRect, res_flip_y)
plt.imshow(res_y_xor, cmap=plt.cm.gray)
I still can't get the same result as the paper, and the rotation operation also produces bad results on a large RoI. Help...
UPDATE ---------- 20/03/2021
Small RoI: fine result on rotation, and it looks similar to the paper, but I am still not getting the same end result on L (xor) Lx or L (xor) Ly
Large RoI: bad result on rotation as the RoI goes out of the image border
The angle you're looking for is returned from fitEllipse. It's just rotated a bit according to a different reference frame. You can get your counter-clockwise rotation angle by doing 90 - angle. As for rotating the ROI, you can either use minAreaRect to get a minimum-fit rectangle directly, or you can fit a bounding box to the contour and rotate each point individually.
The green rectangle is the minAreaRect(), the red rectangle is the boundingRect() after it's been rotated.
import cv2
import numpy as np
import math
# rotate point
def rotate2D(point, deg):
    rads = math.radians(deg);
    x, y = point;
    rcos = math.cos(rads);
    rsin = math.sin(rads);
    rx = x * rcos - y * rsin;
    ry = x * rsin + y * rcos;
    rx = round(rx);
    ry = round(ry);
    point[0] = rx;
    point[1] = ry;

# translate point
def translate2D(src, target, sign):
    tx, ty = target;
    src[0] += tx * sign;
    src[1] += ty * sign;
# read image
cont_img = cv2.imread("blob.png", 0)
cont_rgb = cv2.cvtColor(cont_img, cv2.COLOR_GRAY2RGB)
# find contour
_, contours, hierarchy = cv2.findContours(cont_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# fit ellipse and get ellipse properties
ellipse = cv2.fitEllipse(contours[0])
(xc, yc), (d1, d2), angle = ellipse
# -------- NEW STUFF IN HERE --------------
# calculate counter-clockwise angle relative to x-axis
rot_angle = 90 - angle;
if rot_angle < 0:
    rot_angle += 180;
print(rot_angle);
# if you want a rotated ROI I would recommend using minAreaRect rather than rotating a different rectangle
# fit a minrect to the image # this is taken directly from OpenCV's tutorials
rect = cv2.minAreaRect(contours[0]);
box = cv2.boxPoints(rect);
box = np.int0(box);
cv2.drawContours(cont_rgb, [box], 0, (0,255,0), 2);
# but if you really want to use a different rectangle and rotate it, here's how to do it
# create rectangle
x,y,w,h = cv2.boundingRect(contours[0]);
rect = [];
rect.append([x,y]);
rect.append([x+w,y]);
rect.append([x+w,y+h]);
rect.append([x,y+h]);
# rotate it
rotated_rect = [];
center = [x + w/2, y + h/2];
for point in rect:
    # for each point, center -> rotate -> uncenter
    translate2D(point, center, -1);
    rotate2D(point, 90 - rot_angle); # "90 - angle" is because rotation goes clockwise
    translate2D(point, center, 1);
    rotated_rect.append([point]);
rotated_rect = np.array(rotated_rect);
cv2.drawContours(cont_rgb, [rotated_rect.astype(int)], -1, (0,0,255), 2);
# ------------- END OF NEW STUFF -----------------
# draw fitted ellipse and centroid
target_ellipse = cv2.ellipse(cont_rgb.copy(), ellipse, (37, 99, 235), 10)
centroid = cv2.circle(target_ellipse.copy(), (int(xc), int(yc)), 20, (250, 204, 21), -1)
# draw major axis
rmajor = max(d1, d2)/2
if angle > 90:
    angle = angle - 90
else:
    angle = angle + 90
xtop_major = xc + math.cos(math.radians(angle))*rmajor
ytop_major = yc + math.sin(math.radians(angle))*rmajor
xbot_major = xc + math.cos(math.radians(angle+180))*rmajor
ybot_major = yc + math.sin(math.radians(angle+180))*rmajor
top_major = (int(xtop_major), int(ytop_major))
bot_major = (int(xbot_major), int(ybot_major))
target_major_axis = cv2.line(centroid.copy(),
                             top_major, bot_major,
                             (0, 255, 255), 5)
## image center coordinate
hh, ww = target_major_axis.shape[:2];
x_center_start = (0, int(hh/2))
x_center_end = (int(ww), int(hh/2))
y_center_start = (int(ww/2), 0)
y_center_end = (int(ww/2), int(hh))
img_x_middle_coor = cv2.line(target_major_axis.copy(), x_center_start, x_center_end, (219, 39, 119), 10)
img_y_middle_coor = cv2.line(img_x_middle_coor.copy(), y_center_start,
                             y_center_end, (190, 242, 100), 10)
# show
cv2.imshow("image", img_y_middle_coor);
cv2.waitKey(0);
For the future: check that your code runs before pasting it on here. Aside from the missing "import" lines it was also missing this line:
hh, ww = target_major_axis.shape[:2]
If the sample code you paste has errors, then everyone who wants to help will have to waste some time bug-stomping before they can begin working on a solution.

Extracting data from tables without any grid lines and border from scanned image of document

Extracting table data from digital PDFs has been simple using camelot and tabula. However, the solution doesn't work with scanned images of the document pages, specifically when the table doesn't have borders and inner grids. I have been trying to generate vertical and horizontal lines using OpenCV. However, since the scanned images will have slight rotation angles, it is difficult to proceed with this approach.
How can we utilize OpenCV to generate grids (horizontal and vertical lines) and borders for the scanned document page which contains table data (along with paragraphs of text)? If this is feasible, how to nullify the rotation angle of the scanned image?
I wrote some code to estimate the horizontal lines from the printed letters on the page. The same could be done for the vertical ones, I guess. The code below follows some general assumptions; here are the basic steps in pseudo-code style:
prepare picture for contour detection
do contour detection
we assume most contours are letters
calc mean width of all contours
calc mean area of contours
filter all contours with two conditions:
a) contour (letter) heights < meanHigh * 2
b) contour area > 4/5 meanArea
calc center point of all remaining contours
assume we have line regions (bins)
list all center point which are inside the region
do linear regression of region points
save slope and intercept
calc mean slope and intercept
here the full code:
import cv2
import numpy as np
from scipy import stats
def resizeImageByPercentage(img,scalePercent = 60):
    width = int(img.shape[1] * scalePercent / 100)
    height = int(img.shape[0] * scalePercent / 100)
    dim = (width, height)
    # resize image
    return cv2.resize(img, dim, interpolation = cv2.INTER_AREA)

def calcAverageContourWithAndHeigh(contourList):
    hs = list()
    ws = list()
    for cnt in contourList:
        (x, y, w, h) = cv2.boundingRect(cnt)
        ws.append(w)
        hs.append(h)
    return np.mean(ws),np.mean(hs)

def calcAverageContourArea(contourList):
    areaList = list()
    for cnt in contourList:
        a = cv2.minAreaRect(cnt)
        areaList.append(a[2])
    return np.mean(areaList)

def calcCentroid(contour):
    houghMoments = cv2.moments(contour)
    # calculate x,y coordinate of centroid
    if houghMoments["m00"] != 0: #case no contour could be calculated
        cX = int(houghMoments["m10"] / houghMoments["m00"])
        cY = int(houghMoments["m01"] / houghMoments["m00"])
    else:
        # set values as what you need in the situation
        cX, cY = -1, -1
    return cX,cY

def getCentroidWhenSizeInRange(contourList,letterSizeWidth,letterSizeHigh,deltaOffset,minLetterArea=10.0):
    centroidList=list()
    for cnt in contourList:
        (x, y, w, h) = cv2.boundingRect(cnt)
        area = cv2.minAreaRect(cnt)
        #calc diff
        diffW = abs(w-letterSizeWidth)
        diffH = abs(h-letterSizeHigh)
        #threshold A: almost smaller than mean letter size +- offset
        #when almost letterSize
        if diffW < deltaOffset and diffH < deltaOffset:
            #threshold B > min area
            if area[2] > minLetterArea:
                cX,cY = calcCentroid(cnt)
                if cX!=-1 and cY!=-1:
                    centroidList.append((cX,cY))
    return centroidList
DEBUGMODE = True
#read image, do git clone https://github.com/WZBSocialScienceCenter/pdftabextract.git for the example
img = cv2.imread('pdftabextract/examples/catalogue_30s/data/ALA1934_RR-excerpt.pdf-2_1.png')
#get some basic infos
imgHeigh, imgWidth, imgChannelAmount = img.shape
if DEBUGMODE:
    cv2.imwrite("img00original.jpg",resizeImageByPercentage(img,30))
    cv2.imshow("original",img)
# prepare img
imgGrey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# apply Gaussian filter
imgGaussianBlur = cv2.GaussianBlur(imgGrey,(5,5),0)
#make binary img, black or white
_, imgBinThres = cv2.threshold(imgGaussianBlur, 130, 255, cv2.THRESH_BINARY)
## detect contours
contours, _ = cv2.findContours(imgBinThres, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#we get some letter parameter
averageLetterWidth, averageLetterHigh = calcAverageContourWithAndHeigh(contours)
threshold1AllowedLetterSizeOffset = averageLetterHigh * 2 # double size
averageContourAreaSizeOfMinRect = calcAverageContourArea(contours)
threshHold2MinArea = 4 * averageContourAreaSizeOfMinRect / 5 # 4/5 * mean
print("mean letter Width: ", averageLetterWidth)
print("mean letter High: ", averageLetterHigh)
print("threshold 1 tolerance: ", threshold1AllowedLetterSizeOffset)
print("mean letter area ", averageContourAreaSizeOfMinRect)
print("thresold 2 min letter area ", threshHold2MinArea)
#we get all centroid of letter sizes contours, the other we ignore
centroidList = getCentroidWhenSizeInRange(contours,averageLetterWidth,averageLetterHigh,threshold1AllowedLetterSizeOffset,threshHold2MinArea)
if DEBUGMODE:
    #debug print all centers:
    imgFilteredCenter = img.copy()
    for cX,cY in centroidList:
        #draw in red color as BGR
        cv2.circle(imgFilteredCenter, (cX, cY), 5, (0, 0, 255), -1)
    cv2.imwrite("img01letterCenters.jpg",resizeImageByPercentage(imgFilteredCenter,30))
    cv2.imshow("letterCenters",imgFilteredCenter)
#we estimate a bin widths
amountPixelFreeSpace = averageLetterHigh #TODO get better estimate out of histogram
estimatedBinWidth = round( averageLetterHigh + amountPixelFreeSpace) #TODO round better ?
binCollection = dict() #range(0,imgHeigh,estimatedBinWidth)
#we do seperate the center points into bins by y coordinate
for i in range(0,imgHeigh,estimatedBinWidth):
    listCenterPointsInBin = list()
    yMin = i
    yMax = i + estimatedBinWidth
    for cX,cY in centroidList:
        if yMin < cY < yMax:#if fits in bin
            listCenterPointsInBin.append((cX,cY))
    binCollection[i] = listCenterPointsInBin
#we assume all point are in one line ?
#model = slope (x) + intercept
#model = m (x) + n
mList = list() #slope abs in img
nList = list() #intercept abs in img
nListRelative = list() #intercept relative to bin start
minAmountRegressionElements = 12 #is also alias for letter amount we expect
#we do regression for every point in the bin
for startYOfBin, values in binCollection.items():
    #we reform values
    xValues = [] #TODO use more short transform
    yValues = []
    for x,y in values:
        xValues.append(x)
        yValues.append(y)
    #we assume a min limit of point in bin
    if len(xValues) >= minAmountRegressionElements :
        slope, intercept, r, p, std_err = stats.linregress(xValues, yValues)
        mList.append(slope)
        nList.append(intercept)
        #we calc the relative intercept
        nRelativeToBinStart = intercept - startYOfBin
        nListRelative.append(nRelativeToBinStart)
if DEBUGMODE:
    #we debug print all lines in one picture
    imgLines = img.copy()
    colorOfLine = (0, 255, 0) #green
    for i in range(0,len(mList)):
        slope = mList[i]
        intercept = nList[i]
        startPoint = (0, int( intercept)) #better round ?
        endPointY = int( (slope * imgWidth + intercept) )
        if endPointY < 0:
            endPointY = 0
        endPoint = (imgHeigh,endPointY)
        cv2.line(imgLines, startPoint, endPoint, colorOfLine, 2)
    cv2.imwrite("img02lines.jpg",resizeImageByPercentage(imgLines,30))
    cv2.imshow("linesOfLetters ",imgLines)
#we assume in mean we got it right
meanIntercept = np.mean(nListRelative)
meanSlope = np.mean(mList)
print("meanIntercept :", meanIntercept)
print("meanSlope ", meanSlope)
#TODO calc angle with math.atan(slope) ...
if DEBUGMODE:
    cv2.waitKey(0)
original:
center point of letters:
lines:
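The TODO at the end of the script, turning the mean slope into a rotation angle, can be completed in a couple of lines. A minimal sketch, continuing the script above (the deskew step with warpAffine is an addition beyond the original TODO, and the sign of the angle may need flipping depending on your convention):
import math

# convert the mean slope of the detected text lines into a skew angle in degrees
estimatedAngleDeg = math.degrees(math.atan(meanSlope))
print("estimated skew angle [deg]: ", estimatedAngleDeg)

# rotate the page by that angle around its center to level the text lines
rotationCenter = (imgWidth / 2, imgHeigh / 2)
M = cv2.getRotationMatrix2D(rotationCenter, estimatedAngleDeg, 1.0)
imgDeskewed = cv2.warpAffine(img, M, (imgWidth, imgHeigh))
if DEBUGMODE:
    cv2.imwrite("img03deskewed.jpg", resizeImageByPercentage(imgDeskewed, 30))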
I had the same problem some time ago, and this tutorial is the solution to that. It explains how to use pdftabextract, a Python library by Markus Konrad that leverages OpenCV's Hough transform to detect the lines; it works even if the scanned document is a bit tilted. The tutorial walks you through parsing a 1920s German newspaper.

Detect multiple rectangles in image

I am trying to detect the count of pipes in this picture. For this, I'm using OpenCV and Python-based detection. Based on existing answers to similar questions, I was able to come up with the following steps
Open the image
Filter it
Apply Edge Detection
Use Contours
Check for the count
The total count of pipes is ~909 when counted manually, give or take 4.
After applying the filter
import cv2
import matplotlib.pyplot as plt
import numpy as np
img = cv2.imread('images/input-rectpipe-1.jpg')
blur_hor = cv2.filter2D(img[:, :, 0], cv2.CV_32F, kernel=np.ones((11,1,1), np.float32)/11.0, borderType=cv2.BORDER_CONSTANT)
blur_vert = cv2.filter2D(img[:, :, 0], cv2.CV_32F, kernel=np.ones((1,11,1), np.float32)/11.0, borderType=cv2.BORDER_CONSTANT)
mask = ((img[:,:,0]>blur_hor*1.2) | (img[:,:,0]>blur_vert*1.2)).astype(np.uint8)*255
I get this masked image
This looks fairly accurate in terms of the number of visible rectangles it shows. However, when I try to take the count and plot the bounding box on top of the picture, it picks a lot of unwanted regions as well. For circles, HoughCircles has a way of defining the max and min radius. Is there something similar for rectangles that can improve accuracy. Also, I'm open to suggestions for alternative approaches to this problem.
ret,thresh = cv2.threshold(mask,127,255,0)
contours,hierarchy = cv2.findContours(thresh, 1, 2)
count = 0
for i in range(len(contours)):
    count = count+1
    x,y,w,h = cv2.boundingRect(contours[i])
    rect = cv2.minAreaRect(contours[i])
    area = cv2.contourArea(contours[i])
    box = cv2.boxPoints(rect)
    ratio = w/h
    M = cv2.moments(contours[i])
    if M["m00"] == 0.0:
        cX = int(M["m10"] / 1 )
        cY = int(M["m01"] / 1 )
    if M["m00"] != 0.0:
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
    if (area > 50 and area < 220 and hierarchy[0][i][2] < 0 and (ratio > .5 and ratio < 2)):
        #cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)
        cv2.circle(img, (cX, cY), 1, (255, 255, 255), -1)
        count = count + 1
print(count)
cv2.imshow("m",mask)
cv2.imshow("f",img)
cv2.waitKey(0)
UPDATE
Based on the second answer, I have converted the C++ code to Python and got closer results, but I am still missing a few obvious rectangles.
Of course you could filter them by their area. I took your binary image and continued the work as below:
1- Do a loop on all the contours you found from findContours
2- In the loop, check whether each contour is an internal contour or not
3- For those which are internal contours, check their area; if the area is in the acceptable range, check the width/height ratio of each contour, and finally, if that is good too, count that contour as a pipe.
I did the above method on your binary image, and found 794 pipes:
(Some boxes are lost though, You should change the parameters of the edge detector to get more separable boxes in the image.)
and here is the code (it's C++ but easily convertible to Python):
Mat img__1, img__2, img__ = imread("E:/R.jpg", 0);
threshold(img__, img__1, 128, 255, THRESH_BINARY);

vector<vector<Point>> contours;
vector< Vec4i > hierarchy;
findContours(img__1, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_NONE);

Mat tmp = Mat::zeros(img__1.size(), CV_8U);
int k = 0;
for (size_t i = 0; i < contours.size(); i++)
{
    double area = contourArea(contours[i]);
    Rect rec = boundingRect(contours[i]);
    float ratio = rec.width / float(rec.height);
    if (area > 50 && area < 220 && hierarchy[i][2] < 0 && (ratio > .5 && ratio < 2)) // hierarchy[i][2]<0 stands for internal contours
    {
        k++;
        drawContours(tmp, contours, i, Scalar(255, 255, 255), -1);
    }
}

cout << "k= " << k << "\n";
imshow("1", img__1);
imshow("2", tmp);
waitKey(0);
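Since the question is Python-based, here is a rough Python translation of the C++ above; a sketch, assuming the binary mask is saved as 'mask.png' and an OpenCV version whose findContours returns two values:
import cv2
import numpy as np

# read the binary mask and make sure it is strictly 0/255
img = cv2.imread('mask.png', 0)
_, binary = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY)

# RETR_CCOMP gives a two-level hierarchy: outer contours and the holes inside them
contours, hierarchy = cv2.findContours(binary, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)

tmp = np.zeros_like(binary)
k = 0
for i, cnt in enumerate(contours):
    area = cv2.contourArea(cnt)
    x, y, w, h = cv2.boundingRect(cnt)
    ratio = w / float(h)
    # hierarchy[0][i][2] < 0 stands for internal contours, as in the C++ version
    if 50 < area < 220 and hierarchy[0][i][2] < 0 and 0.5 < ratio < 2:
        k += 1
        cv2.drawContours(tmp, contours, i, 255, -1)

print("k =", k)
cv2.imshow("1", binary)
cv2.imshow("2", tmp)
cv2.waitKey(0)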
There are many methods to solve this problem, but I doubt there will be a single method without some kind of ad-hoc measures. Here is another attempt at this problem.
Instead of using the edge information, I suggest an LBP (local binary pattern)-like filter that compares the surrounding pixels with the center value. If a certain percentage of the surrounding pixels is larger than the center pixel, the center pixel will be labeled 255; if the condition is not met, the center pixel will be labeled 0.
This intensity-based method relies on the assumption that the pipe center is always darker than the pipe edges. Since it compares intensity, it should work well as long as some contrast remains.
Through this process, you will obtain an image with a binary blob for every pipe, plus some noise. You will have to remove the noise with some known conditions such as size, shape, fill ratio, color, etc. The conditions can be found in the given code.
import cv2
import matplotlib.pyplot as plt
import numpy as np
# Morphological function sets
def morph_operation(matinput):
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))

    morph = cv2.erode(matinput,kernel,iterations=1)
    morph = cv2.dilate(morph,kernel,iterations=2)
    morph = cv2.erode(matinput,kernel,iterations=1)
    morph = cv2.dilate(morph,kernel,iterations=1)

    return morph

# Analyze blobs
def analyze_blob(matblobs,display_frame):

    _,blobs,_ = cv2.findContours(matblobs,cv2.RETR_LIST ,cv2.CHAIN_APPROX_SIMPLE)
    valid_blobs = []

    for i,blob in enumerate(blobs):
        rot_rect = cv2.minAreaRect(blob)
        b_rect = cv2.boundingRect(blob)

        (cx,cy),(sw,sh),angle = rot_rect
        rx,ry,rw,rh = b_rect
        box = cv2.boxPoints(rot_rect)
        box = np.int0(box)

        # Draw the segmented Box region
        frame = cv2.drawContours(display_frame,[box],0,(0,0,255),1)

        on_count = cv2.contourArea(blob)
        total_count = sw*sh
        if total_count <= 0:
            continue

        if sh > sw :
            temp = sw
            sw = sh
            sh = temp

        # minimum area
        if sw * sh < 20:
            continue
        # maximum area
        if sw * sh > 100:
            continue

        # ratio of box
        rect_ratio = sw / sh
        if rect_ratio <= 1 or rect_ratio >= 3.5:
            continue

        # ratio of fill
        fill_ratio = on_count / total_count
        if fill_ratio < 0.4 :
            continue

        # remove blob that is too bright
        if display_frame[int(cy),int(cx),0] > 75:
            continue

        valid_blobs.append(blob)

    if valid_blobs:
        print("Number of Blobs : " ,len(valid_blobs))
        cv2.imshow("display_frame_in",display_frame)

    return valid_blobs
def lbp_like_method(matinput,radius,stren,off):
    height, width = np.shape(matinput)

    roi_radius = radius
    peri = roi_radius * 8
    matdst = np.zeros_like(matinput)
    for y in range(height):
        y_ = y - roi_radius
        _y = y + roi_radius
        if y_ < 0 or _y >= height:
            continue

        for x in range(width):
            x_ = x - roi_radius
            _x = x + roi_radius
            if x_ < 0 or _x >= width:
                continue

            r1 = matinput[y_:_y,x_]
            r2 = matinput[y_:_y,_x]
            r3 = matinput[y_,x_:_x]
            r4 = matinput[_y,x_:_x]

            center = matinput[y,x]
            valid_cell_1 = len(r1[r1 > center + off])
            valid_cell_2 = len(r2[r2 > center + off])
            valid_cell_3 = len(r3[r3 > center + off])
            valid_cell_4 = len(r4[r4 > center + off])

            total = valid_cell_1 + valid_cell_2 + valid_cell_3 + valid_cell_4

            if total > stren * peri:
                matdst[y,x] = 255

    return matdst
def main_process():

    img = cv2.imread('image.jpg')
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

    # Blurred to remove noise
    blurred = cv2.GaussianBlur(gray,(3,3),-1)

    # Parameter tuning
    winsize = 5
    peri = 0.6
    off = 4

    matlbp = lbp_like_method(gray,winsize,peri,off)
    cv2.imshow("matlbp",matlbp)
    cv2.waitKey(1)

    matmorph = morph_operation(matlbp)
    cv2.imshow("matmorph",matmorph)
    cv2.waitKey(1)

    display_color = cv2.cvtColor(gray,cv2.COLOR_GRAY2BGR)
    valid_blobs = analyze_blob(matmorph,display_color)

    for b in range(len(valid_blobs)):
        cv2.drawContours(display_color,valid_blobs,b,(0,255,255),-1)

    cv2.imshow("display_color",display_color)
    cv2.waitKey(0)

if __name__ == '__main__':
    main_process()
Result from the LBP-like processing
After cleaning with morphological process
Final result, with the red boxes showing all the blob candidates and the yellow segments showing blobs that pass all the conditions we set. There are some false alarms below and on top of the pipe bundle, but they can be omitted with some boundary conditions, as sketched after the count below.
Total pipe found : 943
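A minimal sketch of such a boundary condition, assuming the vertical extent of the pipe bundle (bundle_top and bundle_bottom, in pixel rows) is known or estimated beforehand; it simply drops candidate blobs whose centroid falls outside that band:
import cv2

def filter_blobs_by_band(blobs, bundle_top, bundle_bottom):
    # keep only blobs whose centroid lies inside the vertical band of the bundle
    kept = []
    for blob in blobs:
        (cx, cy), (sw, sh), angle = cv2.minAreaRect(blob)
        if bundle_top <= cy <= bundle_bottom:
            kept.append(blob)
    return kept

# e.g. valid_blobs = filter_blobs_by_band(valid_blobs, 100, 800)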

Get rotational shift using phase correlation and log polar transform

I have been working on a script which calculates the rotational shift between two images using cv2's phaseCorrelate method.
I have two images, the second is a 90 degree rotated version of the first image. After loading in the images, I convert them to log-polar before passing them into the phaseCorrelate function.
From what I have read, I believe that this should yield a rotational shift between two images.
The code below describes the implementation.
#bitwise right binary shift function
def rshift(val, n): return (val % 0x100000000)
base_img = cv2.imread('img1.jpg')
cur_img = cv2.imread('dataa//t_sv_1.jpg')
curr_img = rotateImage(cur_img, 90)
rows,cols,chan = base_img.shape
x, y, c = curr_img.shape
#convert images to valid type
ref32 = np.float32(cv2.cvtColor(base_img, cv2.COLOR_BGR2GRAY))
curr32 = np.float32(cv2.cvtColor(curr_img, cv2.COLOR_BGR2GRAY))
value = np.sqrt(((rows/2.0)**2.0)+((cols/2.0)**2.0))
value2 = np.sqrt(((x/2.0)**2.0)+((y/2.0)**2.0))
polar_image = cv2.linearPolar(ref32,(rows/2, cols/2), value, cv2.WARP_FILL_OUTLIERS)
log_img = cv2.linearPolar(curr32,(x/2, y/2), value2, cv2.WARP_FILL_OUTLIERS)
shift = cv2.phaseCorrelate(polar_image, log_img)
sx = shift[0][0]
sy = shift[0][1]
sf = shift[1]
polar_image = polar_image.astype(np.uint8)
log_img = log_img.astype(np.uint8)
cv2.imshow("Polar Image", polar_image)
cv2.imshow('polar', log_img)
#get rotation from shift along y axis
rotation = sy * 180 / (rshift(y, 1));
print(rotation)
cv2.waitKey(0)
cv2.destroyAllWindows()
I am unsure how to interpret the results of this function. The expected outcome is a value similar to 90 degrees, however, I get the value below.
Output: -0.00717516014538333
How can I make the output correct?
A method, typically referred to as the Fourier Mellin transform, and published as:
B. Srinivasa Reddy and B.N. Chatterji, "An FFT-Based Technique for Translation, Rotation, and Scale-Invariant Image Registration", IEEE Trans. on Image Proc. 5(8):1266-1271, 1996
uses the FFT and the log-polar transform to obtain the translation, rotation and scaling of one image to match the other. I find this tutorial to be very clear and informative, I will give a summary here:
Compute the magnitude of the FFT of the two images (apply a windowing function first to avoid issues with periodicity of the FFT).
Compute the log-polar transform of the magnitude of the frequency-domain images (typically a high-pass filter is applied first, but I have not seen its usefulness).
Compute the cross-correlation (actually phase correlation) between the two. This leads to a knowledge of scale and rotation.
Apply the scaling and rotation to one of the original input images.
Compute the cross-correlation (actually phase correlation) of the original input images, after correction for scaling and rotation. This leads to knowledge of the translation.
This works because:
The magnitude of the FFT is translation-invariant, we can solely focus on scaling and rotation without worrying about translation. Note that the rotation of the image is identical to the rotation of the FFT, and that scaling of the image is inverse to the scaling of the FFT.
The log-polar transform converts rotation into a vertical translation, and scaling into a horizontal translation. Phase correlation allows us to determine these translations. Converting them to a rotation and scaling is non-trivial (especially the scaling is hard to get right, but a bit of math shows the way).
If the tutorial linked above is not clear enough, one can look at the C++ code that comes with it, or at this other Python code.
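To make the conversion concrete, here is a minimal sketch (an illustration of the formulas, not code from the tutorial), assuming the two log-polar images were produced with cv2.warpPolar using flags including cv2.WARP_POLAR_LOG over a maximum radius max_radius, with the angle mapped to the vertical axis; the signs depend on which image is taken as the reference:
import math

def shift_to_rotation_and_scale(sx, sy, polar_width, polar_height, max_radius):
    # vertical shift: the angle axis spans 360 degrees over the image height
    rotation_deg = 360.0 * sy / polar_height
    # horizontal shift: the radius axis is log-scaled, rho = (width / ln(max_radius)) * ln(r),
    # so a shift of sx pixels corresponds to a multiplicative scale factor
    scale = math.exp(sx * math.log(max_radius) / polar_width)
    return rotation_deg, scale

# example: a shift of (3.2, -12.5) on a 512x512 log-polar image built with max_radius 256
# print(shift_to_rotation_and_scale(3.2, -12.5, 512, 512, 256))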
OP is interested only in the rotation aspect of the method above. If we can assume that the translation is 0 (this means we know around which point the rotation was made; if we don't know the origin we need to estimate it as a translation), then we don't need to compute the magnitude of the FFT (remember it is used to make the problem translation invariant); we can apply the log-polar transform directly to the images. But note that we need to use the center of rotation as the origin for the log-polar transform. If we additionally assume that the scaling is 1, we can further simplify things by taking the linear-polar transform. That is, the logarithmic scaling of the radius axis is only necessary to estimate scaling.
OP is doing this more or less correctly, I believe. Where OP's code goes wrong is in the extent of the radius axis in the polar transform. By going all the way to the extreme corners of the image, OpenCV needs to fill in parts of the transformed image with zeros. These parts are dictated by the shape of the image, not by the contents of the image. That is, both polar images contain exactly the same sharp, high-contrast curve between image content and filled-in zeros. The phase correlation is aligning these curves, leading to an estimate of 0 degree rotation. The image content is more or less ignored because its contrast is much lower.
Instead, make the extent of the radius axis that of the largest circle that fits completely inside the image. This way, no parts of the output need to be filled with zeros, and the phase correlation can focus on the actual image content. Furthermore, considering the two images are rotated versions of each other, it is likely that the data in the corners of the images do not match, there is no need to take that into account at all!
Here is code I implemented quickly based on OP's code. I read in Lena, rotated the image by 38 degrees, computed the linear-polar transform of the original and rotated images, then the phase correlation between these two, and then determined a rotation angle based on the vertical translation. The result was 37.99560, plenty close to 38.
import cv2
import numpy as np
base_img = cv2.imread('lena512color.tif')
base_img = np.float32(cv2.cvtColor(base_img, cv2.COLOR_BGR2GRAY)) / 255.0
(h, w) = base_img.shape
(cX, cY) = (w // 2, h // 2)
angle = 38
M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)
curr_img = cv2.warpAffine(base_img, M, (w, h))
cv2.imshow("base_img", base_img)
cv2.imshow("curr_img", curr_img)
base_polar = cv2.linearPolar(base_img,(cX, cY), min(cX, cY), 0)
curr_polar = cv2.linearPolar(curr_img,(cX, cY), min(cX, cY), 0)
cv2.imshow("base_polar", base_polar)
cv2.imshow("curr_polar", curr_polar)
(sx, sy), sf = cv2.phaseCorrelate(base_polar, curr_polar)
rotation = -sy / h * 360;
print(rotation)
cv2.waitKey(0)
cv2.destroyAllWindows()
These are the four image windows shown by the code:
I created a figure that shows the phase correlation values for multiple rotations. This has been edited to reflect Cris Luengo's comment. The image is cropped to get rid of the edges of the square insert.
import cv2
import numpy as np
paths = ["lena.png", "rotate45.png", "rotate90.png", "rotate135.png", "rotate180.png"]
import os
os.chdir('/home/stephen/Desktop/rotations/')
images, rotations, polar = [],[], []
for image_path in paths:
    alignedImage = cv2.imread('lena.png')
    rotatedImage = cv2.imread(image_path)

    rows,cols,chan = alignedImage.shape
    x, y, c = rotatedImage.shape

    x,y,w,h = 220,220,360,360
    alignedImage = alignedImage[y:y+h, x:x+h].copy()
    rotatedImage = rotatedImage[y:y+h, x:x+h].copy()

    #convert images to valid type
    ref32 = np.float32(cv2.cvtColor(alignedImage, cv2.COLOR_BGR2GRAY))
    curr32 = np.float32(cv2.cvtColor(rotatedImage, cv2.COLOR_BGR2GRAY))

    value = np.sqrt(((rows/2.0)**2.0)+((cols/2.0)**2.0))
    value2 = np.sqrt(((x/2.0)**2.0)+((y/2.0)**2.0))
    polar_image = cv2.linearPolar(ref32,(rows/2, cols/2), value, cv2.WARP_FILL_OUTLIERS)
    log_img = cv2.linearPolar(curr32,(x/2, y/2), value2, cv2.WARP_FILL_OUTLIERS)

    shift = cv2.phaseCorrelate(polar_image, log_img)
    (sx, sy), sf = shift

    polar_image = polar_image.astype(np.uint8)
    log_img = log_img.astype(np.uint8)

    sx, sy, sf = round(sx, 4), round(sy, 4), round(sf, 4)
    text = image_path + "\n" + "sx: " + str(sx) + " \nsy: " + str(sy) + " \nsf: " + str(sf)
    images.append(rotatedImage)
    rotations.append(text)
    polar.append(polar_image)
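The loop above only collects the cropped images, their shift captions, and the polar transforms; a minimal sketch of displaying them side by side with matplotlib, continuing the script above (layout and figure size are assumptions):
import matplotlib.pyplot as plt

# one column per rotation: the cropped image on top, its polar transform below,
# with the phase-correlation shifts as the title
fig, axes = plt.subplots(2, len(images), figsize=(4 * len(images), 8))
for col, (im, pol, caption) in enumerate(zip(images, polar, rotations)):
    axes[0, col].imshow(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))
    axes[0, col].set_title(caption, fontsize=8)
    axes[0, col].axis('off')
    axes[1, col].imshow(pol, cmap='gray')
    axes[1, col].axis('off')
plt.tight_layout()
plt.show()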
Here's an approach to determine the rotational shift between two images in degrees. The idea is to find the skew angle for each image in relation to a horizontal line. If we can find this skewed angle then we can calculate the angle difference between the two images. Here are some example images to illustrate this concept
Original unrotated image
Rotated counterclockwise by 10 degrees (neg_10) and counterclockwise by 35 degrees (neg_35)
Rotated clockwise by 7.9 degrees (pos_7_9) and clockwise by 21 degrees (pos_21)
For each image, we want to determine the skew angle in relation to a horizontal line with negative being rotated counterclockwise and positive being rotated clockwise
Here's the helper function to determine this skew angle
def compute_angle(image):
    # Convert to grayscale, invert, and Otsu's threshold
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = 255 - gray
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

    # Find coordinates of all pixel values greater than zero
    # then compute minimum rotated bounding box of all coordinates
    coords = np.column_stack(np.where(thresh > 0))
    angle = cv2.minAreaRect(coords)[-1]

    # The cv2.minAreaRect() function returns values in the range
    # [-90, 0) so need to correct angle
    if angle < -45:
        angle = -(90 + angle)
    else:
        angle = -angle

    # Rotate image to horizontal position
    (h, w) = image.shape[:2]
    center = (w // 2, h // 2)
    M = cv2.getRotationMatrix2D(center, angle, 1.0)
    rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, \
                             borderMode=cv2.BORDER_REPLICATE)

    return (angle, rotated)
After determining the skew angle for each image, we can simply calculate the difference
angle1, rotated1 = compute_angle(image1)
angle2, rotated2 = compute_angle(image2)
# Both angles are positive
if angle1 >= 0 and angle2 >= 0:
    difference_angle = abs(angle1 - angle2)
# One positive, one negative
elif (angle1 < 0 and angle2 > 0) or (angle1 > 0 and angle2 < 0):
    difference_angle = abs(angle1) + abs(angle2)
# Both negative
elif angle1 < 0 and angle2 < 0:
    angle1 = abs(angle1)
    angle2 = abs(angle2)
    difference_angle = max(angle1, angle2) - min(angle1, angle2)
Here's the step-by-step walkthrough of what's going on. Using pos_21 and neg_10, the compute_angle() function will return the skew angle and the normalized image
For pos_21, we normalize the image and determine the skew angle. Left (before) -> right (after)
20.99871826171875
Similarly for neg_10, we also normalize the image and determine the skew angle. Left (before) -> right (after)
-10.007980346679688
Now that we have both angles, we can compute the difference angle. Here's the result
31.006698608398438
Here's results with other combinations. With neg_10 and neg_35 we get
24.984039306640625
With pos_7_9 and pos_21,
13.09155559539795
Finally with pos_7_9 and neg_35,
42.89918231964111
Here's the full code
import cv2
import numpy as np
def rotational_shift(image1, image2):
    def compute_angle(image):
        # Convert to grayscale, invert, and Otsu's threshold
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = 255 - gray
        thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

        # Find coordinates of all pixel values greater than zero
        # then compute minimum rotated bounding box of all coordinates
        coords = np.column_stack(np.where(thresh > 0))
        angle = cv2.minAreaRect(coords)[-1]

        # The cv2.minAreaRect() function returns values in the range
        # [-90, 0) so need to correct angle
        if angle < -45:
            angle = -(90 + angle)
        else:
            angle = -angle

        # Rotate image to horizontal position
        (h, w) = image.shape[:2]
        center = (w // 2, h // 2)
        M = cv2.getRotationMatrix2D(center, angle, 1.0)
        rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, \
                                 borderMode=cv2.BORDER_REPLICATE)

        return (angle, rotated)

    angle1, rotated1 = compute_angle(image1)
    angle2, rotated2 = compute_angle(image2)

    # Both angles are positive
    if angle1 >= 0 and angle2 >= 0:
        difference_angle = abs(angle1 - angle2)
    # One positive, one negative
    elif (angle1 < 0 and angle2 > 0) or (angle1 > 0 and angle2 < 0):
        difference_angle = abs(angle1) + abs(angle2)
    # Both negative
    elif angle1 < 0 and angle2 < 0:
        angle1 = abs(angle1)
        angle2 = abs(angle2)
        difference_angle = max(angle1, angle2) - min(angle1, angle2)

    return (difference_angle, rotated1, rotated2)

if __name__ == '__main__':
    image1 = cv2.imread('pos_7_9.png')
    image2 = cv2.imread('neg_35.png')

    angle, rotated1, rotated2 = rotational_shift(image1, image2)
    print(angle)
