Count gear teeth (Python, OpenCV)

For a prototype I need to build a 3D model of a gear, which has many teeth.
So I am trying to count them using OpenCV and Python. I found this (seemingly the only) post, which explains how to do it in C++.
I am following those steps, and this is the code I have so far:
import numpy as np
import cv2
img = cv2.imread('C:\\Users\\Link\\Desktop\\gear.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
kernel = np.ones((3, 3), np.uint8)
img_erosion = cv2.erode(thresh, kernel, iterations=1)
edges = cv2.Canny(img_erosion, 50, 150)
img_dilate = cv2.dilate(edges, kernel, iterations=1)
cv2.imshow('i', thresh)
cv2.waitKey(0)
cv2.imshow('i', img_erosion)
cv2.waitKey(0)
cv2.imshow('i', edges)
cv2.waitKey(0)
cv2.imshow('i', img_dilate)
cv2.waitKey(0)
What stopped me from going ahead is this: at some point the image becomes a real mess.
This is the original I am working on:
And this is the output of img_dilate:
As you can see, the teeth at the bottom are not displayed properly, maybe because of the shadow in the original image. How can I get rid of this?
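One thing worth trying against the shadow, as a hedged sketch rather than a definitive fix: blur before Otsu so the soft shadow is less likely to survive thresholding, then open the result to strip thin leftovers. It continues from the gray image above; the kernel sizes are assumptions to tune.
# Hedged sketch: suppress the soft shadow before edge detection.
# Blur, re-threshold with Otsu, then open to remove thin artifacts.
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
ret2, thresh2 = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
thresh2 = cv2.morphologyEx(thresh2, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
cv2.imshow('thresh2', thresh2)
cv2.waitKey(0)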

Because your source image is cleaner than the one in the post you linked, you can approximate the max-area contour with a polygon and take half the number of its points (each tooth contributes roughly one tip vertex and one root vertex). The result is 84.
Sample code:
#!/usr/bin/python3
# 2018.01.22 11:53:24 CST
import cv2
## Read
img = cv2.imread("img13_2.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
## threshold and find contours
ret, threshed = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY_INV)
cnts = cv2.findContours(threshed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2]
## Find the max-area contour
cnt = max(cnts, key=cv2.contourArea)
## Approx the contour
arclen = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, 0.002*arclen, True)
## Draw and output the result
for pt in approx:
    cv2.circle(img, (pt[0][0], pt[0][1]), 3, (0, 255, 0), -1, cv2.LINE_AA)
msg = "Total: {}".format(len(approx)//2)
cv2.putText(img, msg, (20,40),cv2.FONT_HERSHEY_PLAIN, 2, (0,0,255), 2, cv2.LINE_AA)
## Display
cv2.imshow("res", img);cv2.waitKey()
Result:

Solved it.
This is the code. The count is off by one because one tooth on the right is lower than the others, and the algorithm found two points there by itself. I don't know why this happens (see the sketch after the code for a possible workaround).
Also, it was made with another image, not the source I posted above, since that one is too low resolution.
import numpy as np
import cv2
img = cv2.imread('C:\\Users\\Link\\Desktop\\gear.png')
img2 = cv2.imread('C:\\Users\\Link\\Desktop\\gear.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
kernel = np.ones((3, 3), np.uint8)
img_dilate = cv2.dilate(thresh, kernel, iterations=1)
im2, contours, hierarchy = cv2.findContours(img_dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cv2.drawContours(img, contours, -1, (0, 255, 0), -1)
edges = cv2.Canny(cnts, 350, 350)
cnt = contours[0]
hull = cv2.convexHull(cnt, returnPoints=False)
defects = cv2.convexityDefects(cnt, hull)
for i in range(defects.shape[0]):
    s, e, f, d = defects[i, 0]
    start = tuple(cnt[s][0])
    end = tuple(cnt[e][0])
    far = tuple(cnt[f][0])
    cv2.line(edges, start, end, [0, 255, 255], 1)
    circles = cv2.circle(img2, end, 5, [0, 255, 0], -1)
# print(len(defects)) - number of points
cv2.imshow('thresh', thresh)
cv2.waitKey(0)
cv2.imshow('dilate', img_dilate)
cv2.waitKey(0)
cv2.imshow('edges', edges)
cv2.waitKey(0)
cv2.imshow('cnts', cnts)
cv2.waitKey(0)
cv2.imshow('points', circles)
cv2.waitKey(0)
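Regarding the off-by-one mentioned above, one pragmatic workaround (a sketch under the assumption that the duplicate comes from two defect end points landing on the same tooth) is to merge end points closer than a minimum pixel distance before counting; the 10 px threshold is a guess to tune.
# Hedged sketch: merge convexity-defect end points that are closer than
# min_dist so a tooth that produced two nearby points is counted once
min_dist = 10  # assumed minimum separation in pixels, tune for your image
points = [tuple(cnt[e][0]) for _, e, _, _ in defects[:, 0]]
kept = []
for p in points:
    if all((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2 >= min_dist ** 2 for q in kept):
        kept.append(p)
print('teeth:', len(kept))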

Related

How to find the center of an object in an image in Python

I have the following image:
I would like to find the center of the main object in the image - the book in this case.
I followed this answer: Center of mass in contour (Python, OpenCV)
and tried:
import cv2
import numpy as np
image = cv2.imread("29289.jpg")
imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 127, 255, 0, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
cnts = cv2.drawContours(image, contours[0], -1, (0, 255, 0), 1)
kpCnt = len(contours[0])
x = 0
y = 0
for kp in contours[0]:
    x = x + kp[0][0]
    y = y + kp[0][1]
cv2.circle(image, (np.uint8(np.ceil(x/kpCnt)), np.uint8(np.ceil(y/kpCnt))), 1, (0, 0, 255), 30)
cv2.namedWindow("Result", cv2.WINDOW_NORMAL)
cv2.imshow("Result", cnts)
cv2.waitKey(0)
cv2.destroyAllWindows()
But the result is nonsense (see the red point, which should be the center):
Do you have any idea how to solve this problem? Thanks a lot.
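For what it's worth, the centroid is usually computed from image moments rather than by averaging raw contour points, and using plain ints for the circle center avoids the np.uint8 wrap-around in the code above. A minimal sketch, assuming the same 29289.jpg and that the book is the largest contour:
# Hedged sketch: centroid of the largest contour via image moments
import cv2

image = cv2.imread("29289.jpg")
imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 127, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
cnt = max(contours, key=cv2.contourArea)  # assumption: the book is the largest contour
M = cv2.moments(cnt)
cx, cy = int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])
cv2.circle(image, (cx, cy), 1, (0, 0, 255), 30)
cv2.imshow("Result", image)
cv2.waitKey(0)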

OpenCV - Can't find correct contours in similar images

The task I want to do looks pretty simple: I take as input several images with an object centered in the photo and a little color chart needed for other purposes. My code normally works in the majority of cases, but sometimes it fails miserably and I just can't understand why.
For example (these are the source images), it works correctly on this one https://imgur.com/PHfIqcb but not on this one https://imgur.com/qghzO3V
Here's the code of the interested part:
img = cv2.imread(path)
height, width, channel = img.shape
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
kernel = np.ones((31, 31), np.uint8)
dil = cv2.dilate(gray, kernel, iterations=1)
_, th = cv2.threshold(dil, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
th_er1 = cv2.bitwise_not(th)
_, contours, _= cv2.findContours(th_er1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cnt=contours[max_index]
x,y,w,h = cv2.boundingRect(cnt)
After that I just crop the image according to the result (the bounding rectangle of the biggest contour), basically cutting out only the main object from the photo (see the sketch below).
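For reference, that cropping step is typically just an array slice on the bounding box; a minimal sketch, assuming the x, y, w, h from cv2.boundingRect above:
# Hedged sketch: crop the original image to the detected bounding box
cropped = img[y:y + h, x:x + w]
cv2.imwrite("cropped.jpg", cropped)  # hypothetical output path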
But as I said, with very similar images it sometimes works and sometimes doesn't.
Thank you in advance.
Maybe you could try not using Otsu's method and just set the threshold manually, if that's possible... ;)
You can use the Canny edge detector. In both images there is a good threshold value that isolates the object in the center of the image. After applying the threshold, we blur the result and apply the Canny edge detector before finding the contours:
import cv2
import numpy as np
def process(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(img_gray, 190, 255, cv2.THRESH_BINARY_INV)
    img_blur = cv2.GaussianBlur(thresh, (3, 3), 1)
    img_canny = cv2.Canny(img_blur, 0, 0)
    kernel = np.ones((5, 5))
    img_dilate = cv2.dilate(img_canny, kernel, iterations=1)
    return cv2.erode(img_dilate, kernel, iterations=1)
def get_contours(img):
    contours, hierarchies = cv2.findContours(process(img), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cnt = max(contours, key=cv2.contourArea)
    cv2.drawContours(img, [cnt], -1, (0, 255, 0), 30)
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 30)
img = cv2.imread("image.jpeg")
get_contours(img)
cv2.imshow("Result", img)
cv2.waitKey(0)
Input images:
Output images:
The green outlines are the contours of the objects, and the red outlines are the bounding boxes of the objects.

How to fill in the contours of numbers in an image? (OCR)

I've been studying CV for a few months now, but I ran into a problem on my second project: I needed to remove the noise from a sequence of numbers in order to apply OCR. I managed to clean it up, but the numbers lost some internal pixels.
See the initial and current final images.
Initial
Final
Code used:
import cv2
import numpy as np

# img is the input BGR image loaded elsewhere
blur = cv2.GaussianBlur(img, (15, 15), 2)
hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
lower_gray = np.array([1, 1, 1])
upper_gray = np.array([102, 102, 102])
mask = cv2.inRange(hsv, lower_gray, upper_gray)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
opened_mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
masked_img = cv2.bitwise_and(img, img, mask=opened_mask)
coloured = masked_img.copy()
coloured[mask == 0] = (255, 255, 255)
gray = cv2.cvtColor(coloured, cv2.COLOR_BGR2GRAY)
des = cv2.bitwise_not(gray)
contour, hier = cv2.findContours(des, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contour:
    cv2.drawContours(des, [cnt], 0, 255, -1)
# des is the final image
Is there a better way to clean the background for OCR, or maybe close the lost pixels in the characters?
I managed to solve it. I didn't use the method you mentioned, although it was a good approach; I was apprehensive that it would expand the characters and wouldn't be good for OCR reading.
This is my final result:
import cv2
import imutils
import numpy as np

for mrz in mrz_list:
    try:
        thresh = cv2.threshold(mrz, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        dist = cv2.distanceTransform(thresh, cv2.DIST_L2, 5)
        dist = cv2.normalize(dist, dist, 0, 1.0, cv2.NORM_MINMAX)
        dist = (dist * 255).astype("uint8")
        thresh = cv2.threshold(dist, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
        cnts = cv2.findContours(opening.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        chars = []
        for c in cnts:
            (x, y, w, h) = cv2.boundingRect(c)
            if w >= 20 and h >= 25:
                chars.append(c)
        chars = np.vstack([chars[i] for i in range(0, len(chars))])
        hull = cv2.convexHull(chars)
        mask = np.zeros(mrz.shape[:2], dtype="uint8")
        cv2.drawContours(mask, [hull], -1, 255, -1)
        mask = cv2.dilate(mask, None, iterations=2)
        final = cv2.bitwise_and(opening, opening, mask=mask)
    except Exception:
        # the original try block had no except clause; skip regions that fail
        continue
Thanks everyone.
It is easy to get a clean background by morphological dilation or closing, a smooth outline by Gaussian filtering, and then binarization. But I found no way to separate the characters.
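A minimal sketch of the pipeline described in that comment, with assumed kernel and blur sizes and a hypothetical grayscale input file:
# Hedged sketch: clean the background with a grayscale closing, smooth the
# outline with a Gaussian blur, then binarize with Otsu
import cv2

gray = cv2.imread("numbers.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input path
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
cleaned = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, kernel)
smoothed = cv2.GaussianBlur(cleaned, (5, 5), 0)
binary = cv2.threshold(smoothed, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]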

How to find contours in a dotted-text captcha image

I am a newbie to OpenCV. I'm trying to find the contours of a captcha image. It fails only when the captcha image contains dotted text.
I have written the following code for that:
import numpy as np
import cv2 as cv
im = cv.imread('dataset/1.jpg')
imgray = cv.cvtColor(im, cv.COLOR_BGR2GRAY)
imgray = cv.threshold(imgray, 127, 255, 0)[1]
dst = cv.Canny(imgray,0,150)
blured = cv.blur(dst,(5,5),0)
img_thresh = cv.adaptiveThreshold(blured,255,cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY_INV, 11, 2)
kernel = cv.getStructuringElement(cv.MORPH_RECT, (3,3))
threshed = cv.morphologyEx(img_thresh,cv.MORPH_CLOSE,kernel)
contours, hierarchy = cv.findContours(dst, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
print(len(contours))
# cv.drawContours(im, contours, -1, (0, 255, 0), 3)
cv.imshow("img_thresh",img_thresh)
cv.imshow("dst",dst)
cv.imshow("threshed",threshed)
cv.waitKey(0)
cv.destroyAllWindows()
Can anyone help with this? Is there any way to find contours in this image?
Here is my code and the output:
'''
contours
'''
import numpy as np
import cv2
# Read the input image
pic = r'C:\Users\balaji\Desktop\captcha.jpg'
img_color = cv2.imread(pic)
cv2.imshow('CAPTCHA preview',img_color)
cv2.waitKey(0)
img_gray = cv2.cvtColor(img_color,cv2.COLOR_BGR2GRAY)
#Apply thresholding to the image
ret, thresh = cv2.threshold(img_gray, 127, 255, cv2.THRESH_OTSU)
cv2.imshow('Thresholded image', thresh)
cv2.waitKey(0)
#Dilated image - to connect the dots
krn = np.ones((3,3), np.uint8)
img_dilated = cv2.dilate(cv2.bitwise_not(thresh), kernel=krn, iterations=1)
cv2.imshow('Dilated image', img_dilated)
cv2.waitKey(0)
# Find and draw contours
contours, hierarchy = cv2.findContours(img_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
black_canvas = np.zeros_like(img_color)
cv2.drawContours(black_canvas, contours, -1, (0, 255, 0), 1)
cv2.imshow('Contoured image', black_canvas)
cv2.waitKey(0)
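If the contours are then fed to an OCR step, a common follow-up is to sort them left to right by bounding box and cut one ROI per character; a minimal sketch continuing from contours and img_gray above (the 50 px² area filter is an assumption to tune):
# Hedged follow-up: sort contours left to right and extract per-character ROIs
boxes = [cv2.boundingRect(c) for c in contours if cv2.contourArea(c) > 50]
boxes.sort(key=lambda b: b[0])  # sort by x coordinate
char_rois = [img_gray[y:y + h, x:x + w] for (x, y, w, h) in boxes]
print('characters found:', len(char_rois))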

How to remove background using Canny Edge detection

I want to remove the background and sharpen images of the following type:
image 1
Both of these are signatures. I want to be able to remove everything except the signature itself and sharpen the lines of the signature.
I am able to get a mask using Canny edge detection with the following code:
import cv2
im_path = r'test2.png'
image = cv2.imread(im_path) #args["image"]
image = cv2.resize(image, (680, 460))
#rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
cv2.imshow('thresh', thresh) ###for showing
cv2.imwrite('thresh.jpg', thresh) ###for saving
cv2.waitKey()
And these are the masks I get:
But I'm clueless about which image processing operations to perform next.
PS: These signatures are the same (not forged), and the next step would be to find the similarity between them.
Try this
import cv2
import numpy as np
image = cv2.imread('test2.png')
original = image.copy()
mask = np.zeros(image.shape, dtype=np.uint8)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=3)
cnts = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
for c in cnts:
    cv2.drawContours(mask, [c], -1, (255, 255, 255), -1)
    break
close = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=4)
close = cv2.cvtColor(close, cv2.COLOR_BGR2GRAY)
result = cv2.bitwise_and(original, original, mask=close)
result[close==0] = (255,255,255)
cv2.imshow('result', result)
cv2.waitKey()
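The question also asks about sharpening the signature lines; one common option is a simple unsharp mask on the extracted result. A minimal sketch, assuming the result image from the code above (blur sigma and weights are assumptions to tune):
# Hedged sketch: unsharp masking to crisp up the signature strokes
blurred = cv2.GaussianBlur(result, (0, 0), 3)
sharpened = cv2.addWeighted(result, 1.5, blurred, -0.5, 0)
cv2.imshow('sharpened', sharpened)
cv2.waitKey()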
