How to find the colour of contours in OpenCV with Python

I'm working with a requirement where I need to find the colour of the region inside contours. We are using OpenCV with Python, and here is my code:
import imutils
import cv2
import numpy as np
path = "multiple_grains_1.jpeg"
img = cv2.imread(path)
resized = imutils.resize(img, width=900)
ratio = img.shape[0] / float(resized.shape[0])
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
(ret, thresh) = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
edge = cv2.Canny(thresh, 100, 200)
(_, cnts, _) = cv2.findContours(edge.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in cnts:
    rect = cv2.minAreaRect(c)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    area = cv2.contourArea(c)
    if area > 1:
        cv2.drawContours(resized, [box], 0, (0, 0, 255), 2)
        cv2.drawContours(resized, [c], -1, (0, 255, 0), 2)
        #print("area : " + str(area))
        #print('\nContours: ' + str(c[0]))
        #img[c[0]]
        pixelpoints = np.transpose(np.nonzero(c))
        #print('\npixelpoints: ' + str(pixelpoints))
        # accessed the center of the contour using the following
        M = cv2.moments(c)
        if M["m00"] != 0:
            cX = int((M["m10"] / M["m00"]) * ratio)
            cY = int((M["m01"] / M["m00"]) * ratio)
            #print(cX, cY)
            cord = img[int(cX)+3, int(cY)+3]
            print(cord)
cv2.imshow("Output", resized)
cv2.waitKey(0)
exit()
When I check the colour at the centroid of the contour, I'm unable to fetch the correct colour. Does anyone know how to fetch the colour inside a contour using OpenCV and Python?
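One detail worth double-checking (my observation, not taken from the answer below): NumPy indexes an image as img[row, col], i.e. img[y, x], so the sampling line above swaps the coordinates. A hypothetical fix for that line would be:
# read the centroid pixel as [row, col]; cX and cY are already scaled back
# to the original image by `ratio`
cord = img[int(cY), int(cX)]
print(cord)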

I simplified your code and was able to get the color at the centroids without using moments.
import imutils
import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread("multiplegrains.png")
resized = imutils.resize(img, width=900)
ratio = img.shape[0] / float(resized.shape[0])
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
_, cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# if you want to keep every contour with cv2.contourArea > 1, just comment out the line below
cnts = [c for c in cnts if cv2.contourArea(c) > 10]
grains = [np.int0(cv2.boxPoints(cv2.minAreaRect(c))) for c in cnts]
centroids =[(grain[2][1]-(grain[2][1]-grain[0][1])//2, grain[2][0]-(grain[2][0]-grain[0][0])//2) for grain in grains]
colors = [resized[centroid] for centroid in centroids]
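Each centroid above is already in (row, col) order, which is what NumPy needs for resized[centroid]. If you want the colour of the whole grain rather than a single centroid pixel, a minimal sketch (reusing cnts and resized from above; the variable names are mine) is to average over a filled mask:
mean_colors = []
for c in cnts:
    mask = np.zeros(resized.shape[:2], dtype=np.uint8)
    cv2.drawContours(mask, [c], -1, 255, -1)                 # fill the contour
    mean_colors.append(cv2.mean(resized, mask=mask)[:3])     # mean B, G, R inside it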

Related

How to run a loop on max contour

How do I run a loop to get the contours and pixels for 8 objects in an image, rather than just finding the max contour and pixels of one object in an image?
import cv2
import numpy as np
img = cv2.imread('C:\\Users\\marnes\\Downloads\\25%.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# get shape of largest contour and count pixels
edges = cv2.Canny(image=img, threshold1=100, threshold2=200)
contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
max_contour = max(contours, key=lambda c: cv2.contourArea(c))
mask = np.zeros_like(gray)
cv2.drawContours(mask, [max_contour], 0, 255, -1)
pct_100 = cv2.countNonZero(mask)
cv2.imshow("mask", mask)
# get dark area and count pixels
ret, thresh = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV)
thresh = cv2.bitwise_and(thresh, mask)
pct_dark = cv2.countNonZero(thresh)
cv2.imshow("dark", thresh)
print(f"mask = {pct_100}, dark = {pct_dark}, %dark = {pct_dark / pct_100 * 100}")
first_operator = 100
second_operator = pct_dark / pct_100 * 100
output1 = first_operator - second_operator
parameter = 'starch breakdown'
print([parameter] + [output1])
cv2.waitKey(0)
cv2.destroyAllWindows()
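No accepted answer is reproduced here, but as a rough sketch (my own, reusing gray and contours from the code above; top_objects and the count of 8 are illustrative), you can rank the contours by area and repeat the same mask-and-count logic per object instead of only for the largest one:
_, dark = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV)           # dark pixels, unmasked
top_objects = sorted(contours, key=cv2.contourArea, reverse=True)[:8]    # 8 largest contours
for i, c in enumerate(top_objects):
    obj_mask = np.zeros_like(gray)
    cv2.drawContours(obj_mask, [c], 0, 255, -1)                          # filled mask for this object
    obj_total = cv2.countNonZero(obj_mask)
    obj_dark = cv2.countNonZero(cv2.bitwise_and(dark, obj_mask))
    print(f"object {i}: pixels = {obj_total}, dark = {obj_dark}, %dark = {obj_dark / obj_total * 100:.1f}")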

Increase contour detection accuracy of chess board squares using openCV in python

I wanted to detect the contours of the black chess board squares in the following image.
The following code detects only a few black squares successfully; how can we increase the accuracy?
import cv2
import numpy as np
imPath = r" " # <----- image path
def imageResize(orgImage, resizeFact):
    dim = (int(orgImage.shape[1]*resizeFact),
           int(orgImage.shape[0]*resizeFact))  # w, h
    return cv2.resize(orgImage, dim, interpolation=cv2.INTER_AREA)
img = imageResize(cv2.imread(imPath), 0.5)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.inRange(gray, 135, 155) # to pick only black squares
# find contours
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cntImg = img.copy()
minArea, maxArea = 3000, 3500
valid_cnts = []
for c in cnts:
    area = cv2.contourArea(c)
    if area > minArea and area < maxArea:
        valid_cnts.append(c)
        # draw centers for troubleshooting
        M = cv2.moments(c)
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
        cv2.circle(cntImg, (cX, cY), 5, (0, 0, 255), -1)
cv2.drawContours(cntImg, valid_cnts, -1, (0, 255, 0), 2)
cv2.imshow('org', img)
cv2.imshow('threshold', thresh)
cv2.imshow('contour', cntImg)
cv2.waitKey(0)
cv2.destroyAllWindows()
Gives threshold and contour -
[0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 7.0, 7.5, 9.5, 3248.5, 3249.0, 6498.0] are the unique contour areas. Typical areas for the desired black squares are 3248.5 and 3249.0. Here's a quick snippet for getting the unique contour areas -
cntAreas = [cv2.contourArea(x) for x in cnts]
print(sorted(set(cntAreas)))
Highly appreciate any help!!
The problem was due to gaps in the Canny edges, which originated from noise in the grayscale image. A dilate morphological operation reduces the noise and gives well-connected Canny edges that form closed contours.
Full code -
import cv2
import numpy as np
imPath = r" " # <----- image path
def imageResize(orgImage, resizeFact):
    dim = (int(orgImage.shape[1]*resizeFact),
           int(orgImage.shape[0]*resizeFact))  # w, h
    return cv2.resize(orgImage, dim, interpolation=cv2.INTER_AREA)
img = imageResize(cv2.imread(imPath), 0.5)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2)) # <-----
morphed = cv2.dilate(gray, kernel, iterations=1)
thresh = cv2.inRange(morphed, 135, 155) # to pick only black squares
# find canny edge
edged_wide = cv2.Canny(thresh, 10, 200, apertureSize=3)
cv2.waitKey(0)
# find Contours
contours, hierarchy = cv2.findContours(
    thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # cv2.CHAIN_APPROX_NONE would store all coords unlike SIMPLE; cv2.RETR_EXTERNAL would keep only outer contours
cntImg = img.copy()
minArea, maxArea = 2000, 4000
valid_cnts = []
for c in contours:
    area = cv2.contourArea(c)
    if area > minArea and area < maxArea:
        valid_cnts.append(c)
        # draw centers
        M = cv2.moments(c)
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
        cv2.circle(cntImg, (cX, cY), 5, (0, 0, 255), -1)
cv2.drawContours(cntImg, valid_cnts, -1, (0, 255, 0), 2)
cv2.imshow('threshold', thresh)
cv2.imshow('morphed', morphed)
cv2.imshow('canny edge', edged_wide)
cv2.imshow('contour', cntImg)
cv2.waitKey(0)
cv2.destroyAllWindows()
Here's the contour plot -

How can I detect and recognize text in a shape

I need your help. I'm writing a Python script to recognize text in a shape. The shape can be captured from an RTSP stream (IP camera) at any angle.
See the attached file for an example. My code is below, but the coordinates used to crop the rotated shape are currently set manually.
import cv2
import numpy as np
def main():
    fn = cv2.VideoCapture("rtsp://admin:Admin123-#172.16.10.254")
    flag, img = fn.read()
    cnt = np.array([
        [[64, 49]],
        [[122, 11]],
        [[391, 326]],
        [[308, 373]]
    ])
    print("shape of cnt: {}".format(cnt.shape))
    rect = cv2.minAreaRect(cnt)
    print("rect: {}".format(rect))
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    print("bounding box: {}".format(box))
    cv2.drawContours(img, [box], 0, (0, 255, 0), 2)
    img_crop, img_rot = crop_rect(img, rect)
    print("size of original img: {}".format(img.shape))
    print("size of rotated img: {}".format(img_rot.shape))
    print("size of cropped img: {}".format(img_crop.shape))
    new_size = (int(img_rot.shape[1]/2), int(img_rot.shape[0]/2))
    img_rot_resized = cv2.resize(img_rot, new_size)
    new_size = (int(img.shape[1]/2), int(img.shape[0]/2))
    img_resized = cv2.resize(img, new_size)
    cv2.imshow("original contour", img_resized)
    cv2.imshow("rotated image", img_rot_resized)
    cv2.imshow("cropped_box", img_crop)
    # cv2.imwrite("crop_img1.jpg", img_crop)
    cv2.waitKey(0)

def crop_rect(img, rect):
    # get the parameters of the small rectangle
    center = rect[0]
    size = rect[1]
    angle = rect[2]
    center, size = tuple(map(int, center)), tuple(map(int, size))
    # get the row and column counts of the image
    height, width = img.shape[0], img.shape[1]
    print("width: {}, height: {}".format(width, height))
    M = cv2.getRotationMatrix2D(center, angle, 1)
    img_rot = cv2.warpAffine(img, M, (width, height))
    img_crop = cv2.getRectSubPix(img_rot, size, center)
    return img_crop, img_rot

if __name__ == "__main__":
    main()
example pic
You may start with the example in the following post.
The code sample detects the license plate, and it also detects your "shape" with text.
After detecting the "shape" with the text, you may use the following stages:
Apply a threshold to the cropped area.
Find contours, and find the contour with maximum area.
Build a mask, and mask area outside the contour (like in the license plate example).
Use minAreaRect (as fmw42 commented), and get the angle of the rectangle.
Rotate the cropped area (by angle+90 degrees).
Apply OCR using pytesseract.image_to_string.
Here is the complete code:
import cv2
import numpy as np
import imutils
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe' # I am using Windows
# Read the input image
img = cv2.imread('Admin123.jpg')
# Reused code:
# https://stackoverflow.com/questions/60977964/pytesseract-not-recognizing-text-as-expected/60979089#60979089
################################################################################
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #convert to grey scale
gray = cv2.bilateralFilter(gray, 11, 17, 17)
edged = cv2.Canny(gray, 30, 200) #Perform Edge detection
cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:10]
screenCnt = None
# loop over our contours
for c in cnts:
    # approximate the contour
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.018 * peri, True)
    # if our approximated contour has four points, then
    # we can assume that we have found our screen
    if len(approx) == 4:
        screenCnt = approx
        break
# Masking the part other than the "shape"
mask = np.zeros(gray.shape,np.uint8)
new_image = cv2.drawContours(mask,[screenCnt],0,255,-1,)
new_image = cv2.bitwise_and(img,img,mask=mask)
# Now crop
(x, y) = np.where(mask == 255)
(topx, topy) = (np.min(x), np.min(y))
(bottomx, bottomy) = (np.max(x), np.max(y))
cropped = gray[topx:bottomx+1, topy:bottomy+1]
################################################################################
# Apply threshold the cropped area
_, thresh = cv2.threshold(cropped, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# Find contours
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cnts = imutils.grab_contours(cnts)
# Get contour with maximum area
c = max(cnts, key=cv2.contourArea)
# Build a mask (same as the code above)
mask = np.zeros(cropped.shape, np.uint8)
new_cropped = cv2.drawContours(mask, [c], 0, 255, -1)
new_cropped = cv2.bitwise_and(cropped, cropped, mask=mask)
# Draw green rectangle for testing
test = cv2.cvtColor(new_cropped, cv2.COLOR_GRAY2BGR)
cv2.drawContours(test, [c], -1, (0, 255, 0), thickness=2)
# Use minAreaRect as fmw42 commented
rect = cv2.minAreaRect(c)
angle = rect[2] # Get angle of the rectangle
# Rotate the cropped rectangle.
rotated_cropped = imutils.rotate(new_cropped, angle + 90)
# Read the text in the "shape"
text = pytesseract.image_to_string(rotated_cropped, config='--psm 3')
print("Extracted text is:\n\n", text)
# Show images for testing:
cv2.imshow('cropped', cropped)
cv2.imshow('thresh', thresh)
cv2.imshow('test', test)
cv2.imshow('rotated_cropped', rotated_cropped)
cv2.waitKey(0)
cv2.destroyAllWindows()
OCR output result:
AB12345
DEPARTMENT OF
INFORMATION
COMMUNICATION
TECHNOLOGY
(Result images omitted: cropped, thresh, test, rotated_cropped.)

Match center of two images (OpenCV, Python)

I asked a question before which was maybe too complex, so here I am with a new, slightly simpler one.
I have two images:
image 1
image 2
What I want to do is to center the second image into the center of the first, like below.
desired
What I have achieved so far is finding the centers of these images.
The value is a list of two points, X-Y.
How can I match these points to get a result like the one desired above?
import cv2
import numpy as np
import os
img1 = cv2.imread(os.path.expanduser('~\\Desktop\\c1.png'))
# ---Read image and obtain threshold---
img0 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(img0, 120, 255, 1)
# ---Obtain contours---
image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = contours
center = []
for c in cnts:
    M = cv2.moments(c)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    print(cX, cY)
    center.append(cX)
    center.append(cY)
print(center)
Thanks
Here are my steps:
Find centers by contours
Calc the offset between centers
Do slice-op to paste the object image
For those two images:
This is my result (with 0.3x for img2):
#!/usr/bin/python3
# 2018.01.16 21:07:48 CST
# 2018.01.16 21:23:47 CST
import cv2
import numpy as np
import os
def findCenter(img):
    print(img.shape, img.dtype)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    th, threshed = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)
    #cv2.imshow("threshed", threshed);cv2.waitKey();cv2.destroyAllWindows()
    #_, cnts, hierarchy = cv2.findContours(threshed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cv2.findContours(threshed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    M = cv2.moments(cnts[0])
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    return (cX, cY)
img1 = cv2.imread("img1.jpg")
img2 = cv2.resize(cv2.imread("img2.jpg"), None, fx=0.3, fy=0.3)
## (1) Find centers
pt1 = findCenter(img1)
pt2 = findCenter(img2)
## (2) Calc offset
dx = pt1[0] - pt2[0]
dy = pt1[1] - pt2[1]
## (3) do slice-op `paste`
h,w = img2.shape[:2]
dst = img1.copy()
dst[dy:dy+h, dx:dx+w] = img2
cv2.imwrite("res.png", dst)

cv2.drawContours() - unfill circles inside characters (Python, OpenCV)

As suggested by @Silencer, I used the code he posted here to draw contours around the numbers in my image.
At some point, working with numbers like 0, 6, 8, 9, I saw that their inner contours (the circles) are being filled as well.
How can I prevent this? Is there a min/max area of action to set for cv2.drawContours() so I can exclude the inner area?
I tried to pass cv2.RETR_EXTERNAL, but with this parameter only the whole external area is considered.
The code is this (again, thanks Silencer; I was searching for this for months):
import numpy as np
import cv2
im = cv2.imread('imgs\\2.png')
imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 127, 255, 0)
image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#contours.sort(key=lambda x: int(x.split('.')[0]))
for i, cnts in enumerate(contours):
    ## this contour is a 3D numpy array
    cnt = contours[i]
    res = cv2.drawContours(im, [cnt], 0, (255, 0, 0), 1)
    cv2.imwrite("contours.png", res)
    '''
    ## Method 1: crop the region
    x,y,w,h = cv2.boundingRect(cnt)
    croped = res[y:y+h, x:x+w]
    cv2.imwrite("cnts\\croped{}.png".format(i), croped)
    '''
    ## Method 2: draw on blank
    # get the 0-indexed coords
    offset = cnt.min(axis=0)
    cnt = cnt - cnt.min(axis=0)
    max_xy = cnt.max(axis=0) + 1
    w, h = max_xy[0][0], max_xy[0][1]
    # draw on blank
    canvas = np.ones((h, w, 3), np.uint8) * 255
    cv2.drawContours(canvas, [cnt], -1, (0, 0, 0), -1)
    #if h > 15 and w < 60:
    cv2.imwrite("cnts\\canvas{}.png".format(i), canvas)
The main image on which I am working..
Thanks
UPDATE
I implemented Fiver answer below and this is the result:
import cv2
import numpy as np
img = cv2.imread('img.png')
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
img_v = img_hsv[:, :, 2]
ret, thresh = cv2.threshold(~img_v, 127, 255, 0)
image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for i, c in enumerate(contours):
    tmp_img = np.zeros(img_v.shape, dtype=np.uint8)
    res = cv2.drawContours(tmp_img, [c], -1, 255, cv2.FILLED)
    tmp_img = np.bitwise_and(tmp_img, ~img_v)
    ret, inverted = cv2.threshold(tmp_img, 127, 255, cv2.THRESH_BINARY_INV)
    cnt = contours[i]
    x, y, w, h = cv2.boundingRect(cnt)
    cropped = inverted[y:y + h, x:x + w]
    cv2.imwrite("roi{}.png".format(i), cropped)
To draw the characters without filling the closed inner regions:
find the contours on the thresholded binary image with hierarchy.
find the outer contours, i.e. those that are not inside another contour (checked via the flag hierarchy[i]).
for each outer contour:
3.1 fill it (maybe check first whether that is needed);
3.2 then iterate over its inner child contours and fill them with another color (such as the inverse color).
combine with the crop code and crop them.
maybe you need to sort them, re-split them, and normalize them.
maybe now you can do OCR with a trained model.
FindContours, then refill the inner closed regions.
Combined with this answer, Copy shape to blank canvas (OpenCV, Python), and a few more steps, maybe you can get this or better:
The core code to refill the inner closed regions is as follow:
#!/usr/bin/python3
# 2018.01.14 09:48:15 CST
# 2018.01.15 17:56:32 CST
# 2018.01.15 20:52:42 CST
import numpy as np
import cv2
img = cv2.imread('img02.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
## Threshold
ret, threshed = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)
## FindContours
cnts, hiers = cv2.findContours(threshed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
canvas = np.zeros_like(img)
n = len(cnts)
hiers = hiers[0]
for i in range(n):
    if hiers[i][3] != -1:
        ## if it is an inner contour, skip it
        continue
    ## draw the outer contour
    cv2.drawContours(canvas, cnts, i, (0,255,0), -1, cv2.LINE_AA)
    ## find all inner contours and draw them
    ch = hiers[i][2]
    while ch != -1:
        print(" {:02} {}".format(ch, hiers[ch]))
        cv2.drawContours(canvas, cnts, ch, (255,0,255), -1, cv2.LINE_AA)
        ch = hiers[ch][0]
cv2.imwrite("001_res.png", canvas)
Run this code with this image:
You will get:
Of course, this is for two hierarchy levels. I haven't tested more than two; if you need that, test it yourself.
Update:
Notice that different OpenCV versions return different values from cv2.findContours. To keep the code executable, we can just take the last two returned values: cnts, hiers = cv2.findContours(...)[-2:]
(Documentation screenshots for OpenCV 3.4 and OpenCV 4.0 are not reproduced here.)
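For reference, and as far as I know (double-check against your installed version), the two forms are:
# OpenCV 3.x returns three values:
#   image, contours, hierarchy = cv2.findContours(threshed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# OpenCV 4.x returns two:
#   contours, hierarchy = cv2.findContours(threshed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Taking the last two elements works with either version:
cnts, hiers = cv2.findContours(threshed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]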
Since you already have a mask from your threshold step, you can also use it to bitwise_and against the drawn contour:
import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread('drawn_chars.png')
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
img_v = img_hsv[:, :, 2]
ret, thresh = cv2.threshold(~img_v, 127, 255, 0)
image, contours, hierarchy = cv2.findContours(
    thresh,
    cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE
)
for c in contours:
    tmp_img = np.zeros(img_v.shape, dtype=np.uint8)
    cv2.drawContours(tmp_img, [c], -1, 255, cv2.FILLED)
    tmp_img = np.bitwise_and(tmp_img, ~img_v)
    plt.figure(figsize=(16, 2))
    plt.imshow(tmp_img, cmap='gray')
I've inverted the image so the contours are white and I left out the cropping as you already solved that. Here is the result on one of the "O" characters:
Full code...
This will not sort the images.
import numpy as np
import cv2
im = cv2.imread('imgs\\1.png')
imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
## Threshold
ret, threshed = cv2.threshold(imgray, 127, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
## FindContours
image, cnts, hiers = cv2.findContours(threshed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
canvas = np.zeros_like(im)
n = len(cnts)
hiers = hiers[0]
for i, imgs in enumerate(cnts):
    cnt = cnts[i]
    res = cv2.drawContours(im, [cnt], 0, (0, 0, 0), -1)
    x, y, w, h = cv2.boundingRect(cnt)
    croped = res[y:y + h, x:x + w]
    if h > 10:
        cv2.imwrite("out\\croped{}.png".format(i), croped)
        cv2.imshow('i', croped)
        cv2.waitKey(0)
for i, value in enumerate(cnts):
    ## this contour is a 3D numpy array
    cnt = cnts[i]
    res = cv2.drawContours(im, [cnt], 0, (0, 0, 0), -1)
    # cv2.imwrite("out\\contours{}.png".format(i), res)
    ## Find all inner contours and draw them
    ch = hiers[i][2]
    while ch != -1:
        print(" {:02} {}".format(ch, hiers[ch]))
        res1 = cv2.drawContours(im, cnts, ch, (255, 255, 255), -1)
        ch = hiers[ch][0]
    x, y, w, h = cv2.boundingRect(cnt)
    croped = res[y:y + h, x:x + w]
    if h > 10:
        cv2.imwrite("out\\croped{}.png".format(i), croped)
Any correction is accepted.
This will definitively do the job...
import cv2
import os
import numpy as np
img = cv2.imread("image.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
retval, thresholded = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
medianFiltered = cv2.medianBlur(thresholded, 3)
_, contours, hierarchy = cv2.findContours(medianFiltered, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contour_list = []
for contour in contours:
    area = cv2.contourArea(contour)
    if area > 80:
        contour_list.append(contour)
numbers = cv2.drawContours(img, contour_list, -1, (0, 0, 0), 2)
cv2.imshow('i', numbers)
cv2.waitKey(0)
sorted_ctrs = sorted(contours, key=lambda ctr: cv2.boundingRect(ctr)[0])
for i, cnts in enumerate(sorted_ctrs):
    cnt = sorted_ctrs[i]
    x, y, w, h = cv2.boundingRect(cnt)
    croped = numbers[y:y + h, x:x + w]
    h, w = croped.shape[:2]
    print(h, w)
    if h > 15:
        cv2.imwrite("croped{}.png".format(i), croped)
This is conceptually similar to Fiver's answer, except that bitwise_and occurs outside the for loop, which is perhaps better in terms of performance. The source code is in C++ for those looking for a C++ answer to this problem.
int thWin = 3;
int thOffset = 1;
cv::adaptiveThreshold(image, th, 255, cv::ADAPTIVE_THRESH_MEAN_C, cv::THRESH_BINARY_INV, thWin, thOffset);
int minMoveCharCtrArea = 140;
std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy;
cv::findContours(th.clone(), contours, hierarchy, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE);
cv::Mat filtImg = cv::Mat::zeros(img.rows, img.cols, CV_8UC1 );
for (int i = 0; i < contours.size(); ++i) {
    int ctrArea = cv::contourArea(contours[i]);
    if (ctrArea > minMoveCharCtrArea) {
        cv::drawContours(filtImg, contours, i, 255, -1);
    }
}
cv::bitwise_and(th, filtImg, filtImg);
Remember to clone the image (for Python it should be copy) when passing the source image argument to findContours, since findContours modifies the original image. I reckon later versions of OpenCV (perhaps OpenCV 3+) don't require cloning.
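For those who want the same idea in Python, here is a rough translation of the C++ snippet above (my own sketch; the input filename and the 140-pixel area threshold mirror the C++ constants and are assumptions):
import cv2
import numpy as np

image = cv2.imread("chars.png", cv2.IMREAD_GRAYSCALE)     # hypothetical input file
th = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                           cv2.THRESH_BINARY_INV, 3, 1)
contours, hierarchy = cv2.findContours(th.copy(), cv2.RETR_LIST,
                                       cv2.CHAIN_APPROX_SIMPLE)[-2:]
filt_img = np.zeros(image.shape[:2], dtype=np.uint8)
for i, c in enumerate(contours):
    if cv2.contourArea(c) > 140:                           # minMoveCharCtrArea
        cv2.drawContours(filt_img, contours, i, 255, -1)
filt_img = cv2.bitwise_and(th, filt_img)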
