OpenCV - Can't find correct contours in similar images - python

The task I want to do looks pretty simple: I take as input several images, each with an object centered in the photo and a little color chart needed for other purposes. My code works for the majority of cases, but sometimes it fails miserably and I just can't understand why.
For example (these are the source images), it works correctly on this https://imgur.com/PHfIqcb but not on this https://imgur.com/qghzO3V
Here's the relevant part of the code:
img = cv2.imread(path)
height, width, channel = img.shape
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# dilate with a large kernel to suppress detail inside the object
kernel = np.ones((31, 31), np.uint8)
dil = cv2.dilate(gray, kernel, iterations=1)
# Otsu's threshold, then invert so the object is white on black
_, th = cv2.threshold(dil, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
th_er1 = cv2.bitwise_not(th)
# OpenCV 3.x returns (image, contours, hierarchy)
_, contours, _ = cv2.findContours(th_er1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# keep the contour with the largest area and take its bounding rect
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cnt = contours[max_index]
x, y, w, h = cv2.boundingRect(cnt)
After that I just crop the image according to the result (the bounding rectangle of the biggest contour), basically cutting the main object out of the photo.
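For reference, the crop itself is just NumPy slicing with the bounding-rect values (a minimal sketch continuing from the variables above):
crop = img[y:y+h, x:x+w]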
But as I said, with very similar images it sometimes works and sometimes doesn't.
Thank you in advance.

Maybe you could try not using Otsu's method and just set the threshold manually, if that's possible... ;)
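For example, a minimal sketch with a hand-picked value (190 here is just a guess you would tune on your own images):
_, th = cv2.threshold(dil, 190, 255, cv2.THRESH_BINARY)  # fixed threshold instead of Otsu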

You can use the Canny edge detector. In both images there is a threshold value that will isolate the object in the center of the image. After applying the threshold, we blur the result and apply the Canny edge detector before finding the contours:
import cv2
import numpy as np

def process(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(img_gray, 190, 255, cv2.THRESH_BINARY_INV)
    img_blur = cv2.GaussianBlur(thresh, (3, 3), 1)
    img_canny = cv2.Canny(img_blur, 0, 0)
    kernel = np.ones((5, 5))
    img_dilate = cv2.dilate(img_canny, kernel, iterations=1)
    return cv2.erode(img_dilate, kernel, iterations=1)

def get_contours(img):
    contours, hierarchies = cv2.findContours(process(img), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cnt = max(contours, key=cv2.contourArea)
    cv2.drawContours(img, [cnt], -1, (0, 255, 0), 30)
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 30)

img = cv2.imread("image.jpeg")
get_contours(img)
cv2.imshow("Result", img)
cv2.waitKey(0)
Input images:
Output images:
The green outlines are the contours of the objects, and the red outlines are the bounding boxes of the objects.

Related

OCR. Character segmentation stage

I am trying to implement one of the stages of an OCR system: the character segmentation stage. The code is shown below. The code is quite simple:
read the image
convert it to grayscale
binarize the image
apply a dilation operation
find the contours
Each found contour is assumed to be a character.
The results of the algorithm are not satisfactory. Sometimes the characters are segmented well; sometimes only parts of characters are picked up, and sometimes several characters are merged into one box. Please help with the code, I really want it to segment the characters correctly.
UPDATE 1. I am trying to implement a character segmentation system for different fonts. It turned out that there are no universal parameters for the erosion and dilation operations across different fonts.
Test image:
Result of character selection 1 (Small parts of characters):
Result of character selection 2 (Big parts of characters):
Full result (All parts of characters):
import cv2
import numpy as np

def letters_extract(image_file):
    img = cv2.imread(image_file)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    img_dilate = cv2.dilate(thresh, np.ones((1, 1), np.uint8), iterations=1)
    # img_erode = cv2.erode(img_dilate, np.ones((3, 3), np.uint8), iterations=1)
    # Get contours
    contours, hierarchy = cv2.findContours(img_dilate, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    letters = []
    for idx, contour in enumerate(contours):
        (x, y, w, h) = cv2.boundingRect(contour)
        if hierarchy[0][idx][3] == 0:
            letter_crop = gray[y:y + h, x:x + w]
            letters.append(letter_crop)
            cv2.imwrite(r'D:\projects\proj\test\tnr\{}.png'.format(idx), letter_crop)
    return letters

letters_extract(r'D:\projects\proj\test\test_tnr.png')
I ran your code (slightly modified for debugging) and it looks pretty good (I've only changed the dilation mask):
import cv2
import numpy as np
import matplotlib.pyplot as plt

def letters_extract(image_file):
    img = cv2.imread(image_file)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    plt.figure(figsize=(20, 20))
    plt.imshow(thresh)
    plt.show()
    # note: erosion here, despite the variable name
    img_dilate = cv2.erode(thresh, np.ones((2, 1), np.uint8))
    plt.figure(figsize=(20, 20))
    plt.imshow(img_dilate)
    plt.show()
    contours, hierarchy = cv2.findContours(img_dilate, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    im_with_aabb = img.copy()
    for idx, contour in enumerate(contours):
        (x, y, w, h) = cv2.boundingRect(contour)
        if hierarchy[0][idx][3] == 0:
            color = (255, 0, 0)
            thickness = 1
            im_with_aabb = cv2.rectangle(im_with_aabb, (x, y), (x + w, y + h), color, thickness)
    return im_with_aabb

im_with_aabb = letters_extract('test.png')
plt.figure(figsize=(20, 20))
plt.imshow(im_with_aabb)
plt.show()
But there are still problems with several chars. If your input images look this good (no high variability between the same char in different places), I can suggest template matching with each char as a template.
If the data has high variability, maybe you should use a pretrained NN like Tesseract.
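For illustration, a minimal template-matching sketch; template.png is a hypothetical crop of a single character, and the 0.8 score threshold is an assumption to tune per font:
import cv2
import numpy as np
img = cv2.imread('test.png', 0)
template = cv2.imread('template.png', 0)  # hypothetical crop of one character
w, h = template.shape[::-1]
res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
loc = np.where(res >= 0.8)  # assumed match threshold
for pt in zip(*loc[::-1]):  # (x, y) of each match above the threshold
    cv2.rectangle(img, pt, (pt[0] + w, pt[1] + h), 128, 1)
cv2.imwrite('matches.png', img)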
If your data is always as clean as the image you have shared, you do not have to do dilation or erosion. I set the threshold to 190 and inverted the gray image with the cv2.THRESH_BINARY_INV parameter so that contours are found around the letters. Finally, I changed the contour retrieval mode to find only external contours with the cv2.RETR_EXTERNAL parameter.
import cv2
import numpy as np

def letters_extract(image_file):
    img = cv2.imread(image_file)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 190, 255, cv2.THRESH_BINARY_INV)
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    letters = []
    for idx, contour in enumerate(contours):
        (x, y, w, h) = cv2.boundingRect(contour)
        letter_crop = gray[y:y + h, x:x + w]
        letters.append(letter_crop)
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255))
    cv2.namedWindow("win", cv2.WINDOW_FREERATIO)
    cv2.imshow("win", img)
    cv2.waitKey()
    return letters

letters_extract('text.png')
Final image is as follows:

How to detect paragraphs in a text document image for a non-consistent text structure in Python OpenCV

I am trying to identify paragraphs of text in a .pdf document by first converting it into an image and then using OpenCV. But I am getting bounding boxes on lines of text instead of paragraphs. How can I set some threshold or some other limit to get paragraphs instead of lines?
Here is the sample input image:
Here is the output I am getting for the above sample:
I am trying to get a single bounding box on the paragraph in the middle. I am using this code.
import cv2
import numpy as np

large = cv2.imread('sample image.png')
rgb = cv2.pyrDown(large)
small = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
kernel = np.ones((5, 5), np.uint8)
grad = cv2.morphologyEx(small, cv2.MORPH_GRADIENT, kernel)
_, bw = cv2.threshold(grad, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 1))
connected = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)
# using RETR_EXTERNAL instead of RETR_CCOMP
contours, hierarchy = cv2.findContours(connected.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# For OpenCV 3.x, comment the previous line and uncomment the following line
# _, contours, hierarchy = cv2.findContours(connected.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
mask = np.zeros(bw.shape, dtype=np.uint8)
for idx in range(len(contours)):
    x, y, w, h = cv2.boundingRect(contours[idx])
    mask[y:y+h, x:x+w] = 0
    cv2.drawContours(mask, contours, idx, (255, 255, 255), -1)
    r = float(cv2.countNonZero(mask[y:y+h, x:x+w])) / (w * h)
    if r > 0.45 and w > 8 and h > 8:
        cv2.rectangle(rgb, (x, y), (x+w-1, y+h-1), (0, 255, 0), 2)
cv2.imshow('rects', rgb)
cv2.waitKey(0)
This is a classic situation for dilation. Whenever you want to connect multiple items together, you can dilate them to join adjacent contours into a single contour. Here's a simple approach:
Obtain binary image. Load the image, convert to grayscale, Gaussian blur, then Otsu's threshold to obtain a binary image.
Connect adjacent words together. We create a rectangular kernel and dilate to merge individual contours together.
Detect paragraphs. From here we find contours, obtain the bounding rectangle coordinates, and highlight the rectangular contours.
Otsu's threshold to obtain a binary image
Here's where the magic happens. We can assume that a paragraph is a section of words that are close together; to achieve this, we dilate to connect adjacent words.
Result
import cv2
import numpy as np

# Load image, grayscale, Gaussian blur, Otsu's threshold
image = cv2.imread('1.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (7,7), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

# Create rectangular structuring element and dilate
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
dilate = cv2.dilate(thresh, kernel, iterations=4)

# Find contours and draw rectangle
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    x,y,w,h = cv2.boundingRect(c)
    cv2.rectangle(image, (x, y), (x + w, y + h), (36,255,12), 2)

cv2.imshow('thresh', thresh)
cv2.imshow('dilate', dilate)
cv2.imshow('image', image)
cv2.waitKey()

How to extract multiple objects from an image using Python OpenCV?

I am trying to extract objects from an image by color using OpenCV. I have tried inverse thresholding and grayscale combined with cv2.findContours(), but I am unable to use it recursively. Furthermore, I can't figure out how to "cut out" the match from the original image and save it to a separate file.
EDIT
import cv2
import numpy as np

# load the images
empty = cv2.imread("empty.jpg")
full = cv2.imread("test.jpg")

# save a color copy for visualization
full_c = full.copy()

# convert to grayscale
empty_g = cv2.cvtColor(empty, cv2.COLOR_BGR2GRAY)
full_g = cv2.cvtColor(full, cv2.COLOR_BGR2GRAY)

empty_g = cv2.GaussianBlur(empty_g, (51, 51), 0)
full_g = cv2.GaussianBlur(full_g, (51, 51), 0)

diff = full_g - empty_g

# thresholding
diff_th = cv2.adaptiveThreshold(full_g, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY, 11, 2)

# combine the difference image and the inverse threshold
zone = cv2.bitwise_and(diff, diff_th, None)

# threshold to get the mask instead of gray pixels
_, bag = cv2.threshold(zone, 100, 255, 0)

# dilate to account for the blurring in the beginning
kernel = np.ones((15, 15), np.uint8)
bag = cv2.dilate(bag, kernel, iterations=1)

# find contours, sort and keep the biggest ones
contours, _ = cv2.findContours(bag, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:3]

i = 0
while i < len(contours):
    x, y, width, height = cv2.boundingRect(contours[i])
    roi = full_c[y:y+height, x:x+width]
    cv2.imwrite("piece" + str(i) + ".png", roi)
    i += 1
Here, empty is just a white image of size 1500 * 1000 like the one above, and test is the image shown above.
This is what I came up with; the only downside is that I now get a third image, showing a shadow zone, instead of only the two expected...
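One thing worth checking: diff = full_g - empty_g is a plain NumPy subtraction on uint8 arrays, so it wraps around wherever full_g < empty_g, which can produce bright artifacts like that shadow zone. A small sketch of a safer difference (though a real shadow will still show up):
diff = cv2.absdiff(full_g, empty_g)  # saturating absolute difference, no uint8 wraparound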
Here's a simple approach:
Obtain binary image. Load the image, grayscale, Gaussian blur, Otsu's threshold, then dilate to obtain a binary black/white image.
Extract ROI. Find contours, obtain bounding boxes, extract ROI using Numpy slicing, and save each ROI
Binary image (Otsu's thresholding + dilation)
Detected ROIs highlighted in green
To extract each ROI, you can find the bounding box coordinates using cv2.boundingRect(), crop the desired region, then save the image
x,y,w,h = cv2.boundingRect(c)
ROI = original[y:y+h, x:x+w]
First object
Second object
import cv2

# Load image, grayscale, Gaussian blur, Otsu's threshold, dilate
image = cv2.imread('1.jpg')
original = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7,7))
dilate = cv2.dilate(thresh, kernel, iterations=1)

# Find contours, obtain bounding box coordinates, and extract ROI
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
image_number = 0
for c in cnts:
    x,y,w,h = cv2.boundingRect(c)
    cv2.rectangle(image, (x, y), (x + w, y + h), (36,255,12), 2)
    ROI = original[y:y+h, x:x+w]
    cv2.imwrite("ROI_{}.png".format(image_number), ROI)
    image_number += 1

cv2.imshow('image', image)
cv2.imshow('thresh', thresh)
cv2.imshow('dilate', dilate)
cv2.waitKey()

Scale contours up/grow outward

I have a small script (GitHub) (based on this answer) to detect objects on a white background. The script is working fine and detects the objects. For example, this image:
becomes this:
and I crop the boundingRect (red one).
I'll be doing further operations on this image. For example, instead of a rectangle crop, I will be cropping just the contour. (Anyway, these are further problems to be faced.)
What I want to do now is scale up/grow the contour (the green one). I'm not sure if scale and grow mean the same thing in this context, because when I think of scale, there's usually a single point of origin/anchor point; with grow, it's relative to the edges. I want to have something like this (created in Photoshop):
So after I detect the object/find contours, I want to grow it by some value/ratio, so that I have some space/pixels to modify which won't affect the object. How can I do that?
Mentioned script:
import sys
from pathlib import Path

import cv2
import numpy
from PIL import Image, ImageCms

# drop an image on this script file
img_path = Path(sys.argv[1])

# open image with Pillow and convert it to RGB if the image is CMYK
img = Image.open(str(img_path))
if img.mode == "CMYK":
    img = ImageCms.profileToProfile(img, "Color Profiles\\USWebCoatedSWOP.icc", "Color Profiles\\sRGB_Color_Space_Profile.icm", outputMode="RGB")
img = cv2.cvtColor(numpy.array(img), cv2.COLOR_RGB2BGR)

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
threshed = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY_INV)[1]

kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11,11))
morphed = cv2.morphologyEx(threshed, cv2.MORPH_CLOSE, kernel)

contours = cv2.findContours(morphed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
contour = sorted(contours, key=cv2.contourArea)[-1]

x, y, w, h = cv2.boundingRect(contour)
final = cv2.drawContours(img, contours, -1, (0,255,0), 2)
cv2.rectangle(final, (x,y), (x+w,y+h), (0,0,255), 2)

cv2.imshow("final", final)
cv2.waitKey(0)
cv2.destroyAllWindows()
Images posted here are scaled down to keep the question short. Original images and the script(s) can be found on the mentioned (first paragraph) GitHub page.
Thanks to HansHirse's suggestion (using morphological dilation), I've managed to make it work.
import sys
from pathlib import Path

import cv2
import numpy
from PIL import Image, ImageCms

img_path = Path(sys.argv[1])

def cmyk_to_rgb(cmyk_img):
    img = Image.open(cmyk_img)
    if img.mode == "CMYK":
        img = ImageCms.profileToProfile(img, "Color Profiles\\USWebCoatedSWOP.icc", "Color Profiles\\sRGB_Color_Space_Profile.icm", outputMode="RGB")
    return cv2.cvtColor(numpy.array(img), cv2.COLOR_RGB2BGR)

def cv_threshold(img, thresh=128, maxval=255, type=cv2.THRESH_BINARY):
    if len(img.shape) == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    threshed = cv2.threshold(img, thresh, maxval, type)[1]
    return threshed

def find_contours(img, to_gray=None):
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11,11))
    morphed = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
    contours = cv2.findContours(morphed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return contours[-2]

def mask_from_contours(ref_img, contours):
    mask = numpy.zeros(ref_img.shape, numpy.uint8)
    mask = cv2.drawContours(mask, contours, -1, (255,255,255), -1)
    return cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)

def dilate_mask(mask, kernel_size=10):
    kernel = numpy.ones((kernel_size, kernel_size), numpy.uint8)
    dilated = cv2.dilate(mask, kernel, iterations=1)
    return dilated

def draw_contours(src_img, contours):
    canvas = cv2.drawContours(src_img.copy(), contours, -1, (0,255,0), 2)
    x, y, w, h = cv2.boundingRect(contours[-1])
    cv2.rectangle(canvas, (x,y), (x+w,y+h), (0,0,255), 2)
    return canvas

orig_img = cmyk_to_rgb(str(img_path))
orig_threshed = cv_threshold(orig_img, 240, type=cv2.THRESH_BINARY_INV)
orig_contours = find_contours(orig_threshed)
orig_mask = mask_from_contours(orig_img, orig_contours)
orig_output = draw_contours(orig_img, orig_contours)

dilated_mask = dilate_mask(orig_mask, 50)
dilated_contours = find_contours(dilated_mask)
dilated_output = draw_contours(orig_img, dilated_contours)

cv2.imshow("orig_output", orig_output)
cv2.imshow("dilated_output", dilated_output)
cv2.waitKey(0)
cv2.destroyAllWindows()
I believe the code is self-explanatory enough. An example output:
Full script (again) can be found at show_dilated_contours.py
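As an aside, if you want a literal scale (around a single anchor point) rather than a grow, you can also scale the contour points around their centroid. A minimal sketch (the 1.1 factor is an assumption); note that, unlike dilation, the added margin is not a uniform distance from the edges for non-circular shapes:
def scale_contour(contour, scale=1.1):
    # centroid from image moments
    moments = cv2.moments(contour)
    cx = moments["m10"] / moments["m00"]
    cy = moments["m01"] / moments["m00"]
    # shift to the origin, scale, shift back
    scaled = (contour - [cx, cy]) * scale + [cx, cy]
    return scaled.astype(numpy.int32)
For example, scale_contour(orig_contours[-1], 1.1) would grow the largest contour from above by 10%.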
Update
As a bonus, I later wanted to smooth the contours. I came across this blog post in which the author talks about how to smooth the edges of a shape (in Photoshop). The idea is really simple and can also be applied in OpenCV to smooth the contours. The steps are:
Create a mask from contours (or from the shape)
Blur the mask
Threshold the blurred mask (now, we have a smoother mask than the mask in step 1)
Find the contours again on the blurred + thresholded image. Since the mask/shape is smoother, we'll get smoother contours.
Example code and output:
# ... continuing previous code
# pass 1
smooth_mask_blurred = cv2.GaussianBlur(dilated_mask, (21,21), 0)
smooth_mask_threshed1 = cv_threshold(smooth_mask_blurred)
# pass 2
smooth_mask_blurred = cv2.GaussianBlur(smooth_mask_threshed1, (21,21), 0)
smooth_mask_threshed2 = cv_threshold(smooth_mask_blurred)
# find contours from smoothened mask
smooth_mask_contours = find_contours(smooth_mask_threshed2)
# draw the contours on the original image
smooth_mask_output = draw_contours(orig_img, smooth_mask_contours)
cv2.imshow("dilated_output", dilated_output)
cv2.imshow("smooth_mask_output", smooth_mask_output)
Full code at show_smooth_contours.py.

Detecting vertical lines using Hough transforms in opencv

I'm trying to remove the square boxes (vertical and horizontal lines) using the Hough transform in OpenCV (Python). The problem is that none of the vertical lines are being detected. I've tried looking through contours and hierarchy, but there are too many contours in this image and I'm confused about how to use them.
After looking through related posts, I've played with the threshold and rho parameters but that didn't help.
I've attached the code for more details. Why does the Hough transform not find the vertical lines in the image? Any suggestions for solving this task are welcome. Thanks.
Input Image :
Hough transformed Image:
Drawing contours:
import cv2
import numpy as np
import pdb

img = cv2.imread('/home/user/Downloads/cropped/robust_blaze_cpp-300-0000046A-02-HW.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 140, 255, 0)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, contours, -1, (0,0,255), 2)

edges = cv2.Canny(gray, 50, 150, apertureSize=3)
minLineLength = 5
maxLineGap = 100
lines = cv2.HoughLinesP(edges, rho=1, theta=np.pi/180, threshold=100, minLineLength=minLineLength, maxLineGap=maxLineGap)
for x1, y1, x2, y2 in lines[0]:
    cv2.line(img, (x1,y1), (x2,y2), (0,255,0), 2)
cv2.imwrite('probHough.jpg', img)
To be honest, rather than looking for the lines, I'd instead look for the white boxes.
Preparation
import cv2
import numpy as np
Load the image
img = cv2.imread("digitbox.jpg", 0)
Binarize it, so that both the boxes and the digits are black, rest is white
_, thresh = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY)
cv2.imwrite('digitbox_step1.png', thresh)
Find contours. In this example image, it's fine to just look for external contours.
_, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
Process the contours, filtering out any with too small an area. Find the convex hull of each contour and draw it into a mask covering all areas outside the contours. Store the bounding boxes of each found contour, sorted by x coordinate.
mask = np.ones_like(img) * 255
boxes = []
for contour in contours:
    if cv2.contourArea(contour) > 100:
        hull = cv2.convexHull(contour)
        cv2.drawContours(mask, [hull], -1, 0, -1)
        x,y,w,h = cv2.boundingRect(contour)
        boxes.append((x,y,w,h))
boxes = sorted(boxes, key=lambda box: box[0])
cv2.imwrite('digitbox_step2.png', mask)
Dilate the mask (to shrink the black parts), to clip off any remains of the gray frames.
mask = cv2.dilate(mask, np.ones((5,5),np.uint8))
cv2.imwrite('digitbox_step3.png', mask)
Fill all the masked pixels with white, to erase the frames.
img[mask != 0] = 255
cv2.imwrite('digitbox_step4.png', img)
Process the digits as you desire -- I'll just draw the bounding boxes.
result = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
for n, box in enumerate(boxes):
    x,y,w,h = box
    cv2.rectangle(result, (x,y), (x+w,y+h), (255,0,0), 2)
    cv2.putText(result, str(n), (x+5,y+17), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255,0,0), 2, cv2.LINE_AA)
cv2.imwrite('digitbox_step5.png', result)
The whole script in one piece:
import cv2
import numpy as np

img = cv2.imread("digitbox.jpg", 0)
_, thresh = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY)
_, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

mask = np.ones_like(img) * 255
boxes = []
for contour in contours:
    if cv2.contourArea(contour) > 100:
        hull = cv2.convexHull(contour)
        cv2.drawContours(mask, [hull], -1, 0, -1)
        x,y,w,h = cv2.boundingRect(contour)
        boxes.append((x,y,w,h))
boxes = sorted(boxes, key=lambda box: box[0])

mask = cv2.dilate(mask, np.ones((5,5), np.uint8))
img[mask != 0] = 255

result = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
for n, box in enumerate(boxes):
    x,y,w,h = box
    cv2.rectangle(result, (x,y), (x+w,y+h), (255,0,0), 2)
    cv2.putText(result, str(n), (x+5,y+17), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255,0,0), 2, cv2.LINE_AA)
cv2.imwrite('digitbox_result.png', result)
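To address the original question more directly: depending on your OpenCV version, lines[0] may hold only the first detected line, so the loop draws a single segment; iterate over lines itself to draw them all. And if you do want to isolate the lines rather than the boxes, a hedged sketch using morphological opening with long thin kernels (the kernel lengths, 25 px here, are assumptions to tune):
import cv2
import numpy as np
img = cv2.imread('digitbox.jpg', 0)
_, thresh = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY_INV)
# a tall thin kernel keeps only vertical strokes, a wide flat one only horizontal strokes
vertical = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_RECT, (1, 25)))
horizontal = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_RECT, (25, 1)))
# erase both sets of lines from the binarized image
cleaned = cv2.subtract(thresh, cv2.bitwise_or(vertical, horizontal))
cv2.imwrite('digitbox_lines_removed.png', cleaned)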
