I'm using the code below for segmenting the articles from an image of a newspaper.
import cv2
import numpy as np
import uuid

def segmenter(image_received):
    # Process 1: line detection
    img = image_received
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to a grayscale image
    edges = cv2.Canny(gray, 75, 150)  # detect edges
    lines = cv2.HoughLinesP(edges, 0.017, np.pi / 180, 60, minLineLength=100, maxLineGap=0.1)  # Hough lines generation

    # draw the Hough lines
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(img, (x1, y1), (x2, y2), (0, 0, 128), 12)  # Hough lines drawn in BGR color (0, 0, 128)

    # draw a border around the image
    bold = cv2.copyMakeBorder(
        img,  # image source
        5,    # top width
        5,    # bottom width
        5,    # left width
        5,    # right width
        cv2.BORDER_CONSTANT,
        value=(0, 0, 128)  # border color, same (0, 0, 128) BGR value
    )
    image = bold
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

    horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
    detected_lines = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, horizontal_kernel, iterations=2)
    cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        if len(c) >= 10:
            cv2.drawContours(image, [c], 0, (0, 17, 255), 1)

    vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
    detected_lines = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, vertical_kernel, iterations=2)
    cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        if len(c) >= 10:
            cv2.drawContours(image, [c], 0, (0, 17, 255), 1)

    cv2.imwrite(f'tmp/{uuid.uuid4()}.jpg', image)
For instance, this is the input image:
and this is the output image:
There are three problems:
1. The output rectangles aren't complete in all cases.
2. Images inside articles are also segmented as part of the articles, but what I need is to segment only the newspaper text and crop everything else out. Something like this:
3. Consider the following image: the article indicated by the borders is not rectangular and is much more complicated.
How can I achieve the correct borders using Python OpenCV or other image processing libraries?
(This question has an answer here for MATLAB, but I need Python code.)
Here is my pipeline. I think it can be optimized.
Initialization
%matplotlib inline
import numpy as np
import cv2
from matplotlib import pyplot as plt
Load image
image_file_name = 'paper.jpg'
image = cv2.imread(image_file_name)
# grayscale conversion
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
The first important step is to remove the lines, so I search for them.
grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
abs_grad_x = cv2.convertScaleAbs(grad_x)
abs_grad_y = cv2.convertScaleAbs(grad_y)
# threshold
thresh_x = cv2.threshold(abs_grad_x, 0, 255, cv2.THRESH_OTSU)[1]
thresh_y = cv2.threshold(abs_grad_y, 0, 255, cv2.THRESH_OTSU)[1]
# bluring
kernel_size = 3
blur_thresh_x = cv2.GaussianBlur(thresh_x,(kernel_size, kernel_size),0)
blur_thresh_y = cv2.GaussianBlur(thresh_y,(kernel_size, kernel_size),0)
# Run Hough on edge detected image
rho = 1 # distance resolution in pixels of the Hough grid
theta = np.pi / 180 # angular resolution in radians of the Hough grid
threshold = 15 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 200 # minimum number of pixels making up a line
max_line_gap = 1 # maximum gap in pixels between connectable line segments
line_image = np.copy(gray) * 0 # creating a blank to draw lines on
# Vertical lines
vertical_lines = cv2.HoughLinesP(blur_thresh_x, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)
if vertical_lines is not None:
    for line in vertical_lines:
        for x1, y1, x2, y2 in line:
            # here it's possible to add a selection of only vertical lines
            if np.abs(y1 - y2) > 0.1 * np.abs(x1 - x2):
                cv2.line(line_image, (x1, y1), (x2, y2), 255, 5)
# Horizontal lines
horizontal_lines = cv2.HoughLinesP(blur_thresh_y, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)
if horizontal_lines is not None:
    for line in horizontal_lines:
        for x1, y1, x2, y2 in line:
            # here it's possible to add a selection of only horizontal lines
            if np.abs(x1 - x2) > 0.1 * np.abs(y1 - y2):
                cv2.line(line_image, (x1, y1), (x2, y2), 255, 5)
Then I remove the lines from the thresholded image:
# threshold
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# remove lines
clean_thresh = cv2.subtract(thresh, line_image)
Then I search for the phrases:
# search the phrases
dilatation_type = cv2.MORPH_RECT
horizontal_dilatation = 20 #This is the gap. 20 for the first image, 10 for the second image
vertical_dilatation = 1
element = cv2.getStructuringElement(dilatation_type, (2*horizontal_dilatation + 1, 2*vertical_dilatation+1), (horizontal_dilatation, vertical_dilatation))
dilatation_thresh = cv2.dilate(clean_thresh, element)
# Fill
filled_tresh = dilatation_thresh.copy()
contours, hierarchy = cv2.findContours(dilatation_thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for cnt in contours:
    cv2.drawContours(filled_tresh, [cnt], -1, 255, cv2.FILLED)
Now I detect the bounding boxes
# Draw bounding boxes
bounding_box1 = filled_tresh.copy()
contours, hierarchy = cv2.findContours(bounding_box1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(bounding_box1, (x, y), (x + w, y + h), 255, cv2.FILLED)
# REPEAT Draw bounding boxes and Find the mean text width
mean_bb_width = 0 # mean bounding box width
bounding_box2 = bounding_box1.copy()
contours, hierarchy = cv2.findContours(bounding_box2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
num_cnt=0
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(bounding_box2, (x, y), (x + w, y + h), 255, cv2.FILLED)
    mean_bb_width = mean_bb_width + w
    num_cnt = num_cnt + 1
mean_bb_width = mean_bb_width / num_cnt
Now I separate the titles from the text
# define as a title anything whose width is bigger than 1.5 * mean_bb_width
min_title_width = 1.5 * mean_bb_width
raw_title = np.copy(gray) * 0
raw_text = np.copy(gray) * 0
# separate titles from phrases
contours, hierarchy = cv2.findContours(bounding_box2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    if w >= min_title_width:
        cv2.drawContours(raw_title, [cnt], -1, 255, cv2.FILLED)
    else:
        cv2.drawContours(raw_text, [cnt], -1, 255, cv2.FILLED)
And then the final processing:
image_out = image.copy()
# Closing parameters
horizontal_closing = 1
vertical_closing = 20
kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(horizontal_closing,vertical_closing))
# Processing titles
# Closing
closing_title = cv2.morphologyEx(raw_title, cv2.MORPH_CLOSE, kernel)
# Find contours
contours, hierarchy = cv2.findContours(closing_title, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Draw bounding boxes
bounding_title = closing_title.copy()
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(image_out, (x, y), (x + w, y + h), (255, 0, 0), 2)
# Processing text
# Closing
closing_text = cv2.morphologyEx(raw_text, cv2.MORPH_CLOSE, kernel)
# Find contours
contours, hierarchy = cv2.findContours(closing_text , cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Draw bounding boxes
bounding_text = closing_text.copy()
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(image_out, (x, y), (x + w, y + h), (0, 255, 0), 2)
The result is:
Changing the parameter horizontal_dilatation from 20 to 10, I obtain the following result for the second image (where I removed the red border that you added):
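One possible optimization of that hand-tuned gap (20 for the first image, 10 for the second) would be to estimate horizontal_dilatation from the data itself. The following is only a rough sketch of that idea, not part of the pipeline above; it assumes clean_thresh from the earlier step and uses the median connected-component width as a proxy for character size, which is my assumption.
# Rough sketch (assumption): derive the dilation gap from the median width of
# the connected components in clean_thresh instead of hard-coding 20 or 10.
def estimate_horizontal_dilatation(clean_thresh, factor=1.5, default=20):
    cnts = cv2.findContours(clean_thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    widths = [cv2.boundingRect(c)[2] for c in cnts]
    if not widths:
        return default  # nothing found, fall back to the hand-tuned value
    return max(1, int(factor * np.median(widths)))

horizontal_dilatation = estimate_horizontal_dilatation(clean_thresh)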
I have an image that contains a table; both the table and the image can come in many sizes. The table can be fully gridded (with only a few blank spots that need to be filled), it can have only vertical grid lines, or it can have only horizontal grid lines.
I've searched the web for a long time and found no solution that works for me.
I found the following questions, which seem suitable:
Python & OpenCV: How to add lines to the gridless table
Draw a line on a gridless image Python Opencv
How to repair incomplete grid cells and fix missing sections in image
Python & OpenCV: How to crop half-formed bounding boxes
My code is taken from the answers to the above questions, and the "best" result I got from them is that it drew two lines: one at the rightmost part and one at the leftmost part.
I'm fairly new to OpenCV and image processing, so I am not sure how to fix the code from those questions to suit my needs, or how to accomplish this exactly. I would appreciate any help you can provide.
Example of an image table:
Update:
To remove the horizontal lines I use exactly the code you can find here, but the result I get on the example image is this:
As you can see, it removed most of them but not all of them, and when I try to apply the same approach to the vertical lines (I tried the same code with rotation, or with the kernel flipped) it does not work at all.
I also tried this code, but it didn't work either.
Update 2:
I was able to remove the lines using this code:
import cv2
import numpy as np

def removeLines(result, axis) -> np.ndarray:
    img = result.copy()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    if axis == "horizontal":
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 25))
    elif axis == "vertical":
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 1))
    else:
        raise ValueError("Axis must be either 'horizontal' or 'vertical'")
    detected_lines = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
    cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    result = img.copy()
    for c in cnts:
        cv2.drawContours(result, [c], -1, (255, 255, 255), 2)
    return result

gridless = removeLines(removeLines(cv2.imread(image_path), 'horizontal'), 'vertical')
Result:
Problem:
After I remove lines, when I try to draw the vertical lines using this code:
# read image
img = old_image.copy()  # cv2.imread(image_path1)
hh, ww = img.shape[:2]
# convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# average gray image to one row
column = cv2.resize(gray, (ww, 1), interpolation=cv2.INTER_AREA)
# threshold on white
thresh = cv2.threshold(column, 248, 255, cv2.THRESH_BINARY)[1]
# get contours
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
# draw vertical lines
for cntr in contours:
    x, y, w, h = cv2.boundingRect(cntr)
    xcenter = x + w // 2
    cv2.line(original_image, (xcenter, 0), (xcenter, hh - 1), (0, 0, 0), 1)
I get this result:
Update 3:
Even when I try thresh = cv2.threshold(column, 254, 255, cv2.THRESH_BINARY)[1] (I tried lowering it 1 by 1 down to 245, for both the threshold value and the max value; each time I get a different or similar result, but always too many lines or too few lines), I get the following:
Input:
Output:
It's putting too many lines instead of just one line per column gap (one way to merge them is sketched after the code below).
Code:
# read image
img = old_image.copy()  # cv2.imread(image_path1)
hh, ww = img.shape[:2]
# convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# average gray image to one row
column = cv2.resize(gray, (ww, 1), interpolation=cv2.INTER_AREA)
# threshold on white
thresh = cv2.threshold(column, 254, 255, cv2.THRESH_BINARY)[1]
# get contours
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
for cntr in contours:
    x, y, w, h = cv2.boundingRect(cntr)
    xcenter = x + w // 2
    cv2.line(original_image, (xcenter, 0), (xcenter, hh - 1), (0, 0, 0), 1)
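Since several contours can cluster around the same column gap, one way to end up with a single line per gap is to merge x-centers that fall close together before drawing. This is only a sketch of that idea, reusing the contours, original_image and hh variables from the code above; the min_gap value is an assumption to tune.
# Sketch (assumption): x-centers closer than min_gap pixels belong to the same
# column gap, so draw one line per cluster instead of one line per contour.
min_gap = 10  # hypothetical tuning value
xcenters = []
for cntr in contours:
    x, y, w, h = cv2.boundingRect(cntr)
    xcenters.append(x + w // 2)
xcenters.sort()

clusters = []
for xc in xcenters:
    if clusters and xc - clusters[-1][-1] < min_gap:
        clusters[-1].append(xc)  # close to the previous center: same gap
    else:
        clusters.append([xc])    # start a new cluster
for cluster in clusters:
    x_line = sum(cluster) // len(cluster)  # one representative position per gap
    cv2.line(original_image, (x_line, 0), (x_line, hh - 1), (0, 0, 0), 1)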
Here is one way to get the lines in Python/OpenCV. Average the image down to one column. Then threshold and get the contours. Then get the bounding boxes, find their vertical centers, and draw horizontal lines at those places.
If you do not want the extra lines, crop your image first to get the inside of the table.
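The answer below does not include that crop step, so here is a minimal sketch of what the pre-crop could look like; it assumes the outer table border is the largest dark structure in the thresholded image, and the margin value is a guess.
import cv2

# Sketch (assumption): find the outer table border as the largest contour of the
# dark structures, then crop a few pixels inside it before averaging.
img = cv2.imread("table4.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
x, y, w, h = cv2.boundingRect(max(cnts, key=cv2.contourArea))
margin = 5  # hypothetical margin to step inside the border lines
cropped = img[y + margin:y + h - margin, x + margin:x + w - margin]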
Input:
import cv2
import numpy as np
# read image
img = cv2.imread("table4.png")
hh, ww = img.shape[:2]
# convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# average gray image to one column
column = cv2.resize(gray, (1,hh), interpolation = cv2.INTER_AREA)
# threshold on white
thresh = cv2.threshold(column, 248, 255, cv2.THRESH_BINARY)[1]
# get contours
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
# loop over contours and get bounding boxes and ycenter and draw horizontal line at ycenter
result = img.copy()
for cntr in contours:
    x, y, w, h = cv2.boundingRect(cntr)
    ycenter = y + h // 2
    cv2.line(result, (0, ycenter), (ww - 1, ycenter), (0, 0, 255), 1)
# write results
cv2.imwrite("table4_lines3.png", result)
# display results
cv2.imshow("RESULT", result)
cv2.waitKey(0)
Result:
You wrote that you tried to remove the lines using the code, but it did not work.
It works fine for me in Python/OpenCV.
Read the input
Convert to grayscale
Threshold to show the horizontal lines
Apply morphology open with a horizontal kernel to isolate the horizontal lines
Get their contours
Draw the contours on a copy of the input as white to cover over the black horizontal lines
Save the results
Input:
import cv2
import numpy as np
# read the input
img = cv2.imread('table4.png')
# convert to grayscale
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# threshold
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# do morphology to detect lines
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25,1))
detected_lines = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, horizontal_kernel, iterations=2)
# get contours
cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
# draw contours as white on copy of input
result = img.copy()
for c in cnts:
    cv2.drawContours(result, [c], -1, (255, 255, 255), 2)
# save results
cv2.imwrite('table4_horizontal_lines_threshold.png', thresh)
cv2.imwrite('table4_horizontal_lines_detected.png', detected_lines)
cv2.imwrite('table4_horizontal_lines_removed.png', result)
# show results
cv2.imshow('thresh', thresh)
cv2.imshow('morphology', detected_lines)
cv2.imshow('result', result)
cv2.waitKey(0)
Threshold Image:
Morphology Detected Lines Image:
Result:
Input:
The output should be:
How can I do this using OpenCV or any other method?
I tried this
img = cv2.imread('test2.JPG')
imgray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 150, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
print("Number of contours = " + str(len(contours)))
print(contours[0])
# cv2.drawContours(img, contours, -1, (0, 255, 0), 1)
# cv2.drawContours(imgray, contours, -1, (0, 255, 0), 3)
for cnt in contours:
    area = cv2.contourArea(cnt)
    if area > 20:
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
        x, y, w, h = cv2.boundingRect(approx)
        cv2.rectangle(img, (x, y - 3), (x + w, y + h - 3), (255, 0, 0), 1)
You have found the contours and drawn only those above a certain area. So far so good.
To capture each line as an individual entity, you need to find a way to connect the text within each line. Since the lines in the image are straight, a simple approach is to use a horizontal kernel ([1, 1, 1, 1, 1]) of a certain length and perform morphology.
Code:
img = cv2.imread('text.jpg',1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
th = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1]
Use a horizontal kernel 8 pixels long. This is the parameter you would need to change when trying other images with different font sizes and text lengths.
hor_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 1))
# array([[1, 1, 1, 1, 1, 1, 1, 1]], dtype=uint8)
dilated = cv2.dilate(th, hor_kernel, iterations=1)
Looking at the image above, I hope you have an idea of what dilation with a horizontal kernel does. From here on, we find the outermost contours above a certain area.
contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
img2 = img.copy()
for i, c in enumerate(contours):
    area = cv2.contourArea(c)
    if area > 100:
        x, y, w, h = cv2.boundingRect(c)
        img2 = cv2.rectangle(img2, (x, y), (x + w, y + h), (0, 255, 0), 1)
I am trying to extract handwritten numbers and letters from an image; to do so, I followed this Stack Overflow link. It works fine for most images where the letters are written with a marker, but when I use an image where the data is written with a pen, it fails miserably. I need some help to fix this.
Below is my code:
import cv2
import imutils
from imutils import contours
# Load image, grayscale, Otsu's threshold
image = cv2.imread('xxx/pic_crop_7.png')
image = imutils.resize(image, width=350)
img=image.copy()
# Remove border
kernel_vertical = cv2.getStructuringElement(cv2.MORPH_RECT, (1,50))
temp1 = 255 - cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel_vertical)
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (50,1))
temp2 = 255 - cv2.morphologyEx(image, cv2.MORPH_CLOSE, horizontal_kernel)
temp3 = cv2.add(temp1, temp2)
result = cv2.add(temp3, image)
# Convert to grayscale and Otsu's threshold
gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray,(5,5),0)
_,thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)
# thresh=cv2.dilate(thresh,None,iterations=1)
# Find contours and filter using contour area
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
MIN_AREA=45
digit_contours = []
for c in cnts:
    if cv2.contourArea(c) > MIN_AREA:
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(img, (x, y), (x + w, y + h), (36, 255, 12), 2)
        digit_contours.append(c)
        # cv2.imwrite("C:/Samples/Dataset/ocr/segmented" + str(i) + ".png", image[y:y+h,x:x+w])
sorted_digit_contours = contours.sort_contours(digit_contours, method='left-to-right')[0]
contour_number = 0
for c in sorted_digit_contours:
    x, y, w, h = cv2.boundingRect(c)
    ROI = image[y:y + h, x:x + w]
    cv2.imwrite('xxx/segment_{}.png'.format(contour_number), ROI)
    contour_number += 1
cv2.imshow('thresh', thresh)
cv2.imshow('img', img)
cv2.waitKey()
It correctly extracts the numbers when they are written with a marker.
Below is an example:
Original image
Correctly extracted characters
Image where it fails to read:
Original image
Incorrectly extracted characters
In this case, you only need to adjust your parameters.
Because there are no vertical lines in the background of your handwritten characters, I decided to drop the vertical-kernel step.
# Remove border
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (50,1))
temp2 = 255 - cv2.morphologyEx(image, cv2.MORPH_CLOSE, horizontal_kernel)
result = cv2.add(temp2, image)
And it works.
The solution CodingPeter has given is perfectly fine, except that it may not generalize across the two test images you have posted. So here's my take on it, which might work on both of your test images, albeit with slightly lower accuracy.
import numpy as np
import cv2
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (20, 20)
plt.rcParams["image.cmap"] = 'gray'
img_rgb = cv2.imread('path/to/your/image.jpg')
img = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
th = cv2.adaptiveThreshold(img,255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,11,2)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (15,1))
horiz = cv2.morphologyEx(th, cv2.MORPH_OPEN, kernel, iterations=3)
ctrs, _ = cv2.findContours(horiz,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for ctr in ctrs:
    x, y, w, h = cv2.boundingRect(ctr)
    if w < 20:
        cv2.drawContours(horiz, [ctr], 0, 0, cv2.FILLED)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1,10))
vert = cv2.morphologyEx(th, cv2.MORPH_OPEN, kernel, iterations=3)
ctrs, _ = cv2.findContours(vert,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for ctr in ctrs:
    x, y, w, h = cv2.boundingRect(ctr)
    if h < 25:
        cv2.drawContours(vert, [ctr], 0, 0, cv2.FILLED)
th = th - (horiz | vert)
ctrs, _ = cv2.findContours(th,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
min_ctr_area = 400 # Min character bounding box area
for ctr in ctrs:
    x, y, w, h = cv2.boundingRect(ctr)
    # Filter contours based on size
    if w * h > min_ctr_area and w < 100 and h < 100:
        cv2.rectangle(img_rgb, (x, y), (x + w, y + h), (0, 255, 0), 1)
plt.imshow(img_rgb)
Of course, some of the filtering parameters here are hard-coded; they compare the contour height and width to decide whether a contour is part of a line or a character. With different images you may have to change these values judiciously.
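If you want to make those cut-offs less brittle, one option (my assumption, not part of the answer above) is to derive them from the image size rather than hard-coding 100 and 400:
# Sketch (assumption): scale the filtering limits with the image size; the
# fractions below are guesses that would need tuning per dataset.
h_img, w_img = img.shape[:2]
max_ctr_side = int(0.1 * max(h_img, w_img))           # replaces the fixed 100-pixel side limit
min_ctr_area = max(100, int(0.0005 * h_img * w_img))  # replaces the fixed 400-pixel area limit
for ctr in ctrs:
    x, y, w, h = cv2.boundingRect(ctr)
    if w * h > min_ctr_area and w < max_ctr_side and h < max_ctr_side:
        cv2.rectangle(img_rgb, (x, y), (x + w, y + h), (0, 255, 0), 1)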
I want to get the bounding boxes from an image.
I want the coordinates of the two white boxes.
This is an example image:
I tried out
_a, _b, stats, _c = cv2.connectedComponentsWithStats(image, connectivity=8)
and then the boxes are in the stats object.
But for this image I got more than 2 boxes, which is strange.
Maybe somebody has another solution?
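Part of the reason for the extra boxes is that connectedComponentsWithStats also reports the background component plus any small noise blobs. A minimal sketch of filtering its stats by area (the file name and area threshold are assumptions, and the input is assumed to be binary already) could look like this:
import cv2

# Sketch (assumptions: file name, binary input, area threshold): keep only the
# large components, skipping label 0, which is the background.
image = cv2.imread("boxes.png", cv2.IMREAD_GRAYSCALE)
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(image, connectivity=8)
min_area = 500  # hypothetical threshold; the two white boxes should be far larger than noise
boxes = []
for label in range(1, num_labels):
    x, y, w, h, area = stats[label]
    if area >= min_area:
        boxes.append((x, y, w, h))
print(boxes)  # expected: the coordinates of the two white boxes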
import cv2
# Read image
img = cv2.imread("/Users/sb/Desktop/7n8uq.png", cv2.IMREAD_COLOR)
# Convert to grayscale
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Threshold (Produces a binary image)
_, thresh = cv2.threshold(
img_gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2.imwrite("thresh.png", thresh)
# Find contours
contours, hierarchy = cv2.findContours(
thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
print("Total number of contours: {}".format(len(contours)))
all_contours_drawn = cv2.drawContours(
img.copy(), contours, -1, (0, 255, 0), 2) # draw all contours
cv2.imwrite("all_contours.png", all_contours_drawn)
box_center_x = []
box_center_y = []
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    box_center_x.append(x + w / 2)
    box_center_y.append(y + h / 2)
print("x-coordinate of boxes: {}".format(box_center_x))
print("y-coordinate of boxes: {}".format(box_center_y))
# Draw box centers
all_box_centers_drawn = img.copy()
for i in range(len(box_center_x)):
    cv2.circle(
        all_box_centers_drawn,
        (int(box_center_x[i]), int(box_center_y[i])),
        2, (0, 0, 255), 2)
cv2.imwrite("box-centers.png", all_box_centers_drawn)
Try using findContours:
contours, hierarchy = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for i, contour in enumerate(contours[1:]):
    bbox = cv2.boundingRect(contour)
find contours example
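That snippet stops at computing bbox and assumes you already have a binary image; a more complete version of the same idea (the file name and the Otsu threshold are assumptions) might look like this:
import cv2

# Sketch (assumptions: file name, Otsu threshold): same findContours idea,
# but drawing the resulting bounding boxes.
img = cv2.imread("boxes.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours[1:]:  # skip the first contour, as in the snippet above
    x, y, w, h = cv2.boundingRect(contour)
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imwrite("boxes_with_rects.png", img)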
I'm trying to remove the square boxes (vertical and horizontal lines) using the Hough transform in OpenCV (Python). The problem is that none of the vertical lines are being detected. I've tried looking through the contours and hierarchy, but there are too many contours in this image and I'm confused about how to use them.
After looking through related posts, I've played with the threshold and rho parameters, but that didn't help.
I've attached the code for more details. Why does the Hough transform not find the vertical lines in the image? Any suggestions for solving this task are welcome. Thanks.
Input Image :
Hough transformed Image:
Drawing contours:
import cv2
import numpy as np
import pdb
img = cv2.imread('/home/user/Downloads/cropped/robust_blaze_cpp-300-0000046A-02-HW.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 140, 255, 0)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, contours, -1, (0,0,255), 2)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
minLineLength = 5
maxLineGap = 100
lines = cv2.HoughLinesP(edges,rho=1,theta=np.pi/180,threshold=100,minLineLength=minLineLength,maxLineGap=maxLineGap)
for x1, y1, x2, y2 in lines[0]:
    cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.imwrite('probHough.jpg',img)
To be honest, rather than looking for the lines, I'd instead look for the white boxes.
Preparation
import cv2
import numpy as np
Load the image
img = cv2.imread("digitbox.jpg", 0)
Binarize it, so that both the boxes and the digits are black and the rest is white.
_, thresh = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY)
cv2.imwrite('digitbox_step1.png', thresh)
Find contours. In this example image, it's fine to just look for external contours.
_, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
Process the contours, filtering out any with too small an area. Find the convex hull of each contour and create a mask of all areas outside the contours. Store the bounding box of each found contour, sorted by x coordinate.
mask = np.ones_like(img) * 255
boxes = []
for contour in contours:
    if cv2.contourArea(contour) > 100:
        hull = cv2.convexHull(contour)
        cv2.drawContours(mask, [hull], -1, 0, -1)
        x, y, w, h = cv2.boundingRect(contour)
        boxes.append((x, y, w, h))
boxes = sorted(boxes, key=lambda box: box[0])
cv2.imwrite('digitbox_step2.png', mask)
Dilate the mask (to shrink the black parts) to clip off any remains of the gray frames.
mask = cv2.dilate(mask, np.ones((5,5),np.uint8))
cv2.imwrite('digitbox_step3.png', mask)
Fill all the masked pixels with white, to erase the frames.
img[mask != 0] = 255
cv2.imwrite('digitbox_step4.png', img)
Process the digits as you desire; I'll just draw the bounding boxes.
result = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
for n, box in enumerate(boxes):
    x, y, w, h = box
    cv2.rectangle(result, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.putText(result, str(n), (x + 5, y + 17), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2, cv2.LINE_AA)
cv2.imwrite('digitbox_step5.png', result)
The whole script in one piece:
import cv2
import numpy as np
img = cv2.imread("digitbox.jpg", 0)
_, thresh = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY)
_, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
mask = np.ones_like(img) * 255
boxes = []
for contour in contours:
    if cv2.contourArea(contour) > 100:
        hull = cv2.convexHull(contour)
        cv2.drawContours(mask, [hull], -1, 0, -1)
        x, y, w, h = cv2.boundingRect(contour)
        boxes.append((x, y, w, h))
boxes = sorted(boxes, key=lambda box: box[0])
mask = cv2.dilate(mask, np.ones((5,5),np.uint8))
img[mask != 0] = 255
result = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
for n, box in enumerate(boxes):
    x, y, w, h = box
    cv2.rectangle(result, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.putText(result, str(n), (x + 5, y + 17), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2, cv2.LINE_AA)
cv2.imwrite('digitbox_result.png', result)
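If you want to go beyond drawing the numbered boxes, a small follow-up sketch (my addition, reusing the img and boxes variables from the script above; the output file names are assumptions) could crop each region for further processing:
# Sketch: crop each detected box from the cleaned grayscale image, e.g. to feed
# the digit groups to a recognizer later (output file names are assumptions).
for n, (x, y, w, h) in enumerate(boxes):
    roi = img[y:y + h, x:x + w]
    cv2.imwrite('digitbox_roi_{}.png'.format(n), roi)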