I have an image of a table (seen below), and I'm trying to extract its data in a form like this (first row of the table image):
rows[0] = [x,x, , , , ,x, ,x,x, ,x, ,x, , , , ,x, , , ,x,x,x, ,x, ,x, , , , ]
I need the number of x's as well as the number of spaces.
There will also be other table images that are similar to this one (all having x's and the same number of columns).
So far, I am able to detect all of the x's using a template image of an x, and I can somewhat detect the lines. I'm using OpenCV (cv2) for Python, and a Hough transform to detect the horizontal and vertical lines (that part works really well).
I'm trying to figure out how I can go row by row and store the information in a list.
These are the training images:
used to detect x (train1.png in the code)
used to detect lines (train2.png in the code)
used to detect lines (train3.png in the code)
This is the code I have so far:
# process images
from pytesser import *
from PIL import Image
from matplotlib import pyplot as plt
import pytesseract
import numpy as np
import cv2
import math
import os
# the table images
images = ['table1.png', 'table2.png', 'table3.png', 'table4.png', 'table5.png']
# the template images used for training
templates = ['train1.png', 'train2.png', 'train3.png']
def hough_transform(im):
img = cv2.imread('imgs/'+im)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
lines = cv2.HoughLines(edges, 1, np.pi/180, 200)
i = 1
for rho, theta in lines[0]:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
#print '%s - 0:(%s,%s) 1:(%s,%s), 2:(%s,%s)' % (i,x0,y0,x1,y1,x2,y2)
cv2.line(img, (x1,y1), (x2,y2), (0,0,255), 2)
i += 1
fn = os.path.splitext(im)[0]+'-lines'
cv2.imwrite('imgs/'+fn+'.png', img)
def match_exes(im, te):
img_rgb = cv2.imread('imgs/'+im)
img_gry = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread('imgs/'+te, 0)
w, h = template.shape[::-1]
res = cv2.matchTemplate(img_gry, template, cv2.TM_CCOEFF_NORMED)
threshold = 0.71
loc = np.where(res >= threshold)
pts = []
exes = []
blanks = []
for pt in zip(*loc[::-1]):
pts.append(pt)
cv2.rectangle(img_rgb, pt, (pt[0]+w, pt[1]+h), (0,0,255), 1)
fn = os.path.splitext(im)[0]+'-exes'
cv2.imwrite('imgs/'+fn+'.png', img_rgb)
return pts, exes, blanks
def match_horizontal_lines(im, te, te2):
img_rgb = cv2.imread('imgs/'+im)
img_gry = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread('imgs/'+te, 0)
w1, h1 = template.shape[::-1]
template2 = cv2.imread('imgs/'+te2, 0)
w2, h2 = template2.shape[::-1]
# first line template (the downward facing line)
res1 = cv2.matchTemplate(img_gry, template, cv2.TM_CCOEFF_NORMED)
threshold1 = 0.8
loc1 = np.where(res1 >= threshold1)
# second line template (the upward facing line)
res2 = cv2.matchTemplate(img_gry, template2, cv2.TM_CCOEFF_NORMED)
threshold2 = 0.8
loc2 = np.where(res2 >= threshold2)
pts = []
exes = []
blanks = []
# find first line template (the downward facing line)
for pt in zip(*loc1[::-1]):
pts.append(pt)
cv2.rectangle(img_rgb, pt, (pt[0]+w1, pt[1]+h1), (0,0,255), 1)
# find second line template (the upward facing line)
for pt in zip(*loc2[::-1]):
pts.append(pt)
        cv2.rectangle(img_rgb, pt, (pt[0]+w2, pt[1]+h2), (0,0,255), 1)  # bottom-right corner is (x+w2, y+h2)
fn = os.path.splitext(im)[0]+'-horiz'
cv2.imwrite('imgs/'+fn+'.png', img_rgb)
return pts, exes, blanks
# process
text = ''
for img in images:
print 'processing %s' % img
hough_transform(img)
pts, exes, blanks = match_exes(img, templates[0])
pts1, exes1, blanks1 = match_horizontal_lines(img, templates[1], templates[2])
text += '%s: %s x\'s & %s horizontal lines\n' % (img, len(pts), len(pts1))
# statistics file
outputFile = open('counts.txt', 'w')
outputFile.write(text)
outputFile.close()
And the output images look like this (as you can see, all the x's are detected, but not all of the lines):
x's
horizontal lines
hough transform
To reiterate: I just need the data from each table in the form shown above, with the number of x's and the number of spaces. The other table images will have the same number of columns but a different number of rows. Also, I am using Python 2.7.
Ok, I have figured it out. I used the suggestion provided by @beaker of looking between the grid lines.
Before doing that, I had to remove the duplicate lines from the Hough transform output. Then I sorted the remaining lines into two lists, vertical and horizontal. From there, I could loop through the horizontal and then the vertical lines to create a region of interest (ROI) image for each cell of the master table image. I checked each of those cells for contours and noticed that when there was an 'x' in the cell, len(contours) >= 2, so any cell with len(contours) < 2 was a blank space (I wrote several test programs to figure this out). Here is the code I used to get it working:
import cv2
import numpy as np
import os
# the list of images (tables)
images = ['table1.png', 'table2.png', 'table3.png', 'table4.png', 'table5.png']
# the list of templates (used for template matching)
templates = ['train1.png']
def remove_duplicates(lines):
    # remove duplicate lines (lines within 10 pixels of each other)
for x1, y1, x2, y2 in lines:
for index, (x3, y3, x4, y4) in enumerate(lines):
if y1 == y2 and y3 == y4:
diff = abs(y1-y3)
elif x1 == x2 and x3 == x4:
diff = abs(x1-x3)
else:
diff = 0
            if diff < 10 and diff != 0:  # compare by value, not identity
                del lines[index]
return lines
def sort_line_list(lines):
# sort lines into horizontal and vertical
vertical = []
horizontal = []
for line in lines:
if line[0] == line[2]:
vertical.append(line)
elif line[1] == line[3]:
horizontal.append(line)
vertical.sort()
horizontal.sort(key=lambda x: x[1])
return horizontal, vertical
def hough_transform_p(image, template, tableCnt):
# open and process images
img = cv2.imread('imgs/'+image)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
# probabilistic hough transform
lines = cv2.HoughLinesP(edges, 1, np.pi/180, 200, minLineLength=20, maxLineGap=999)[0].tolist()
# remove duplicates
lines = remove_duplicates(lines)
# draw image
for x1, y1, x2, y2 in lines:
cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 1)
# sort lines into vertical & horizontal lists
horizontal, vertical = sort_line_list(lines)
# go through each horizontal line (aka row)
rows = []
for i, h in enumerate(horizontal):
if i < len(horizontal)-1:
row = []
for j, v in enumerate(vertical):
if i < len(horizontal)-1 and j < len(vertical)-1:
                    # every cell before the last cell
                    # note: 'width' is actually the row height (y span) and
                    # 'height' the column width (x span); the swapped names are
                    # used consistently in the ROI slicing below
                    width = horizontal[i+1][1] - h[1]
                    height = vertical[j+1][0] - v[0]
else:
# last cell, width = cell start to end of image
# get width & height
width = tW
height = tH
tW = width
tH = height
# get roi (region of interest) to find an x
roi = img[h[1]:h[1]+width, v[0]:v[0]+height]
# save image (for testing)
dir = 'imgs/table%s' % (tableCnt+1)
if not os.path.exists(dir):
os.makedirs(dir)
fn = '%s/roi_r%s-c%s.png' % (dir, i, j)
cv2.imwrite(fn, roi)
# if roi contains an x, add x to array, else add _
roi_gry = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(roi_gry, 127, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if len(contours) > 1:
# there is an x for 2 or more contours
row.append('x')
else:
# there is no x when len(contours) is <= 1
row.append('_')
            row.pop()  # drop the extra cell appended after the last vertical line
rows.append(row)
# save image (for testing)
fn = os.path.splitext(image)[0] + '-hough_p.png'
cv2.imwrite('imgs/'+fn, img)
def process():
for i, img in enumerate(images):
# perform probabilistic hough transform on each image
hough_transform_p(img, templates[0], i)
if __name__ == '__main__':
process()
So, the sample image:
And the output (the code that generates the text file was deleted for brevity):
As you can see, the text file contains the same number of x's in the same position as the image. Now that the hard part is over, I can continue on with my assignment!
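For reference, the counting step whose code was deleted can be reconstructed with a short sketch. It assumes the rows list built in hough_transform_p above is made available (e.g. by returning it from the function):
# count x's and blanks per row, assuming `rows` as built above
with open('counts.txt', 'w') as f:
    for i, row in enumerate(rows):
        f.write("row %s: %s x's, %s blanks\n" % (i, row.count('x'), row.count('_')))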
Related
I'm trying to make my programme detect only the overhead wire on a train/tram, but when the wire holders come into frame it also detects their horizontal lines, which I don't want. Does anyone know how to make it detect only vertical lines? I tried using cv2.erode along with np.ones to keep only vertical lines, but I couldn't seem to get anywhere with that. Someone mentioned that HoughLines can be configured to return only vertical lines, but I don't know if that's true. Here's my code:
import cv2
import numpy as np
import window_names
import track_bars
vid = 'blackpool_tram_result.mp4'
cap = cv2.VideoCapture(vid)
frame_counter = 0
while (True):
ret, frame = cap.read()
frame_counter += 1
if frame_counter == cap.get(cv2.CAP_PROP_FRAME_COUNT):
frame_counter = 0
cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
blank = np.zeros(frame.shape[:2], dtype='uint8')
grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
output = np.empty(grey.shape, dtype=np.uint8)
cv2.normalize(
grey,
output,
alpha=0,
beta=255,
norm_type=cv2.NORM_MINMAX)
hist = cv2.equalizeHist(output)
track_bars.lower_threshold = cv2.getTrackbarPos("lower", window_names.window_canny)
track_bars.upper_threshold = cv2.getTrackbarPos("upper", window_names.window_canny)
track_bars.smoothing_neighbourhood = cv2.getTrackbarPos("smoothing", window_names.window_canny)
track_bars.sobel_size = cv2.getTrackbarPos("sobel size", window_names.window_canny)
track_bars.smoothing_neighbourhood = max(3, track_bars.smoothing_neighbourhood)
if not (track_bars.smoothing_neighbourhood % 2):
track_bars.smoothing_neighbourhood = track_bars.smoothing_neighbourhood + 1
track_bars.sobel_size = max(3, track_bars.sobel_size)
if not (track_bars.sobel_size % 2):
track_bars.sobel_size = track_bars.sobel_size + 1
smoothed = cv2.GaussianBlur(
hist, (track_bars.smoothing_neighbourhood, track_bars.smoothing_neighbourhood), 0)
edges = cv2.Canny(
smoothed,
track_bars.lower_threshold,
track_bars.upper_threshold,
apertureSize=track_bars.sobel_size)
rho = 1 # distance resolution in pixels of the Hough grid
theta = np.pi / 180 # angular resolution in radians of the Hough grid
threshold = 15 # minimum number of votes (intersections in Hough grid cell)
minLineLength = 50 # minimum number of pixels making up a line
maxLineGap = 20
line_image = np.copy(frame) * 0
mask = cv2.rectangle(blank, (edges.shape[1]//2 + 150, edges.shape[0]//2 - 150), (edges.shape[1]//2 - 150, edges.shape[0]//2 - 300), 255, -1)
masked = cv2.bitwise_and(edges,edges,mask=mask)
lines = cv2.HoughLinesP(masked, rho, theta, threshold, np.array([]), minLineLength, maxLineGap)
if lines is not None:
        for line in lines:  # HoughLinesP returns an N x 1 x 4 array
            x1, y1, x2, y2 = line[0]
            cv2.line(frame, (x1, y1), (x2, y2), (255, 0, 0), 5)
lines_edges = cv2.addWeighted(frame, 0.8, line_image, 1, 0)
cv2.imshow(window_names.window_hough, frame)
cv2.imshow(window_names.window_canny, edges)
cv2.imshow(window_names.window_mask, mask)
cv2.imshow(window_names.window_masked_image, masked)
key = cv2.waitKey(27)
    if (key & 0xFF) == ord('x'):  # mask to the low byte before comparing
break
cv2.destroyAllWindows()
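As an aside on the erosion attempt mentioned in the question: it can work if the structuring element is a tall, thin rectangle rather than a square np.ones kernel. A minimal sketch, where the kernel height of 25 px and the input file are arbitrary assumptions:
import cv2

edges = cv2.imread('edges.png', cv2.IMREAD_GRAYSCALE)  # hypothetical edge image
vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 25))
vertical_only = cv2.morphologyEx(edges, cv2.MORPH_OPEN, vertical_kernel)  # keeps near-vertical strokes
cv2.imwrite('vertical_only.png', vertical_only)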
HoughLines() gives you the ability to configure the minimum and maximum line angles to detect (the min_theta and max_theta parameters). You can check here for details.
However, HoughLinesP doesn't have this option. What you can do instead is filter the lines that HoughLinesP gives as output. According to the documentation:
Output vector of lines. Each line is represented by a 4-element vector
(x1,y1,x2,y2) , where (x1,y1) and (x2,y2) are the ending points of
each detected line segment.
So just take the start point (x1, y1) and end point (x2, y2) of each segment and calculate its angle with simple math.
With those angles, you can filter each line according to the desired angle range.
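A minimal sketch of that filtering, where the 10-degree tolerance around vertical is an arbitrary assumption:
import math

def filter_vertical_lines(lines, max_dev_deg=10):
    # keep only segments whose direction is within max_dev_deg of vertical
    vertical = []
    if lines is None:
        return vertical
    for line in lines:
        x1, y1, x2, y2 = line[0]  # HoughLinesP returns an N x 1 x 4 array
        angle = math.degrees(math.atan2(y2 - y1, x2 - x1))  # range -180..180
        if abs(abs(angle) - 90) <= max_dev_deg:  # vertical segments sit near +-90
            vertical.append((x1, y1, x2, y2))
    return vertical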
I have the following image:
I want to find all the occurrences of a specific character, say "a", and replace each one with another character, say "e".
This can be done in Python 3 using OpenCV and NumPy; I'm using the following two images for template matching:
My code is as follows:
import cv2 as cv
import numpy as np
img = cv.imread('source_image.jpg') # Importing Source Image
imgray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) # Converting Source Image to Gray Scale
template_a = cv.imread('template_a.jpg') # Reading Template "a" Character
template_a_gray = cv.cvtColor(template_a, cv.COLOR_BGR2GRAY) # Converting Template "a" Character to Gray Scale
template_e = cv.imread('template_e.jpg') # Reading Template "e" Character
template_e_gray = cv.cvtColor(template_e, cv.COLOR_BGR2GRAY) # Converting Template "e" Character to Gray Scale
# Template Matching
w, h = template_a_gray.shape[::-1] # Obtaining Width and Height of Template "a" Character
res = cv.matchTemplate(imgray, template_a_gray, cv.TM_CCOEFF_NORMED) # Template Matching
threshold = 0.6
loc = np.where( res >= threshold)
for pt in zip(*loc[::-1]):
cv.rectangle(imgray, pt, (pt[0] + w, pt[1] + h), color=(255,255,255), thickness=cv.FILLED) # Removing Matched Template "a" Characters
y_offset = pt[1]
x_offset = pt[0]
x_end = x_offset + template_e_gray.shape[1]
y_end = y_offset + template_e_gray.shape[0]
imgray[y_offset:y_end,x_offset:x_end] = template_e_gray # Pasting Template "e" Characters
filename = 'savedImage.jpg'
cv.imwrite(filename, imgray) # Saving Image
cv.waitKey(0)
cv.destroyAllWindows()
When executed the result is:
The problem is that when I zoom in, the pasted image isn't exactly the same as the "e" template character:
Blobs appear above and to the left of the original "e" template character. Why is this happening, and how do I get rid of them?
Edit: in response to Christoph Rackwitz's comment:
Edit: code fixed following Christoph Rackwitz's suggestions.
This is because template_e_gray is pasted multiple times: the blobs are the edges of another "e" pasted just before. Print pt and see where each "e" is pasted.
There are several ways to deal with this problem. The easiest is to set the threshold to 0.8:
threshold = 0.8
If this is sufficient, use it. But it may not be enough for some applications, because the threshold needs to be tuned per image.
Another way is to fill the area with white using a rectangle one pixel larger than the template:
imgray[y_offset - 1:y_end + 1, x_offset - 1:x_end + 1] = 255
imgray[y_offset:y_end, x_offset:x_end] = template_e_gray # Pasting Template "e" Characters
This is easy too, but it is inefficient, can be misaligned, and may not work with large images.
My recommendation is to use non-maximum suppression (NMS). NMS is a technique commonly used in object detection that deletes all overlapping rectangles except the one with the highest score.
Borrowing the implementation from here, this is the complete code:
import cv2 as cv
import numpy as np
# https://github.com/rbgirshick/fast-rcnn/blob/master/lib/utils/nms.py
def nms(dets, thresh):
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
img = cv.imread('source_image.jpg') # Importing Source Image
imgray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) # Converting Source Image to Gray Scale
template_a = cv.imread('temp/template_a.jpg') # Reading Template "a" Character
template_a_gray = cv.cvtColor(template_a, cv.COLOR_BGR2GRAY) # Converting Template "a" Character to Gray Scale
template_e = cv.imread('temp/template_e.jpg') # Reading Template "e" Character
template_e_gray = cv.cvtColor(template_e, cv.COLOR_BGR2GRAY) # Converting Template "e" Character to Gray Scale
# Template Matching
w, h = template_a_gray.shape[::-1] # Obtaining Width and Height of Template "a" Character
res = cv.matchTemplate(imgray, template_a_gray, cv.TM_CCOEFF_NORMED) # Template Matching
threshold = 0.6
loc = np.where(res >= threshold)
top, left = loc
score = res[loc]
iou_threshold = 0.5
matched_boxes = np.array([left, top, left + w, top + h, score]).T
valid_indices = nms(matched_boxes, iou_threshold)
boxes = matched_boxes[valid_indices]
boxes = boxes[..., :-1].astype(int)
for pt in boxes:
print(pt)
cv.rectangle(imgray, pt[:2], pt[2:], color=(255, 255, 255),
thickness=cv.FILLED) # Removing Matched Template "a" Characters
y_offset = pt[1]
x_offset = pt[0]
x_end = x_offset + template_e_gray.shape[1]
y_end = y_offset + template_e_gray.shape[0]
imgray[y_offset:y_end, x_offset:x_end] = template_e_gray # Pasting Template "e" Characters
filename = 'temp/savedImage.jpg'
cv.imwrite(filename, imgray) # Saving Image
cv.waitKey(0)
cv.destroyAllWindows()
This code works perfectly as expected, but you will see the following results. See the area circled in red.
This is a trace of "a", not "e". It is caused by the difference in size between the "a" in the image and the "a" in the template, and it is unrelated to the original issue. I mention it because I thought you might suspect that the same issue remains.
I have this simple OpenCV template matching function written in Python.
image:
template:
import cv2
import numpy as np
from random import randint

def find(template_path, sensitivity):
screen = "tool.png"
screen_read = cv2.imread(screen)
screen_gray = cv2.cvtColor(screen_read, cv2.COLOR_BGR2GRAY)
    obj = cv2.imread(template_path, cv2.IMREAD_GRAYSCALE)
w, h = obj.shape[::-1]
location = np.where(cv2.matchTemplate(screen_gray, obj, cv2.TM_CCORR_NORMED) >= sensitivity)
positions = []
for xy in zip(*location[::-1]):
cv2.rectangle(screen_read, xy, (xy[0] + w, xy[1] + h), (0, 0, 255), 1)
        x = randint(xy[0], (xy[0] + w) - 2)
        y = randint(xy[1], (xy[1] + h) - 2)
print(x, y)
positions.append(str(x) + ", " + str(y))
#cv2.imshow("Test", screen_read)
#cv2.waitKey(0)
find("enemylogo.png", 0.90)
It will find all the templates correctly, as shown here:
However, my goal is to pass the center coordinate of each match to a loop outside the function. For that, I need to store the x, y coordinates as tuples in an array (positions).
However, I'm not getting the desired results: it's adding far too many tuples instead of only 2.
What I'm trying to do is:
for x in find("enemylogo.png", 0.90):
click(x) #x would be the coordinate of every template matched.
Could someone help me, please?
The line location = np.where(...) will give you a lot of matches, and many of them will be right next to each other. Another technique is to use minMaxLoc repeatedly. This function only gives you the single best result, but if you overwrite the best match with zeros on the first pass, the second pass will find the next-best match.
import cv2
import numpy as np
def find_templates(obj, sensitivity):
image = cv2.imread('tool.png', cv2.IMREAD_COLOR )
template = cv2.imread(obj, cv2.IMREAD_COLOR)
h, w = template.shape[:2]
print('h', h, 'w', w)
method = cv2.TM_CCORR_NORMED
res = cv2.matchTemplate(image, template, method)
res_h, res_w = res.shape[:2]
# fake out max_val for first run through loop
max_val = 1
centers = []
while max_val > sensitivity:
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
if max_val > sensitivity:
centers.append( (max_loc[0] + w//2, max_loc[1] + h//2) )
x1 = max(max_loc[0] - w//2, 0)
y1 = max(max_loc[1] - h//2, 0)
x2 = min(max_loc[0] + w//2, res_w)
y2 = min(max_loc[1] + h//2, res_h)
res[y1:y2, x1:x2] = 0
image = cv2.rectangle(image,(max_loc[0],max_loc[1]), (max_loc[0]+w+1, max_loc[1]+h+1), (0,255,0) )
print(centers)
cv2.imwrite('output.png', image)
find_templates("enemy_logo.png", 0.90)
which gives
[(52, 52), (169, 52)]
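A small follow-up for the original goal, assuming find_templates is changed to end with return centers (a hypothetical tweak, not in the code as posted); the loop from the question then works directly:
centers = find_templates("enemy_logo.png", 0.90)  # assumes the function now returns `centers`
for x, y in centers:
    click((x, y))  # `click` is the questioner's own function, assumed defined elsewhere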
Extracting table data from digital PDFs has been simple using camelot and tabula. However, that solution doesn't work with scanned images of document pages, specifically when the table doesn't have borders and inner grids. I have been trying to generate the vertical and horizontal lines using OpenCV, but since the scanned images have slight rotation angles, it is difficult to proceed with that approach.
How can OpenCV be used to generate grids (horizontal and vertical lines) and borders for a scanned document page that contains table data (along with paragraphs of text)? And if this is feasible, how can the rotation angle of the scanned image be nullified?
I wrote some code to estimate the horizontal lines from the printed letters on the page; the same could be done for the vertical ones. The code below follows some general assumptions. Here are the basic steps in pseudocode style:
prepare picture for contour detection
do contour detection
we assume most contours are letters
calc mean width and height of all contours
calc mean area of contours
filter all contours with two conditions:
a) contour (letter) height < meanHeight * 2
b) contour area > 4/5 * meanArea
calc center point of all remaining contours
assume we have line regions (bins)
list all center points which are inside a region
do linear regression of the region points
save slope and intercept
calc mean slope and intercept
Here is the full code:
import cv2
import numpy as np
from scipy import stats
def resizeImageByPercentage(img,scalePercent = 60):
width = int(img.shape[1] * scalePercent / 100)
height = int(img.shape[0] * scalePercent / 100)
dim = (width, height)
# resize image
return cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
def calcAverageContourWidthAndHeight(contourList):
hs = list()
ws = list()
for cnt in contourList:
(x, y, w, h) = cv2.boundingRect(cnt)
ws.append(w)
hs.append(h)
return np.mean(ws),np.mean(hs)
def calcAverageContourArea(contourList):
    areaList = list()
    for cnt in contourList:
        # minAreaRect returns ((cx, cy), (w, h), angle); the area is w * h
        (_, (w, h), _) = cv2.minAreaRect(cnt)
        areaList.append(w * h)
    return np.mean(areaList)
def calcCentroid(contour):
houghMoments = cv2.moments(contour)
# calculate x,y coordinate of centroid
if houghMoments["m00"] != 0: #case no contour could be calculated
cX = int(houghMoments["m10"] / houghMoments["m00"])
cY = int(houghMoments["m01"] / houghMoments["m00"])
else:
# set values as what you need in the situation
cX, cY = -1, -1
return cX,cY
def getCentroidWhenSizeInRange(contourList,letterSizeWidth,letterSizeHigh,deltaOffset,minLetterArea=10.0):
centroidList=list()
for cnt in contourList:
(x, y, w, h) = cv2.boundingRect(cnt)
        # minAreaRect returns ((cx, cy), (w, h), angle); use w * h as the area
        (_, (rw, rh), _) = cv2.minAreaRect(cnt)
        area = rw * rh
        #calc diff
        diffW = abs(w-letterSizeWidth)
        diffH = abs(h-letterSizeHigh)
        #threshold A: almost smaller than mean letter size +- offset
        #when almost letterSize
        if diffW < deltaOffset and diffH < deltaOffset:
            #threshold B: area > min area
            if area > minLetterArea:
cX,cY = calcCentroid(cnt)
if cX!=-1 and cY!=-1:
centroidList.append((cX,cY))
return centroidList
DEBUGMODE = True
#read image, do git clone https://github.com/WZBSocialScienceCenter/pdftabextract.git for the example
img = cv2.imread('pdftabextract/examples/catalogue_30s/data/ALA1934_RR-excerpt.pdf-2_1.png')
#get some basic infos
imgHeigh, imgWidth, imgChannelAmount = img.shape
if DEBUGMODE:
cv2.imwrite("img00original.jpg",resizeImageByPercentage(img,30))
cv2.imshow("original",img)
# prepare img
imgGrey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# apply Gaussian filter
imgGaussianBlur = cv2.GaussianBlur(imgGrey,(5,5),0)
#make binary img, black or white
_, imgBinThres = cv2.threshold(imgGaussianBlur, 130, 255, cv2.THRESH_BINARY)
## detect contours
contours, _ = cv2.findContours(imgBinThres, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#we get some letter parameter
averageLetterWidth, averageLetterHigh = calcAverageContourWidthAndHeight(contours)
threshold1AllowedLetterSizeOffset = averageLetterHigh * 2 # double size
averageContourAreaSizeOfMinRect = calcAverageContourArea(contours)
threshHold2MinArea = 4 * averageContourAreaSizeOfMinRect / 5 # 4/5 * mean
print("mean letter Width: ", averageLetterWidth)
print("mean letter High: ", averageLetterHigh)
print("threshold 1 tolerance: ", threshold1AllowedLetterSizeOffset)
print("mean letter area ", averageContourAreaSizeOfMinRect)
print("thresold 2 min letter area ", threshHold2MinArea)
#we get all centroid of letter sizes contours, the other we ignore
centroidList = getCentroidWhenSizeInRange(contours,averageLetterWidth,averageLetterHigh,threshold1AllowedLetterSizeOffset,threshHold2MinArea)
if DEBUGMODE:
#debug print all centers:
imgFilteredCenter = img.copy()
for cX,cY in centroidList:
#draw in red color as BGR
cv2.circle(imgFilteredCenter, (cX, cY), 5, (0, 0, 255), -1)
cv2.imwrite("img01letterCenters.jpg",resizeImageByPercentage(imgFilteredCenter,30))
cv2.imshow("letterCenters",imgFilteredCenter)
#we estimate a bin widths
amountPixelFreeSpace = averageLetterHigh #TODO get better estimate out of histogram
estimatedBinWidth = round( averageLetterHigh + amountPixelFreeSpace) #TODO round better ?
binCollection = dict() #range(0,imgHeigh,estimatedBinWidth)
#we do seperate the center points into bins by y coordinate
for i in range(0,imgHeigh,estimatedBinWidth):
listCenterPointsInBin = list()
yMin = i
yMax = i + estimatedBinWidth
for cX,cY in centroidList:
if yMin < cY < yMax:#if fits in bin
listCenterPointsInBin.append((cX,cY))
binCollection[i] = listCenterPointsInBin
#we assume all points in a bin lie on one line
#model = slope (x) + intercept
#model = m (x) + n
mList = list() #slope abs in img
nList = list() #intercept abs in img
nListRelative = list() #intercept relative to bin start
minAmountRegressionElements = 12 #is also alias for letter amount we expect
#we do regression for every point in the bin
for startYOfBin, values in binCollection.items():
#we reform values
xValues = [] #TODO use more short transform
yValues = []
for x,y in values:
xValues.append(x)
yValues.append(y)
#we assume a min limit of point in bin
if len(xValues) >= minAmountRegressionElements :
slope, intercept, r, p, std_err = stats.linregress(xValues, yValues)
mList.append(slope)
nList.append(intercept)
#we calc the relative intercept
nRelativeToBinStart = intercept - startYOfBin
nListRelative.append(nRelativeToBinStart)
if DEBUGMODE:
    #we debug print all lines in one picture
imgLines = img.copy()
colorOfLine = (0, 255, 0) #green
for i in range(0,len(mList)):
slope = mList[i]
intercept = nList[i]
        startPoint = (0, int(intercept))  # better rounding?
        endPointY = int(slope * imgWidth + intercept)
        if endPointY < 0:
            endPointY = 0
        endPoint = (imgWidth, endPointY)  # the line runs to the right image border
cv2.line(imgLines, startPoint, endPoint, colorOfLine, 2)
cv2.imwrite("img02lines.jpg",resizeImageByPercentage(imgLines,30))
cv2.imshow("linesOfLetters ",imgLines)
#we assume the mean gets it right
meanIntercept = np.mean(nListRelative)
meanSlope = np.mean(mList)
print("meanIntercept :", meanIntercept)
print("meanSlope ", meanSlope)
#TODO calc angle with math.atan(slope) ...
if DEBUGMODE:
cv2.waitKey(0)
original:
center point of letters:
lines:
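To finish the TODO at the end of the code, the estimated mean slope can be turned into a deskew step. A minimal sketch that assumes the img and meanSlope variables from above (the sign of the angle may need flipping depending on orientation conventions):
import math

angleDeg = math.degrees(math.atan(meanSlope))
(h, w) = img.shape[:2]
M = cv2.getRotationMatrix2D((w // 2, h // 2), angleDeg, 1.0)
deskewed = cv2.warpAffine(img, M, (w, h), flags=cv2.INTER_LINEAR,
                          borderMode=cv2.BORDER_REPLICATE)
cv2.imwrite('img03deskewed.jpg', deskewed)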
I had the same problem some time ago, and this tutorial is the solution. It explains how to use pdftabextract, a Python library by Markus Konrad that leverages OpenCV's Hough transform to detect the lines, and it works even if the scanned document is a bit tilted. The tutorial walks you through parsing a 1920s German newspaper.
I am doing a college class project on image processing. This is my original image:
I want to join nearby/overlapping bounding boxes on individual text line images, but I don't know how. My code looks like this so far (thanks to @HansHirse for the help):
import os
import cv2
import numpy as np
from scipy import stats
image = cv2.imread('example.png')
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray,127,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
#dilation
kernel = np.ones((5,5), np.uint8)
img_dilation = cv2.dilate(thresh, kernel, iterations=1)
#find contours
ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# https://www.pyimagesearch.com/2015/04/20/sorting-contours-using-python-and-opencv/
def sort_contours(cnts, method="left-to-right"):
# initialize the reverse flag and sort index
reverse = False
i = 0
# handle if we need to sort in reverse
if method == "right-to-left" or method == "bottom-to-top":
reverse = True
# handle if we are sorting against the y-coordinate rather than
# the x-coordinate of the bounding box
if method == "top-to-bottom" or method == "bottom-to-top":
i = 1
# construct the list of bounding boxes and sort them from top to
# bottom
boundingBoxes = [cv2.boundingRect(c) for c in cnts]
(cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
key=lambda b: b[1][i], reverse=reverse))
# return the list of sorted contours and bounding boxes
return (cnts, boundingBoxes)
sortedctrs,sortedbbs=sort_contours(ctrs)
xyminmax=[]
for cnt in sortedctrs:
x, y, w, h = cv2.boundingRect(cnt)
xyminmax.append([x,y,x+w,y+h])
distances=[]
for i in range(len(xyminmax)):
try:
first_xmax = xyminmax[i][2]
second_xmin = xyminmax[i + 1][0]
distance=abs(second_xmin-first_xmax)
distances.append(distance)
except IndexError:
pass
THRESHOLD=stats.mode(distances, axis=None)[0][0]
new_rects=[]
for i in range(len(xyminmax)):
try:
# [xmin,ymin,xmax,ymax]
first_ymin=xyminmax[i][1]
first_ymax=xyminmax[i][3]
second_ymin=xyminmax[i+1][1]
second_ymax=xyminmax[i+1][3]
first_xmax = xyminmax[i][2]
second_xmin = xyminmax[i+1][0]
firstheight=abs(first_ymax-first_ymin)
secondheight=abs(second_ymax-second_ymin)
distance=abs(second_xmin-first_xmax)
if distance<THRESHOLD:
new_xmin=xyminmax[i][0]
new_xmax=xyminmax[i+1][2]
if first_ymin>second_ymin:
new_ymin=second_ymin
else:
new_ymin = first_ymin
if firstheight>secondheight:
new_ymax = first_ymax
else:
new_ymax = second_ymax
new_rects.append([new_xmin,new_ymin,new_xmax,new_ymax])
else:
new_rects.append(xyminmax[i])
except IndexError:
pass
for rect in new_rects:
cv2.rectangle(image, (rect[0], rect[1]), (rect[2], rect[3]), (121, 11, 189), 2)
cv2.imwrite("result.png",image)
which produces this image as a result:
I want to join very close or overlapping bounding boxes such as these
into a single bounding box so that the formula doesn't get separated into single characters. I have tried using cv2.groupRectangles, but the printed results were just NULL.
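As an aside on the cv2.groupRectangles attempt: that function rejects rectangles that have no neighbours, so a list of unique boxes comes back empty. A common workaround (an assumption here, not something from the original post) is to duplicate the list before grouping:
import cv2

boxes = [(10, 10, 50, 20), (12, 11, 50, 20), (200, 10, 40, 20)]  # hypothetical boxes
# every box needs at least groupThreshold neighbours to survive, so duplicate the list
grouped, weights = cv2.groupRectangles(list(boxes) * 2, groupThreshold=1, eps=0.5)
print(grouped)  # overlapping boxes are merged; isolated ones survive via their duplicate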
So, here comes my solution. I partially modified your (initial) code to my preferred naming, etc. Also, I commented all the stuff I added.
import cv2
import numpy as np
image = cv2.imread('images/example.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
kernel = np.ones((5, 5), np.uint8)
img_dilated = cv2.dilate(thresh, kernel, iterations = 1)
cnts, _ = cv2.findContours(img_dilated.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Array of initial bounding rects
rects = []
# Bool array indicating which initial bounding rect has
# already been used
rectsUsed = []
# Just initialize bounding rects and set all bools to false
for cnt in cnts:
rects.append(cv2.boundingRect(cnt))
rectsUsed.append(False)
# Sort bounding rects by x coordinate
def getXFromRect(item):
return item[0]
rects.sort(key = getXFromRect)
# Array of accepted rects
acceptedRects = []
# Merge threshold for x coordinate distance
xThr = 5
# Iterate all initial bounding rects
for supIdx, supVal in enumerate(rects):
if (rectsUsed[supIdx] == False):
# Initialize current rect
currxMin = supVal[0]
currxMax = supVal[0] + supVal[2]
curryMin = supVal[1]
curryMax = supVal[1] + supVal[3]
# This bounding rect is used
rectsUsed[supIdx] = True
# Iterate all initial bounding rects
# starting from the next
for subIdx, subVal in enumerate(rects[(supIdx+1):], start = (supIdx+1)):
# Initialize merge candidate
candxMin = subVal[0]
candxMax = subVal[0] + subVal[2]
candyMin = subVal[1]
candyMax = subVal[1] + subVal[3]
# Check if x distance between current rect
# and merge candidate is small enough
if (candxMin <= currxMax + xThr):
# Reset coordinates of current rect
currxMax = candxMax
curryMin = min(curryMin, candyMin)
curryMax = max(curryMax, candyMax)
# Merge candidate (bounding rect) is used
rectsUsed[subIdx] = True
else:
break
# No more merge candidates possible, accept current rect
acceptedRects.append([currxMin, curryMin, currxMax - currxMin, curryMax - curryMin])
for rect in acceptedRects:
img = cv2.rectangle(image, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (121, 11, 189), 2)
cv2.imwrite("images/result.png", image)
For your example
I get the following output
Now you have to find a proper threshold to meet your expectations. Maybe there is even some more work to do, especially to get the whole formula, since the distances don't vary that much.
Disclaimer: I'm new to Python in general, and especially to the Python API of OpenCV (C++ for the win). Comments, improvements, and highlighting of Python no-gos are highly welcome!
Here is a slightly different approach, using the OpenCV Wrapper library.
import cv2
import opencv_wrapper as cvw
image = cv2.imread("example.png")
gray = cvw.bgr2gray(image)
thresh = cvw.threshold_otsu(gray, inverse=True)
# dilation
img_dilation = cvw.dilate(thresh, 5)
# Find contours
contours = cvw.find_external_contours(img_dilation)
# Map contours to bounding rectangles, using bounding_rect property
rects = map(lambda c: c.bounding_rect, contours)
# Sort rects by top-left x (rect.x == rect.tl.x)
sorted_rects = sorted(rects, key=lambda r: r.x)
# Distance threshold
dt = 5
# List of final, joined rectangles
final_rects = [sorted_rects[0]]
for rect in sorted_rects[1:]:
prev_rect = final_rects[-1]
# Shift rectangle `dt` back, to find out if they overlap
shifted_rect = cvw.Rect(rect.tl.x - dt, rect.tl.y, rect.width, rect.height)
intersection = cvw.rect_intersection(prev_rect, shifted_rect)
if intersection is not None:
# Join the two rectangles
min_y = min((prev_rect.tl.y, rect.tl.y))
max_y = max((prev_rect.bl.y, rect.bl.y))
max_x = max((prev_rect.br.x, rect.br.x))
width = max_x - prev_rect.tl.x
height = max_y - min_y
new_rect = cvw.Rect(prev_rect.tl.x, min_y, width, height)
# Add new rectangle to final list, making it the new prev_rect
# in the next iteration
final_rects[-1] = new_rect
else:
# If no intersection, add the box
final_rects.append(rect)
for rect in sorted_rects:
cvw.rectangle(image, rect, cvw.Color.MAGENTA, line_style=cvw.LineStyle.DASHED)
for rect in final_rects:
cvw.rectangle(image, rect, cvw.Color.GREEN, thickness=2)
cv2.imwrite("result.png", image)
And the result
The green boxes are the final result, while the magenta boxes are the original ones.
I used the same threshold as @HansHirse.
The equals sign still needs some work; either use a higher dilation kernel size or apply the same technique vertically.
Disclosure: I am the author of OpenCV Wrapper.
Easy-to-read solution:
import cv2
import imutils

contours = get_contours(frame)
boxes = [cv2.boundingRect(c) for c in contours]
boxes = merge_boxes(boxes, x_val=40, y_val=20) # Where x_val and y_val are axis thresholds
def get_contours(frame): # Returns a list of contours
contours = cv2.findContours(frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(contours)
return contours
def merge_boxes(boxes, x_val, y_val):
size = len(boxes)
if size < 2:
return boxes
if size == 2:
if boxes_mergeable(boxes[0], boxes[1], x_val, y_val):
boxes[0] = union(boxes[0], boxes[1])
del boxes[1]
return boxes
boxes = sorted(boxes, key=lambda r: r[0])
i = size - 2
while i >= 0:
if boxes_mergeable(boxes[i], boxes[i + 1], x_val, y_val):
boxes[i] = union(boxes[i], boxes[i + 1])
del boxes[i + 1]
i -= 1
return boxes
def boxes_mergeable(box1, box2, x_val, y_val):
(x1, y1, w1, h1) = box1
(x2, y2, w2, h2) = box2
return max(x1, x2) - min(x1, x2) - minx_w(x1, w1, x2, w2) < x_val \
and max(y1, y2) - min(y1, y2) - miny_h(y1, h1, y2, h2) < y_val
def minx_w(x1, w1, x2, w2):
return w1 if x1 <= x2 else w2
def miny_h(y1, h1, y2, h2):
return h1 if y1 <= y2 else h2
def union(a, b):
x = min(a[0], b[0])
y = min(a[1], b[1])
w = max(a[0] + a[2], b[0] + b[2]) - x
h = max(a[1] + a[3], b[1] + b[3]) - y
return x, y, w, h
--> If you have bounding boxes and want to merge them along both the X and Y directions, use this snippet
--> Adjust x_pixel_value and y_pixel_value to your preferences
--> But for this, you need to have the bounding boxes already
import cv2
img = cv2.imread('your_image_path.png')  # replace with your image path
x_pixel_value = 5
y_pixel_value = 6
bboxes_list = [] # your bounding boxes list
rects_used = []
for i in bboxes_list:
rects_used.append(False)
end_bboxes_list = []
for enum,i in enumerate(bboxes_list):
if rects_used[enum] == True:
continue
xmin = i[0]
xmax = i[2]
ymin = i[1]
ymax = i[3]
for enum1,j in enumerate(bboxes_list[(enum+1):], start = (enum+1)):
i_xmin = j[0]
i_xmax = j[2]
i_ymin = j[1]
i_ymax = j[3]
if rects_used[enum1] == False:
            if abs(ymin - i_ymin) < y_pixel_value:  # compare the vertical gap against the y threshold
                if abs(xmin - i_xmax) < x_pixel_value or abs(xmax - i_xmin) < x_pixel_value:
rects_used[enum1] = True
xmin = min(xmin,i_xmin)
xmax = max(xmax,i_xmax)
ymin = min(ymin,i_ymin)
ymax = max(ymax,i_ymax)
final_box = [xmin,ymin,xmax,ymax]
end_bboxes_list.append(final_box)
for i in end_bboxes_list:
cv2.rectangle(img,(i[0],i[1]),(i[2],i[3]), color = [0,255,0], thickness = 2)
cv2.imshow("Image",img)
cv2.waitKey(10000)
cv2.destroyAllWindows()