Is there a better way of loading images with OpenCV - Python

I want to load images and use the Hough transform to capture the black-bounded area inside the paper, and then perform a few counting operations inside the boxes. Naturally, this requires that I am able to load the images in relatively good quality.
The issue is that when I load the image with OpenCV's cv2.imread(), I get a very watered-down version of the picture that I can't easily process. To make it worse, I can't render the image using cv2.imshow(); my IDE hangs every time I try to view it, so I have to use matplotlib to render and inspect the image step by step.
I don't know any other packages for image processing (maybe Pillow, but I don't know whether it will do what I need).
My original image is this:
import cv2
import numpy as np
from matplotlib import pyplot as plt

img = cv2.imread("img1-min.jpg")
Because the cv2.imshow() method results in a window that crashes, I resorted to matplotlib:
plt.imshow(img)
plt.title('my picture')
plt.show()
The result is:
After that:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 75, 150)
lines = cv2.HoughLinesP(edges, 1, np.pi/180, 50, 50, 5)
if lines is not None:
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 5)
plt.imshow(img)
plt.title('my picture')
plt.show()
The output is:
As you can see, the result is very messy. My hunch is that this is because of the way the original image is loaded. Is there a way I can improve the loading process so that it is easier to apply the Hough lines?

I believe the image is loading fine with cv2.imread(), but since it is so large (2976x3838), your IDE has trouble displaying it. I also believe you're applying cv2.HoughLinesP() incorrectly.
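Two side notes on the symptoms. The washed-out colors in the matplotlib preview are most likely just a channel-order mismatch: cv2.imread() returns BGR while plt.imshow() expects RGB. And a huge image can be previewed safely in a resizable window or as a downscaled copy. A minimal sketch (the window names and the scale factor are arbitrary choices):

import cv2
from matplotlib import pyplot as plt

img = cv2.imread("img1-min.jpg")

# matplotlib expects RGB, so convert the BGR array before plotting
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()

# a resizable window lets the 2976x3838 image be scaled to fit the screen
cv2.namedWindow("preview", cv2.WINDOW_NORMAL)
cv2.imshow("preview", img)

# or show a quarter-size copy; display only, keep processing at full resolution
small = cv2.resize(img, None, fx=0.25, fy=0.25, interpolation=cv2.INTER_AREA)
cv2.imshow("preview (small)", small)
cv2.waitKey(0)
cv2.destroyAllWindows()

On the cv2.HoughLinesP() call itself: the fifth positional argument is the optional lines output array, so in cv2.HoughLinesP(edges, 1, np.pi/180, 50, 50, 5) the trailing 50 and 5 do not set the length limits you intended; pass them by keyword instead, e.g. cv2.HoughLinesP(edges, 1, np.pi/180, 50, minLineLength=50, maxLineGap=5). That said, instead of using cv2.HoughLinesP() at all, here's an alternative approach to detect the lines.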
The idea is to threshold then find the bounding box of the board to create a mask. From this mask, we perform a perspective transform to obtain a top down image. This will allow us to detect lines better
Once we have the detected board, we can extract the ROI
Then we simply detect vertical and horizontal lines
Result
import cv2
import numpy as np

def perspective_transform(image, corners):
    def order_corner_points(corners):
        # Separate corners into individual points
        # Index 0 - top-right
        #       1 - top-left
        #       2 - bottom-left
        #       3 - bottom-right
        corners = [(corner[0][0], corner[0][1]) for corner in corners]
        top_r, top_l, bottom_l, bottom_r = corners[0], corners[1], corners[2], corners[3]
        return (top_l, top_r, bottom_r, bottom_l)

    # Order points in clockwise order
    ordered_corners = order_corner_points(corners)
    top_l, top_r, bottom_r, bottom_l = ordered_corners

    # Determine width of new image which is the max distance between
    # (bottom right and bottom left) or (top right and top left) x-coordinates
    width_A = np.sqrt(((bottom_r[0] - bottom_l[0]) ** 2) + ((bottom_r[1] - bottom_l[1]) ** 2))
    width_B = np.sqrt(((top_r[0] - top_l[0]) ** 2) + ((top_r[1] - top_l[1]) ** 2))
    width = max(int(width_A), int(width_B))

    # Determine height of new image which is the max distance between
    # (top right and bottom right) or (top left and bottom left) y-coordinates
    height_A = np.sqrt(((top_r[0] - bottom_r[0]) ** 2) + ((top_r[1] - bottom_r[1]) ** 2))
    height_B = np.sqrt(((top_l[0] - bottom_l[0]) ** 2) + ((top_l[1] - bottom_l[1]) ** 2))
    height = max(int(height_A), int(height_B))

    # Construct new points to obtain top-down view of image in
    # top_r, top_l, bottom_l, bottom_r order
    dimensions = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1],
                           [0, height - 1]], dtype="float32")

    # Convert to Numpy format
    ordered_corners = np.array(ordered_corners, dtype="float32")

    # Find perspective transform matrix
    matrix = cv2.getPerspectiveTransform(ordered_corners, dimensions)

    # Return the transformed image
    return cv2.warpPerspective(image, matrix, (width, height))

image = cv2.imread('1.jpg')
original = image.copy()
blur = cv2.bilateralFilter(image, 9, 75, 75)
gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY_INV)[1]

cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

mask = np.zeros(image.shape, dtype=np.uint8)
for c in cnts:
    area = cv2.contourArea(c)
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.015 * peri, True)
    if area > 150000 and len(approx) == 4:
        cv2.drawContours(image, [c], 0, (36,255,12), 3)
        cv2.drawContours(mask, [c], 0, (255,255,255), -1)
        transformed = perspective_transform(original, approx)

mask = cv2.bitwise_and(mask, original)

# Detect horizontal lines on the warped board
gray = cv2.cvtColor(transformed, cv2.COLOR_BGR2GRAY)
board_thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY_INV)[1]
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (55,1))
detect_horizontal = cv2.morphologyEx(board_thresh, cv2.MORPH_OPEN, horizontal_kernel, iterations=2)
cnts = cv2.findContours(detect_horizontal, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    cv2.drawContours(transformed, [c], -1, (36,255,12), 9)

# Detect vertical lines on the warped board
vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1,55))
detect_vertical = cv2.morphologyEx(board_thresh, cv2.MORPH_OPEN, vertical_kernel, iterations=2)
cnts = cv2.findContours(detect_vertical, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    cv2.drawContours(transformed, [c], -1, (36,255,12), 9)

cv2.imwrite('thresh.png', thresh)
cv2.imwrite('image.png', image)
cv2.imwrite('mask.png', mask)
cv2.imwrite('transformed.png', transformed)
cv2.waitKey()
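To get at the counting step from the original question, one possible follow-up (just a sketch, assuming the warped board is an evenly spaced grid; ROWS and COLS are hypothetical values you would set to your board's actual layout) is to slice the transformed image into cells and process each one:

# Hypothetical follow-up: split the warped board into an evenly spaced grid
# of cells so each box can be counted separately
ROWS, COLS = 6, 4  # assumption: set to the real grid size of your board
h, w = transformed.shape[:2]
cell_h, cell_w = h // ROWS, w // COLS
for r in range(ROWS):
    for col in range(COLS):
        cell = transformed[r*cell_h:(r+1)*cell_h, col*cell_w:(col+1)*cell_w]
        # ... run the per-box counting logic on `cell` here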

Related

How to segment the text part of a document image when the background is quite similar to the paper?

I am working on an OCR project and the document image needs an unwarping operation. But I find that the color of the background is very similar to the paper.
To deal with this issue, my solution is to find the text part and fill it as a connected polygon. The code is as follows.
import cv2
import imutils
import numpy as np

# function to order points to a proper rectangle
def order_points(pts):
    # initialize a list of coordinates that will be ordered
    # such that the first entry in the list is the top-left,
    # the second entry is the top-right, the third is the
    # bottom-right, and the fourth is the bottom-left
    rect = np.zeros((4, 2), dtype="float32")

    # the top-left point will have the smallest sum, whereas
    # the bottom-right point will have the largest sum
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]

    # now, compute the difference between the points, the
    # top-right point will have the smallest difference,
    # whereas the bottom-left will have the largest difference
    diff = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]

    # return the ordered coordinates
    return rect

# function to transform the image to four points
def four_point_transform(image, pts):
    # obtain a consistent order of the points and unpack them
    # individually
    rect = order_points(pts)
    # # multiply the rectangle by the original ratio
    # rect *= ratio
    (tl, tr, br, bl) = rect

    # compute the width of the new image, which will be the
    # maximum distance between bottom-right and bottom-left
    # x-coordinates or the top-right and top-left x-coordinates
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))

    # compute the height of the new image, which will be the
    # maximum distance between the top-right and bottom-right
    # y-coordinates or the top-left and bottom-left y-coordinates
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))

    # now that we have the dimensions of the new image, construct
    # the set of destination points to obtain a "birds eye view",
    # (i.e. top-down view) of the image, again specifying points
    # in the top-left, top-right, bottom-right, and bottom-left
    # order
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")

    # compute the perspective transform matrix and then apply it
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))

    # return the warped image
    return warped

# function to find the two largest contours, which may be the
# full image and our rectangular edged object
def findLargestCountours(cntList, cntWidths):
    newCntList = []
    newCntWidths = []

    # finding the 1st largest rectangle
    first_largest_cnt_pos = cntWidths.index(max(cntWidths))
    # adding it to the new lists
    newCntList.append(cntList[first_largest_cnt_pos])
    newCntWidths.append(cntWidths[first_largest_cnt_pos])
    # removing it from the old lists
    cntList.pop(first_largest_cnt_pos)
    cntWidths.pop(first_largest_cnt_pos)

    # finding the second largest rectangle
    second_largest_cnt_pos = cntWidths.index(max(cntWidths))
    # adding it to the new lists
    newCntList.append(cntList[second_largest_cnt_pos])
    newCntWidths.append(cntWidths[second_largest_cnt_pos])
    # removing it from the old lists
    cntList.pop(second_largest_cnt_pos)
    cntWidths.pop(second_largest_cnt_pos)

    print('Old Screen Dimensions filtered', cntWidths)
    print('Screen Dimensions filtered', newCntWidths)
    return newCntList, newCntWidths

# driver function which identifies the 4 corners and does the four point transformation
def convert_object(image, screen_size=None, isDebug=False):
    image = imutils.resize(image, height=600)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.bilateralFilter(gray, 11, 17, 17)  # 11 // TODO: 11 for offline; may need to tune to 5 for online
    gray = cv2.medianBlur(gray, 5)

    kernel = np.ones((5, 5), np.uint8)
    grad = cv2.morphologyEx(gray, cv2.MORPH_GRADIENT, kernel)
    if isDebug:
        cv2.imshow('grad', grad)

    _, bw = cv2.threshold(grad, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    if isDebug:
        cv2.imshow('bw', bw)

    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (30, 50))
    connected = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)
    kernel = np.ones((5, 5), np.uint8)
    connected = cv2.dilate(connected, kernel, iterations=3)

    # using RETR_EXTERNAL instead of RETR_CCOMP
    countours, hierarchy = cv2.findContours(connected.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if isDebug: print('length of countours ', len(countours))
    imageCopy = image.copy()
    if isDebug: cv2.imshow('drawn countours', cv2.drawContours(imageCopy, countours, -1, (0, 255, 0), 1))

    cnts = sorted(countours, key=cv2.contourArea, reverse=True)
    hull = np.zeros(image.shape, dtype=np.uint8)
    for cnt in cnts:
        approx = cv2.convexHull(cnt)
        cv2.drawContours(hull, [approx], -1, (255, 0, 0), thickness=cv2.FILLED)
    if isDebug: cv2.imshow("hull", hull)

    grayhull = cv2.cvtColor(hull, cv2.COLOR_BGR2GRAY)
    countours, hierarchy = cv2.findContours(grayhull.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if isDebug: print('length of countours ', len(countours))
    imageCopy = image.copy()
    if isDebug: cv2.imshow('drawn countours', cv2.drawContours(imageCopy, countours, -1, (0, 255, 0), 3))

    mask = np.zeros(bw.shape, dtype=np.uint8)
    rgb = image.copy()
    for idx in range(len(countours)):
        x, y, w, h = cv2.boundingRect(countours[idx])
        mask[y:y+h, x:x+w] = 0
        cv2.drawContours(mask, countours, idx, (255, 255, 255), -1)
        r = float(cv2.countNonZero(mask[y:y+h, x:x+w])) / (w * h)
        if r > 0.45 and w > 8 and h > 8:
            cv2.rectangle(rgb, (x, y), (x+w-1, y+h-1), (0, 255, 0), 2)
    if isDebug: cv2.imshow("rgb", rgb)

    # approximate the contour
    cnts = sorted(countours, key=cv2.contourArea, reverse=True)
    screenCntList = []
    scrWidths = []
    for cnt in cnts:
        peri = cv2.arcLength(cnt, True)  # cnts[1] always rectangle O.o
        approx = cv2.approxPolyDP(cnt, 0.05 * peri, True)
        screenCnt = approx
        print(len(approx))
        if len(screenCnt) == 4:
            (X, Y, W, H) = cv2.boundingRect(cnt)
            # print('X Y W H', (X, Y, W, H))
            screenCntList.append(screenCnt)
            scrWidths.append(W)

    print('Screens found :', len(screenCntList))
    print('Screen Dimensions', scrWidths)

    if len(screenCntList) > 1:
        screenCntList, scrWidths = findLargestCountours(screenCntList, scrWidths)

    print(screenCntList)
    if isDebug: cv2.imshow(" Screen", cv2.drawContours(image.copy(), [screenCntList[0]], -1, (0, 255, 0), 3))

    pts = screenCntList[0].reshape(4, 2)
    print('Found bill rectangle at ', pts)
    rect = order_points(pts)
    print(rect)

    # apply the four point transform to obtain a "birds eye view" of
    # the image
    warp = four_point_transform(image, pts)

    # show the original and warped images
    if isDebug:
        cv2.imshow("Original", image)
        cv2.imshow("warp", warp)
        cv2.waitKey(0)

    cv2.imwrite("result.png", warp.astype(np.uint8))

convert_object(cv2.imread('abc.jpg'), isDebug=True)
But because of the paragraph indentation, approxPolyDP cannot find the boundary well.
Is there an effective solution for this case? Thanks in advance.
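One idea worth trying (a hedged sketch, not from the original post): instead of approximating the filled hull with approxPolyDP, fit a rotated rectangle around all of its foreground pixels with cv2.minAreaRect, which is less sensitive to ragged paragraph indentation; its four corner points can then feed the same four_point_transform helper.

# Hypothetical alternative to approxPolyDP, reusing grayhull from the code above:
# fit a rotated rectangle around every foreground pixel of the filled hull
ys, xs = np.where(grayhull > 0)
points = np.column_stack((xs, ys)).astype(np.float32)  # (x, y) pairs
rect = cv2.minAreaRect(points)
box = cv2.boxPoints(rect)                              # 4 corners of the rectangle
warp = four_point_transform(image, box)                # same helper as above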

Cropping in OpenCV return images rotated 90 degrees

I want to crop images according to their correct frame. I have about 10,000 hand X-ray images to preprocess, and this is what I have done so far:
Apply Gaussian Blur and Threshold (Binary + Otsu) on the image.
Apply dilation to get a single object (in this case a hand).
Used cv2.findContours() to draw outline along the edges around the hand.
Used cv2.boundingRect() to find the right frame, and then cv2.minAreaRect() and cv2.boxPoints() to get the right points for the bounding box.
Used cv2.warpPerspective() to adjust the image according to its height and width.
The code below describes the above:
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Load image, create mask, grayscale, Gaussian blur, Otsu's threshold
img_path = "sample_image.png"
image = cv2.imread(img_path)
original = image.copy()
blank = np.zeros(image.shape[:2], dtype=np.uint8)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (33,33), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

# Merge text into a single contour
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
dilate = cv2.dilate(thresh, kernel, iterations=3)

# Find contours
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=lambda x: cv2.boundingRect(x)[0])

for c in cnts:
    # Filter using contour area and aspect ratio (x1 = width, y1 = height)
    x, y, x1, y1 = cv2.boundingRect(c)
    if (x1 > 500) and (y1 > 700):
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        width = int(rect[1][0])
        height = int(rect[1][1])
        src_pts = box.astype("float32")
        dst_pts = np.array([[0, height-1], [0, 0],
                            [width-1, 0], [width-1, height-1]], dtype="float32")
        M = cv2.getPerspectiveTransform(src_pts, dst_pts)
        warped = cv2.warpPerspective(image, M, (width, height))
        plt.imshow(warped)
If you have a look at some of the images in the folder, those are the inputs. When I run these images through the code above, I get an output like this. Some of them are cropped nicely (straightened); however, some of them are cropped with 90-degree rotations. Is there a way to counter the 'rotated 90 degrees' output problem?
Here are some images:
Image Inputs: Four X-ray examples
Image Outputs: returns images that are rotated 90 degrees
Image Outputs wanted: straightened images (I just used Photoshop to straighten them; I don't want to do this for 10,000 images...)
UPDATE:
I edited the code according to the suggestions mentioned below. After running some samples, it now returns images that are slanted 90 degrees to the right.
Input images:
Output images:
I doubt it's because of the quality of the images. Maybe it has to do with OpenCV's minAreaRect()? Or boxPoints()?
FINAL UPDATE:
According to @Prashant Maurya, the code was updated with a function added to detect whether the position of the hand is left or right, and then mapping src_pts to the right dst_pts. The full code is shown below.
Hi, there are two changes which will correct the output:
The width and height taken in the code are in the wrong order (i.e. width: 1470 & height: 1118); just switch the values (see the sketch below).
Map src_pts to the right dst_pts; the current code maps the top-left corner to the bottom-left, so the image is being rotated.
A function was added to detect whether the image is tilted left or right, and to rotate it accordingly.
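For context on why the swap is needed: cv2.minAreaRect() returns ((cx, cy), (w, h), angle), and which side comes back as the width depends on the rectangle's tilt, so the same physical box can be reported with w and h exchanged. A small sketch to see this (the points are made up):

import cv2
import numpy as np

# A hypothetical tilted rectangle: minAreaRect may report its long side as
# either width or height depending on the angle convention
pts = np.array([[10, 60], [60, 10], [200, 150], [150, 200]], dtype=np.float32)
(cx, cy), (w, h), angle = cv2.minAreaRect(pts)
print(w, h, angle)  # inspect which side came back as "width" for this tilt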
Full code with changes is:
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Load image, create mask, grayscale, Gaussian blur, Otsu's threshold
img_path = "xray1.png"
image = cv2.imread(img_path)
cv2.imshow("image original", image)
cv2.waitKey(10000)
original = image.copy()
blank = np.zeros(image.shape[:2], dtype=np.uint8)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (33,33), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

# Merge text into a single contour
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
dilate = cv2.dilate(thresh, kernel, iterations=3)

# Find contours
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=lambda x: cv2.boundingRect(x)[0])

def get_tilt(box):
    tilt = "Left"
    x_list = [coord[0] for coord in box]
    y_list = [coord[1] for coord in box]
    print(x_list)
    print(y_list)
    x_list = sorted(x_list)
    y_list = sorted(y_list)
    print(x_list)
    print(y_list)
    for coord in box:
        if coord[0] == x_list[0]:
            index = y_list.index(coord[1])
            print("Index: ", index)
            if index == 1:
                tilt = "Left"
            else:
                tilt = "Right"
    return tilt

for c in cnts:
    # Filter using contour area and aspect ratio (x1 = width, y1 = height)
    x, y, x1, y1 = cv2.boundingRect(c)
    if (x1 > 500) and (y1 > 700):
        rect = cv2.minAreaRect(c)
        print("rect", rect)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        # print("rect:", box)
        tilt = get_tilt(box)
        src_pts = box.astype("float32")

        if tilt == "Left":
            width = int(rect[1][1])
            height = int(rect[1][0])
            dst_pts = np.array([[0, 0],
                                [width-1, 0], [width-1, height-1], [0, height-1]], dtype="float32")
        else:
            width = int(rect[1][0])
            height = int(rect[1][1])
            dst_pts = np.array([[0, height-1], [0, 0],
                                [width-1, 0], [width-1, height-1]], dtype="float32")

        print("Src pts:", src_pts)
        print("Dst pts:", dst_pts)
        M = cv2.getPerspectiveTransform(src_pts, dst_pts)
        warped = cv2.warpPerspective(image, M, (width, height))
        print("Showing image ..")
        # plt.imshow(warped)
        cv2.imshow("image crop", warped)
        cv2.waitKey(10000)

Use Python OpenCV for segmenting newspaper articles

I'm using the code below for segmenting the articles from an image of a newspaper.
import uuid
import cv2
import numpy as np

def segmenter(image_received):
    # Process 1: Lines Detection
    img = image_received
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    edges = cv2.Canny(gray, 75, 150)  # determine contours
    lines = cv2.HoughLinesP(edges, 0.017, np.pi / 180, 60, minLineLength=100, maxLineGap=0.1)  # houghlines generation

    # drawing houghlines (HoughLinesP returns None when no lines are found)
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line[0]
            cv2.line(img, (x1, y1), (x2, y2), (0, 0, 128), 12)  # houghlines are drawn in color (0, 0, 128)

    # Drawing the border
    bold = cv2.copyMakeBorder(
        img,  # image source
        5,    # top width
        5,    # bottom width
        5,    # left width
        5,    # right width
        cv2.BORDER_CONSTANT,
        value=(0, 0, 128)  # border color value (dark red in BGR)
    )
    image = bold
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

    horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
    detected_lines = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, horizontal_kernel, iterations=2)
    cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        if len(c) >= 10:
            cv2.drawContours(image, [c], 0, (0, 17, 255), 1)

    vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
    detected_lines = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, vertical_kernel, iterations=2)
    cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        if len(c) >= 10:
            cv2.drawContours(image, [c], 0, (0, 17, 255), 1)

    cv2.imwrite(f'tmp/{str(uuid.uuid4())}.jpg', image)
For instance, the input image is:
and the output image is:
There are three problems:
The output rectangles aren't complete in all cases.
Images inside articles are also segmented as if they were part of the articles. But what I need is to segment only the text of the newspaper and crop everything else out. Something like this one:
Consider the following image:
The article indicated by the borders is not rectangular and is much more complicated. How can I achieve the correct borders using Python OpenCV or other image processing libraries?
(The question has an answer here for MATLAB, but I need Python code.)
Here is my pipeline.
I think it can be optimized.
Initialization
%matplotlib inline
import numpy as np
import cv2
from matplotlib import pyplot as plt
Load image
image_file_name = 'paper.jpg'
image = cv2.imread(image_file_name)
# gray conversion
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
The first important step is to remove the lines, so I search for the lines.
grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
abs_grad_x = cv2.convertScaleAbs(grad_x)
abs_grad_y = cv2.convertScaleAbs(grad_y)
# threshold
thresh_x = cv2.threshold(abs_grad_x, 0, 255, cv2.THRESH_OTSU)[1]
thresh_y = cv2.threshold(abs_grad_y, 0, 255, cv2.THRESH_OTSU)[1]
# bluring
kernel_size = 3
blur_thresh_x = cv2.GaussianBlur(thresh_x,(kernel_size, kernel_size),0)
blur_thresh_y = cv2.GaussianBlur(thresh_y,(kernel_size, kernel_size),0)
# Run Hough on edge detected image
rho = 1 # distance resolution in pixels of the Hough grid
theta = np.pi / 180 # angular resolution in radians of the Hough grid
threshold = 15 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 200 # minimum number of pixels making up a line
max_line_gap = 1 # maximum gap in pixels between connectable line segments
line_image = np.copy(gray) * 0 # creating a blank to draw lines on
# Vertical lines
vertical_lines = cv2.HoughLinesP(blur_thresh_x, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)
if vertical_lines is not None:
    for line in vertical_lines:
        for x1, y1, x2, y2 in line:
            # here it's possible to add a selection of only vertical lines
            if np.abs(y1 - y2) > 0.1 * np.abs(x1 - x2):
                cv2.line(line_image, (x1, y1), (x2, y2), 255, 5)

# Horizontal lines
horizontal_lines = cv2.HoughLinesP(blur_thresh_y, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)
if horizontal_lines is not None:
    for line in horizontal_lines:
        for x1, y1, x2, y2 in line:
            # here it's possible to add a selection of only horizontal lines
            if np.abs(x1 - x2) > 0.1 * np.abs(y1 - y2):
                cv2.line(line_image, (x1, y1), (x2, y2), 255, 5)
Afterwards, I remove the lines from the threshold image.
# threshold
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# remove lines
clean_thresh = cv2.subtract(thresh, line_image)
Then I search for the phrases.
# search the phrases
dilatation_type = cv2.MORPH_RECT
horizontal_dilatation = 20 #This is the gap. 20 for the first image, 10 for the second image
vertical_dilatation = 1
element = cv2.getStructuringElement(dilatation_type, (2*horizontal_dilatation + 1, 2*vertical_dilatation+1), (horizontal_dilatation, vertical_dilatation))
dilatation_thresh = cv2.dilate(clean_thresh, element)
# Fill
filled_tresh = dilatation_thresh.copy()
contours, hierarchy = cv2.findContours(dilatation_thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for cnt in contours:
    cv2.drawContours(filled_tresh, [cnt], -1, 255, cv2.FILLED)
Now I detect the bounding boxes
# Draw bounding boxes
bounding_box1 = filled_tresh.copy()
contours, hierarchy = cv2.findContours(bounding_box1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(bounding_box1, (x, y), (x+w, y+h), 255, cv2.FILLED)
# REPEAT Draw bounding boxes and Find the mean text width
mean_bb_width = 0 # mean bounding box width
bounding_box2 = bounding_box1.copy()
contours, hierarchy = cv2.findContours(bounding_box2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
num_cnt=0
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(bounding_box2, (x, y), (x+w, y+h), 255, cv2.FILLED)
    mean_bb_width = mean_bb_width + w
    num_cnt = num_cnt + 1
mean_bb_width = mean_bb_width / num_cnt
Now I separate the titles from the text
# define title what has width bigger than 1.5* mean_width
min_title_width = 1.5 * mean_bb_width
raw_title = np.copy(gray) * 0
raw_text = np.copy(gray) * 0
# separate titles from phrases
contours, hierarchy = cv2.findContours(bounding_box2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    if w >= min_title_width:
        cv2.drawContours(raw_title, [cnt], -1, 255, cv2.FILLED)
    else:
        cv2.drawContours(raw_text, [cnt], -1, 255, cv2.FILLED)
and then the final processing
image_out = image.copy()
# Closing parameters
horizontal_closing = 1
vertical_closing = 20
kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(horizontal_closing,vertical_closing))
# Processing titles
# Closing
closing_title = cv2.morphologyEx(raw_title, cv2.MORPH_CLOSE, kernel)
# Find contours
contours, hierarchy = cv2.findContours(closing_title, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Draw bounding boxes
bounding_title = closing_title.copy()
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(image_out, (x, y), (x+w, y+h), (255, 0, 0), 2)
# Processing text
# Closing
closing_text = cv2.morphologyEx(raw_text, cv2.MORPH_CLOSE, kernel)
# Find contours
contours, hierarchy = cv2.findContours(closing_text , cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Draw bounding boxes
bounding_text = closing_text.copy()
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(image_out, (x, y), (x+w, y+h), (0, 255, 0), 2)
The result is
Changing the parameter horizontal_dilatation from 20 to 10, I obtain the following result for the second image (where I removed the red border that you added).

How can I detect and recognize text in a shape

I need your help. I'm writing a Python script to recognize text in a shape. This shape can be captured from RTSP (an IP camera) at any angle.
See the attached file for an example. My code is below, but the coordinates to crop the rotated shape are set manually.
import cv2
import numpy as np

def main():
    fn = cv2.VideoCapture("rtsp://admin:Admin123-#172.16.10.254")
    flag, img = fn.read()
    cnt = np.array([
        [[64, 49]],
        [[122, 11]],
        [[391, 326]],
        [[308, 373]]
    ])
    print("shape of cnt: {}".format(cnt.shape))
    rect = cv2.minAreaRect(cnt)
    print("rect: {}".format(rect))

    box = cv2.boxPoints(rect)
    box = np.int0(box)
    print("bounding box: {}".format(box))
    cv2.drawContours(img, [box], 0, (0, 255, 0), 2)

    img_crop, img_rot = crop_rect(img, rect)
    print("size of original img: {}".format(img.shape))
    print("size of rotated img: {}".format(img_rot.shape))
    print("size of cropped img: {}".format(img_crop.shape))

    new_size = (int(img_rot.shape[1]/2), int(img_rot.shape[0]/2))
    img_rot_resized = cv2.resize(img_rot, new_size)
    new_size = (int(img.shape[1]/2), int(img.shape[0]/2))
    img_resized = cv2.resize(img, new_size)

    cv2.imshow("original contour", img_resized)
    cv2.imshow("rotated image", img_rot_resized)
    cv2.imshow("cropped_box", img_crop)
    # cv2.imwrite("crop_img1.jpg", img_crop)
    cv2.waitKey(0)

def crop_rect(img, rect):
    # get the parameters of the small rectangle
    center = rect[0]
    size = rect[1]
    angle = rect[2]
    center, size = tuple(map(int, center)), tuple(map(int, size))

    # get row and col num in img
    height, width = img.shape[0], img.shape[1]
    print("width: {}, height: {}".format(width, height))

    M = cv2.getRotationMatrix2D(center, angle, 1)
    img_rot = cv2.warpAffine(img, M, (width, height))
    img_crop = cv2.getRectSubPix(img_rot, size, center)

    return img_crop, img_rot

if __name__ == "__main__":
    main()
example pic
You may start with the example in the following post.
The code sample detects the license plate, and it also detects your "shape" with text.
After detecting the "shape" with the text, you may use the following stages:
Apply a threshold to the cropped area.
Find contours, and find the contour with the maximum area.
Build a mask, and mask the area outside the contour (like in the license plate example).
Use minAreaRect (as fmw42 commented), and get the angle of the rectangle.
Rotate the cropped area (by angle+90 degrees).
Apply OCR using pytesseract.image_to_string.
Here is the complete code:
import cv2
import numpy as np
import imutils
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe' # I am using Windows
# Read the input image
img = cv2.imread('Admin123.jpg')
# Reused code:
# https://stackoverflow.com/questions/60977964/pytesseract-not-recognizing-text-as-expected/60979089#60979089
################################################################################
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #convert to grey scale
gray = cv2.bilateralFilter(gray, 11, 17, 17)
edged = cv2.Canny(gray, 30, 200) #Perform Edge detection
cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:10]
screenCnt = None
# loop over our contours
for c in cnts:
    # approximate the contour
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.018 * peri, True)
    # if our approximated contour has four points, then
    # we can assume that we have found our screen
    if len(approx) == 4:
        screenCnt = approx
        break
# Masking the part other than the "shape"
mask = np.zeros(gray.shape,np.uint8)
new_image = cv2.drawContours(mask,[screenCnt],0,255,-1,)
new_image = cv2.bitwise_and(img,img,mask=mask)
# Now crop
(x, y) = np.where(mask == 255)
(topx, topy) = (np.min(x), np.min(y))
(bottomx, bottomy) = (np.max(x), np.max(y))
cropped = gray[topx:bottomx+1, topy:bottomy+1]
################################################################################
# Apply threshold the cropped area
_, thresh = cv2.threshold(cropped, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# Find contours
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cnts = imutils.grab_contours(cnts)
# Get contour with maximum area
c = max(cnts, key=cv2.contourArea)
# Build a mask (same as the code above)
mask = np.zeros(cropped.shape, np.uint8)
new_cropped = cv2.drawContours(mask, [c], 0, 255, -1)
new_cropped = cv2.bitwise_and(cropped, cropped, mask=mask)
# Draw green rectangle for testing
test = cv2.cvtColor(new_cropped, cv2.COLOR_GRAY2BGR)
cv2.drawContours(test, [c], -1, (0, 255, 0), thickness=2)
# Use minAreaRect as fmw42 commented
rect = cv2.minAreaRect(c)
angle = rect[2] # Get angle of the rectangle
# Rotate the cropped rectangle.
rotated_cropped = imutils.rotate(new_cropped, angle + 90)
# Read the text in the "shape"
text = pytesseract.image_to_string(rotated_cropped, config='--psm 3')
print("Extracted text is:\n\n", text)
# Show images for testing:
cv2.imshow('cropped', cropped)
cv2.imshow('thresh', thresh)
cv2.imshow('test', test)
cv2.imshow('rotated_cropped', rotated_cropped)
cv2.waitKey(0)
cv2.destroyAllWindows()
OCR output result:
AB12345
DEPARTMENT OF
INFORMATION
COMMUNICATION
TECHNOLOGY
cropped:
thresh:
test:
rotated_cropped:

Straighten largest line in image

I have a large number of images of food items on a tray; the tray is the largest thing in the picture, and it always contains the largest line, i.e.:
I had to Photoshop out the items on the tray.
I want a script that can align and straighten the tray, like so:
Is this possible using Python?
Here's an approach
Convert image to grayscale
Otsu's threshold to get a binary image
Find contours and filter using contour approximation
Perspective transform for top-down view
Rotate image to get correct orientation
After converting to grayscale, we apply Otsu's threshold to obtain a binary image.
Now we find contours on this image and filter using cv2.arcLength() and cv2.approxPolyDP(). The idea is that if the contour has approximately 4 corners, then it must be our desired object. An additional filtering step could be to use cv2.contourArea() to ensure that only the largest contour is used. Here's the detected contour.
From here we perform a perspective transform to get a top-down view
Finally we rotate the image depending on the desired orientation. Here's the result
import cv2
import numpy as np
import imutils

def perspective_transform(image, corners):
    def order_corner_points(corners):
        # Separate corners into individual points
        # Index 0 - top-right
        #       1 - top-left
        #       2 - bottom-left
        #       3 - bottom-right
        corners = [(corner[0][0], corner[0][1]) for corner in corners]
        top_r, top_l, bottom_l, bottom_r = corners[0], corners[1], corners[2], corners[3]
        return (top_l, top_r, bottom_r, bottom_l)

    # Order points in clockwise order
    ordered_corners = order_corner_points(corners)
    top_l, top_r, bottom_r, bottom_l = ordered_corners

    # Determine width of new image which is the max distance between
    # (bottom right and bottom left) or (top right and top left) x-coordinates
    width_A = np.sqrt(((bottom_r[0] - bottom_l[0]) ** 2) + ((bottom_r[1] - bottom_l[1]) ** 2))
    width_B = np.sqrt(((top_r[0] - top_l[0]) ** 2) + ((top_r[1] - top_l[1]) ** 2))
    width = max(int(width_A), int(width_B))

    # Determine height of new image which is the max distance between
    # (top right and bottom right) or (top left and bottom left) y-coordinates
    height_A = np.sqrt(((top_r[0] - bottom_r[0]) ** 2) + ((top_r[1] - bottom_r[1]) ** 2))
    height_B = np.sqrt(((top_l[0] - bottom_l[0]) ** 2) + ((top_l[1] - bottom_l[1]) ** 2))
    height = max(int(height_A), int(height_B))

    # Construct new points to obtain top-down view of image in
    # top_r, top_l, bottom_l, bottom_r order
    dimensions = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1],
                           [0, height - 1]], dtype="float32")

    # Convert to Numpy format
    ordered_corners = np.array(ordered_corners, dtype="float32")

    # Find perspective transform matrix
    matrix = cv2.getPerspectiveTransform(ordered_corners, dimensions)

    # Return the transformed image
    return cv2.warpPerspective(image, matrix, (width, height))

image = cv2.imread('1.jpg')
original = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.015 * peri, True)
    if len(approx) == 4:
        cv2.drawContours(image, [c], 0, (36,255,12), 3)
        transformed = perspective_transform(original, approx)
        rotated = imutils.rotate_bound(transformed, angle=-90)

cv2.imshow('thresh', thresh)
cv2.imshow('image', image)
cv2.imshow('transformed', transformed)
cv2.imshow('rotated', rotated)
cv2.waitKey()
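If the required orientation varies from image to image, a possible tweak (an assumption, not part of the answer above) is to choose the rotation from the warped image's aspect ratio rather than hard-coding -90 degrees:

# Hypothetical variant: rotate only when the warped tray is taller than it is
# wide, assuming the desired output is landscape
h, w = transformed.shape[:2]
rotated = imutils.rotate_bound(transformed, angle=-90) if h > w else transformed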
