Pytesseract-OCR wrong Japanese detection - Python

I installed Tesseract-OCR 5.0 from Here and selected the Japanese language during installation.
It is not correctly detecting Japanese letters.
Note: the same code detects English characters fine.
Here is my code:
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv
try:
    from PIL import Image
except ImportError:
    import Image
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
#################################################################################################
# Read your file
file = 'removed.png'
img = cv2.imread(file, 0)
img.shape
# thresholding the image to a binary image
thresh, img_bin = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# inverting the image
img_bin = 255 - img_bin
# Plotting the image to see the output
plotting = plt.imshow(img_bin, cmap='gray')
plt.show()
# Define a kernel to detect rectangular boxes
# Length(width) of kernel as 100th of total width
kernel_len = np.array(img).shape[1] // 100
# Defining a vertical kernel to detect all vertical lines of image
ver_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, kernel_len))
# Defining a horizontal kernel to detect all horizontal lines of image
hor_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_len, 1))
# A kernel of 2x2
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
#### Vertical LINES ####
# Use vertical kernel to detect and save the vertical lines in a jpg
image_1 = cv2.erode(img_bin, ver_kernel, iterations=5)
vertical_lines = cv2.dilate(image_1, ver_kernel, iterations=45)
# Plot the generated image
plotting = plt.imshow(image_1, cmap='gray')
plt.show()
#### HORIZONTAL LINES ####
# Use horizontal kernel to detect and save the horizontal lines in a jpg
image_2 = cv2.erode(img_bin, hor_kernel, iterations=5)
horizontal_lines = cv2.dilate(image_2, hor_kernel, iterations=45)
# Plot the generated image
plotting = plt.imshow(image_2, cmap='gray')
plt.show()
# Combining both H and V
# Combine horizontal and vertical lines in a new third image, with both having same weight.
img_vh = cv2.addWeighted(vertical_lines, 0.5, horizontal_lines, 0.5, 0.0)
# Eroding and thresholding the image
img_vh = cv2.erode(~img_vh, kernel, iterations=1)
thresh, img_vh = cv2.threshold(img_vh, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
plotting = plt.imshow(img_vh, cmap='gray')
plt.show()
##
bitxor = cv2.bitwise_xor(img, img_vh)
bitnot = cv2.bitwise_not(bitxor)
# Plotting the generated image
plotting = plt.imshow(bitnot, cmap='gray')
plt.show()
# Detect contours for following box detection
contours, hierarchy = cv2.findContours(img_vh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
def sort_contours(cnts, method="left-to-right"):
    # initialize the reverse flag and sort index
    reverse = False
    i = 0
    # handle if we need to sort in reverse
    if method == "right-to-left" or method == "bottom-to-top":
        reverse = True
    # handle if we are sorting against the y-coordinate rather than
    # the x-coordinate of the bounding box
    if method == "top-to-bottom" or method == "bottom-to-top":
        i = 1
    # construct the list of bounding boxes and sort them from top to
    # bottom
    boundingBoxes = [cv2.boundingRect(c) for c in cnts]
    (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
                                        key=lambda b: b[1][i], reverse=reverse))
    # return the list of sorted contours and bounding boxes
    return (cnts, boundingBoxes)
# Sort all the contours by top to bottom.
contours, boundingBoxes = sort_contours(contours, method="bottom-to-top")
################
# Creating a list of heights for all detected boxes
heights = [boundingBoxes[i][3] for i in range(len(boundingBoxes))]
# Get mean of heights
mean = np.mean(heights)
# Create list box to store all boxes in
box = []
# Get position (x,y), width and height for every contour and show the contour on image
for c in contours:
    x, y, w, h = cv2.boundingRect(c)
    if w > 110 and h < 85:
        image = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        box.append([x, y, w, h])
plotting = plt.imshow(image, cmap="cividis")
plt.show()
# Creating two lists to define row and column in which cell is located
row = []
column = []
j = 0
# Sorting the boxes to their respective row and column
for i in range(len(box)):
    if i == 0:
        column.append(box[i])
        previous = box[i]
    else:
        if box[i][1] <= previous[1] + mean / 2:
            column.append(box[i])
            previous = box[i]
            if i == len(box) - 1:
                row.append(column)
        else:
            row.append(column)
            column = []
            previous = box[i]
            column.append(box[i])
print("Columns:", column)
print("Rows: ", row)
# calculating the maximum number of cells in any row
countcol = 0
for i in range(len(row)):
    if len(row[i]) > countcol:
        countcol = len(row[i])
print("Max cells: ", countcol)
# Retrieving the center of each column
center = [int(row[i][j][0] + row[i][j][2] / 2) for j in range(len(row[i])) if row[0]]
center = np.array(center)
center.sort()
# Regarding the distance to the columns center, the boxes are arranged in respective order
finalboxes = []
for i in range(len(row)):
    lis = []
    for k in range(countcol):
        lis.append([])
    for j in range(len(row[i])):
        diff = abs(center - (row[i][j][0] + row[i][j][2] / 4))
        minimum = min(diff)
        indexing = list(diff).index(minimum)
        lis[indexing].append(row[i][j])
    finalboxes.append(lis)
# from every single image-based cell/box the strings are extracted via pytesseract and stored in a list
outer = []
for i in range(len(finalboxes)):
    for j in range(len(finalboxes[i])):
        inner = ''
        if len(finalboxes[i][j]) == 0:
            outer.append(' ')
        else:
            for k in range(len(finalboxes[i][j])):
                y, x, w, h = finalboxes[i][j][k][0], finalboxes[i][j][k][1], \
                             finalboxes[i][j][k][2], finalboxes[i][j][k][3]
                finalimg = bitnot[x:x + h, y:y + w]
                kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 1))
                border = cv2.copyMakeBorder(finalimg, 2, 2, 2, 2, cv2.BORDER_CONSTANT, value=[255, 255])
                resizing = cv2.resize(border, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
                dilation = cv2.dilate(resizing, kernel, iterations=1)
                erosion = cv2.erode(dilation, kernel, iterations=1)
                out = pytesseract.image_to_string(erosion)
                if len(out) == 0:
                    out = pytesseract.image_to_string(erosion, config='--psm 3')
                inner = inner + " " + out
            outer.append(inner)
## Printing all detected cells
for element in outer:
    if len(element) > 10:
        for line in element.splitlines():
            if len(line) > 2:
                print("##", line.strip())
And here is the output for the last segment of code:
## LotNo. |
## fo
## 900-1829-B
## 2021/09/17
## 1, 000
## Wes FE —Cu-ESEIRNI
## LCH2
## 4X17. 05
## TYYY GISA-5AIB
## /448642-2 B
As you can see, it has difficulty detecting Japanese letters.

Tesseract by default uses only English, so you may have to set other language(s) as a parameter.
At the console you can test it as
tesseract.exe image.png output.txt -l jpn
or even with many languages
tesseract.exe image.png output.txt -l jpn+eng
(instead of output.txt you can use - to display text directly in console)
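You can also confirm which language packs are actually installed; if jpn is missing from the list, the traineddata file never made it into the tessdata folder:
tesseract.exe --list-langs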
In code it can be
pytesseract.image_to_string('image.png', config='-l jpn')
pytesseract.image_to_string('image.png', config='-l jpn+eng')
or
pytesseract.image_to_string('image.png', lang='jpn')
pytesseract.image_to_string('image.png', lang='jpn+eng')
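Applied to the pipeline in the question, that would mean passing lang to the per-cell call. A minimal sketch (assuming the jpn traineddata is installed; --psm 6, which treats each cell as a uniform text block, is a guess you may need to tune):
# Check which languages pytesseract can see (available in recent pytesseract versions)
print(pytesseract.get_languages(config=''))
# OCR a single cell image with Japanese + English
out = pytesseract.image_to_string(erosion, lang='jpn+eng', config='--psm 6')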

Related

Correct the object orientation in the image. Calculate the correct angle of rotation and correct the alignment of the object in the image

I have cropped images of electronic meter readings. Those readings are taken in a random style. I need the orientation of the object (not the image) in the image to be aligned.
1. The detection of contours is not working reliably: lots of contours are formed in the image, and to calculate the angle I need to select the right one. Sometimes no contour is formed at all.
2. I want a set of rotated images as shown in the figure above. I tried some image-rotation code from OpenCV, but because there are two types of use case (the code cannot tell which of the two reading styles an image uses), the images turned out as below.
Using the code below I am able to find the angle of rotation, but only for one case. I need it to be done automatically for both types of cases. Also see the data set I have attached for other types of examples.
import cv2
import numpy as np
import matplotlib.pyplot as plt  # needed: display() below uses plt
debug = True
# Display image
def display(img, frameName="OpenCV Image"):
    if not debug:
        return
    h, w = img.shape[0:2]
    neww = 800
    newh = int(neww*(h/w))
    img = cv2.resize(img, (neww, newh))
    plt.imshow(img)
    plt.show()
    # cv2.imshow(frameName, img)
    # cv2.waitKey(0)
# rotate the image with given theta value
def rotate(img, theta):
    rows, cols = img.shape[0], img.shape[1]
    image_center = (cols/2, rows/2)
    M = cv2.getRotationMatrix2D(image_center, theta, 1)
    abs_cos = abs(M[0, 0])
    abs_sin = abs(M[0, 1])
    bound_w = int(rows * abs_sin + cols * abs_cos)
    bound_h = int(rows * abs_cos + cols * abs_sin)
    M[0, 2] += bound_w/2 - image_center[0]
    M[1, 2] += bound_h/2 - image_center[1]
    # rotate original image to show transformation
    rotated = cv2.warpAffine(img, M, (bound_w, bound_h), borderValue=(255, 255, 255))
    return rotated
def slope(x1, y1, x2, y2):
    if x1 == x2:
        return 0
    slope = (y2-y1)/(x2-x1)
    theta = np.rad2deg(np.arctan(slope))
    return theta
def main(filePath):
    img = cv2.imread(filePath)
    (hi, wi) = img.shape[:2]
    textImg = img.copy()
    small = cv2.cvtColor(textImg, cv2.COLOR_BGR2GRAY)
    # find the gradient map
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    grad = cv2.morphologyEx(small, cv2.MORPH_GRADIENT, kernel)
    display(grad)
    # Binarize the gradient image
    _, bw = cv2.threshold(grad, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    display(bw)
    # connect horizontally oriented regions
    # kernel value (9,1) can be changed to improve the text detection
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 1))
    connected = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)
    display(connected)
    # using RETR_EXTERNAL instead of RETR_CCOMP
    # _, contours, hierarchy = cv2.findContours(connected.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    contours, hierarchy = cv2.findContours(connected.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)  # opencv >= 4.0
    mask = np.zeros(bw.shape, dtype=np.uint8)
    display(mask)
    # cumulative theta value
    cummTheta = 0
    # number of detected text regions
    ct = 0
    flag = False
    for idx in range(len(contours)):
        x, y, w, h = cv2.boundingRect(contours[idx])
        mask[y:y+h, x:x+w] = 0
        # fill the contour
        cv2.drawContours(mask, contours, idx, (255, 255, 255), -1)
        display(mask)
        # ratio of non-zero pixels in the filled region
        r = float(cv2.countNonZero(mask[y:y+h, x:x+w])) / (w * h)
        # assume at least 45% of the area is filled if it contains text
        # if r > 0.39 and w > 8 and h > 8:
        if (h/hi) > 0.4 and (w/wi) > 0.4:
            flag = True
            print(r, w, h)
            # cv2.rectangle(textImg, (x1, y), (x+w-1, y+h-1), (0, 255, 0), 2)
            rect = cv2.minAreaRect(contours[idx])
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            cv2.drawContours(textImg, [box], 0, (0, 0, 255), 2)
            center = (int(rect[0][0]), int(rect[0][1]))
            width = int(rect[1][0])
            height = int(rect[1][1])
            angle = int(rect[2])
            print(angle)
            print(width, height)
            if width < height:
                angle = 90 + angle
            print(angle, 'final')
            # we can filter theta as an outlier based on other theta values;
            # this will help in excluding the rare text region whose orientation differs from the usual value
            theta = slope(box[0][0], box[0][1], box[1][0], box[1][1])
            cummTheta += theta
            ct += 1
            # print("Theta", theta)
            # find the average of all cumulative theta values
            # orientation = cummTheta/ct
            print("Image orientation in degrees: ", angle)
            finalImage = rotate(img, angle)
            display(textImg, "Detected text minimum bounding box")
            display(finalImage)
            out_path = 'cropped_corrected/rotated/' + filePath.split('\\')[-1]
            print(out_path)
            cv2.imwrite(out_path, finalImage)
            print('image saved here in rotated')
            break
    if not flag:
        out_path = 'cropped_corrected/not_rotated/' + filePath.split('\\')[-1]
        print(out_path)
        cv2.imwrite(out_path, img)
        print('image saved here without rotation')

if __name__ == "__main__":
    filePath = 'cropped/N3963001963.jpg'
    main(filePath)
I am attaching some sample images that need to be rotated and the object inside the image needs to be aligned:

Python - Detect a QR code from an image and crop using OpenCV

I'm working on a project using Python (3.7) and OpenCV in which I have an image (captured using the camera) of a document with a QR code placed on it.
This QR code has 6 variables respectively as:
Size of QR code image
Top
Right
Bottom
Left
Unit
Latest Update:
Here are the steps I need to perform, in this order:
Detect the QR code & decode it to read the size values.
If the size of the QR code (image) is not equal to the size mentioned inside it, scale the image so the two size values match.
Then crop the image on all sides relative to the QR code image, according to the values mentioned inside the QR code.
I have tried this code:
def decodeAndCrop(inputImage):
    print(str(inputImage))
    image = cv2.imread(str(inputImage))
    qrCodeDetector = cv2.QRCodeDetector()
    decodedText, points, _ = qrCodeDetector.detectAndDecode(image)
    qr_data = decodedText.split(",")
    print("qr data from function: {}".format(qr_data))
    if points is not None:
        pts = len(points)
        # print(pts)
        for i in range(pts):
            nextPointIndex = (i + 1) % pts
            if str(inputImage) == "scaled_img.jpg":
                cv2.line(
                    image,
                    tuple(points[i][0]),
                    tuple(points[nextPointIndex][0]),
                    (255, 0, 0),
                    5,
                )
            print(points[i][0])
        width = int(
            math.sqrt(
                (points[0][0][0] - points[1][0][0]) ** 2
                + (points[0][0][1] - points[1][0][1]) ** 2
            )
        )
        height = int(
            math.sqrt(
                (points[1][0][0] - points[2][0][0]) ** 2
                + (points[1][0][1] - points[2][0][1]) ** 2
            )
        )
        print("height and width after scaling: {} {}".format(height, width))
        if not str(inputImage) == "scaled_img.jpg":
            scaled_img = None
            if width == qr_data[0] and height == qr_data[0]:
                print("Sizes are equal")
                # Add the extension values to points and crop
                y = int(points[0][0][1]) - int(qr_data[1])
                x = int(points[0][0][0]) - int(qr_data[4])
                roi = image[
                    y : y + height + int(qr_data[3]), x : x + width + int(qr_data[2])
                ]
                scaled_img = cv2.imwrite("scaled_img.jpg", roi)
                return scaled_img
            else:
                print(
                    "Width and height "
                    + str(width)
                    + "x"
                    + str(height)
                    + " not equal to "
                    + str(qr_data[0])
                    + "x"
                    + str(qr_data[0])
                )
                if height > int(qr_data[0]):
                    scale_width = int(width) - int(qr_data[0])
                    scale_height = int(height) - int(qr_data[0])
                    print(f"scaled width: {scale_width} scaled height: {scale_height}")
                    dimension = (scale_width, scale_height)
                    scaled_img = cv2.resize(
                        image, dimension, interpolation=cv2.INTER_AREA
                    )
                    print("new img dims: {}".format(scaled_img.shape))
                    cv2.imshow("scaled image:", scaled_img)
                    cv2.imwrite("scaled_img.jpg", scaled_img)
                elif height < int(qr_data[0]):
                    scale_width = int(qr_data[0]) - width
                    scale_height = int(qr_data[0] - height)
                    print(f"scaled width: {scale_width} scaled height: {scale_height}")
                    dimension = (scale_width, scale_height)
                    scaled_img = cv2.resize(
                        image, dimension, interpolation=cv2.INTER_AREA
                    )
                    print("new img dims: {}".format(scaled_img.shape))
                    cv2.imshow("scaled image:", scaled_img)
                    cv2.imwrite("scaled_img.jpg", scaled_img)
                cv2.imshow("final output:", roi)
                return scaled_img
        else:
            y = int(points[0][0][1]) - int(qr_data[1])
            x = int(points[0][0][0]) - int(qr_data[4])
            print(" x and y")
            print(x)
            print(y)
            roi = image[
                y : y + height + int(qr_data[3]), x : x + width + int(qr_data[2])
            ]
            final_img = cv2.imwrite("finalized_image.jpg", roi)
            cv2.imshow("finalized image:", final_img)
            return final_img

if __name__ == "__main__":
    image_to_crop = decodeAndCrop("example_input_1.jpg")
    final_image = decodeAndCrop("scaled_img.jpg")
    cv2.imshow("Cropped:", image_to_crop)
    # cv2.imshow("Final: ", final_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
The code above gives an error as:
final_img = cv2.imwrite("finalized_image.jpg", roi)
cv2.error: OpenCV(4.2.0) /Users/travis/build/skvark/opencv-python/opencv/modules/imgcodecs/src/loadsave.cpp:715: error: (-215:Assertion failed) !_img.empty() in function 'imwrite'
End of Latest Update:
An example decoded information of a QR code is as: 100, 20, 40, 60, 20, px
Now, I need to detect the QR code from this document image, and in the first step I need to compare the size of the QR code in the captured image of the document with the size mentioned in the decoded information. For example, if in the captured image the size of the QR image is 90x90 px and the size from the decoded info is 100x100 px, we need to compare them.
Then, in the second step, I have to crop the complete image using the Top, Right, Bottom & Left variables accordingly. According to the above example, we need to crop the image from the position of the detected QR code to 20 px Top, 40 px Right, 60 px Bottom and 20 px Left. I have added an example image below.
I have managed to decode the QR code information, but how can I take the detected QR code area as a separate image, compare its size with the mentioned size, and then crop the image accordingly?
Here's what I have tried so far:
import cv2
image = cv2.imread('/Users/abdul/PycharmProjects/QScanner/images/second.jpg')
qrCodeDetector = cv2.QRCodeDetector()
decodedText, points, _ = qrCodeDetector.detectAndDecode(image)
qr_data = decodedText.split(',')
qr_size = qr_data[0]
top = qr_data[1]
right = qr_data[2]
bottom = qr_data[3]
left = qr_data[4]
print(f'Size: {qr_size}' + str(qr_data[5]))
print(f'Top: {top}')
print(f'Right: {right}')
print(f'Bottom: {bottom}')
print(f'Left: {left}')
if points is not None:
    pts = len(points)
    print(pts)
    for i in range(pts):
        nextPointIndex = (i+1) % pts
        cv2.line(image, tuple(points[i][0]), tuple(points[nextPointIndex][0]), (255,0,0), 5)
        print(points[i][0])
    print(decodedText)
    cv2.imshow("Image", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
else:
    print("QR code not detected")
Here's an example Image:
and here's a sample of input image:
Here's a simple approach using thresholding, morphological operations, and contour filtering.
Obtain binary image. Load image, grayscale, Gaussian blur, Otsu's threshold
Connect individual QR contours. Create a rectangular structuring kernel with cv2.getStructuringElement() then perform morphological operations with cv2.MORPH_CLOSE.
Filter for QR code. Find contours and filter using contour approximation, contour area, and aspect ratio.
Detected QR code
Extracted QR code
From here you can compare the QR code with your reference information
Code
import cv2
import numpy as np
# Load image, grayscale, Gaussian blur, Otsu's threshold
image = cv2.imread('1.jpg')
original = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (9,9), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Morph close
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
close = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=2)
# Find contours and filter for QR code
cnts = cv2.findContours(close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.04 * peri, True)
    x, y, w, h = cv2.boundingRect(approx)
    area = cv2.contourArea(c)
    ar = w / float(h)
    if len(approx) == 4 and area > 1000 and (ar > .85 and ar < 1.3):
        cv2.rectangle(image, (x, y), (x + w, y + h), (36, 255, 12), 3)
        ROI = original[y:y+h, x:x+w]
        cv2.imwrite('ROI.png', ROI)
cv2.imshow('thresh', thresh)
cv2.imshow('close', close)
cv2.imshow('image', image)
cv2.imshow('ROI', ROI)
cv2.waitKey()
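From there, a minimal sketch of the comparison step (assuming the ROI extracted above and the comma-separated payload format from the question; the variable names are illustrative):
# Decode the cropped QR and compare its pixel size with the size stored inside it
detector = cv2.QRCodeDetector()
data, points, _ = detector.detectAndDecode(ROI)
if data:
    declared = int(data.split(',')[0])  # size declared inside the QR code
    measured = ROI.shape[0]             # cropped QR height in pixels
    print(declared, measured, declared / measured)  # a ratio != 1 means scaling is needed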
I got the width and height data using points and compared them with the qr_data size, then cropped the QR as needed.
import cv2
import math
image = cv2.imread('/ur/image/directory/qr.jpg')
qrCodeDetector = cv2.QRCodeDetector()
decodedText, points, _ = qrCodeDetector.detectAndDecode(image)
qr_data = decodedText.split(',')
qr_size = qr_data[0]
top = qr_data[1]
right = qr_data[2]
bottom = qr_data[3]
left = qr_data[4]
if points is not None:
    pts = len(points)
    print(pts)
    for i in range(pts):
        nextPointIndex = (i+1) % pts
        cv2.line(image, tuple(points[i][0]), tuple(points[nextPointIndex][0]), (255,0,0), 5)
        print(points[i][0])
    width = int(math.sqrt((points[0][0][0]-points[1][0][0])**2 + (points[0][0][1]-points[1][0][1])**2))
    height = int(math.sqrt((points[1][0][0]-points[2][0][0])**2 + (points[1][0][1]-points[2][0][1])**2))
    # Compare the size (note: qr_data holds strings, so cast to int before comparing)
    if width == int(qr_data[0]) and height == int(qr_data[0]):
        print("Sizes are equal")
    else:
        print("Width and height " + str(width) + "x" + str(height) + " not equal to "
              + str(qr_data[0]) + "x" + str(qr_data[0]))
    # Add the extension values to points and crop
    y = int(points[0][0][1]) - int(qr_data[1])
    x = int(points[0][0][0]) - int(qr_data[4])
    roi = image[y:y+height + int(qr_data[3]), x:x+width + int(qr_data[2])]
    print(decodedText)
    cv2.imshow("Image", image)
    cv2.imshow("Crop", roi)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
else:
    print("QR code not detected")
Result:
So, you mainly have 3 problems here:
If the image is rotated by an angle theta.
If the sheet is not on a flat plane (i.e., in the images, the upper line doesn't seem to be linear; but it should not be a big deal).
The black borders. Will you always have those, or may it be a different background? This is important, because without cropping those out you won't be able to get a reasonable result.
I improved your code a little bit and removed the border pixels:
import cv2
import matplotlib.pyplot as plt
import math
import numpy as np
image = cv2.imread('/Users/samettaspinar/Public/im.jpg')
qrCodeDetector = cv2.QRCodeDetector()
decodedText, points, _ = qrCodeDetector.detectAndDecode(image)
qr_data = decodedText.split(',')
qr_size = int(qr_data[0])
top = int(qr_data[1])
right = int(qr_data[2])
bottom = int(qr_data[3])
left = int(qr_data[4])
print(f'Size: {qr_size}' + str(qr_data[5]))
print(f'Top: {top}')
print(f'Right: {right}')
print(f'Bottom: {bottom}')
print(f'Left: {left}')
plt.imshow(image)
plt.show()
dists = [] #This is for estimating distances between corner points.
#I will average them to find ratio of pixels in image vs qr_size
#in the optimal case, all dists should be equal
if points is not None:
    pts = len(points)
    for i in range(pts):
        p1 = points[i][0]
        p2 = points[(i+1) % pts][0]
        dists.append(math.sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2))
        print('line', tuple(p1), tuple(p2))
        image = cv2.line(image, tuple(p1), tuple(p2), (255,0,0), 5)
else:
    print("QR code not detected")
print('distances: ', dists)
# Remove the black border pixels. I had a simple idea for this:
# get the average intensity of the gray image, then count the row/column
# averages in the first half that are less than intensity/2.
# That approximately gives the number of black border pixels on the left, etc.
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
row, col, dim = image.shape  # moved up: col and row are used just below
inten = np.mean(gray)
x = np.mean(gray, axis=0)  # finds the vertical average
y = np.mean(gray, axis=1)  # finds the horizontal average
bl_left = np.sum([x[:int(col/2)] < inten/2])
bl_right = np.sum([x[int(col/2)+1:] < inten/2])
bl_top = np.sum([y[:int(row/2)] < inten/2])
bl_bottom = np.sum([y[int(row/2)+1:] < inten/2])
print('black margins: ', bl_left, bl_right, bl_top, bl_bottom)
# Estimate how many pixels you will crop out
ratio = np.mean(dists) / int(qr_size)
print('actual px / qr_size in px: ', ratio)
top, left, right, bottom = int(top*ratio), int(left*ratio), int(right*ratio), int(bottom*ratio)
top += bl_top
left += bl_left
right += bl_right
bottom += bl_bottom
print('num pixels to be cropped: ', top, left, right, bottom)
image2 = image[top:row-bottom, left:col-right, :]
plt.imshow(image2)
plt.show()
Notice that I ignored the rotation issue. If there is rotation, you can find the angle by calculating the tangents/arctan where I calculated the distances.
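A minimal sketch of that idea (reusing the points array from above; math.atan2 gives the angle of the QR code's top edge relative to the horizontal, and is only an illustration):
# Rotation angle of the QR code's top edge, in degrees
p1, p2 = points[0][0], points[1][0]
theta = math.degrees(math.atan2(p2[1] - p1[1], p2[0] - p1[0]))
print('rotation angle:', theta)
# You could then deskew with cv2.warpAffine using cv2.getRotationMatrix2D(center, theta, 1.0)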
For QR detection and parsing
import cv2
import sys

filename = sys.argv[1]
# read the QRCODE image
# in case the QR code is not black/white it is better to convert it into grayscale
img = cv2.imread(filename, 0)  # Zero means grayscale
img_origin = cv2.imread(filename)
# initialize the cv2 QRCode detector
detector = cv2.QRCodeDetector()
# detect and decode
data, bbox, straight_qrcode = detector.detectAndDecode(img)
# if there is a QR code
if bbox is not None:
    print(f"QRCode data:\n{data}")
    # display the image with lines
    # length of bounding box
    # Since bbox = [[[float, float]]], we need to convert the floats into ints and loop over the first element of the array
    n_lines = len(bbox[0])
    bbox1 = bbox.astype(int)  # Float to int conversion
    for i in range(n_lines):
        # draw all lines
        point1 = tuple(bbox1[0, [i][0]])
        point2 = tuple(bbox1[0, [(i+1) % n_lines][0]])
        cv2.line(img_origin, point1, point2, color=(255, 0, 0), thickness=2)
    # display the result
    cv2.imshow("img", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
else:
    print("QR code not detected")

How to get the cells of a sudoku grid with OpenCV?

I've been trying for the last few days to get a sudoku grid from a picture, and I have been struggling to get the smaller squares of the grid.
I am working on the picture below. I thought processing the image with a Canny filter would work fine, but it didn't, and I couldn't get every contour of each square. I then put adaptive threshold, Otsu, and classic thresholding to the test, but every time they just could not seem to capture every small square.
The final goal is to get the cells containing a number and recognize the numbers with PyTorch, so I would really like to have some clean images of the numbers, so the recognition doesn't screw up :)
Would anyone have an idea on how to achieve this?
Thanks a lot in advance! :D
Here's a potential solution:
Obtain binary image. Convert image to grayscale and adaptive threshold.
Filter out all numbers and noise to isolate only boxes. We filter using contour area to remove the numbers, since we only want each individual cell.
Fix grid lines. Perform morphological closing with a horizontal and vertical kernel to repair grid lines.
Sort each cell in top-to-bottom and left-to-right order. We organize each cell into sequential order using imutils.contours.sort_contours() with the top-to-bottom and left-to-right parameter.
Here's the initial binary image (left) and filtered out numbers + repaired grid lines + inverted image (right)
Here's a visualization of the iteration of each cell
The detected numbers in each cell
Code
import cv2
from imutils import contours
import numpy as np
# Load image, grayscale, and adaptive threshold
image = cv2.imread('1.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,57,5)
# Filter out all numbers and noise to isolate only boxes
cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    area = cv2.contourArea(c)
    if area < 1000:
        cv2.drawContours(thresh, [c], -1, (0,0,0), -1)
# Fix horizontal and vertical lines
vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1,5))
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, vertical_kernel, iterations=9)
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,1))
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, horizontal_kernel, iterations=4)
# Sort by top to bottom and each row by left to right
invert = 255 - thresh
cnts = cv2.findContours(invert, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
(cnts, _) = contours.sort_contours(cnts, method="top-to-bottom")
sudoku_rows = []
row = []
for (i, c) in enumerate(cnts, 1):
    area = cv2.contourArea(c)
    if area < 50000:
        row.append(c)
        if i % 9 == 0:
            (cnts, _) = contours.sort_contours(row, method="left-to-right")
            sudoku_rows.append(cnts)
            row = []
# Iterate through each box
for row in sudoku_rows:
    for c in row:
        mask = np.zeros(image.shape, dtype=np.uint8)
        cv2.drawContours(mask, [c], -1, (255,255,255), -1)
        result = cv2.bitwise_and(image, mask)
        result[mask==0] = 255
        cv2.imshow('result', result)
        cv2.waitKey(175)
cv2.imshow('thresh', thresh)
cv2.imshow('invert', invert)
cv2.waitKey()
Note: The sorting idea was adapted from an earlier answer in Rubik's cube solver color extraction.
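Since the end goal is feeding digits to PyTorch, here is a small follow-on sketch (illustrative only, reusing sudoku_rows and gray from above) that crops each cell to a clean image instead of displaying it:
# Crop each sorted cell to its bounding box for a downstream digit classifier
cells = []
for row in sudoku_rows:
    for c in row:
        x, y, w, h = cv2.boundingRect(c)
        cell = gray[y:y+h, x:x+w]                 # grid lines were already filtered out above
        cells.append(cv2.resize(cell, (28, 28)))  # e.g. MNIST-sized input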
Steps:
Image PreProcessing ( closing operation )
Finding Sudoku Square and Creating Mask Image
Finding Vertical lines
Finding Horizontal Lines
Finding Grid Points
Correcting the defects
Extracting the digits from each cell
Code:
# ==========import the necessary packages============
import imutils
import numpy as np
import cv2
from transform import four_point_transform
from PIL import Image
import pytesseract
import math
from skimage.filters import threshold_local
# =============== For Transformation ==============
def order_points(pts):
    """Initialize a list of coordinates that will be ordered
    such that the first entry in the list is the top-left,
    the second entry is the top-right, the third is the
    bottom-right, and the fourth is the bottom-left."""
    rect = np.zeros((4, 2), dtype="float32")
    # the top-left point will have the smallest sum, whereas
    # the bottom-right point will have the largest sum
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]
    # now, compute the difference between the points; the
    # top-right point will have the smallest difference,
    # whereas the bottom-left will have the largest difference
    diff = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]
    # return the ordered coordinates
    return rect

def four_point_transform(image, pts):
    # obtain a consistent order of the points and unpack them
    # individually
    rect = order_points(pts)
    (tl, tr, br, bl) = rect
    # compute the width of the new image, which will be the
    # maximum distance between bottom-right and bottom-left
    # x-coordinates or the top-right and top-left x-coordinates
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))
    # compute the height of the new image, which will be the
    # maximum distance between the top-right and bottom-right
    # y-coordinates or the top-left and bottom-left y-coordinates
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))
    # now that we have the dimensions of the new image, construct
    # the set of destination points to obtain a "birds eye view",
    # (i.e. top-down view) of the image, again specifying points
    # in the top-left, top-right, bottom-right, and bottom-left
    # order
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")
    # compute the perspective transform matrix and then apply it
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    # return the warped image
    return warped

############## To show image ##############
def show_image(img, title):
    cv2.imshow(title, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def find_largest_feature(inp_img, scan_tl=None, scan_br=None):
    """
    Uses the fact that the `floodFill` function returns a bounding box of the area it filled
    to find the biggest connected pixel structure in the image.
    Fills this structure in white, reducing the rest to black.
    """
    img = inp_img.copy()  # Copy the image, leaving the original untouched
    height, width = img.shape[:2]
    max_area = 0
    seed_point = (None, None)
    if scan_tl is None:
        scan_tl = [0, 0]
    if scan_br is None:
        scan_br = [width, height]
    # Loop through the image
    for x in range(scan_tl[0], scan_br[0]):
        for y in range(scan_tl[1], scan_br[1]):
            # Only operate on light or white squares
            if img.item(y, x) == 255 and x < width and y < height:  # Note that .item() appears to take input as y, x
                area = cv2.floodFill(img, None, (x, y), 64)
                if area[0] > max_area:  # Gets the maximum bound area which should be the grid
                    max_area = area[0]
                    seed_point = (x, y)
    # Colour everything grey (compensates for features outside of our middle scanning range)
    for x in range(width):
        for y in range(height):
            if img.item(y, x) == 255 and x < width and y < height:
                cv2.floodFill(img, None, (x, y), 64)
    mask = np.zeros((height + 2, width + 2), np.uint8)  # Mask that is 2 pixels bigger than the image
    # Highlight the main feature
    if all([p is not None for p in seed_point]):
        cv2.floodFill(img, mask, seed_point, 255)
    for x in range(width):
        for y in range(height):
            if img.item(y, x) == 64:  # Hide anything that isn't the main feature
                cv2.floodFill(img, mask, (x, y), 0)
    return img
################# Preprocessing of sudoku image ###############
def preprocess(image, case):
    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = imutils.resize(image, height=500)
    if case == True:
        gray = cv2.GaussianBlur(image, (5,5), 0)
        gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
        mask = np.zeros((gray.shape), np.uint8)
        kernel1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11,11))
        close = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, kernel1)
        div = np.float32(gray)/(close)
        res = np.uint8(cv2.normalize(div, div, 0, 255, cv2.NORM_MINMAX))
        res2 = cv2.cvtColor(res, cv2.COLOR_GRAY2BGR)
        edged = cv2.Canny(res, 75, 200)
        cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
        # loop over the contours
        for c in cnts:
            # approximate the contour
            rect = cv2.boundingRect(c)
            area = cv2.contourArea(c)
            cv2.rectangle(edged.copy(), (rect[0],rect[1]), (rect[2]+rect[0],rect[3]+rect[1]), (0,0,0), 2)
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)
            # if our approximated contour has four points, then we
            # can assume that we have found our screen
            if len(approx) == 4:
                screenCnt = approx
                #print(screenCnt)
                break
        # show the contour (outline) of the piece of paper
        #print(screenCnt)
        cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
        # apply the four point transform to obtain a top-down
        # view of the original image
        warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
        warped1 = cv2.resize(warped, (610,610))
        warp = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
        T = threshold_local(warp, 11, offset=10, method="gaussian")
        warp = (warp > T).astype("uint8") * 255
        th3 = cv2.adaptiveThreshold(warp, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
        kernel = np.ones((5,5), np.uint8)
        dilation = cv2.GaussianBlur(th3, (5,5), 0)
    else:
        warped = image
        warped1 = cv2.resize(warped, (610,610))
        warp = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
        T = threshold_local(warp, 11, offset=10, method="gaussian")
        warp = (warp > T).astype("uint8") * 255
        th3 = cv2.adaptiveThreshold(warp, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
    #show_image(warped1, "preprocessed")
    return th3, warped1, warped
def grids(img, warped2):
    #print("im:", img.shape)
    img2 = img.copy()
    img = np.zeros((500,500,3), np.uint8)
    ratio2 = 3
    kernel_size = 3
    lowThreshold = 30
    frame = img
    img = cv2.resize(frame, (610,610))
    for i in range(10):
        cv2.line(img, (0,(img.shape[0]//9)*i), (img.shape[1],(img.shape[0]//9)*i), (255, 255, 255), 3, 1)
        cv2.line(warped2, (0,(img.shape[0]//9)*i), (img.shape[1],(img.shape[0]//9)*i), (125, 0, 55), 3, 1)
    for j in range(10):
        cv2.line(img, ((img.shape[1]//9)*j, 0), ((img.shape[1]//9)*j, img.shape[0]), (255, 255, 255), 3, 1)
        cv2.line(warped2, ((img.shape[1]//9)*j, 0), ((img.shape[1]//9)*j, img.shape[0]), (125, 0, 55), 3, 1)
    #show_image(warped2, "grids")
    return img
############### Finding out the intersection pts to get the grids #########
def grid_points(img, warped2):
    img1 = img.copy()
    kernelx = cv2.getStructuringElement(cv2.MORPH_RECT, (2,10))
    dx = cv2.Sobel(img, cv2.CV_16S, 1, 0)
    dx = cv2.convertScaleAbs(dx)
    c = cv2.normalize(dx, dx, 0, 255, cv2.NORM_MINMAX)
    c = cv2.morphologyEx(c, cv2.MORPH_DILATE, kernelx, iterations=1)
    cy = cv2.cvtColor(c, cv2.COLOR_BGR2GRAY)
    closex = cv2.morphologyEx(cy, cv2.MORPH_DILATE, kernelx, iterations=1)
    kernely = cv2.getStructuringElement(cv2.MORPH_RECT, (10,2))
    dy = cv2.Sobel(img, cv2.CV_16S, 0, 2)
    dy = cv2.convertScaleAbs(dy)
    c = cv2.normalize(dy, dy, 0, 255, cv2.NORM_MINMAX)
    c = cv2.morphologyEx(c, cv2.MORPH_DILATE, kernely, iterations=1)
    cy = cv2.cvtColor(c, cv2.COLOR_BGR2GRAY)
    closey = cv2.morphologyEx(cy, cv2.MORPH_DILATE, kernelx, iterations=1)
    res = cv2.bitwise_and(closex, closey)
    #gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(res, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    kernel = np.ones((6,6), np.uint8)
    # Perform morphology
    se = np.ones((8,8), dtype='uint8')
    image_close = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, se)
    image_close = cv2.morphologyEx(image_close, cv2.MORPH_OPEN, kernel)
    contour, hier = cv2.findContours(image_close, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contour, key=cv2.contourArea, reverse=True)[:100]
    centroids = []
    for cnt in cnts:
        mom = cv2.moments(cnt)
        (x, y) = int(mom['m10']/mom['m00']), int(mom['m01']/mom['m00'])
        cv2.circle(img1, (x,y), 4, (0,255,0), -1)
        cv2.circle(warped2, (x,y), 4, (0,255,0), -1)
        centroids.append((x,y))
    #show_image(warped2, "grid_points")
    Points = np.array(centroids, dtype=np.float32)
    c = Points.reshape((100,2))
    c2 = c[np.argsort(c[:,1])]
    b = np.vstack([c2[i*10:(i+1)*10][np.argsort(c2[i*10:(i+1)*10,0])] for i in range(10)])
    bm = b.reshape((10,10,2))
    return c2, bm, cnts
############ Recognize digit images to number #############
def image_to_num(c2):
    img = 255 - c2
    text = pytesseract.image_to_string(img, lang="eng", config='--psm 6 --oem 3')  # builder=builder)
    return list(text)[0]
###### To get the digit at the particular cell #############
def get_digit(c2, bm, warped1, cnts):
    num = []
    centroidx = np.empty((9, 9))
    centroidy = np.empty((9, 9))
    global list_images
    list_images = []
    for i in range(0, 9):
        for j in range(0, 9):
            x1, y1 = bm[i][j]  # bm[0] row1
            x2, y2 = bm[i+1][j+1]
            coordx = ((x1+x2)//2)
            coordy = ((y1+y2)//2)
            centroidx[i][j] = coordx
            centroidy[i][j] = coordy
            crop = warped1[int(x1):int(x2), int(y1):int(y2)]
            crop = imutils.resize(crop, height=69, width=67)
            c2 = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)
            c2 = cv2.adaptiveThreshold(c2, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
            kernel = np.ones((2,2), np.uint8)
            #c2 = cv2.morphologyEx(c2, cv2.MORPH_OPEN, kernel)
            c2 = cv2.copyMakeBorder(c2, 5, 5, 5, 5, cv2.BORDER_CONSTANT, value=(0,0,0))
            no = 0
            shape = c2.shape
            w = shape[1]
            h = shape[0]
            mom = cv2.moments(c2)
            (x, y) = int(mom['m10']/mom['m00']), int(mom['m01']/mom['m00'])
            c2 = c2[14:70, 15:62]
            contour, hier = cv2.findContours(c2, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            if cnts is not None:
                cnts = sorted(contour, key=cv2.contourArea, reverse=True)[:1]
                for cnt in cnts:
                    x, y, w, h = cv2.boundingRect(cnt)
                    aspect_ratio = w/h
                    # print(aspect_ratio)
                    area = cv2.contourArea(cnt)
                    #print(area)
                    if area > 120 and cnt.shape[0] > 15 and aspect_ratio > 0.2 and aspect_ratio <= 0.9:
                        #print("area:", area)
                        c2 = find_largest_feature(c2)
                        #show_image(c2, "box2")
                        contour, hier = cv2.findContours(c2, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
                        cnts = sorted(contour, key=cv2.contourArea, reverse=True)[:1]
                        for cnt in cnts:
                            rect = cv2.boundingRect(cnt)
                            #cv2.rectangle(c2, (rect[0],rect[1]), (rect[2]+rect[0],rect[3]+rect[1]), (255,255,255), 2)
                            c2 = c2[rect[1]:rect[3]+rect[1], rect[0]:rect[2]+rect[0]]
                            c2 = cv2.copyMakeBorder(c2, 5, 5, 5, 5, cv2.BORDER_CONSTANT, value=(0,0,0))
                            list_images.append(c2)
                            #show_image(c2, "box")
                            no = image_to_num(c2)
            num.append(no)
    centroidx = np.transpose(centroidx)
    centroidy = np.transpose(centroidy)
    return c2, num, centroidx, centroidy
######## creating matrix and filling numbers exist in the orig image #######
def sudoku_matrix(num):
    c = 0
    grid = np.empty((9, 9))
    for i in range(9):
        for j in range(9):
            grid[i][j] = int(num[c])
            c += 1
    grid = np.transpose(grid)
    return grid
######## Creating board to show the puzzle result in terminal##############
def board(arr):
    for i in range(9):
        if i % 3 == 0:
            print("+", end="")
            print("-------+"*3)
        for j in range(9):
            if j % 3 == 0:
                print("", end="| ")
            print(int(arr[i][j]), end=" ")
        print("", end="|")
        print()
    print("+", end="")
    print("-------+"*3)
    return arr
def check_col(arr, num, col):
    if all([num != arr[i][col] for i in range(9)]):
        return True
    return False

def check_row(arr, num, row):
    if all([num != arr[row][i] for i in range(9)]):
        return True
    return False

def check_cell(arr, num, row, col):
    sectopx = 3 * (row // 3)
    sectopy = 3 * (col // 3)
    for i in range(sectopx, sectopx + 3):
        for j in range(sectopy, sectopy + 3):
            if arr[i][j] == num:
                return True
    return False

def empty_loc(arr, l):
    for i in range(9):
        for j in range(9):
            if arr[i][j] == 0:
                l[0] = i
                l[1] = j
                return True
    return False
#### Solving sudoku by back tracking############
def sudoku(arr):
    l = [0, 0]
    if not empty_loc(arr, l):
        return True
    row = l[0]
    col = l[1]
    for num in range(1, 10):
        if check_row(arr, num, row) and check_col(arr, num, col) and not check_cell(arr, num, row, col):
            arr[row][col] = int(num)
            if sudoku(arr):
                return True
            # failure, unmake & try again
            arr[row][col] = 0
    return False

def overlay(arr, num, img, cx, cy):
    no = -1
    for i in range(9):
        for j in range(9):
            no += 1
            #cv2.putText(img, str(no), (int(cx[i][j]), int(cy[i][j])), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
            if num[no] == 0:
                cv2.putText(img, str(int(arr[j][i])), (int(cx[i][j]-4), int(cy[i][j])+8), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 4)
    cv2.imshow("Sudoku", img)
    cv2.waitKey(0)
case = "False" # If transformation is required set True
image = cv2.imread("QupKb.png")
th3,warped1,warped = preprocess(image,case)
warped2 = warped1.copy()
img = grids(warped,warped2)
c2,bm,cnts = grid_points(img,warped2)
c2,num,cx,cy = get_digit(c2,bm,warped1,cnts)
grid = sudoku_matrix(num)
if(sudoku(grid)):
arr = board(grid)
overlay(arr,num,warped1,cx,cy)
else:
print("There is no solution")
warped:
th3:
warped2:
sudoku result:
All the extracted digits:
########## To view all the extracted digits ###############
import matplotlib.pyplot as plt  # needed for the grid display below

_, axs = plt.subplots(1, len(list_images), figsize=(24, 24))
axs = axs.flatten()
for img, ax in zip(list_images, axs):
    ax.imshow(cv2.resize(img, (64, 64)))
plt.show()
References:
grid points
get features (of digits)
Images samples
If the image contains just a tightly fitted sudoku grid, one crude way to achieve this would be to divide the image into an equal 9x9 grid and then try to extract the number in each cell.
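A minimal sketch of that idea (assuming a tightly cropped, roughly undistorted grid image; the filename is just an example):
import cv2

img = cv2.imread('sudoku_grid.png', 0)  # tightly cropped grid, grayscale
h, w = img.shape[:2]
ch, cw = h // 9, w // 9  # cell height and width
cells = [img[r*ch:(r+1)*ch, c*cw:(c+1)*cw] for r in range(9) for c in range(9)]
# each of the 81 entries in cells can then be passed to a digit recognizer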

How to find rotate and crop a section of text in openCV, python

I'm struggling with a project that takes an image of a pretty clear font (from, say, a label), reads the "text region", and outputs it as a string using OCR, tesseract for instance.
Now I've made quite some progress with the thing, as I added various global filters to get to a quite clear result, but I'm struggling with finding a method of filtering just the text out of there. Then you have to think about rotating it to be as horizontal as possible, and after that the easy part should be to crop it.
May I have any leads on how to do that without using training data and over-complicating the system, since I only use a Raspberry Pi to do the computing?
Thanks for helping; here's what I've come up with so far:
Original image (captured from PiCamera):
Adaptive thresh after shadow removal:
Global thresh after shadow removal:
Here's the code:
# import the necessary packages
from PIL import Image
import pytesseract
import argparse
import cv2
import os
import picamera
import time
import numpy as np
#preprocess = "thresh"
#Remaining text cropping and rotating:
import math
import json
from collections import defaultdict
from scipy.ndimage.filters import rank_filter
def dilate(ary, N, iterations):
    """Dilate using an NxN '+' sign shape. ary is np.uint8."""
    kernel = np.zeros((N, N), dtype=np.uint8)
    kernel[(N-1)//2, :] = 1  # integer division so the index stays an int
    dilated_image = cv2.dilate(ary / 255, kernel, iterations=iterations)
    kernel = np.zeros((N, N), dtype=np.uint8)
    kernel[:, (N-1)//2] = 1
    dilated_image = cv2.dilate(dilated_image, kernel, iterations=iterations)
    return dilated_image
def props_for_contours(contours, ary):
    """Calculate bounding box & the number of set pixels for each contour."""
    c_info = []
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        c_im = np.zeros(ary.shape)
        cv2.drawContours(c_im, [c], 0, 255, -1)
        c_info.append({
            'x1': x,
            'y1': y,
            'x2': x + w - 1,
            'y2': y + h - 1,
            'sum': np.sum(ary * (c_im > 0))/255
        })
    return c_info
def union_crops(crop1, crop2):
    """Union two (x1, y1, x2, y2) rects."""
    x11, y11, x21, y21 = crop1
    x12, y12, x22, y22 = crop2
    return min(x11, x12), min(y11, y12), max(x21, x22), max(y21, y22)

def intersect_crops(crop1, crop2):
    x11, y11, x21, y21 = crop1
    x12, y12, x22, y22 = crop2
    return max(x11, x12), max(y11, y12), min(x21, x22), min(y21, y22)

def crop_area(crop):
    x1, y1, x2, y2 = crop
    return max(0, x2 - x1) * max(0, y2 - y1)

def find_border_components(contours, ary):
    borders = []
    area = ary.shape[0] * ary.shape[1]
    for i, c in enumerate(contours):
        x, y, w, h = cv2.boundingRect(c)
        if w * h > 0.5 * area:
            borders.append((i, x, y, x + w - 1, y + h - 1))
    return borders
def angle_from_right(deg):
    return min(deg % 90, 90 - (deg % 90))

def remove_border(contour, ary):
    """Remove everything outside a border contour."""
    # Use a rotated rectangle (should be a good approximation of a border).
    # If it's far from a right angle, it's probably two sides of a border and
    # we should use the bounding box instead.
    c_im = np.zeros(ary.shape)
    r = cv2.minAreaRect(contour)
    degs = r[2]
    if angle_from_right(degs) <= 10.0:
        box = cv2.cv.BoxPoints(r)
        box = np.int0(box)
        cv2.drawContours(c_im, [box], 0, 255, -1)
        cv2.drawContours(c_im, [box], 0, 0, 4)
    else:
        x1, y1, x2, y2 = cv2.boundingRect(contour)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 255, -1)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 0, 4)
    return np.minimum(c_im, ary)
def find_components(edges, max_components=16):
    """Dilate the image until there are just a few connected components.
    Returns contours for these components."""
    # Perform increasingly aggressive dilation until there are just a few
    # connected components.
    count = 21
    dilation = 5
    n = 1
    while count > 16:
        n += 1
        dilated_image = dilate(edges, N=3, iterations=n)
        contours, hierarchy = cv2.findContours(dilated_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        count = len(contours)
    #print dilation
    #Image.fromarray(edges).show()
    #Image.fromarray(255 * dilated_image).show()
    return contours
def find_optimal_components_subset(contours, edges):
    """Find a crop which strikes a good balance of coverage/compactness.
    Returns an (x1, y1, x2, y2) tuple.
    """
    c_info = props_for_contours(contours, edges)
    c_info.sort(key=lambda x: -x['sum'])
    total = np.sum(edges) / 255
    area = edges.shape[0] * edges.shape[1]
    c = c_info[0]
    del c_info[0]
    this_crop = c['x1'], c['y1'], c['x2'], c['y2']
    crop = this_crop
    covered_sum = c['sum']
    while covered_sum < total:
        changed = False
        recall = 1.0 * covered_sum / total
        prec = 1 - 1.0 * crop_area(crop) / area
        f1 = 2 * (prec * recall / (prec + recall))
        #print '----'
        for i, c in enumerate(c_info):
            this_crop = c['x1'], c['y1'], c['x2'], c['y2']
            new_crop = union_crops(crop, this_crop)
            new_sum = covered_sum + c['sum']
            new_recall = 1.0 * new_sum / total
            new_prec = 1 - 1.0 * crop_area(new_crop) / area
            new_f1 = 2 * new_prec * new_recall / (new_prec + new_recall)
            # Add this crop if it improves f1 score,
            # _or_ it adds 25% of the remaining pixels for <15% crop expansion.
            # ^^^ very ad-hoc! make this smoother
            remaining_frac = c['sum'] / (total - covered_sum)
            new_area_frac = 1.0 * crop_area(new_crop) / crop_area(crop) - 1
            if new_f1 > f1 or (remaining_frac > 0.25 and new_area_frac < 0.15):
                print '%d %s -> %s / %s (%s), %s -> %s / %s (%s), %s -> %s' % (
                    i, covered_sum, new_sum, total, remaining_frac,
                    crop_area(crop), crop_area(new_crop), area, new_area_frac,
                    f1, new_f1)
                crop = new_crop
                covered_sum = new_sum
                del c_info[i]
                changed = True
                break
        if not changed:
            break
    return crop
def pad_crop(crop, contours, edges, border_contour, pad_px=15):
    """Slightly expand the crop to get full contours.
    This will expand to include any contours it currently intersects, but will
    not expand past a border.
    """
    bx1, by1, bx2, by2 = 0, 0, edges.shape[0], edges.shape[1]
    if border_contour is not None and len(border_contour) > 0:
        c = props_for_contours([border_contour], edges)[0]
        bx1, by1, bx2, by2 = c['x1'] + 5, c['y1'] + 5, c['x2'] - 5, c['y2'] - 5

    def crop_in_border(crop):
        x1, y1, x2, y2 = crop
        x1 = max(x1 - pad_px, bx1)
        y1 = max(y1 - pad_px, by1)
        x2 = min(x2 + pad_px, bx2)
        y2 = min(y2 + pad_px, by2)
        return crop

    crop = crop_in_border(crop)
    c_info = props_for_contours(contours, edges)
    changed = False
    for c in c_info:
        this_crop = c['x1'], c['y1'], c['x2'], c['y2']
        this_area = crop_area(this_crop)
        int_area = crop_area(intersect_crops(crop, this_crop))
        new_crop = crop_in_border(union_crops(crop, this_crop))
        if 0 < int_area < this_area and crop != new_crop:
            print '%s -> %s' % (str(crop), str(new_crop))
            changed = True
            crop = new_crop
    if changed:
        return pad_crop(crop, contours, edges, border_contour, pad_px)
    else:
        return crop
def downscale_image(im, max_dim=2048):
    """Shrink im until its longest dimension is <= max_dim.
    Returns new_image, scale (where scale <= 1).
    """
    a, b = im.size
    if max(a, b) <= max_dim:
        return 1.0, im
    scale = 1.0 * max_dim / max(a, b)
    new_im = im.resize((int(a * scale), int(b * scale)), Image.ANTIALIAS)
    return scale, new_im
def process_image(inputImg):
    opnImg = Image.open(inputImg)
    scale, im = downscale_image(opnImg)
    edges = cv2.Canny(np.asarray(im), 100, 200)
    # TODO: dilate image _before_ finding a border. This is crazy sensitive!
    contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    borders = find_border_components(contours, edges)
    borders.sort(key=lambda (i, x1, y1, x2, y2): (x2 - x1) * (y2 - y1))
    border_contour = None
    if len(borders):
        border_contour = contours[borders[0][0]]
        edges = remove_border(border_contour, edges)
    edges = 255 * (edges > 0).astype(np.uint8)
    # Remove ~1px borders using a rank filter.
    maxed_rows = rank_filter(edges, -4, size=(1, 20))
    maxed_cols = rank_filter(edges, -4, size=(20, 1))
    debordered = np.minimum(np.minimum(edges, maxed_rows), maxed_cols)
    edges = debordered
    contours = find_components(edges)
    if len(contours) == 0:
        print '%s -> (no text!)' % path
        return
    crop = find_optimal_components_subset(contours, edges)
    crop = pad_crop(crop, contours, edges, border_contour)
    crop = [int(x / scale) for x in crop]  # upscale to the original image size.
    #draw = ImageDraw.Draw(im)
    #c_info = props_for_contours(contours, edges)
    #for c in c_info:
    #    this_crop = c['x1'], c['y1'], c['x2'], c['y2']
    #    draw.rectangle(this_crop, outline='blue')
    #draw.rectangle(crop, outline='red')
    #im.save(out_path)
    #draw.text((50, 50), path, fill='red')
    #orig_im.save(out_path)
    #im.show()
    text_im = opnImg.crop(crop)
    text_im.save('Cropped_and_rotated_image.jpg')
    return text_im
    '''
    text_im.save(out_path)
    print '%s -> %s' % (path, out_path)
    '''
#Camera capturing stuff:
myCamera = picamera.PiCamera()
myCamera.vflip = True
myCamera.hflip = True
'''
myCamera.start_preview()
time.sleep(6)
myCamera.stop_preview()
'''
myCamera.capture("Captured_Image.png")
#End capturing persidure
imgAddr = '/home/pi/My_examples/Mechanical_display_converter/Example1.jpg'
#imgAddr = "Captured_Image.png"
# construct the argument parse and parse the arguments
#ap = argparse.ArgumentParser()
'''
ap.add_argument("-i", "--image", required=True,
help="path to input image to be OCR'd")
ap.add_argument("-p", "--preprocess", type=str, default="thresh",
help="type of preprocessing to be done")
args = vars(ap.parse_args())
'''
# load the example image and convert it to grayscale
img = cv2.imread(imgAddr)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('Step1_gray_filter', gray)
'''
# check to see if we should apply thresholding to preprocess the
# image
if args["preprocess"] == "thresh":
gray = cv2.threshold(gray, 0, 255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
# make a check to see if median blurring should be done to remove
# noise
elif args["preprocess"] == "blur":
gray = cv2.medianBlur(gray, 3)
if preprocess == "thresh":
gray = cv2.threshold(gray, 150, 255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
# make a check to see if median blurring should be done to remove
# noise
elif preprocess == "blur":
gray = cv2.medianBlur(gray, 3)
'''
rgb_planes = cv2.split(img)
result_planes = []
result_norm_planes = []
for plane in rgb_planes:
    dilated_img = cv2.dilate(plane, np.ones((7,7), np.uint8))
    bg_img = cv2.medianBlur(dilated_img, 21)
    diff_img = 255 - cv2.absdiff(plane, bg_img)
    # cv2.normalize requires the dst argument (None here) before the keyword args
    norm_img = cv2.normalize(diff_img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
    result_planes.append(diff_img)
    result_norm_planes.append(norm_img)
result = cv2.merge(result_planes)
result_norm = cv2.merge(result_norm_planes)
cv2.imshow('shadows_out.png', result)
cv2.imshow('shadows_out_norm.png', result_norm)
grayUnShadowedImg = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
cv2.imshow('Shadow_Gray_CVT', grayUnShadowedImg)
ret, threshUnShadowedImg = cv2.threshold(grayUnShadowedImg, 200, 255, cv2.THRESH_BINARY)
cv2.imshow('unShadowed_Thresh_filtering', threshUnShadowedImg)
#v2.imwrite('unShadowed_Thresh_filtering.jpg', threshUnShadowedImg)
#croptedunShadowedImg = process_image('unShadowed_Thresh_filtering.jpg')
adptThreshUnShadowedImg = cv2.adaptiveThreshold(grayUnShadowedImg, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)
cv2.imshow('unShadowed_Adaptive_Thresh_filtering', adptThreshUnShadowedImg)
'''
blurFImg = cv2.GaussianBlur(adptThreshUnShadowedImg,(25,25), 0)
ret, f3Img = cv2.threshold(blurFImg,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2.imshow('f3Img', f3Img )
'''
#OCR Stage:
'''
# write the grayscale image to disk as a temporary file so we can
# apply OCR to it
filename = "{}.png".format(os.getpid())
cv2.imwrite(filename, threshImg)
# load the image as a PIL/Pillow image, apply OCR, and then delete
# the temporary file
text = pytesseract.image_to_string(Image.open(filename))
os.remove(filename)
print("\n" + text)
'''
cv2.waitKey(0)
cv2.destroyAllWindows()
I tried this source as well, but it doesn't seem to work and is not that clear to understand:
https://www.danvk.org/2015/01/07/finding-blocks-of-text-in-an-image-using-python-opencv-and-numpy.html
I have made an example to maybe give you an idea of how to proceed. I made it without your transformations of the image, but you could do it with them if you would like.
What I did was first transform the image to binary with cv2.THRESH_BINARY. Next I made a mask and drew the contours, limiting them by size (cv2.contourArea()) and ratio (from cv2.boundingRect()) for the threshold. Then I connected all the contours that are near each other using cv2.morphologyEx() and a big kernel size (50x50).
Then I selected the biggest contour (the text) and drew a rotated rectangle with cv2.minAreaRect(), which got me the rotation angle.
Then I could rotate the image using cv2.getRotationMatrix2D() and cv2.warpAffine(), and get a slightly bigger bounding box using the highest X, Y and lowest X, Y values of the rotated rectangle, which I used to crop the image.
Then I searched again for contours and removed the noise (little contours) from the image, and the result is text with high contrast.
Final result:
This code is meant only to give an idea or another point of view on the problem, and it may not work with other images (if they differ from the original too much), or at least you would have to adjust some parameters of the code. Hope it helps. Cheers!
Code:
import cv2
import numpy as np
# Read image and search for contours.
img = cv2.imread('rotatec.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, threshold = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(threshold,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
# Create first mask used for rotation.
mask = np.ones(img.shape, np.uint8)*255
# Draw contours on the mask with size and ratio of borders for threshold.
for cnt in contours:
    size = cv2.contourArea(cnt)
    x, y, w, h = cv2.boundingRect(cnt)
    if 10000 > size > 500 and w*2.5 > h:
        cv2.drawContours(mask, [cnt], -1, (0,0,0), -1)
# Connect neighbour contours and select the biggest one (text).
kernel = np.ones((50,50),np.uint8)
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
gray_op = cv2.cvtColor(opening, cv2.COLOR_BGR2GRAY)
_, threshold_op = cv2.threshold(gray_op, 150, 255, cv2.THRESH_BINARY_INV)
contours_op, hierarchy_op = cv2.findContours(threshold_op, cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
cnt = max(contours_op, key=cv2.contourArea)
# Create rotated rectangle to get the angle of rotation and the 4 points of the rectangle.
_, _, angle = rect = cv2.minAreaRect(cnt)
(h,w) = img.shape[:2]
(center) = (w//2,h//2)
# Rotate the image.
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(img, M, (int(w),int(h)), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT)
# Create bounding box for rotated text (use old points of rotated rectangle).
box = cv2.boxPoints(rect)
a, b, c, d = box = np.int0(box)
bound =[]
bound.append(a)
bound.append(b)
bound.append(c)
bound.append(d)
bound = np.array(bound)
(x1, y1) = (bound[:,0].min(), bound[:,1].min())
(x2, y2) = (bound[:,0].max(), bound[:,1].max())
cv2.drawContours(img,[box],0,(0,0,255),2)
# Crop the image and create new mask for the final image.
rotated = rotated[y1:y2, x1:x2]
mask_final = np.ones(rotated.shape, np.uint8)*255
# Remove noise from the final image.
gray_r = cv2.cvtColor(rotated, cv2.COLOR_BGR2GRAY)
_, threshold_r = cv2.threshold(gray_r, 150, 255, cv2.THRESH_BINARY_INV)
contours, hierarchy = cv2.findContours(threshold_r,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
for cnt in contours:
    size = cv2.contourArea(cnt)
    if size < 500:
        cv2.drawContours(threshold_r, [cnt], -1, (0,0,0), -1)
# Invert black and white.
final_image = cv2.bitwise_not(threshold_r)
# Display results.
cv2.imshow('final', final_image)
cv2.imshow('rotated', rotated)
cv2.waitKey(0)
cv2.destroyAllWindows()
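If you want to hand the cleaned image straight to Tesseract rather than training a classifier, a restricted character set usually helps. A hedged sketch (the --psm mode and whitelist values are assumptions to tune, and the whitelist is ignored by some Tesseract 4 LSTM builds):
import pytesseract
# Treat the crop as a single text line, digits and dash only.
config = '--psm 7 -c tessedit_char_whitelist=0123456789-'
print(pytesseract.image_to_string(final_image, config=config))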
EDIT:
For text recognition I recommend this post from SO: Simple Digit Recognition OCR in OpenCV-Python.
The result with the code from the mentioned post:
EDIT:
This is my code implemented with the slightly modified code from the mentioned post. All steps are written in the comments. You should save the script and the training image in the same directory. This is my training image:
Code:
import cv2
import numpy as np
# Read image and search for contours.
img = cv2.imread('rotatec.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, threshold = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(threshold,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
# Create first mask used for rotation.
mask = np.ones(img.shape, np.uint8)*255
# Draw contours on the mask with size and ratio of borders for threshold.
for cnt in contours:
    size = cv2.contourArea(cnt)
    x, y, w, h = cv2.boundingRect(cnt)
    if 10000 > size > 500 and w*2.5 > h:
        cv2.drawContours(mask, [cnt], -1, (0,0,0), -1)
# Connect neighbour contours and select the biggest one (text).
kernel = np.ones((50,50),np.uint8)
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
gray_op = cv2.cvtColor(opening, cv2.COLOR_BGR2GRAY)
_, threshold_op = cv2.threshold(gray_op, 150, 255, cv2.THRESH_BINARY_INV)
contours_op, hierarchy_op = cv2.findContours(threshold_op, cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
cnt = max(contours_op, key=cv2.contourArea)
# Create rotated rectangle to get the angle of rotation and the 4 points of the rectangle.
_, _, angle = rect = cv2.minAreaRect(cnt)
(h,w) = img.shape[:2]
center = (w//2, h//2)
# Rotate the image.
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(img, M, (int(w),int(h)), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT)
# Create bounding box for rotated text (use old points of rotated rectangle).
box = cv2.boxPoints(rect)
a, b, c, d = box = np.int0(box)
bound = np.array([a, b, c, d])
(x1, y1) = (bound[:,0].min(), bound[:,1].min())
(x2, y2) = (bound[:,0].max(), bound[:,1].max())
cv2.drawContours(img,[box],0,(0,0,255),2)
# Crop the image and create new mask for the final image.
rotated = rotated[y1:y2, x1-10:x2]
mask_final = np.ones(rotated.shape, np.uint8)*255
# Remove noise from the final image.
gray_r = cv2.cvtColor(rotated, cv2.COLOR_BGR2GRAY)
_, threshold_r = cv2.threshold(gray_r, 150, 255, cv2.THRESH_BINARY_INV)
contours, hierarchy = cv2.findContours(threshold_r,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
for cnt in contours:
    size = cv2.contourArea(cnt)
    if size < 500:
        cv2.drawContours(threshold_r, [cnt], -1, (0,0,0), -1)
# Invert black and white.
final_image = cv2.bitwise_not(threshold_r)
# Save the result.
cv2.imwrite('rotated12.png', final_image)
# Import module for finding path to database.
from pathlib import Path
# This code executes once and writes two files.
# If the file exists it skips this step, else it runs again.
file = Path("generalresponses.data")
if not file.is_file():
    # Reading the training image
    im = cv2.imread('pitrain1.png')
    im3 = im.copy()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5,5), 0)
    thresh = cv2.adaptiveThreshold(blur, 255, 1, 1, 11, 2)
    # Finding contours
    _, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Create array and list for appending data
    samples = np.empty((0,100))
    responses = []
    # Value serving to increment the "automatic" learning
    i = 0
    # Iterating through contours and appending the array and list with "learned" values
    for cnt in contours:
        i += 1
        [x, y, w, h] = cv2.boundingRect(cnt)
        cv2.rectangle(im, (x,y), (x+w,y+h), (0,0,255), 2)
        roi = thresh[y:y+h, x:x+w]  # Cropping ROI to bounding rectangle
        roismall = cv2.resize(roi, (10,10))  # Resizing ROI to smaller image
        cv2.imshow('norm', im)
        # Appending values based on the pitrain1.png image
        if i < 36:
            responses.append(int(45))
        elif 35 < i < 80:
            responses.append(int(48))
        elif 79 < i < 125:
            responses.append(int(57))
        elif 124 < i < 160:
            responses.append(int(56))
        elif 159 < i < 205:
            responses.append(int(55))
        elif 204 < i < 250:
            responses.append(int(54))
        elif 249 < i < 295:
            responses.append(int(53))
        elif 294 < i < 340:
            responses.append(int(52))
        elif 339 < i < 385:
            responses.append(int(51))
        elif 384 < i < 430:
            responses.append(int(50))
        elif 429 < i < 485:
            responses.append(int(49))
        else:
            break
        sample = roismall.reshape((1,100))
        samples = np.append(samples, sample, 0)
    # Reshaping and saving the database
    responses = np.array(responses)
    responses = responses.reshape((responses.size, 1))
    print('end')
    np.savetxt('generalsamples.data', samples)
    np.savetxt('generalresponses.data', responses, fmt='%s')
################### Recognition ########################
# Dictionary for numbers and characters (in this sample code the only
# character is " - ")
number = {
48 : "0",
53 : "5",
52 : "4",
50 : "2",
45 : "-",
55 : "7",
51 : "3",
57 : "9",
56 : "8",
54 : "6",
49 : "1"
}
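(As an aside, the keys here are just ASCII codes, so chr(string) would return the same character; the explicit dict simply keeps the mapping easy to extend.)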
####### training part ###############
samples = np.loadtxt('generalsamples.data',np.float32)
responses = np.loadtxt('generalresponses.data',np.float32)
responses = responses.reshape((responses.size,1))
model = cv2.ml.KNearest_create()
model.train(samples,cv2.ml.ROW_SAMPLE,responses)
############################# testing part #########################
im = cv2.imread('rotated12.png')
out = np.zeros(im.shape,np.uint8)
gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray,255,1,1,11,2)
contours,hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    [x, y, w, h] = cv2.boundingRect(cnt)
    cv2.rectangle(im, (x,y), (x+w,y+h), (0,255,0), 2)
    roi = thresh[y:y+h, x:x+w]
    roismall = cv2.resize(roi, (10,10))
    roismall = roismall.reshape((1,100))
    roismall = np.float32(roismall)
    retval, results, neigh_resp, dists = model.findNearest(roismall, k=5)
    string = int((results[0][0]))
    string2 = number.get(string)
    print(string2)
    cv2.putText(out, str(string2), (x, y+h), 0, 1, (0,255,0))
cv2.imshow('im',im)
cv2.imshow('out',out)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
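One caveat with the testing loop above: cv2.findContours() yields contours in no guaranteed reading order, so the digits print scrambled. A small sketch of collecting and sorting the hits by x coordinate instead (variable names reuse the ones above; hits is my own):
# Collect (x, digit) pairs instead of printing inside the loop, then sort
# left-to-right so the digits come out in reading order.
hits = []
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    roi = thresh[y:y+h, x:x+w]
    roismall = np.float32(cv2.resize(roi, (10, 10)).reshape((1, 100)))
    retval, results, neigh_resp, dists = model.findNearest(roismall, k=5)
    hits.append((x, number.get(int(results[0][0]), '?')))
print(''.join(digit for x, digit in sorted(hits)))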
Sorry for being a complete moron at this,
I'm really trying to learn as much as I can about coding, everything that goes on around computers, and OpenCV with the very little time I have. But here's the edited code I've managed to get partly working:
from PIL import Image
import pytesseract
import os
import picamera
import time
import cv2
import numpy as np
# Read image and search for contours.
img = cv2.imread('Example1.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, threshold = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(threshold,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE) #EDITED
# Create first mask used for rotation.
mask = np.ones(img.shape, np.uint8)*255
# Draw contours on the mask with size and ratio of borders for threshold.
for cnt in contours:
    size = cv2.contourArea(cnt)
    x, y, w, h = cv2.boundingRect(cnt)
    if 10000 > size > 500 and w*2.5 > h:
        cv2.drawContours(mask, [cnt], -1, (0,0,0), -1)
# Connect neighbour contours and select the biggest one (text).
kernel = np.ones((50,50),np.uint8)
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
gray_op = cv2.cvtColor(opening, cv2.COLOR_BGR2GRAY)
_, threshold_op = cv2.threshold(gray_op, 150, 255, cv2.THRESH_BINARY_INV)
contours_op, hierarchy_op = cv2.findContours(threshold_op, cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
cnt = max(contours_op, key=cv2.contourArea)
# Create rotated rectangle to get the angle of rotation and the 4 points of the rectangle.
_, _, angle = rect = cv2.minAreaRect(cnt)
(h,w) = img.shape[:2]
center = (w//2, h//2)
# Rotate the image.
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(img, M, (int(w),int(h)), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT)
# Create bounding box for rotated text (use old points of rotated rectangle).
box = cv2.cv.BoxPoints(rect)  # edited: OpenCV 2.x API; on OpenCV 3+ this is cv2.boxPoints(rect)
a, b, c, d = box = np.int0(box)
bound = np.array([a, b, c, d])
(x1, y1) = (bound[:,0].min(), bound[:,1].min())
(x2, y2) = (bound[:,0].max(), bound[:,1].max())
cv2.drawContours(img,[box],0,(0,0,255),2)
# Crop the image and create new mask for the final image.
rotated = rotated[y1:y2, x1:x2]
mask_final = np.ones(rotated.shape, np.uint8)*255
# Remove noise from the final image.
gray_r = cv2.cvtColor(rotated, cv2.COLOR_BGR2GRAY)
_, threshold_r = cv2.threshold(gray_r, 150, 255, cv2.THRESH_BINARY_INV)
contours, hierarchy = cv2.findContours(threshold_r,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
for cnt in contours:
    size = cv2.contourArea(cnt)
    if size < 500:
        cv2.drawContours(threshold_r, [cnt], -1, (0,0,0), -1)
# Invert black and white.
final_image = cv2.bitwise_not(threshold_r)
# Display results.
cv2.imshow('final', final_image)
cv2.imshow('rotated', rotated)
#OCR Stage:
# write the processed image to disk as a temporary file so we can
# apply OCR to it
cv2.imwrite('Final_proc.jpg', final_image)
# load the image as a PIL/Pillow image, apply OCR, and then delete
# the temporary file
text = pytesseract.image_to_string(Image.open('Final_proc.jpg'))
os.remove('Final_proc.jpg')
print("\n" + text)
cv2.waitKey(0)
cv2.destroyAllWindows()
When running it now it gives me this output:
https://i.imgur.com/ImdKSCv.jpg
which is a little different from what you showed and compiled on the Windows machine, but still super close.
Any idea what happened? Once that's sorted it should be really easy to dissect the code and learn from it.
Again, thank you very much for your time! :D
So for the Python 3 and OpenCV 3 version of the code, in order to make the image work with Tesseract you need to add a white border of around 20 px to extend the image for some reason (I assume it's because of the convolution-matrix scanning), according to my other post:
pytesseract struggling to recognize clean black and white pictures with font numbers and 7 seg digits(python)
And here's how you'd add the border:
how to add border around an image in opencv python
In one line of code:
outputImage = cv2.copyMakeBorder(
    inputImage,
    topBorderWidth,
    bottomBorderWidth,
    leftBorderWidth,
    rightBorderWidth,
    cv2.BORDER_CONSTANT,
    value=borderColor  # color of the border, e.g. 255 for white
)
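For example, to add the roughly 20 px white border mentioned above before OCR (20 is the value from my other post; the exact width is something to experiment with):
# Pad the binary image with a 20 px white border on every side;
# value=255 is white for a single-channel image.
final_image = cv2.copyMakeBorder(final_image, 20, 20, 20, 20,
                                 cv2.BORDER_CONSTANT, value=255)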

Split cells from an image of a table

I have to get the contents of a table image using python and OpenCV.
The image is as follows, a lecturer detail table with lecturer name and subject code:
I need to get the text of each row. For example:
My current implementation (up to splitting the rows of the table) is as follows:
import cv2
import numpy as np
cropped_Image_Location = "/home/shakya/Desktop/Paramore/CM_bot/timeTableDetails/Cropped/"
segmentCount = 0
img = cv2.imread(cropped_Image_Location+"cropped_5.jpg")
edges = cv2.Canny(img,50,150,apertureSize = 3)
cv2.imwrite('edges-50-150.jpg',edges)
minLineLength = 100
lines = cv2.HoughLinesP(image=edges, rho=1, theta=np.pi/10, threshold=200, lines=np.array([]), minLineLength= minLineLength, maxLineGap=100)
a,b,c = lines.shape
for i in range(a):
    cv2.line(img, (lines[i][0][0], lines[i][0][1]), (lines[i][0][2], lines[i][0][3]), (0, 0, 255), 3, cv2.LINE_AA)
small = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
grad = cv2.morphologyEx(small, cv2.MORPH_GRADIENT, kernel)
_, bw = cv2.threshold(grad, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 1))
connected = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)
# using RETR_EXTERNAL instead of RETR_CCOMP
_,contours, hierarchy = cv2.findContours(connected.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
mask = np.zeros(bw.shape, dtype=np.uint8)
for idx in range(len(contours)):
    x, y, w, h = cv2.boundingRect(contours[idx])
    mask[y:y+h, x:x+w] = 0
    cv2.drawContours(mask, contours, idx, (255, 255, 255), -1)
    r = float(cv2.countNonZero(mask[y:y+h, x:x+w])) / (w * h)
    if r > 0.45 and w > 8 and h > 8:
        cv2.rectangle(small, (x, y), (x+w-1, y+h-1), (0, 255, 0), 1)
        crop_img = small[y:y+h, x:x+w]
        segmentCount = segmentCount + 1
        imageNumber = str(segmentCount)
        cv2.imwrite(cropped_Image_Location+"Lecturer_cropped_" + imageNumber+".jpg", crop_img)
cv2.imwrite(cropped_Image_Location+'lectureAll.jpg', small)
cv2.waitKey(0)
cv2.destroyAllWindows()
I'm stuck with splitting the cells of a row.
The goal is to find horizontal lines whose length is greater than a certain threshold. After finding the horizontal lines, you can use their (x, y) positions to crop the rows.
You can process the image like this:
import numpy as np
import cv2
img = cv2.imread(r'D:\Libraries\Downloads\Capture.PNG')
# find edges in the image
edges = cv2.Laplacian(img, cv2.CV_8U)
# kernel used to remove vertical and small horizontal lines using erosion
kernel = np.zeros((5, 11), np.uint8)
kernel[2, :] = 1
# erode image to remove unwanted lines
eroded = cv2.morphologyEx(edges, cv2.MORPH_ERODE, kernel)
# find (x,y) position of the horizontal lines
indices = np.nonzero(eroded)
# As indices contain all the points along horizontal line, so get unique rows only (indices[0] contains rows or y coordinate)
rows = np.unique(indices[0])
# now you have unique rows but edges are more than 1 pixel thick
# so remove lines which are near to each other using a certain threshold
filtered_rows = []
for ii in range(len(rows)):
    if ii == 0:
        filtered_rows.append(rows[ii])
    else:
        if np.abs(rows[ii] - rows[ii - 1]) >= 10:
            filtered_rows.append(rows[ii])
print(filtered_rows)
# crop first row of table
first_cropped_row = img[filtered_rows[0]:filtered_rows[1], :, :]
cv2.imshow('Image', eroded)
cv2.imshow('Cropped_Row', first_cropped_row)
cv2.waitKey(0)
You can use filtered_rows to crop the rows of the table.
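The snippet above only crops the first row, but the same filtered_rows list can be walked pairwise to cut out every row, e.g.:
# Each consecutive pair of detected line positions bounds one table row.
for i in range(len(filtered_rows) - 1):
    row = img[filtered_rows[i]:filtered_rows[i + 1], :, :]
    cv2.imwrite('row_{}.png'.format(i), row)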
UPDATE: working code as of Python 3.6.8 - fixed based on http://answers.opencv.org/question/198043/problem-using-morphologyex/?answer=198052#post-id-198052
First you identify all of the boxes by using contours:
https://docs.opencv.org/3.3.1/d4/d73/tutorial_py_contours_begin.html
Then get the moment of each box:
https://docs.opencv.org/3.1.0/dd/d49/tutorial_py_contour_features.html
Then you can identify the row. The moment represents the middle pixel of the contour area; the docs call it the center of mass of the object.
Then check for contours with roughly the same y value, or within a small range (you decide).
Combine those ROIs together and you will have the row; a sketch of the idea follows below.
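A minimal sketch of that idea (the file name, variable names, and the 10 px tolerance are all my own assumptions, not from the linked tutorials):
import cv2
img = cv2.imread('table.png')  # hypothetical table image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY_INV)
# OpenCV 4 returns (contours, hierarchy); OpenCV 3 prepends the image.
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
rows = {}
for cnt in contours:
    M = cv2.moments(cnt)
    if M['m00'] == 0:
        continue
    cy = int(M['m01'] / M['m00'])  # centroid y (the "center of mass")
    # Bucket boxes whose centroids fall within ~10 px of each other vertically.
    key = cy // 10
    rows.setdefault(key, []).append(cv2.boundingRect(cnt))
for key in sorted(rows):
    cells = sorted(rows[key])  # left-to-right by x within the row
    print('row:', cells)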
