Here's one of the images I want to apply the outline to, and how it's supposed to look:
And here's the code that's the closest to what I want (taken from here):
import cv2
import numpy as np
from PIL import Image
import math
img = Image.open(f"frames/frame000028.png")
def stroke(origin_image, threshold, stroke_size, colors):
    img = np.array(origin_image)
    h, w, _ = img.shape
    padding = stroke_size + 50
    alpha = img[:, :, 3]          # alpha channel of the RGBA input
    rgb_img = img[:, :, 0:3]
    bigger_img = cv2.copyMakeBorder(rgb_img, padding, padding, padding, padding,
                                    cv2.BORDER_CONSTANT, value=(0, 0, 0, 0))
    alpha = cv2.copyMakeBorder(alpha, padding, padding, padding, padding,
                               cv2.BORDER_CONSTANT, value=0)
    bigger_img = cv2.merge((bigger_img, alpha))
    h, w, _ = bigger_img.shape
    _, alpha_without_shadow = cv2.threshold(alpha, threshold, 255, cv2.THRESH_BINARY)  # threshold=0 in Photoshop
    alpha_without_shadow = 255 - alpha_without_shadow
    dist = cv2.distanceTransform(alpha_without_shadow, cv2.DIST_L2, cv2.DIST_MASK_3)
    stroked = change_matrix(dist, stroke_size)
    stroke_alpha = (stroked * 255).astype(np.uint8)
    stroke_b = np.full((h, w), colors[0][2], np.uint8)
    stroke_g = np.full((h, w), colors[0][1], np.uint8)
    stroke_r = np.full((h, w), colors[0][0], np.uint8)
    stroke = cv2.merge((stroke_b, stroke_g, stroke_r, stroke_alpha))
    stroke = cv2pil(stroke)
    bigger_img = cv2pil(bigger_img)
    result = Image.alpha_composite(stroke, bigger_img)
    return result
def change_matrix(input_mat, stroke_size):
    stroke_size = stroke_size - 1
    mat = np.ones(input_mat.shape)
    check_size = stroke_size + 1.0
    mat[input_mat > check_size] = 0
    border = (input_mat > stroke_size) & (input_mat <= check_size)
    mat[border] = 1.0 - (input_mat[border] - stroke_size)
    return mat
def cv2pil(cv_img):
    cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGRA2RGBA)
    pil_img = Image.fromarray(cv_img.astype("uint8"))
    return pil_img
output = stroke(img, threshold=0, stroke_size=10, colors=((42,102,209),))
output.show()
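Not part of the code above, but for comparison: a simpler, hard-edged outline can be sketched by dilating the alpha mask and painting the grown region in the stroke colour. It skips the anti-aliasing and the padding step of the distance-transform version, so a stroke touching the image border would get clipped, and the colour here is in RGB order because the array comes straight from PIL:

import cv2
import numpy as np
from PIL import Image

def dilate_stroke(origin_image, stroke_size, color=(42, 102, 209)):
    # Grow the alpha mask; everything the dilation adds becomes the outline.
    rgba = np.array(origin_image.convert("RGBA"))
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                       (2 * stroke_size + 1, 2 * stroke_size + 1))
    grown = cv2.dilate(rgba[:, :, 3], kernel)
    stroke_layer = np.zeros_like(rgba)
    stroke_layer[:, :, 0:3] = color      # RGB order, unlike the BGR merge above
    stroke_layer[:, :, 3] = grown
    # Composite the original sprite over the coloured, grown silhouette.
    return Image.alpha_composite(Image.fromarray(stroke_layer), Image.fromarray(rgba))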
I am trying to detect drops inside the water. My approach is to detect edges first, but bright spots in the image are also detected as drops.
Note that the drops are white, surrounded by a dark ring.
My code:
import cv2
import numpy as np
def unsharp_mask(img, blur_size=(5, 5), imgWeight=1.5, gaussianWeight=-0.5):
    gaussian = cv2.GaussianBlur(img, blur_size, 0)
    return cv2.addWeighted(img, imgWeight, gaussian, gaussianWeight, 0)

def clahe(img, clip_limit=2.0):
    clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=(5, 5))
    return clahe.apply(img)

def get_sobel(img, size=3):
    # second-order x derivative; pass the kernel size by keyword so it is
    # not mistaken for the dst argument
    sobelx64f = cv2.Sobel(img, cv2.CV_64F, 2, 0, ksize=size)
    abs_sobel64f = np.absolute(sobelx64f)
    return np.uint8(abs_sobel64f)
img = cv2.imread("img_brightened.jpg")
# save color copy for visualizing
imgc = img.copy()
# resize image to make the analytics easier (a form of filtering)
resize_times = 1.5
img = cv2.resize(img, None, fx=1 / resize_times, fy=1 / resize_times)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow("Input", img)
# use sobel operator to evaluate high frequencies
sobel = get_sobel(img)
# experimentally calculated function - needs refining
clip_limit = (-2.556) * np.sum(sobel)/(img.shape[0] * img.shape[1]) + 26.557
# don't apply clahe if there is enough high freq to find blobs
if clip_limit < 1.0:
    clip_limit = 0.1
# limit clahe if there's not enough details - needs more tests
if clip_limit > 8.0:
    clip_limit = 8
# apply clahe and unsharp mask to improve high frequencies as much as possible
img = clahe(img, clip_limit)
img = unsharp_mask(img)
# filter the image to ensure edge continuity and perform Canny
img_blurred = (cv2.GaussianBlur(img, (2*2+1,2*2+1), 0))
canny = cv2.Canny(img_blurred, 100, 255)
cv2.imshow("Output", canny)
cv2.waitKey(0)
Result
I used code from https://github.com/kavyamusty/Shading-removal-of-images/blob/master/Article%20submission.ipynb, which first removes shadows, and then cv2.HoughCircles to find the circles.
The code is below:
import cv2
import numpy as np
import matplotlib.pyplot as plt
def max_filtering(N, I_temp):
    wall = np.full((I_temp.shape[0] + (N // 2) * 2, I_temp.shape[1] + (N // 2) * 2), -1)
    wall[(N // 2):wall.shape[0] - (N // 2), (N // 2):wall.shape[1] - (N // 2)] = I_temp.copy()
    temp = np.full((I_temp.shape[0] + (N // 2) * 2, I_temp.shape[1] + (N // 2) * 2), -1)
    for y in range(0, wall.shape[0]):
        for x in range(0, wall.shape[1]):
            if wall[y, x] != -1:
                window = wall[y - (N // 2):y + (N // 2) + 1, x - (N // 2):x + (N // 2) + 1]
                num = np.amax(window)
                temp[y, x] = num
    A = temp[(N // 2):wall.shape[0] - (N // 2), (N // 2):wall.shape[1] - (N // 2)].copy()
    return A
def min_filtering(N, A):
    wall_min = np.full((A.shape[0] + (N // 2) * 2, A.shape[1] + (N // 2) * 2), 300)
    wall_min[(N // 2):wall_min.shape[0] - (N // 2), (N // 2):wall_min.shape[1] - (N // 2)] = A.copy()
    temp_min = np.full((A.shape[0] + (N // 2) * 2, A.shape[1] + (N // 2) * 2), 300)
    for y in range(0, wall_min.shape[0]):
        for x in range(0, wall_min.shape[1]):
            if wall_min[y, x] != 300:
                window_min = wall_min[y - (N // 2):y + (N // 2) + 1, x - (N // 2):x + (N // 2) + 1]
                num_min = np.amin(window_min)
                temp_min[y, x] = num_min
    B = temp_min[(N // 2):wall_min.shape[0] - (N // 2), (N // 2):wall_min.shape[1] - (N // 2)].copy()
    return B
def background_subtraction(I, B):
    O = I - B
    norm_img = cv2.normalize(O, None, 0, 255, norm_type=cv2.NORM_MINMAX)
    return norm_img
def min_max_filtering(M, N, I):
    if M == 0:
        # max_filtering
        A = max_filtering(N, I)
        # min_filtering
        B = min_filtering(N, A)
        # subtraction
        normalised_img = background_subtraction(I, B)
    elif M == 1:
        # min_filtering
        A = min_filtering(N, I)
        # max_filtering
        B = max_filtering(N, A)
        # subtraction
        normalised_img = background_subtraction(I, B)
    return normalised_img
# Read Image
img = cv2.imread(r"D:/Image.jpg")
# Copy origin image
cimg = img.copy()
# Initialization array of uint8
img_remove_shadow = np.zeros(np.shape(img), dtype="uint8")
for i in range(np.shape(img)[2]):
    img_remove_shadow[:, :, i] = np.array(min_max_filtering(M=0, N=20, I=img[:, :, i]))
# Using median blur
img = cv2.medianBlur(img_remove_shadow,5)
# Change to gray image
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow("Removing Shadow", img)
# Find circles
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, dp=1, minDist=5,
                           param1=40, param2=23, minRadius=5, maxRadius=20)
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
    # draw the outer circle
    cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
    # draw the center of the circle
    cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
cv2.imshow('detected circles',cimg)
cv2.waitKey(0)
cv2.destroyAllWindows()
The result is as below:
PS: The code takes 11.74 s to run; I would appreciate it if someone could optimize it.
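Regarding the running time: the max/min filters above are, up to border handling and the exact window size, grayscale dilation and erosion, so the two Python pixel loops can be collapsed into OpenCV calls. A hedged sketch of the M == 0 path (max filter, then min filter, then subtraction), assuming a single uint8 channel:

def remove_shadow_fast(channel, N=20):
    # max filter == grayscale dilation, min filter == grayscale erosion
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (N, N))
    background = cv2.erode(cv2.dilate(channel, kernel), kernel)
    diff = channel.astype(np.float32) - background.astype(np.float32)
    return cv2.normalize(diff, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)

The per-channel loop above would then become img_remove_shadow[:, :, i] = remove_shadow_fast(img[:, :, i], N=20); results may differ slightly at the borders compared with the padded -1/300 walls.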
I have a collection of images as below -
Example 1
Example 2
Example 3
These represent dates in DDMMYYYY format. For each of these images, I want to save each digit as a separate image. For Example 1, I wish to save 7, 9, 0, 8, 5, 8, 7, 1 as separate images sliced from the original image. So far I have tried various methods described in different Stack Overflow and blog posts, but none of them seems to work.
Code to extract boxes surrounding dates -
from glob import glob
import cv2 as cv
import numpy as np
from tqdm import tqdm
class ExtractRectangle:
    def __init__(self):
        super().__init__()
        self.minLinLength_h = 70
        self.minLinLength_v = 5
        self.maxLineGap = 20

    def is_horizontal(self, line, thresh=5):
        return abs(line[1] - line[3]) <= thresh

    def is_vertical(self, line, thresh=5):
        return abs(line[0] - line[2]) <= thresh
    def get_lines(self, canny, horizontal=True):
        lines = []
        if horizontal:
            linesP = cv.HoughLinesP(
                canny,
                rho=1,
                theta=np.pi / 180,
                threshold=10,
                lines=None,
                minLineLength=self.minLinLength_h,
                maxLineGap=20,
            )
        else:
            linesP = cv.HoughLinesP(
                canny,
                rho=1,
                theta=np.pi / 180,
                threshold=10,
                lines=None,
                minLineLength=self.minLinLength_v,
                maxLineGap=20,
            )
        if linesP is not None:
            for i in range(0, len(linesP)):
                l = linesP[i][0]
                if self.is_horizontal(l, 3) and horizontal:
                    lines.append(l)
                elif self.is_vertical(l, 3):
                    lines.append(l)
        return lines
    def remove_whitespace(self, img):
        # https://stackoverflow.com/questions/48395434/how-to-crop-or-remove-white-background-from-an-image
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        th, threshed = cv.threshold(gray, 127, 255, cv.THRESH_BINARY_INV)
        kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (11, 11))
        morphed = cv.morphologyEx(threshed, cv.MORPH_CLOSE, kernel)
        cnts = cv.findContours(morphed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)[-2]
        cnt = sorted(cnts, key=cv.contourArea)[-1]
        x, y, w, h = cv.boundingRect(cnt)
        dst = img[y:y + h, x:x + w]
        return dst
    def process_image(self, filename, path):
        errenous = False
        img = cv.imread(cv.samples.findFile(filename))
        img = self.remove_whitespace(img)
        cImage = np.copy(img)
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        canny = cv.Canny(gray, 100, 200)
        horizontal_lines = self.get_lines(canny)
        horizontal_lines = sorted(horizontal_lines, key=lambda a_entry: a_entry[..., 1])
        vertical_lines = self.get_lines(canny, horizontal=False)
        vertical_lines = sorted(vertical_lines, key=lambda a_entry: a_entry[..., 0])
        if len(horizontal_lines) > 0:
            initial_line = horizontal_lines[0]
            final_line = horizontal_lines[-1]
            # LeftTop(x1, y1) -> RightTop(x2, y1) -> RightBottom(x2, y2) -> LeftBottom(x1, y2)
            y1 = initial_line[1]
            y2 = final_line[1]
            bottom = min(y1, y2)
            top = max(y1, y2)
            # post whitespace removal, dates should only be the major component
            if (top - bottom) / img.shape[0] < 0.6:
                errenous = True
        else:
            errenous = True
        if len(vertical_lines) > 0:
            initial_line = vertical_lines[0]
            final_line = vertical_lines[-1]
            x1 = initial_line[0]
            x2 = final_line[0]
            left = min(x1, x2)
            right = max(x1, x2)
            # as dates occupy majority of the horizontal space
            if (right - left) / img.shape[1] < 0.95:
                errenous = True
        else:
            errenous = True
        if not errenous:
            # cImage = cv.rectangle(cImage, (left, bottom), (right, top), (255, 0, 0), 2)
            cImage = cImage[bottom:bottom + (top - bottom), left:left + (right - left)]
            cv.imwrite(f"{path}/{filename.split('/')[-1]}", cImage)
if __name__ == "__main__":
    extract = ExtractRectangle()
    test_files = glob("data/raw/test/*.png")
    test_path = "data/processed/test/"
    for path in tqdm(test_files):
        extract.process_image(path, test_path)
    train_files = glob("data/raw/train/*.png")
    train_path = "data/processed/train/"
    for path in tqdm(train_files):
        extract.process_image(path, train_path)
Resultant detection for above images -
Example 1
Example 2
Example 3
Some other samples
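For the digit-slicing step itself, once a date box has been cropped by the code above, one possible follow-up is to threshold the crop, take the external contours, and cut one bounding box per digit. This is only a sketch and assumes dark digits on a lighter background that do not touch each other:

import cv2 as cv

def extract_digits(box_img, min_area=20):
    gray = cv.cvtColor(box_img, cv.COLOR_BGR2GRAY)
    _, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)
    # [-2] keeps this working on both OpenCV 3.x and 4.x return signatures
    cnts = cv.findContours(thresh, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)[-2]
    rects = [cv.boundingRect(c) for c in cnts if cv.contourArea(c) > min_area]
    rects.sort(key=lambda r: r[0])  # left to right, i.e. DDMMYYYY order
    return [box_img[y:y + h, x:x + w] for (x, y, w, h) in rects]

Each returned crop can then be written out with cv.imwrite; touching or broken digits would need extra morphology or a projection-profile split.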
I am trying to identify the odometer reading from the attached image using OpenCV, the EAST text detection model, and pytesseract.
Following is my code:
import cv2
import numpy as np
import matplotlib.pyplot as plt
import pytesseract
from imutils.object_detection import non_max_suppression  # used for NMS on the EAST boxes below
# assuming you have the result image store in median
median = cv2.imread("odo_4.jpg", 0)
image_gray = median
binary = cv2.bitwise_not(image_gray)
blur = cv2.GaussianBlur(image_gray,(5,5),0)
ret2,th2 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
edged = cv2.Canny(th2, 50, 80, 255)
#threshold = cv2.adaptiveThreshold(edged,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
close = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel, iterations=1)
contours = cv2.findContours(close, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
rect_cnts = []
for cnt in contours:
    peri = cv2.arcLength(cnt, True)
    approx = cv2.approxPolyDP(cnt, 0.04 * peri, True)
    (x, y, w, h) = cv2.boundingRect(cnt)
    ar = w / float(h)
    if len(approx) == 4 and (ar >= 0.95 and ar <= 1.05):  # shape filtering condition
        pass
    else:
        rect_cnts.append(cnt)
max_area = 0
football_square = None
for cnt in rect_cnts:
    (x, y, w, h) = cv2.boundingRect(cnt)
    if max_area < w * h:
        max_area = w * h
        football_square = cnt
image = cv2.cvtColor(image_gray, cv2.COLOR_GRAY2RGB)
(x, y, w, h) = cv2.boundingRect(football_square)
new_image = image[y:y+h, x:x+w]
new = new_image
import cv2 as cv
orig = new.copy()
(origH, origW) = new.shape[:2]
rW = origW / 320.0
rH = origH / 320.0
# resize the original image to new dimensions
new = cv.resize(new, (320, 320))
(H, W) = new.shape[:2]
# construct a blob from the image to forward pass it to EAST model
blob = cv.dnn.blobFromImage(new, 1.0, (W, H),
(123.68, 116.78, 103.94), swapRB=True, crop=False)
net = cv.dnn.readNet('frozen_east_text_detection.pb')
layerNames = [
"feature_fusion/Conv_7/Sigmoid",
"feature_fusion/concat_3"]
net.setInput(blob)
(scores, geometry) = net.forward(layerNames)
def predictions(prob_score, geo):
    (numR, numC) = prob_score.shape[2:4]
    boxes = []
    confidence_val = []
    # loop over rows
    for y in range(0, numR):
        scoresData = prob_score[0, 0, y]
        x0 = geo[0, 0, y]
        x1 = geo[0, 1, y]
        x2 = geo[0, 2, y]
        x3 = geo[0, 3, y]
        anglesData = geo[0, 4, y]
        # loop over the number of columns
        for i in range(0, numC):
            if scoresData[i] < 0.5:
                continue
            (offX, offY) = (i * 4.0, y * 4.0)
            # extracting the rotation angle for the prediction and computing the sine and cosine
            angle = anglesData[i]
            cos = np.cos(angle)
            sin = np.sin(angle)
            # using the geo volume to get the dimensions of the bounding box
            h = x0[i] + x2[i]
            w = x1[i] + x3[i]
            # compute start and end for the text pred bbox
            endX = int(offX + (cos * x1[i]) + (sin * x2[i]))
            endY = int(offY - (sin * x1[i]) + (cos * x2[i]))
            startX = int(endX - w)
            startY = int(endY - h)
            boxes.append((startX, startY, endX, endY))
            confidence_val.append(scoresData[i])
    # return bounding boxes and associated confidence_val
    return (boxes, confidence_val)
(boxes, confidence_val) = predictions(scores, geometry)
boxes = non_max_suppression(np.array(boxes), probs=confidence_val)
# initialize the list of results
results = []
# loop over the bounding boxes to find the coordinate of bounding boxes
for (startX, startY, endX, endY) in boxes:
    # scale the coordinates based on the respective ratios in order to reflect the bounding box on the original image
    startX = int(startX * rW)
    startY = int(startY * rH)
    endX = int(endX * rW)
    endY = int(endY * rH)
    # extract the region of interest
    r = orig[startY:endY, startX:endX]
    plt.imshow(r)
    # configuration setting to convert image to string
    configuration = ("-l eng --oem 1 --psm 7")
    # this will recognize the text from the image of the bounding box
    text = pytesseract.image_to_string(r, config=configuration)
    # append bbox coordinates and associated text to the list of results
    results.append(((startX, startY, endX, endY), text))
The results are bad, even though the EAST model does identify the region where the digits are present. Can you please help me? I have tried different psm values in the config for image_to_string.
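Not a guaranteed fix, but a common next step for odometer-style digits is to upscale and binarise each ROI and restrict Tesseract to digits before calling image_to_string. A sketch, with the scale factor and threshold choice as guesses (tessedit_char_whitelist is a standard Tesseract config variable):

def ocr_digits(roi):
    # Upscale and binarise the crop; digit displays usually OCR better
    # as clean black-on-white binary images.
    roi = cv2.resize(roi, None, fx=3, fy=3, interpolation=cv2.INTER_CUBIC)
    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY) if roi.ndim == 3 else roi
    _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    config = "--oem 1 --psm 7 -c tessedit_char_whitelist=0123456789"
    return pytesseract.image_to_string(binary, config=config)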
Use inRange() for selection. See the example:
import cv2 as cv
low_H = 80
low_S = 160
low_V = 200
high_H = 100
high_S = 255
high_V = 255
frame = cv.imread('OAPgE.jpg')
frame_HSV = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
frame_threshold = cv.inRange(frame_HSV, (low_H, low_S, low_V), (high_H, high_S, high_V))
frame_threshold=cv.bitwise_not(frame_threshold)
cv.imwrite('out_36.png', frame_threshold)
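If the goal is to keep the selected pixels rather than just the binary mask, the (non-inverted) inRange mask can be applied back to the frame, for example:

selected = cv.bitwise_and(frame, frame,
                          mask=cv.inRange(frame_HSV, (low_H, low_S, low_V), (high_H, high_S, high_V)))
cv.imwrite('out_selected.png', selected)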
I have a piece of code which calculates and draws the dense optical flow:
import cv2 as cv
import numpy as np
cap = cv.VideoCapture(0)
def getFrame():
    ret, img = cap.read()
    img = cv.resize(img, (640, 480))
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    return gray

def draw_flow(img, flow, step=16):
    h, w = img.shape[:2]
    y, x = np.mgrid[step / 2:h:step, step / 2:w:step].reshape(2, -1).astype(int)
    fx, fy = flow[y, x].T
    lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
    cv.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (_x2, _y2) in lines:
        cv.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis
def main():
    prevgray = getFrame()
    while True:
        gray = getFrame()
        flow = cv.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3, 10, 1.2, 0)
        prevgray = gray
        cv.imshow('flow', draw_flow(gray, flow))
        cv.waitKey(1)

if __name__ == '__main__':
    main()
Since this takes too long, I had the idea to compute SIFT features, match them, and then calculate the optical flow of the matched features.
Until now I have this:
import cv2 as cv
import numpy as np
cap = cv.VideoCapture(0)
sift = cv.xfeatures2d.SIFT_create()
bf = cv.BFMatcher()
def getFrame():
    ret, img = cap.read()
    img = cv.resize(img, (640, 480))
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    return gray

def main():
    prev_gray = getFrame()
    prev_kp, prev_des = sift.detectAndCompute(prev_gray, None)
    while True:
        next_gray = getFrame()
        next_kp, next_des = sift.detectAndCompute(next_gray, None)
        matches = bf.knnMatch(prev_des, next_des, k=2)
        # Apply ratio test
        good = []
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                good.append([m])
        # calc optical flow
        prev_kp = next_kp
        prev_des = next_des
        prev_gray = next_gray
        cv.waitKey(1)

if __name__ == '__main__':
    main()
How can I calculate the optical flow of the SIFT features and draw them with the draw_flow function from the first code?
I have also this code:
import numpy as np
import cv2 as cv
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
feature_params = dict(maxCorners=500,
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=7)
track_len = 5
tracks = []
cam = cv.VideoCapture(0)

def getFrame():
    _ret, bgr = cam.read()
    bgr = cv.resize(bgr, (640, 480))
    gray = cv.cvtColor(bgr, cv.COLOR_BGR2GRAY)
    return gray, bgr
while True:
    gray, vis = getFrame()
    if len(tracks) > 0:
        img0, img1 = prev_gray, gray
        p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)
        p1, _st, _err = cv.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
        p0r, _st, _err = cv.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
        d = abs(p0 - p0r).reshape(-1, 2).max(-1)
        good = d < 1
        new_tracks = []
        for tr, (x, y), good_flag in zip(tracks, p1.reshape(-1, 2), good):
            if not good_flag:
                continue
            tr.append((x, y))
            if len(tr) > track_len:
                del tr[0]
            new_tracks.append(tr)
            cv.circle(vis, (x, y), 2, (0, 255, 0), -1)
        tracks = new_tracks
        cv.polylines(vis, [np.int32(tr) for tr in tracks], False, (0, 255, 0))
    mask = np.zeros_like(gray)
    mask[:] = 255
    for x, y in [np.int32(tr[-1]) for tr in tracks]:
        cv.circle(mask, (x, y), 5, 0, -1)
    p = cv.goodFeaturesToTrack(gray, mask=mask, **feature_params)
    if p is not None:
        for x, y in np.float32(p).reshape(-1, 2):
            tracks.append([(x, y)])
    prev_gray = gray
    cv.imshow('lk_track', vis)
    ch = cv.waitKey(1)
But this code does not use the SIFT features, and it also doesn't draw the flow the way the first code does...
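One way to bridge the two snippets, sketched here without testing against your camera setup, is to skip the descriptor matching and feed the previous frame's SIFT keypoint positions to cv.calcOpticalFlowPyrLK, then draw each tracked displacement as a short line in the spirit of draw_flow:

def flow_from_keypoints(prev_gray, next_gray, prev_kp):
    # Track the SIFT keypoint positions with pyramidal Lucas-Kanade.
    p0 = np.float32([kp.pt for kp in prev_kp]).reshape(-1, 1, 2)
    p1, st, _err = cv.calcOpticalFlowPyrLK(prev_gray, next_gray, p0, None,
                                           winSize=(15, 15), maxLevel=2)
    vis = cv.cvtColor(next_gray, cv.COLOR_GRAY2BGR)
    for (x0, y0), (x1, y1), ok in zip(p0.reshape(-1, 2), p1.reshape(-1, 2), st.reshape(-1)):
        if not ok:
            continue
        cv.line(vis, (int(x0), int(y0)), (int(x1), int(y1)), (0, 255, 0))
        cv.circle(vis, (int(x1), int(y1)), 1, (0, 255, 0), -1)
    return vis

In the SIFT loop above, this could replace the "# calc optical flow" placeholder: compute vis = flow_from_keypoints(prev_gray, next_gray, prev_kp) and show it with cv.imshow before advancing to the next frame.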
I have code to detect and identify a car number plate and convert the image into text using Tesseract.
I am using OpenCV to localise the number plate.
The problem I am facing is that Tesseract is not identifying the number accurately. Is there any way I can improve its performance?
My code (which I downloaded from the Internet) is:
import numpy as np
import cv2
# from copy import deepcopy
from PIL import Image
import pytesseract as tess
# plate = 0
def preprocess(img):
    # print('preprocessing image')
    # cv2.imshow("Input", img)
    imgBlurred = cv2.GaussianBlur(img, (5, 5), 0)
    gray = cv2.cvtColor(imgBlurred, cv2.COLOR_BGR2GRAY)
    sobelx = cv2.Sobel(gray, cv2.CV_8U, 1, 0, ksize=3)
    cv2.imshow("Sobel", sobelx)
    cv2.waitKey(0)
    ret2, threshold_img = cv2.threshold(sobelx, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    cv2.imshow("Threshold", threshold_img)
    cv2.waitKey(0)
    return threshold_img
def cleanPlate(plate):
    # print("CLEANING PLATE. . .")
    gray = cv2.cvtColor(plate, cv2.COLOR_BGR2GRAY)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    thresh = cv2.dilate(gray, kernel, iterations=1)
    _, thresh = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)
    im1, contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    if contours:
        areas = [cv2.contourArea(c) for c in contours]
        max_index = np.argmax(areas)
        max_cnt = contours[max_index]
        max_cntArea = areas[max_index]
        x, y, w, h = cv2.boundingRect(max_cnt)
        if not ratioCheck(max_cntArea, w, h):
            return plate, None
        cleaned_final = thresh[y:y + h, x:x + w]
        # cv2.imshow("Function Test", cleaned_final)
        return cleaned_final, [x, y, w, h]
    else:
        return plate, None
def extract_contours(threshold_img):
    # print('extracting contours')
    element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(17, 3))
    morph_img_threshold = threshold_img.copy()
    cv2.morphologyEx(src=threshold_img, op=cv2.MORPH_CLOSE, kernel=element, dst=morph_img_threshold)
    cv2.imshow("Morphed", morph_img_threshold)
    cv2.waitKey(0)
    im2, contours, hierarchy = cv2.findContours(morph_img_threshold, mode=cv2.RETR_EXTERNAL,
                                                method=cv2.CHAIN_APPROX_NONE)
    return contours
def ratioCheck(area, width, height):
    # print('checking ratio')
    ratio = float(width) / float(height)
    if ratio < 1:
        ratio = 1 / ratio
    aspect = 4.7272
    min = 15 * aspect * 15  # minimum area
    max = 125 * aspect * 125  # maximum area
    rmin = 3
    rmax = 6
    if (area < min or area > max) or (ratio < rmin or ratio > rmax):
        return False
    return True

def isMaxWhite(plate):
    # print('is Max white')
    avg = np.mean(plate)
    if avg >= 115:
        return True
    else:
        return False
def validateRotationAndRatio(rect):
    # print('validate the rotation and ratio')
    (x, y), (width, height), rect_angle = rect
    if width > height:
        angle = -rect_angle
    else:
        angle = 90 + rect_angle
    if angle > 15:
        return False
    if height == 0 or width == 0:
        return False
    area = height * width
    if not ratioCheck(area, width, height):
        return False
    else:
        return True
def cleanAndRead(img, contours):
    # print('clean and read')
    # count = 0
    for i, cnt in enumerate(contours):
        min_rect = cv2.minAreaRect(cnt)
        if validateRotationAndRatio(min_rect):
            x, y, w, h = cv2.boundingRect(cnt)
            plate_img = img[y:y + h, x:x + w]
            if isMaxWhite(plate_img):
                # count += 1
                clean_plate, rect = cleanPlate(plate_img)
                if rect:
                    x1, y1, w1, h1 = rect
                    x, y, w, h = x + x1, y + y1, w1, h1
                    cv2.imshow("Cleaned Plate", clean_plate)
                    cv2.waitKey(0)
                    plate_im = Image.fromarray(clean_plate)
                    plate_im.save('donald1.png')
                    text = tess.image_to_string(plate_im, lang='eng')
                    # print text
                    img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    cv2.imshow("Detected Plate", img)
                    cv2.waitKey(0)
                    return text
numberplate = 0
img = cv2.imread("car_number_plate.jpg")
threshold_img = preprocess(img)
contours = extract_contours(threshold_img)
# if len(contours)!=0:
# print len(contours) #Test
# cv2.drawContours(img, contours, -1, (0,255,0), 1)
# cv2.imshow("Contours",img)
# cv2.waitKey(0)
plate = cleanAndRead(img, contours)
print ('plate information: ', plate)
If my number plate is: MH01AV8866
It will be recognised as MH01AY8866
Any suggestions will be appreciated. Let me know if any other information is required.
You are using Tesseract as a general-purpose model. You can fine-tune it for your problem: first generate synthetic data for your number plates with
https://github.com/Belval/TextRecognitionDataGenerator
and then tune the model using the steps provided here:
https://github.com/tesseract-ocr/tesseract/wiki/TrainingTesseract-4.00---Finetune
https://github.com/tesseract-ocr/tesseract/wiki/TrainingTesseract-4.00
I've tuned Tesseract on synthetic data and it works like a charm. I tried both CNN models and Tesseract; Tesseract trains better with less data and gives better performance.
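For completeness, a minimal sketch of what the synthetic-plate generation could look like with plain PIL instead of the linked generator; the font path is a placeholder and the state codes are only examples of the MH01AV8866-style format:

import random
import string
from PIL import Image, ImageDraw, ImageFont

def make_plate_image(out_path, font_path="plate_font.ttf"):
    # Random plate-like string: 2 letters, 2 digits, 2 letters, 4 digits.
    text = (random.choice(["MH", "DL", "KA", "TN"]) +
            "".join(random.choices(string.digits, k=2)) +
            "".join(random.choices(string.ascii_uppercase, k=2)) +
            "".join(random.choices(string.digits, k=4)))
    img = Image.new("L", (400, 100), 255)
    draw = ImageDraw.Draw(img)
    draw.text((20, 15), text, fill=0, font=ImageFont.truetype(font_path, 64))
    img.save(out_path)
    return text  # ground-truth label to pair with the image

The returned string is the label to pair with each image for fine-tuning. As a cheaper first experiment, restricting the character set at inference time with -c tessedit_char_whitelist=... is also sometimes worth trying before retraining.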