Extracting digits from image with python and OpenCV

I am looking for a coding solution/help to extract the digits from an LCD display and then output the value from the image.
Below are the code and example images showing how far I have got, but I need some further help to actually extract the "digits" from the image and output the value.
I have made a digits lookup table from an earlier example:
# import the necessary packages
from imutils.perspective import four_point_transform
from imutils import contours
import imutils
import cv2
# define the dictionary of digit segments so we can identify
# each digit on the thermostat
DIGITS_LOOKUP = {
(1, 1, 1, 0, 1, 1, 1): 0,
(0, 0, 1, 0, 0, 1, 0): 1,
(1, 0, 1, 1, 1, 1, 0): 2,
(1, 0, 1, 1, 0, 1, 1): 3,
(0, 1, 1, 1, 0, 1, 0): 4,
(1, 1, 0, 1, 0, 1, 1): 5,
(1, 1, 0, 1, 1, 1, 1): 6,
(1, 0, 1, 0, 0, 1, 0): 7,
(1, 1, 1, 1, 1, 1, 1): 8,
(1, 1, 1, 1, 0, 1, 1): 9}
# load the example image
image = cv2.imread("Multimeter_1.jpg")
# pre-process the image by converting it to
# grayscale, blurring it, and computing an edge map
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (7,7), 0)
thresh = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,87,9)
# find contours in the edge map,
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
rect = None
# loop over the contours
for c in cnts:
    # approximate the contour
    x,y,w,h = cv2.boundingRect(c)
    #cv2.rectangle(image, (x, y), (x+w, y+h), (36, 255, 12), 1)
    cv2.drawContours(image, [c], -1, (36,255,12), 3)
    rect = c
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4:
        rect = approx
        break
# extract the display, apply a perspective transform
warped = four_point_transform(thresh, rect.reshape(4, 2))
output = four_point_transform(image, rect.reshape(4, 2))
# Warp the image and perform morphology to clean it
thresh = cv2.threshold(warped, 0, 255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
# pre-process the image by converting it to
# grayscale, blurring it, and computing an edge map
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (7,7), 0)
thresh = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,87,9)
# find contours in the edge map,
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
rect = None
# loop over the contours
for c in cnts:
    # approximate the contour
    x,y,w,h = cv2.boundingRect(c)
    #cv2.rectangle(image, (x, y), (x+w, y+h), (36, 255, 12), 1)
    cv2.drawContours(image, [c], -1, (36,255,12), 3)
    rect = c
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.05 * peri, True)
    if len(approx) == 4:
        rect = approx
        break
# extract the display, apply a perspective transform
warped = four_point_transform(thresh, rect.reshape(4, 2))
output = four_point_transform(image, rect.reshape(4, 2))
# Warp the image and perform morphology to clean it
thresh = cv2.threshold(warped, 0, 255,cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 5))
thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
cnts = cv2.findContours(thresh.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
digitCnts = []
# loop over the digit area candidates
for c in cnts:
    (x,y,w,h) = cv2.boundingRect(c)
    # if the contour is sufficiently large, it must be a digit
    if w >= 25 and (h >= 50 and h <= 60):
        digitCnts.append(c)
cv2.imshow("Multimeter", image)
cv2.imshow("Threshed", thresh)
cv2.imwrite("Threshed.jpg",thresh)
cv2.waitKey(0)
cv2.destroyAllWindows()
Can someone proficient in Python, especially in OpenCV, help me out with this? This is a school assignment about the image processing part, and I have decided to use Python and OpenCV, so it is not a programming assignment; the code will be used only to perform and explain the actual image processing, nothing less or more. I just need help to actually extract the digits out of the threshed image. I have also provided the original image from the LCD display in case there is a better method to achieve what I am looking for.

Here's an approach using the thresholded input image you have provided:
Convert image to grayscale and Otsu's threshold
Perform morph close with a horizontal kernel to merge the numbers into a single contour
Find contours and filter using a threshold area to remove the large outer contour
Sort by largest contour area which should be the desired text contour
Iterate through contours and extract ROI using Numpy slicing
Starting from your input image
We morph close using a horizontal kernel to merge the numbers together into a single contour
From here we find contours and filter to keep only contours that are below a threshold area; specifically, we use 0.75 * w * h to filter out the outer contour. Once we do this, the desired text contour should be the largest contour.
Finally, we extract the ROI using cv2.boundingRect() and NumPy slicing.
import cv2
image = cv2.imread('1.jpg')
original = image.copy()
h, w, _ = image.shape
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (15,1))
close = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=2)
cnts = cv2.findContours(close, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
minimum_area = .75 * h * w
cnts = [c for c in cnts if cv2.contourArea(c) < minimum_area]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
for c in cnts:
    x,y,w,h = cv2.boundingRect(c)
    ROI = 255 - original[y:y+h, x:x+w]
    cv2.rectangle(image, (x, y), (x + w, y + h), (36,255,12), 2)
    break
cv2.imshow('close', close)
cv2.imshow('image', image)
cv2.imshow('ROI', ROI)
cv2.waitKey()
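To go from the extracted ROI to an actual reading, you still need to classify each digit against your DIGITS_LOOKUP table. Below is a minimal, untested sketch of the classic seven-segment approach: split the binarized ROI into per-digit boxes, sample the seven segment regions, and look the on/off tuple up in the table. The names `roi`, `digitCnts`, and `lookup`, the 0.25/0.15/0.05 geometry ratios, and the 0.5 fill ratio are all assumptions you would tune for your display:
import cv2

# `roi` is assumed to be the cleaned binary display (digits white on black),
# `digitCnts` the filtered per-digit contours, and `lookup` the DIGITS_LOOKUP
# table from the question.
def classify_digits(roi, digitCnts, lookup):
    digits = []
    # sort the digit boxes left-to-right so the value reads in display order
    boxes = sorted([cv2.boundingRect(c) for c in digitCnts], key=lambda b: b[0])
    for (x, y, w, h) in boxes:
        digit = roi[y:y + h, x:x + w]
        (dW, dH) = (int(w * 0.25), int(h * 0.15))
        dHC = int(h * 0.05)
        # segment order matches the lookup table:
        # (top, top-left, top-right, center, bottom-left, bottom-right, bottom)
        segments = [
            ((0, 0), (w, dH)),
            ((0, 0), (dW, h // 2)),
            ((w - dW, 0), (w, h // 2)),
            ((0, h // 2 - dHC), (w, h // 2 + dHC)),
            ((0, h // 2), (dW, h)),
            ((w - dW, h // 2), (w, h)),
            ((0, h - dH), (w, h)),
        ]
        on = [0] * len(segments)
        for (i, ((xA, yA), (xB, yB))) in enumerate(segments):
            area = (xB - xA) * (yB - yA)
            # a segment counts as "on" when more than half its area is lit
            if area > 0 and cv2.countNonZero(digit[yA:yB, xA:xB]) / float(area) > 0.5:
                on[i] = 1
        digits.append(lookup.get(tuple(on), '?'))
    return digits
Thin LCD strokes often need a lower fill ratio than 0.5, and the '?' fallback simply marks tuples that are missing from the table.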

Related

Need help in extracting lines from this image using opencv

Input:
The output should be:
How can I do this using OpenCV or any other method?
I tried this
img = cv2.imread('test2.JPG')
imgray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 150, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
print("Number of contours = " + str(len(contours)))
print(contours[0])
# cv2.drawContours(img, contours, -1, (0, 255, 0), 1)
# cv2.drawContours(imgray, contours, -1, (0, 255, 0), 3)
for cnt in contours:
    area = cv2.contourArea(cnt)
    if area > 20:
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
        x,y,w,h = cv2.boundingRect(approx)
        cv2.rectangle(img,(x,y-3),(x+w,y+h-3),(255,0,0),1)
You have found the contours and drawn only those above a certain area. So far so good.
To capture each line as an individual entity, you need a way to connect the text in each line. Since the lines given in the image are straight, a simple approach is to use a horizontal kernel ([1, 1, 1, 1, 1]) of a certain length and perform morphology.
Code:
img = cv2.imread('text.jpg',1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
th = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1]
Here we use a horizontal kernel 8 pixels in length. This is the parameter you would need to change when trying out other images with different font sizes and text lengths.
hor_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 1))
# array([[1, 1, 1, 1, 1, 1, 1, 1]], dtype=uint8)
dilated = cv2.dilate(th, hor_kernel, iterations=1)
Looking at the image above, you should get an idea of what dilation using a horizontal kernel does. From here on, we find the outermost contours above a certain area.
contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
img2 = img.copy()
for i, c in enumerate(contours):
    area = cv2.contourArea(c)
    if area > 100:
        x,y,w,h = cv2.boundingRect(c)
        img2 = cv2.rectangle(img2, (x, y), (x + w, y + h), (0,255,0), 1)
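If you need each line as a separate image rather than just a drawn box, a small extension of the snippet above (same `contours` and `img`, same area filter) would be to sort the boxes top-to-bottom and slice them out:
# sort the dilated line contours top-to-bottom and crop each one
boxes = [cv2.boundingRect(c) for c in contours if cv2.contourArea(c) > 100]
boxes.sort(key=lambda b: b[1])  # sort by the y-coordinate of each box
for i, (x, y, w, h) in enumerate(boxes):
    line_roi = img[y:y + h, x:x + w]
    cv2.imwrite('line_{}.png'.format(i), line_roi)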

How to put the contours in an image of numbers? (OCR)

I've been studying CV for a few months now, but I ran into a problem on my second project: I needed to remove the noise from a sequence of numbers in order to apply OCR. I managed to clean it up, but the numbers lost some internal pixels.
See the initial and current final images.
Initial
Final
Code used:
blur = cv2.GaussianBlur(img, (15, 15), 2)
hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
lower_gray = np.array([1, 1, 1])
upper_gray = np.array([102, 102, 102])
mask = cv2.inRange(hsv, lower_gray, upper_gray)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
opened_mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
masked_img = cv2.bitwise_and(img, img, mask=opened_mask)
coloured = masked_img.copy()
coloured[mask == 0] = (255, 255, 255)
gray = cv2.cvtColor(coloured, cv2.COLOR_BGR2GRAY)
des = cv2.bitwise_not(gray)
contour, hier = cv2.findContours(des, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contour:
    cv2.drawContours(des, [cnt], 0, 255, -1)
# des is the final image
Is there a better way to clean the background for OCR, or maybe close the lost pixels in the characters?
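One commonly suggested fix for the lost interior pixels is a morphological closing on the binary image before running OCR; a minimal sketch, assuming a binary input image (the file name and the 3x3 kernel are placeholders to tune):
import cv2

binary = cv2.imread('digits.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input
# closing = dilation followed by erosion: fills small holes inside the
# strokes while largely preserving the outer character outlines
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
closed = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)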
I managed to solve it. I didn't use the method you mentioned, though it was a good approach; I was apprehensive that it would expand the characters, which wouldn't be good for OCR reading.
This is my final result:
for mrz in mrz_list:
    try:
        thresh = cv2.threshold(mrz, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        dist = cv2.distanceTransform(thresh, cv2.DIST_L2, 5)
        dist = cv2.normalize(dist, dist, 0, 1.0, cv2.NORM_MINMAX)
        dist = (dist * 255).astype("uint8")
        thresh = cv2.threshold(dist, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
        cnts = cv2.findContours(opening.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        chars = []
        for c in cnts:
            (x, y, w, h) = cv2.boundingRect(c)
            if w >= 20 and h >= 25:
                chars.append(c)
        chars = np.vstack([chars[i] for i in range(0, len(chars))])
        hull = cv2.convexHull(chars)
        mask = np.zeros(mrz.shape[:2], dtype="uint8")
        cv2.drawContours(mask, [hull], -1, 255, -1)
        mask = cv2.dilate(mask, None, iterations=2)
        final = cv2.bitwise_and(opening, opening, mask=mask)
    except Exception:
        # skip regions where no characters were found
        continue
Thanks everyone.
It is easy to get a clean background by morphological dilation or closing, a smooth outline by Gaussian filtering, and then binarization. But I found no way to separate the characters.
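For reference, the background-cleaning pipeline described in that comment might look like the following sketch (the input name and the kernel/blur sizes are assumptions):
import cv2

img = cv2.imread('noisy_text.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
# closing flattens background noise, Gaussian filtering smooths the outline,
# and Otsu's threshold produces the final binary image
closed = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
smooth = cv2.GaussianBlur(closed, (5, 5), 0)
binary = cv2.threshold(smooth, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]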

use python open-cv for segmenting newspaper article

I'm using the code below for segmenting the articles from an image of newspaper.
def segmenter(image_received):
    # Process 1: Lines Detection
    img = image_received
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    edges = cv2.Canny(gray, 75, 150)  # detect edges
    lines = cv2.HoughLinesP(edges, 0.017, np.pi / 180, 60, minLineLength=100, maxLineGap=0.1)  # hough lines generation
    # drawing hough lines
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(img, (x1, y1), (x2, y2), (0, 0, 128), 12)  # hough lines drawn in color (0, 0, 128)
    # Drawing border
    bold = cv2.copyMakeBorder(
        img,  # image source
        5,    # top width
        5,    # bottom width
        5,    # left width
        5,    # right width
        cv2.BORDER_CONSTANT,
        value=(0, 0, 128)  # border color value
    )
    image = bold
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
    detected_lines = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, horizontal_kernel, iterations=2)
    cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        if len(c) >= 10:
            cv2.drawContours(image, [c], 0, (0, 17, 255), 1)
    vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
    detected_lines = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, vertical_kernel, iterations=2)
    cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        if len(c) >= 10:
            cv2.drawContours(image, [c], 0, (0, 17, 255), 1)
    cv2.imwrite(f'tmp/{uuid.uuid4()}.jpg', image)
For instance, the input image is:
and the output image is:
There are three problems:
The output rectangles aren't complete in all cases.
Images inside articles are also segmented as part of the articles. But what I need is to segment only the text of the newspaper and crop everything else out. Something like this one:
Consider the following image:
The article indicated by borders is not rectangular and is much more complicated. How can I achieve the correct borders using Python OpenCV or other image processing libraries?
(The question has an answer here for MATLAB, but I need Python code.)
Here is my pipeline. I think it can be optimized.
Initialization
%matplotlib inline
import numpy as np
import cv2
from matplotlib import pyplot as plt
Load image
image_file_name = 'paper.jpg'
image = cv2.imread(image_file_name)
# gray conversion
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
The first important thing is to remove the lines, so I search for the lines.
grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
abs_grad_x = cv2.convertScaleAbs(grad_x)
abs_grad_y = cv2.convertScaleAbs(grad_y)
# threshold
thresh_x = cv2.threshold(abs_grad_x, 0, 255, cv2.THRESH_OTSU)[1]
thresh_y = cv2.threshold(abs_grad_y, 0, 255, cv2.THRESH_OTSU)[1]
# bluring
kernel_size = 3
blur_thresh_x = cv2.GaussianBlur(thresh_x,(kernel_size, kernel_size),0)
blur_thresh_y = cv2.GaussianBlur(thresh_y,(kernel_size, kernel_size),0)
# Run Hough on edge detected image
rho = 1 # distance resolution in pixels of the Hough grid
theta = np.pi / 180 # angular resolution in radians of the Hough grid
threshold = 15 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 200 # minimum number of pixels making up a line
max_line_gap = 1 # maximum gap in pixels between connectable line segments
line_image = np.copy(gray) * 0 # creating a blank to draw lines on
# Vertical lines
vertical_lines = cv2.HoughLinesP(blur_thresh_x, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)
if vertical_lines is not None:
    for line in vertical_lines:
        for x1,y1,x2,y2 in line:
            # here it's possible to add a selection of only vertical lines
            if np.abs(y1-y2) > 0.1 * np.abs(x1-x2):
                cv2.line(line_image,(x1,y1),(x2,y2),255,5)
# Horizontal lines
horizontal_lines = cv2.HoughLinesP(blur_thresh_y, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)
if horizontal_lines is not None:
    for line in horizontal_lines:
        for x1,y1,x2,y2 in line:
            # here it's possible to add a selection of only horizontal lines
            if np.abs(x1-x2) > 0.1 * np.abs(y1-y2):
                cv2.line(line_image,(x1,y1),(x2,y2),255,5)
Then I remove the lines from the threshold.
# threshold
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# remove lines
clean_thresh = cv2.subtract(thresh, line_image)
Then I search the phrases
# search the phrases
dilatation_type = cv2.MORPH_RECT
horizontal_dilatation = 20 #This is the gap. 20 for the first image, 10 for the second image
vertical_dilatation = 1
element = cv2.getStructuringElement(dilatation_type, (2*horizontal_dilatation + 1, 2*vertical_dilatation+1), (horizontal_dilatation, vertical_dilatation))
dilatation_thresh = cv2.dilate(clean_thresh, element)
# Fill
filled_tresh = dilatation_thresh.copy()
contours, hierarchy = cv2.findContours(dilatation_thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for cnt in contours:
    cv2.drawContours(filled_tresh, [cnt], -1, 255, cv2.FILLED)
Now I detect the bounding boxes
# Draw bounding boxes
bounding_box1 = filled_tresh.copy()
contours, hierarchy = cv2.findContours(bounding_box1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    x,y,w,h = cv2.boundingRect(cnt)
    cv2.rectangle(bounding_box1,(x,y),(x+w,y+h),255,cv2.FILLED)
# REPEAT Draw bounding boxes and Find the mean text width
mean_bb_width = 0 # mean bounding box width
bounding_box2 = bounding_box1.copy()
contours, hierarchy = cv2.findContours(bounding_box2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
num_cnt = 0
for cnt in contours:
    x,y,w,h = cv2.boundingRect(cnt)
    cv2.rectangle(bounding_box2,(x,y),(x+w,y+h),255,cv2.FILLED)
    mean_bb_width = mean_bb_width + w
    num_cnt = num_cnt + 1
mean_bb_width = mean_bb_width / num_cnt
Now I separate the titles from the text
# define title what has width bigger than 1.5* mean_width
min_title_width = 1.5 * mean_bb_width
raw_title = np.copy(gray) * 0
raw_text = np.copy(gray) * 0
# separate titles from phrases
contours, hierarchy = cv2.findContours(bounding_box2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    x,y,w,h = cv2.boundingRect(cnt)
    if w >= min_title_width:
        cv2.drawContours(raw_title, [cnt], -1, 255, cv2.FILLED)
    else:
        cv2.drawContours(raw_text, [cnt], -1, 255, cv2.FILLED)
and then the final processing
image_out = image.copy()
# Closing parameters
horizontal_closing = 1
vertical_closing = 20
kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(horizontal_closing,vertical_closing))
# Processing titles
# Closing
closing_title = cv2.morphologyEx(raw_title, cv2.MORPH_CLOSE, kernel)
# Find contours
contours, hierarchy = cv2.findContours(closing_title, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Draw bounding boxes
bounding_title = closing_title.copy()
for cnt in contours:
    x,y,w,h = cv2.boundingRect(cnt)
    cv2.rectangle(image_out,(x,y),(x+w,y+h),(255,0,0),2)
# Processing text
# Closing
closing_text = cv2.morphologyEx(raw_text, cv2.MORPH_CLOSE, kernel)
# Find contours
contours, hierarchy = cv2.findContours(closing_text , cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Draw bounding boxes
bounding_text = closing_text.copy()
for cnt in contours:
    x,y,w,h = cv2.boundingRect(cnt)
    cv2.rectangle(image_out,(x,y),(x+w,y+h),(0,255,0),2)
The result is
Changing the parameter horizontal_dilatation from 20 to 10, I obtain the following result for the second image (where I removed the red border that you added):

How to obtain and store centroid coordinates of rooms of a floor plan image?

I have a floor plan image which consists of multiple rooms. Using Python, I want to find the center of each room and store the coordinates as (x, y) points so that I can use them further for mathematical calculations. The existing drawContours and findContours functions help in determining the contours, but how can I store the values obtained into a list?
The image represents a sample floor plan with multiple rooms.
I tried using moments but the function doesn't work properly.
As you can see, this image is obtained from the drawContours function. But then how do I store the x and y coordinates?
Here's my code:
font = cv2.FONT_HERSHEY_SIMPLEX  # assumed font; `font` was undefined in the original snippet
k = []
# Going through every contour found in the image.
for cnt in contours:
    approx = cv2.approxPolyDP(cnt, 0.009 * cv2.arcLength(cnt, True), True)
    # draws boundary of contours.
    cv2.drawContours(img, [approx], -1, (0, 0, 255), 3)
    # Used to flatten the array containing
    # the co-ordinates of the vertices.
    n = approx.ravel()
    i = 0
    for j in n:
        if i % 2 == 0:
            x = n[i]
            y = n[i + 1]
            # String containing the co-ordinates.
            string = str(x) + " ," + str(y)
            if i == 0:
                # text on topmost co-ordinate.
                cv2.putText(img, string, (x, y), font, 0.5, (255, 0, 0))
                k.append(str((x, y)))
            else:
                # text on remaining co-ordinates.
                cv2.putText(img, string, (x, y), font, 0.5, (0, 255, 0))
                k.append(str((x, y)))
        i = i + 1
# Showing the final image.
cv2_imshow(img)
# Exiting the window if 'q' is pressed on the keyboard.
if cv2.waitKey(0) & 0xFF == ord('q'):
    cv2.destroyAllWindows()
Here's a simple approach:
Obtain binary image. Load image, grayscale, and Otsu's threshold.
Remove text. We find contours then filter using contour area to remove contours smaller than some threshold value. We effectively remove these contours by filling them in with cv2.drawContours.
Find rectangular boxes and obtain centroid coordinates. We find contours again then filter using contour area and contour approximation. We then find moments for each contour which gives us the centroid.
Here's a visualization:
Remove text
Result
Coordinates
[(93, 241), (621, 202), (368, 202), (571, 80), (317, 79), (93, 118)]
Code
import cv2
import numpy as np
# Load image, grayscale, Otsu's threshold
image = cv2.imread('1.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Remove text
cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    area = cv2.contourArea(c)
    if area < 1000:
        cv2.drawContours(thresh, [c], -1, 0, -1)
thresh = 255 - thresh
result = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)
coordinates = []
# Find rectangular boxes and obtain centroid coordinates
cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    area = cv2.contourArea(c)
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.05 * peri, True)
    if len(approx) == 4 and area < 100000:
        # cv2.drawContours(result, [c], -1, (36,255,12), 1)
        M = cv2.moments(c)
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        coordinates.append((cx, cy))
        cv2.circle(result, (cx, cy), 3, (36,255,12), -1)
        cv2.putText(result, '({}, {})'.format(cx, cy), (cx - 40, cy - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (36,255,12), 2)
print(coordinates)
cv2.imshow('thresh', thresh)
cv2.imshow('image', image)
cv2.imshow('result', result)
cv2.waitKey()
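Since the centroids end up in the coordinates list as plain (x, y) tuples, the further mathematical calculations mentioned in the question are straightforward list math; for example, pairwise Euclidean distances between room centers (illustrative only):
import math

# coordinates as printed above, e.g. [(93, 241), (621, 202), ...]
for i in range(len(coordinates)):
    for j in range(i + 1, len(coordinates)):
        (x1, y1), (x2, y2) = coordinates[i], coordinates[j]
        # Euclidean distance in pixels between room centers i and j
        print('room {} -> room {}: {:.1f}'.format(i, j, math.hypot(x2 - x1, y2 - y1)))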

Digit recognition of non-light/color digits using Python/OpenCV

I am using OpenCV 4 and the latest Python version, and I have all the packages as well.
I know how to resize, grayscale, blur, detect edges, etc., but I get so many errors that it seems I can't get any detection to work.
Here is the cropped multimeter display image from which I am trying to detect and extract the digits.
I need simple code or help to code and retrieve the digits; I have tried multiple guides without success. OpenCV is new to me, but it took me 2-3 days to learn the basic image processing capabilities.
Digital multimeter, non-color
OK, I have now updated the code. I want to extract the digits from the warped and transformed binary image, but I get either errors, or I need to delete parts of the code just to show the two images.
Here is the code so far:
# import the necessary packages
from imutils.perspective import four_point_transform
from imutils import contours
import imutils
import cv2
# define the dictionary of digit segments so we can identify
# each digit on the thermostat
DIGITS_LOOKUP = {
(1, 1, 1, 0, 1, 1, 1): 0,
(0, 0, 1, 0, 0, 1, 0): 1,
(1, 0, 1, 1, 1, 1, 0): 2,
(1, 0, 1, 1, 0, 1, 1): 3,
(0, 1, 1, 1, 0, 1, 0): 4,
(1, 1, 0, 1, 0, 1, 1): 5,
(1, 1, 0, 1, 1, 1, 1): 6,
(1, 0, 1, 0, 0, 1, 0): 7,
(1, 1, 1, 1, 1, 1, 1): 8,
(1, 1, 1, 1, 0, 1, 1): 9}
# load the example image
image = cv2.imread("Multimeter_1.jpg")
# pre-process the image by converting it to
# grayscale, blurring it, and computing an edge map
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (7,7), 0)
thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 87, 9)
# find contours in the edge map,
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
rect = None
# loop over the contours
for c in cnts:
    # approximate the contour
    x,y,w,h = cv2.boundingRect(c)
    #cv2.rectangle(image, (x, y), (x+w, y+h), (36, 255, 12), 1)
    cv2.drawContours(image, [c], -1, (36,255,12), 3)
    rect = c
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4:
        rect = approx
        break
# extract the display, apply a perspective transform
warped = four_point_transform(thresh, rect.reshape(4, 2))
output = four_point_transform(image, rect.reshape(4, 2))
# Warp the image and perform morphology to clean it
thresh = cv2.threshold(warped, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
# pre-process the image by converting it to
# grayscale, blurring it, and computing an edge map
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (7,7), 0)
thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 87, 9)
# find contours in the edge map,
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
rect = None
# loop over the contours
for c in cnts:
    # approximate the contour
    x,y,w,h = cv2.boundingRect(c)
    #cv2.rectangle(image, (x, y), (x+w, y+h), (36, 255, 12), 1)
    cv2.drawContours(image, [c], -1, (36,255,12), 3)
    rect = c
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4:
        rect = approx
        break
# extract the display, apply a perspective transform
warped = four_point_transform(thresh, rect.reshape(4, 2))
output = four_point_transform(image, rect.reshape(4, 2))
# Warp the image and perform morphology to clean it
thresh = cv2.threshold(warped, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 5))
thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
digitCnts = []
# loop over the digit area candidates
for c in cnts:
    (x,y,w,h) = cv2.boundingRect(c)
    # if the contour is sufficiently large, it must be a digit
    if w >= 25 and (h >= 50 and h <= 60):
        digitCnts.append(c)
cv2.imshow("Multimeter", image)
cv2.imshow("Multimeter2", thresh)
cv2.waitKey(0)
cv2.destroyAllWindows()
Warped, transformed, thresholded
I struggle to write correct code for that; I am still learning.
I took that morphed image and made another Python script to simplify things. Now let's say the image is already binary, transformed, and warped. How can I detect the digits and display the information on the image or in the console after extraction? I have not learned to do that yet. I struggle to find the coordinates of the digits; I have only found the rectangular boundary of the image itself, not what I am looking for.
In other words: I need help to find the bounding boxes of the digits first, then detect and extract them.
Image example
Here is simplified sample code so far:
# import the necessary packages
from imutils.perspective import four_point_transform
from imutils import contours
import imutils
import cv2
import numpy
image = cv2.imread("Morph.jpg")
copy = image.copy()
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cnts = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
ROI_number = 0
for c in cnts:
    x,y,w,h = cv2.boundingRect(c)
    ROI = image[y:y+h, x:x+w]
    cv2.imwrite('ROI_{}.png'.format(ROI_number), ROI)
    cv2.rectangle(copy,(x,y),(x+w,y+h),(36,255,12),2)
    ROI_number += 1
cv2.imshow('thresh', image)
cv2.imshow('copy', copy)
cv2.imwrite("Morphed_rectangle.jpg",copy)
cv2.waitKey(0)
cv2.destroyAllWindows()
You can use cv2.adaptiveThreshold() to get a binary image, then extract the display by sorting for the largest contour. Once you have detected this contour, you can apply your perspective transformation.
import cv2
image = cv2.imread('1.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (7,7), 0)
thresh = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,87,9)
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
rect = None
for c in cnts:
    x,y,w,h = cv2.boundingRect(c)
    cv2.drawContours(image, [c], -1, (36,255,12), 3)
    # cv2.rectangle(image, (x, y), (x+w, y+h), (36, 255, 12), 1)
    rect = c
    break
cv2.imshow('image', image)
cv2.waitKey()
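To then get the digit bounding boxes the question asks for, you can run the same contour filtering as in your own snippet on the warped, cleaned binary display and sort the boxes left-to-right; a sketch, where `thresh` is assumed to be that warped binary image and the size limits are values to tune:
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
digit_boxes = []
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    # keep only roughly digit-sized contours (assumed thresholds)
    if w >= 25 and 50 <= h <= 60:
        digit_boxes.append((x, y, w, h))
# sort left-to-right so the digits read in display order
digit_boxes.sort(key=lambda b: b[0])
for x, y, w, h in digit_boxes:
    digit_roi = thresh[y:y + h, x:x + w]
Each digit_roi can then be fed to a seven-segment lookup like the one sketched earlier on this page.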
