Find dominant color on contour opencv - python

I am trying to find the dominant color inside a contour (black or white).
I am using OpenCV to read an image and extract white-on-black text. This is what I have so far:
The green outline is the contour, the blue lines the bounding boxes. In this instance I am trying to extract the numbers 87575220, but as you can see the code also picks up random artifacts and, for instance, the letter G. I think the solution would be to find the dominant color inside each contour and keep only those whose color is close to white, but I have no idea how to do this.
This the code I have at the moment:
import argparse
import cv2
import imutils
import numpy as np

parser = argparse.ArgumentParser()
parser.add_argument("--image", "-i", required=True, help="Image to detect blobs from")
args = vars(parser.parse_args())

image = cv2.imread(args["image"])
image = imutils.resize(image, width=1200)
grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(grey)
maxval_10 = maxVal * 0.5
ret, threshold = cv2.threshold(grey, maxval_10, 255, cv2.THRESH_BINARY)
canny = cv2.Canny(grey, 200, 250)
lines = cv2.HoughLines(canny, 1, np.pi / 180, 140)
print(maxVal)

theta_min = 60 * np.pi / 180.0
theta_max = 120 * np.pi / 180.0
theta_avr = 0
theta_deg = 0
filteredLines = []
for rho, theta in lines[0]:
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    y1 = int(y0 + 1000 * (a))
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * (a))
    cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)
    if theta_min <= theta <= theta_max:
        filteredLines.append(theta)
        theta_avr += theta

if len(filteredLines) > 0:
    theta_avr /= len(filteredLines)
    theta_deg = (theta_avr / np.pi * 180) - 90
else:
    print("Failed to detect skew")

image = imutils.rotate(image, theta_deg)
canny = imutils.rotate(canny, theta_deg)

im2, contours, hierarchy = cv2.findContours(canny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# cv2.drawContours(image, contours, -1, (0, 255, 0), 1)
cv2.imshow('Contours', im2)

boundingBoxes = []
filteredContours = []
for cnt in contours:
    (x, y, w, h) = cv2.boundingRect(cnt)
    if (h > 20 and h < 90 and w > 5 and w < h):
        if cv2.contourArea(cnt, True) <= 0:
            boundingBoxes.append((x, y, w, h))
            filteredContours.append(cnt)

for x, y, w, h in boundingBoxes:
    cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)

cv2.drawContours(image, filteredContours, -1, (0, 255, 0), 1)
cv2.imshow('Image', image)
cv2.imshow('Edges', canny)
cv2.imshow('Threshold', threshold)
cv2.waitKey(0)
cv2.destroyAllWindows()
This is the original picture:

I would try to make a ROI before I start searching for numbers. You have not given the original image, so this example is made with the image you posted (with boxes and contours already drawn). It should also work with the original though. The steps are written in the example code. Hope it helps. Cheers!
Example code:
import cv2
import numpy as np

# Read the image and make a copy, then create a blank mask
img = cv2.imread('dominant.jpg')
img2 = img.copy()
h, w = img.shape[:2]
mask = np.zeros((h, w), np.uint8)

# Transform to gray colorspace and perform histogram equalization
gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
equ = cv2.equalizeHist(gray)

# Transform all pixels above the threshold to white
black = np.where(equ > 10)
img2[black[0], black[1], :] = [255, 255, 255]

# Transform to gray colorspace, make a threshold, then dilate the threshold
gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
kernel = np.ones((15, 15), np.uint8)
dilation = cv2.dilate(thresh, kernel, iterations=1)

# Search for contours, select the biggest one and draw it on the mask
_, contours, hierarchy = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnt = max(contours, key=cv2.contourArea)
cv2.drawContours(mask, [cnt], 0, 255, -1)

# Perform a bitwise operation to extract the ROI
res = cv2.bitwise_and(img, img, mask=mask)

# Display the ROI
cv2.imshow('img', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:

You could create a mask out of each contour (note the mask must be single-channel, hence image.shape[:2]):
mask = np.zeros(image.shape[:2], dtype="uint8")
cv2.drawContours(mask, [cnt], -1, 255, -1)
and then calculate the mean value of all pixels inside of the mask:
mean = cv2.mean(image, mask=mask)
and then check whether mean is close enough to white.
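Putting the pieces together, a minimal sketch of the whole filtering loop might look like this (reusing image and contours from the question's code; the whiteness threshold of 200 is an assumption you would tune):
import cv2
import numpy as np

whiteContours = []
for cnt in contours:
    # single-channel mask with only this contour filled
    mask = np.zeros(image.shape[:2], dtype="uint8")
    cv2.drawContours(mask, [cnt], -1, 255, -1)
    # mean BGR value of the pixels inside the contour
    mean = cv2.mean(image, mask=mask)[:3]
    # keep the contour only if its interior is close to white
    if all(channel > 200 for channel in mean):
        whiteContours.append(cnt)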

Colors and mean do not match well due to color space properties. I would create a histogram and select the most frequent value (some color downsampling could be applied too).
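For example, a rough sketch of that idea on the grayscale image, assuming grey and a single-channel mask as above (the 16-bin downsampling is an assumption):
import cv2
import numpy as np

# histogram of the masked region, downsampled to 16 bins
hist = cv2.calcHist([grey], [0], mask, [16], [0, 256])
# most frequent bin, mapped back to an approximate intensity (bin center)
dominant_bin = int(np.argmax(hist))
dominant_value = dominant_bin * 16 + 8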

Related

Correct the object orientation in the image. Calculate the correct angle of rotation and correct the alignment of the object in the image

I have cropped images of electronic meter readings. Those readings are taken in a random orientation. I need the orientation of the object (not the image) in the image to be aligned.
1. The detection of contours is not working. Lots of contours are formed in the image, and in order to calculate the angle I need to select the right contour. Sometimes a contour is not formed at all.
2. I want a set of rotated images as shown in the figure above. I tried some image-rotation code from OpenCV, but because there are two types of use case (we cannot tell from the code which of the two reading styles we have), the images turn out as below.
Using the code below I am able to find the angle of rotation, but only for one of the cases. I need it to be done automatically for both types of cases. Also see the attached data set for examples of the other type.
import cv2
import numpy as np
import matplotlib.pyplot as plt  # needed by display()

debug = True

# Display image
def display(img, frameName="OpenCV Image"):
    if not debug:
        return
    h, w = img.shape[0:2]
    neww = 800
    newh = int(neww * (h / w))
    img = cv2.resize(img, (neww, newh))
    plt.imshow(img)
    plt.show()
    # cv2.imshow(frameName, img)
    # cv2.waitKey(0)

# Rotate the image with the given theta value
def rotate(img, theta):
    rows, cols = img.shape[0], img.shape[1]
    image_center = (cols / 2, rows / 2)
    M = cv2.getRotationMatrix2D(image_center, theta, 1)
    abs_cos = abs(M[0, 0])
    abs_sin = abs(M[0, 1])
    bound_w = int(rows * abs_sin + cols * abs_cos)
    bound_h = int(rows * abs_cos + cols * abs_sin)
    M[0, 2] += bound_w / 2 - image_center[0]
    M[1, 2] += bound_h / 2 - image_center[1]
    # rotate original image to show transformation
    rotated = cv2.warpAffine(img, M, (bound_w, bound_h), borderValue=(255, 255, 255))
    return rotated

def slope(x1, y1, x2, y2):
    if x1 == x2:
        return 0
    slope = (y2 - y1) / (x2 - x1)
    theta = np.rad2deg(np.arctan(slope))
    return theta

def main(filePath):
    img = cv2.imread(filePath)
    (hi, wi) = img.shape[:2]
    textImg = img.copy()
    small = cv2.cvtColor(textImg, cv2.COLOR_BGR2GRAY)

    # find the gradient map
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    grad = cv2.morphologyEx(small, cv2.MORPH_GRADIENT, kernel)
    display(grad)

    # Binarize the gradient image
    _, bw = cv2.threshold(grad, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    display(bw)

    # connect horizontally oriented regions
    # kernel value (9,1) can be changed to improve the text detection
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 1))
    connected = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)
    display(connected)

    # using RETR_EXTERNAL instead of RETR_CCOMP
    # _, contours, hierarchy = cv2.findContours(connected.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    contours, hierarchy = cv2.findContours(connected.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)  # opencv >= 4.0
    mask = np.zeros(bw.shape, dtype=np.uint8)
    display(mask)

    # cumulative theta value
    cummTheta = 0
    # number of detected text regions
    ct = 0
    flag = False
    for idx in range(len(contours)):
        x, y, w, h = cv2.boundingRect(contours[idx])
        mask[y:y + h, x:x + w] = 0
        # fill the contour
        cv2.drawContours(mask, contours, idx, (255, 255, 255), -1)
        display(mask)
        # ratio of non-zero pixels in the filled region
        r = float(cv2.countNonZero(mask[y:y + h, x:x + w])) / (w * h)
        # assume at least 45% of the area is filled if it contains text
        # if r > 0.39 and w > 8 and h > 8:
        if (h / hi) > 0.4 and (w / wi) > 0.4:
            flag = True
            print(r, w, h)
            # cv2.rectangle(textImg, (x1, y), (x+w-1, y+h-1), (0, 255, 0), 2)
            rect = cv2.minAreaRect(contours[idx])
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            cv2.drawContours(textImg, [box], 0, (0, 0, 255), 2)
            center = (int(rect[0][0]), int(rect[0][1]))
            width = int(rect[1][0])
            height = int(rect[1][1])
            angle = int(rect[2])
            print(angle)
            print(width, height)
            if width < height:
                angle = 90 + angle
            print(angle, 'final')
            # we can filter theta as an outlier based on the other theta values;
            # this helps exclude the rare text region whose orientation differs from the usual value
            theta = slope(box[0][0], box[0][1], box[1][0], box[1][1])
            cummTheta += theta
            ct += 1
            # print("Theta", theta)
            # find the average of all cumulative theta values
            # orientation = cummTheta / ct
            print("Image orientation in degrees: ", angle)
            finalImage = rotate(img, angle)
            display(textImg, "Detected text minimum bounding box")
            display(finalImage)
            out_path = 'cropped_corrected/rotated/' + filePath.split('\\')[-1]
            print(out_path)
            cv2.imwrite(out_path, finalImage)
            print('image saved here in rotated')
            break
    if not flag:
        out_path = 'cropped_corrected/not_rotated/' + filePath.split('\\')[-1]
        print(out_path)
        cv2.imwrite(out_path, img)
        print('image saved here without rotation')

if __name__ == "__main__":
    filePath = 'cropped/N3963001963.jpg'
    main(filePath)
I am attaching some sample images that need to be rotated; the object inside each image needs to be aligned:

Reliable program that can detect QR Codes without cv2.QRCodeDetector() or pyzbar library

I am having trouble finding a set of morphological operations that allow me to detect (only) the QR codes in various images using cv2.connectedComponentsWithStats() or cv2.findContours() (but I would prefer to solve this with cv2.connectedComponentsWithStats()).
The images I absolutely need the code to work on are the following:
I have been messing with two different scripts, one using cv2.connectedComponentsWithStats() and the other cv2.findContours() and some other methods (based on nathancy's answer to Detect a QR code from an image and crop using OpenCV). To test, I've been using the following code:
Using cv2.connectedComponentsWithStats(): the problem with this code is that it captures more than the QR code in the 2nd image, as you can see below. In the 1st it works great, and in the 3rd as well if scaled to 0.5; otherwise it also detects more than the QR code, like in the 2nd image.
import cv2
import numpy as np

#img = cv2.imread('Code-1.jpg'); scale = 1;
img = cv2.imread('Code-2.jpg'); scale = 1;
#img = cv2.imread('Code-3.jpg'); scale = 0.5;
width = int(img.shape[1] * scale); height = int(img.shape[0] * scale); img = cv2.resize(img, (width, height))
og = img.copy()
gray = np.zeros((img.shape[0], img.shape[1]), dtype=np.uint8)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gaussianblur = cv2.GaussianBlur(gray, (7, 7), 0)
otsuthresh = cv2.threshold(gaussianblur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
edges = cv2.Canny(otsuthresh, threshold1=100, threshold2=200)
dilate = cv2.dilate(edges, (5, 5), iterations=1)
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(dilate, 8, cv2.CV_32S)

for i in range(1, num_labels):
    objint = (labels == i).astype(np.uint8) * 255 / i
    x = stats[i, cv2.CC_STAT_LEFT]
    y = stats[i, cv2.CC_STAT_TOP]
    w = stats[i, cv2.CC_STAT_WIDTH]
    h = stats[i, cv2.CC_STAT_HEIGHT]
    area = stats[i, cv2.CC_STAT_AREA]
    ratio = w / float(h)
    (cX, cY) = centroids[i]
    if area > 500 and (ratio > .95 and ratio < 1.05) and (w < 0.99 * img.shape[1]):
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        ROI = og[y:y + h, x:x + w]
        cv2.imwrite('ROI.png', ROI)

cv2.imshow('image', img)
cv2.imshow('QR code', ROI)
Using cv2.findContours(): this one can't detect any of the QR codes in the images the code absolutely must not fail on, but it can detect QR codes in some other random images.
import cv2
import numpy as np

#img = cv2.imread('Code-1.jpg'); scale = 1;
img = cv2.imread('Code-2.jpg'); scale = 1;
#img = cv2.imread('Code-3.jpg'); scale = 0.5;
width = int(img.shape[1] * scale); height = int(img.shape[0] * scale); img = cv2.resize(img, (width, height))
og = img.copy()
gray = np.zeros((img.shape[0], img.shape[1]), dtype=np.uint8)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gaussianblur = cv2.GaussianBlur(gray, (7, 7), 0)
otsuthresh = cv2.threshold(gaussianblur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
closed = cv2.morphologyEx(otsuthresh, cv2.MORPH_CLOSE, kernel, iterations=3)
contours = cv2.findContours(closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if len(contours) == 2:
    contours = contours[0]
else:
    contours = contours[1]

for cnt in contours:
    perim = cv2.arcLength(cnt, True)
    approx = cv2.approxPolyDP(cnt, 0.05 * perim, True)
    x, y, w, h = cv2.boundingRect(approx)
    area = cv2.contourArea(cnt)
    ratio = w / float(h)
    if len(approx) == 4 and area > 1000 and (ratio > .80 and ratio < 1.2):
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 4)
        ROI = og[y:y + h, x:x + w]
        cv2.imwrite('ROI.png', ROI)

cv2.imshow('image', img)
cv2.imshow('QR code', ROI)
Thank you for reading and if I wasn't clear on something please let me know.
Filipe Almeida
Maybe you could try QReader. It is just a wrapper around OpenCV, Pyzbar and other QR detection and image filtering methods, but it works quite well out of the box for those cases.
from qreader import QReader
from matplotlib import pyplot as plt
import cv2

if __name__ == '__main__':
    # Initialize QReader
    detector = QReader()
    for img_path in ('0oOAF.jpg', 'HXlS8.jpg', '5fFTo.jpg'):
        # Read the image
        img = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)
        # Detect the QR bbox
        found, bbox = detector.detect(image=img)
        if found:
            # Draw the bbox
            x1, y1, x2, y2 = bbox
            cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2), color=(0, 255, 0), thickness=2)
            # Save the image
            plt.imshow(img)
            plt.savefig(f"{img_path}-bbox.png")
That's the output it gives:

How to blur a face in opencv with round borders - Python?

I blurred the face in OpenCV like this:
I used this code:
face = cv2.medianBlur(face, 99)  # ksize must be odd
img[top:bottom, left:right] = face
But I want to make the face border round, like this (it does not need to be perfect):
First, create a mask image. To do so, draw a white circle at the face location on a black image.
Second, blur the whole image.
Third, copy blurred content to the original image only where your mask is > 0.
import math
import cv2
import numpy as np

# face bounding box: top-left corner p1, size w x h
p1 = (65, 65)
w, h = 100, 100
p2 = (p1[0] + w, p1[1] + h)
# circle that covers the whole face box
circle_center = ((p1[0] + p2[0]) // 2, (p1[1] + p2[1]) // 2)
circle_radius = int(math.sqrt(w * w + h * h) // 2)

# white circle at the face location on a black mask
mask_img = np.zeros(img.shape, dtype='uint8')
cv2.circle(mask_img, circle_center, circle_radius, (255, 255, 255), -1)

# blur the whole image, then take blurred pixels only inside the circle
img_all_blurred = cv2.medianBlur(img, 99)
img_face_blurred = np.where(mask_img > 0, img_all_blurred, img)
Output:
import cv2
import matplotlib.pyplot as plt
import numpy as np
img = cv2.imread('image.jpg')
h, w, c = img.shape
plt.imshow(img)
plt.show()
c_mask = np.zeros((h,w), np.uint8)
cv2.circle(c_mask,(w//2,h//2),100,1,thickness=-1)
mask = cv2.bitwise_and(img, img, mask=c_mask)
plt.imshow(mask)
plt.show()
img_mask = img - mask
plt.imshow(img_mask)
plt.show()
blur = cv2.blur(img,(17, 17))
plt.imshow(blur)
plt.show()
mask2 = cv2.bitwise_and(blur, blur, mask=c_mask) # mask
plt.imshow(mask2)
plt.show()
final_img = img_mask + mask2
print(np.max(final_img))
plt.imshow(final_img)
plt.show()
You can blur the whole image and then copy the result to the source using any mask you like.
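For instance, a hedged equivalent of that copy step using np.where, reusing img, blur and c_mask from the code above (c_mask holds 0/1 values):
import cv2
import numpy as np

# replicate the single-channel mask to 3 channels so np.where can broadcast over BGR
mask3 = cv2.merge([c_mask, c_mask, c_mask])
final_img = np.where(mask3 > 0, blur, img)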

How to find, rotate and crop a section of text in OpenCV, Python

I'm struggling with a project that takes an image of a fairly clear font, from a label for example, reads the "text region" and outputs it as a string using OCR, Tesseract for instance.
I've made quite some progress with it, as I added various global filters to get a fairly clean result, but I'm struggling to find a method of filtering just the text out of there; then you have to think about rotating it to be as horizontal as possible, and after that the easy part should be to crop it.
May I have any leads on how to do that without using training data and over-complicating the system, since I only use a Raspberry Pi to do the computing?
Thanks for the help. Here's what I've come up with so far:
Original Image(Captured from PiCamera):
Adaptive thresh after shadow removal:
Global thresh after shadow removal:
Here's the code:
# import the necessary packages
from PIL import Image
import pytesseract
import argparse
import cv2
import os
import picamera
import time
import numpy as np

#preprocess = "tresh"

# Remaining text cropping and rotating:
import math
import json
from collections import defaultdict
from scipy.ndimage.filters import rank_filter

def dilate(ary, N, iterations):
    """Dilate using an NxN '+' sign shape. ary is np.uint8."""
    kernel = np.zeros((N, N), dtype=np.uint8)
    kernel[(N-1)/2, :] = 1
    dilated_image = cv2.dilate(ary / 255, kernel, iterations=iterations)
    kernel = np.zeros((N, N), dtype=np.uint8)
    kernel[:, (N-1)/2] = 1
    dilated_image = cv2.dilate(dilated_image, kernel, iterations=iterations)
    return dilated_image

def props_for_contours(contours, ary):
    """Calculate bounding box & the number of set pixels for each contour."""
    c_info = []
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        c_im = np.zeros(ary.shape)
        cv2.drawContours(c_im, [c], 0, 255, -1)
        c_info.append({
            'x1': x,
            'y1': y,
            'x2': x + w - 1,
            'y2': y + h - 1,
            'sum': np.sum(ary * (c_im > 0))/255
        })
    return c_info

def union_crops(crop1, crop2):
    """Union two (x1, y1, x2, y2) rects."""
    x11, y11, x21, y21 = crop1
    x12, y12, x22, y22 = crop2
    return min(x11, x12), min(y11, y12), max(x21, x22), max(y21, y22)

def intersect_crops(crop1, crop2):
    x11, y11, x21, y21 = crop1
    x12, y12, x22, y22 = crop2
    return max(x11, x12), max(y11, y12), min(x21, x22), min(y21, y22)

def crop_area(crop):
    x1, y1, x2, y2 = crop
    return max(0, x2 - x1) * max(0, y2 - y1)

def find_border_components(contours, ary):
    borders = []
    area = ary.shape[0] * ary.shape[1]
    for i, c in enumerate(contours):
        x, y, w, h = cv2.boundingRect(c)
        if w * h > 0.5 * area:
            borders.append((i, x, y, x + w - 1, y + h - 1))
    return borders

def angle_from_right(deg):
    return min(deg % 90, 90 - (deg % 90))

def remove_border(contour, ary):
    """Remove everything outside a border contour."""
    # Use a rotated rectangle (should be a good approximation of a border).
    # If it's far from a right angle, it's probably two sides of a border and
    # we should use the bounding box instead.
    c_im = np.zeros(ary.shape)
    r = cv2.minAreaRect(contour)
    degs = r[2]
    if angle_from_right(degs) <= 10.0:
        box = cv2.cv.BoxPoints(r)
        box = np.int0(box)
        cv2.drawContours(c_im, [box], 0, 255, -1)
        cv2.drawContours(c_im, [box], 0, 0, 4)
    else:
        x1, y1, x2, y2 = cv2.boundingRect(contour)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 255, -1)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 0, 4)
    return np.minimum(c_im, ary)

def find_components(edges, max_components=16):
    """Dilate the image until there are just a few connected components.
    Returns contours for these components."""
    # Perform increasingly aggressive dilation until there are just a few
    # connected components.
    count = 21
    dilation = 5
    n = 1
    while count > 16:
        n += 1
        dilated_image = dilate(edges, N=3, iterations=n)
        contours, hierarchy = cv2.findContours(dilated_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        count = len(contours)
    #print dilation
    #Image.fromarray(edges).show()
    #Image.fromarray(255 * dilated_image).show()
    return contours

def find_optimal_components_subset(contours, edges):
    """Find a crop which strikes a good balance of coverage/compactness.
    Returns an (x1, y1, x2, y2) tuple.
    """
    c_info = props_for_contours(contours, edges)
    c_info.sort(key=lambda x: -x['sum'])
    total = np.sum(edges) / 255
    area = edges.shape[0] * edges.shape[1]

    c = c_info[0]
    del c_info[0]
    this_crop = c['x1'], c['y1'], c['x2'], c['y2']
    crop = this_crop
    covered_sum = c['sum']

    while covered_sum < total:
        changed = False
        recall = 1.0 * covered_sum / total
        prec = 1 - 1.0 * crop_area(crop) / area
        f1 = 2 * (prec * recall / (prec + recall))
        #print '----'
        for i, c in enumerate(c_info):
            this_crop = c['x1'], c['y1'], c['x2'], c['y2']
            new_crop = union_crops(crop, this_crop)
            new_sum = covered_sum + c['sum']
            new_recall = 1.0 * new_sum / total
            new_prec = 1 - 1.0 * crop_area(new_crop) / area
            new_f1 = 2 * new_prec * new_recall / (new_prec + new_recall)
            # Add this crop if it improves f1 score,
            # _or_ it adds 25% of the remaining pixels for <15% crop expansion.
            # ^^^ very ad-hoc! make this smoother
            remaining_frac = c['sum'] / (total - covered_sum)
            new_area_frac = 1.0 * crop_area(new_crop) / crop_area(crop) - 1
            if new_f1 > f1 or (
                    remaining_frac > 0.25 and new_area_frac < 0.15):
                print '%d %s -> %s / %s (%s), %s -> %s / %s (%s), %s -> %s' % (
                    i, covered_sum, new_sum, total, remaining_frac,
                    crop_area(crop), crop_area(new_crop), area, new_area_frac,
                    f1, new_f1)
                crop = new_crop
                covered_sum = new_sum
                del c_info[i]
                changed = True
                break
        if not changed:
            break
    return crop

def pad_crop(crop, contours, edges, border_contour, pad_px=15):
    """Slightly expand the crop to get full contours.
    This will expand to include any contours it currently intersects, but will
    not expand past a border.
    """
    bx1, by1, bx2, by2 = 0, 0, edges.shape[0], edges.shape[1]
    if border_contour is not None and len(border_contour) > 0:
        c = props_for_contours([border_contour], edges)[0]
        bx1, by1, bx2, by2 = c['x1'] + 5, c['y1'] + 5, c['x2'] - 5, c['y2'] - 5

    def crop_in_border(crop):
        x1, y1, x2, y2 = crop
        x1 = max(x1 - pad_px, bx1)
        y1 = max(y1 - pad_px, by1)
        x2 = min(x2 + pad_px, bx2)
        y2 = min(y2 + pad_px, by2)
        return crop

    crop = crop_in_border(crop)

    c_info = props_for_contours(contours, edges)
    changed = False
    for c in c_info:
        this_crop = c['x1'], c['y1'], c['x2'], c['y2']
        this_area = crop_area(this_crop)
        int_area = crop_area(intersect_crops(crop, this_crop))
        new_crop = crop_in_border(union_crops(crop, this_crop))
        if 0 < int_area < this_area and crop != new_crop:
            print '%s -> %s' % (str(crop), str(new_crop))
            changed = True
            crop = new_crop

    if changed:
        return pad_crop(crop, contours, edges, border_contour, pad_px)
    else:
        return crop

def downscale_image(im, max_dim=2048):
    """Shrink im until its longest dimension is <= max_dim.
    Returns new_image, scale (where scale <= 1).
    """
    a, b = im.size
    if max(a, b) <= max_dim:
        return 1.0, im
    scale = 1.0 * max_dim / max(a, b)
    new_im = im.resize((int(a * scale), int(b * scale)), Image.ANTIALIAS)
    return scale, new_im

def process_image(inputImg):
    opnImg = Image.open(inputImg)
    scale, im = downscale_image(opnImg)
    edges = cv2.Canny(np.asarray(im), 100, 200)

    # TODO: dilate image _before_ finding a border. This is crazy sensitive!
    contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    borders = find_border_components(contours, edges)
    borders.sort(key=lambda (i, x1, y1, x2, y2): (x2 - x1) * (y2 - y1))

    border_contour = None
    if len(borders):
        border_contour = contours[borders[0][0]]
        edges = remove_border(border_contour, edges)

    edges = 255 * (edges > 0).astype(np.uint8)

    # Remove ~1px borders using a rank filter.
    maxed_rows = rank_filter(edges, -4, size=(1, 20))
    maxed_cols = rank_filter(edges, -4, size=(20, 1))
    debordered = np.minimum(np.minimum(edges, maxed_rows), maxed_cols)
    edges = debordered

    contours = find_components(edges)
    if len(contours) == 0:
        print '%s -> (no text!)' % inputImg
        return

    crop = find_optimal_components_subset(contours, edges)
    crop = pad_crop(crop, contours, edges, border_contour)
    crop = [int(x / scale) for x in crop]  # upscale to the original image size.

    #draw = ImageDraw.Draw(im)
    #c_info = props_for_contours(contours, edges)
    #for c in c_info:
    #    this_crop = c['x1'], c['y1'], c['x2'], c['y2']
    #    draw.rectangle(this_crop, outline='blue')
    #draw.rectangle(crop, outline='red')
    #im.save(out_path)
    #draw.text((50, 50), path, fill='red')
    #orig_im.save(out_path)
    #im.show()
    text_im = opnImg.crop(crop)
    text_im.save('Cropted_and_rotated_image.jpg')
    return text_im
    '''
    text_im.save(out_path)
    print '%s -> %s' % (path, out_path)
    '''

# Camera capturing stuff:
myCamera = picamera.PiCamera()
myCamera.vflip = True
myCamera.hflip = True
'''
myCamera.start_preview()
time.sleep(6)
myCamera.stop_preview()
'''
myCamera.capture("Captured_Image.png")
# End capturing procedure

imgAddr = '/home/pi/My_examples/Mechanical_display_converter/Example1.jpg'
#imgAddr = "Captured_Image.png"

# construct the argument parser and parse the arguments
#ap = argparse.ArgumentParser()
'''
ap.add_argument("-i", "--image", required=True,
    help="path to input image to be OCR'd")
ap.add_argument("-p", "--preprocess", type=str, default="thresh",
    help="type of preprocessing to be done")
args = vars(ap.parse_args())
'''

# load the example image and convert it to grayscale
img = cv2.imread(imgAddr)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('Step1_gray_filter', gray)
'''
# check to see if we should apply thresholding to preprocess the
# image
if args["preprocess"] == "thresh":
    gray = cv2.threshold(gray, 0, 255,
        cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
# make a check to see if median blurring should be done to remove
# noise
elif args["preprocess"] == "blur":
    gray = cv2.medianBlur(gray, 3)

if preprocess == "thresh":
    gray = cv2.threshold(gray, 150, 255,
        cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
# make a check to see if median blurring should be done to remove
# noise
elif preprocess == "blur":
    gray = cv2.medianBlur(gray, 3)
'''

rgb_planes = cv2.split(img)
result_planes = []
result_norm_planes = []
for plane in rgb_planes:
    dilated_img = cv2.dilate(plane, np.ones((7, 7), np.uint8))
    bg_img = cv2.medianBlur(dilated_img, 21)
    diff_img = 255 - cv2.absdiff(plane, bg_img)
    norm_img = cv2.normalize(diff_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
    result_planes.append(diff_img)
    result_norm_planes.append(norm_img)

result = cv2.merge(result_planes)
result_norm = cv2.merge(result_norm_planes)
cv2.imshow('shadows_out.png', result)
cv2.imshow('shadows_out_norm.png', result_norm)

grayUnShadowedImg = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
cv2.imshow('Shadow_Gray_CVT', grayUnShadowedImg)

ret, threshUnShadowedImg = cv2.threshold(grayUnShadowedImg, 200, 255, cv2.THRESH_BINARY)
cv2.imshow('unShadowed_Thresh_filtering', threshUnShadowedImg)
#cv2.imwrite('unShadowed_Thresh_filtering.jpg', threshUnShadowedImg)
#croptedunShadowedImg = process_image('unShadowed_Thresh_filtering.jpg')

adptThreshUnShadowedImg = cv2.adaptiveThreshold(grayUnShadowedImg, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)
cv2.imshow('unShadowed_Adaptive_Thresh_filtering', adptThreshUnShadowedImg)
'''
blurFImg = cv2.GaussianBlur(adptThreshUnShadowedImg, (25, 25), 0)
ret, f3Img = cv2.threshold(blurFImg, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
cv2.imshow('f3Img', f3Img)
'''

# OCR Stage:
'''
# write the grayscale image to disk as a temporary file so we can
# apply OCR to it
filename = "{}.png".format(os.getpid())
cv2.imwrite(filename, threshImg)
# load the image as a PIL/Pillow image, apply OCR, and then delete
# the temporary file
text = pytesseract.image_to_string(Image.open(filename))
os.remove(filename)
print("\n" + text)
'''
cv2.waitKey(0)
cv2.destroyAllWindows()
I tried this source out as well, but it doesn't seem to work and is not that clear to understand:
https://www.danvk.org/2015/01/07/finding-blocks-of-text-in-an-image-using-python-opencv-and-numpy.html
I have made an example to maybe give you an idea on how to proceed. I made it without your transformations of the image, but you could apply them if you like.
What I did was first transform the image to binary with cv2.THRESH_BINARY. Next I made a mask and drew the contours on it, limiting them by size (cv2.contourArea()) and ratio (from cv2.boundingRect()) for the threshold. Then I connected all the contours that are near each other using cv2.morphologyEx() and a big kernel size (50x50).
Then I selected the biggest contour (the text) and drew a rotated rectangle with cv2.minAreaRect(), which gave me the rotation angle.
Then I could rotate the image using cv2.getRotationMatrix2D() and cv2.warpAffine(), and get a slightly bigger bounding box using the highest X, Y and lowest X, Y values of the rotated rectangle, which I used to crop the image.
Then I searched again for contours and removed the noise (little contours) from the image, and the result is text with high contrast.
Final result:
This code is meant only to give an idea or another point of view on the problem, and it may not work with other images (if they differ from the original too much), or at least you would have to adjust some parameters of the code. Hope it helps. Cheers!
Code:
import cv2
import numpy as np

# Read image and search for contours.
img = cv2.imread('rotatec.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, threshold = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

# Create first mask used for rotation.
mask = np.ones(img.shape, np.uint8) * 255

# Draw contours on the mask with size and ratio of borders for threshold.
for cnt in contours:
    size = cv2.contourArea(cnt)
    x, y, w, h = cv2.boundingRect(cnt)
    if 10000 > size > 500 and w * 2.5 > h:
        cv2.drawContours(mask, [cnt], -1, (0, 0, 0), -1)

# Connect neighbouring contours and select the biggest one (text).
kernel = np.ones((50, 50), np.uint8)
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
gray_op = cv2.cvtColor(opening, cv2.COLOR_BGR2GRAY)
_, threshold_op = cv2.threshold(gray_op, 150, 255, cv2.THRESH_BINARY_INV)
contours_op, hierarchy_op = cv2.findContours(threshold_op, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnt = max(contours_op, key=cv2.contourArea)

# Create rotated rectangle to get the angle of rotation and the 4 points of the rectangle.
_, _, angle = rect = cv2.minAreaRect(cnt)
(h, w) = img.shape[:2]
(center) = (w // 2, h // 2)

# Rotate the image.
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(img, M, (int(w), int(h)), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT)

# Create bounding box for rotated text (use old points of rotated rectangle).
box = cv2.boxPoints(rect)
a, b, c, d = box = np.int0(box)
bound = []
bound.append(a)
bound.append(b)
bound.append(c)
bound.append(d)
bound = np.array(bound)
(x1, y1) = (bound[:, 0].min(), bound[:, 1].min())
(x2, y2) = (bound[:, 0].max(), bound[:, 1].max())
cv2.drawContours(img, [box], 0, (0, 0, 255), 2)

# Crop the image and create new mask for the final image.
rotated = rotated[y1:y2, x1:x2]
mask_final = np.ones(rotated.shape, np.uint8) * 255

# Remove noise from the final image.
gray_r = cv2.cvtColor(rotated, cv2.COLOR_BGR2GRAY)
_, threshold_r = cv2.threshold(gray_r, 150, 255, cv2.THRESH_BINARY_INV)
contours, hierarchy = cv2.findContours(threshold_r, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
for cnt in contours:
    size = cv2.contourArea(cnt)
    if size < 500:
        cv2.drawContours(threshold_r, [cnt], -1, (0, 0, 0), -1)

# Invert black and white.
final_image = cv2.bitwise_not(threshold_r)

# Display results.
cv2.imshow('final', final_image)
cv2.imshow('rotated', rotated)
EDIT:
For text recognition I recommend you see this post from SO: Simple Digit Recognition OCR in OpenCV-Python.
The result with the code from mentioned post:
EDIT:
This is my code implemented with the slightly modified code from the mentioned post. All steps are written in the comments. You should save the script and the training image to the same directory. This is my training image:
Code:
import cv2
import numpy as np

# Read image and search for contours.
img = cv2.imread('rotatec.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, threshold = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

# Create first mask used for rotation.
mask = np.ones(img.shape, np.uint8) * 255

# Draw contours on the mask with size and ratio of borders for threshold.
for cnt in contours:
    size = cv2.contourArea(cnt)
    x, y, w, h = cv2.boundingRect(cnt)
    if 10000 > size > 500 and w * 2.5 > h:
        cv2.drawContours(mask, [cnt], -1, (0, 0, 0), -1)

# Connect neighbouring contours and select the biggest one (text).
kernel = np.ones((50, 50), np.uint8)
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
gray_op = cv2.cvtColor(opening, cv2.COLOR_BGR2GRAY)
_, threshold_op = cv2.threshold(gray_op, 150, 255, cv2.THRESH_BINARY_INV)
contours_op, hierarchy_op = cv2.findContours(threshold_op, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnt = max(contours_op, key=cv2.contourArea)

# Create rotated rectangle to get the angle of rotation and the 4 points of the rectangle.
_, _, angle = rect = cv2.minAreaRect(cnt)
(h, w) = img.shape[:2]
(center) = (w // 2, h // 2)

# Rotate the image.
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(img, M, (int(w), int(h)), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT)

# Create bounding box for rotated text (use old points of rotated rectangle).
box = cv2.boxPoints(rect)
a, b, c, d = box = np.int0(box)
bound = []
bound.append(a)
bound.append(b)
bound.append(c)
bound.append(d)
bound = np.array(bound)
(x1, y1) = (bound[:, 0].min(), bound[:, 1].min())
(x2, y2) = (bound[:, 0].max(), bound[:, 1].max())
cv2.drawContours(img, [box], 0, (0, 0, 255), 2)

# Crop the image and create new mask for the final image.
rotated = rotated[y1:y2, x1-10:x2]
mask_final = np.ones(rotated.shape, np.uint8) * 255

# Remove noise from the final image.
gray_r = cv2.cvtColor(rotated, cv2.COLOR_BGR2GRAY)
_, threshold_r = cv2.threshold(gray_r, 150, 255, cv2.THRESH_BINARY_INV)
contours, hierarchy = cv2.findContours(threshold_r, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
for cnt in contours:
    size = cv2.contourArea(cnt)
    if size < 500:
        cv2.drawContours(threshold_r, [cnt], -1, (0, 0, 0), -1)

# Invert black and white.
final_image = cv2.bitwise_not(threshold_r)

# Save the result.
cv2.imwrite('rotated12.png', final_image)

# Import module for finding path to database.
from pathlib import Path

# This code executes once and writes two files.
# If the file exists it skips this step, else it runs again.
file = Path("generalresponses.data")
if file.is_file() == False:
    # Reading the training image
    im = cv2.imread('pitrain1.png')
    im3 = im.copy()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    thresh = cv2.adaptiveThreshold(blur, 255, 1, 1, 11, 2)
    # Finding contours
    _, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Create array and list for appending data
    samples = np.empty((0, 100))
    responses = []
    # Value serving to increment the "automatic" learning
    i = 0
    # Iterate through contours and append the array and list with "learned" values
    for cnt in contours:
        i += 1
        [x, y, w, h] = cv2.boundingRect(cnt)
        cv2.rectangle(im, (x, y), (x + w, y + h), (0, 0, 255), 2)
        roi = thresh[y:y + h, x:x + w]  # Cropping ROI to bounding rectangle
        roismall = cv2.resize(roi, (10, 10))  # Resizing ROI to smaller image
        cv2.imshow('norm', im)
        # Appending values based on the pitrain1.png image
        if i < 36:
            responses.append(int(45))
        elif 35 < i < 80:
            responses.append(int(48))
        elif 79 < i < 125:
            responses.append(int(57))
        elif 124 < i < 160:
            responses.append(int(56))
        elif 159 < i < 205:
            responses.append(int(55))
        elif 204 < i < 250:
            responses.append(int(54))
        elif 249 < i < 295:
            responses.append(int(53))
        elif 294 < i < 340:
            responses.append(int(52))
        elif 339 < i < 385:
            responses.append(int(51))
        elif 384 < i < 430:
            responses.append(int(50))
        elif 429 < i < 485:
            responses.append(int(49))
        else:
            break
        sample = roismall.reshape((1, 100))
        samples = np.append(samples, sample, 0)
    # Reshape and save the database
    responses = np.array(responses)
    responses = responses.reshape((responses.size, 1))
    print('end')
    np.savetxt('generalsamples.data', samples)
    np.savetxt('generalresponses.data', responses, fmt='%s')

################### Recognition ########################

# Dictionary for numbers and characters (in this sample code the only
# character is " - ")
number = {
    48: "0",
    53: "5",
    52: "4",
    50: "2",
    45: "-",
    55: "7",
    51: "3",
    57: "9",
    56: "8",
    54: "6",
    49: "1"
}

####### training part ###############
samples = np.loadtxt('generalsamples.data', np.float32)
responses = np.loadtxt('generalresponses.data', np.float32)
responses = responses.reshape((responses.size, 1))
model = cv2.ml.KNearest_create()
model.train(samples, cv2.ml.ROW_SAMPLE, responses)

############################# testing part #########################
im = cv2.imread('rotated12.png')
out = np.zeros(im.shape, np.uint8)
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray, 255, 1, 1, 11, 2)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    [x, y, w, h] = cv2.boundingRect(cnt)
    cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)
    roi = thresh[y:y + h, x:x + w]
    roismall = cv2.resize(roi, (10, 10))
    roismall = roismall.reshape((1, 100))
    roismall = np.float32(roismall)
    retval, results, neigh_resp, dists = model.findNearest(roismall, k=5)
    string = int((results[0][0]))
    string2 = number.get(string)
    print(string2)
    cv2.putText(out, str(string2), (x, y + h), 0, 1, (0, 255, 0))

cv2.imshow('im', im)
cv2.imshow('out', out)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
Sorry for being a complete moron at this; I'm really trying to learn as much as I can about coding, everything that goes on around the computer, and OpenCV, with the very little time I have. Here's the edited code I've managed to get partly working:
from PIL import Image
import pytesseract
import os
import picamera
import time
import cv2
import numpy as np

# Read image and search for contours.
img = cv2.imread('Example1.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, threshold = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)  # EDITED

# Create first mask used for rotation.
mask = np.ones(img.shape, np.uint8) * 255

# Draw contours on the mask with size and ratio of borders for threshold.
for cnt in contours:
    size = cv2.contourArea(cnt)
    x, y, w, h = cv2.boundingRect(cnt)
    if 10000 > size > 500 and w * 2.5 > h:
        cv2.drawContours(mask, [cnt], -1, (0, 0, 0), -1)

# Connect neighbouring contours and select the biggest one (text).
kernel = np.ones((50, 50), np.uint8)
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
gray_op = cv2.cvtColor(opening, cv2.COLOR_BGR2GRAY)
_, threshold_op = cv2.threshold(gray_op, 150, 255, cv2.THRESH_BINARY_INV)
contours_op, hierarchy_op = cv2.findContours(threshold_op, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnt = max(contours_op, key=cv2.contourArea)

# Create rotated rectangle to get the angle of rotation and the 4 points of the rectangle.
_, _, angle = rect = cv2.minAreaRect(cnt)
(h, w) = img.shape[:2]
(center) = (w // 2, h // 2)

# Rotate the image.
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(img, M, (int(w), int(h)), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT)

# Create bounding box for rotated text (use old points of rotated rectangle).
box = cv2.cv.BoxPoints(rect)  # EDITED
a, b, c, d = box = np.int0(box)
bound = []
bound.append(a)
bound.append(b)
bound.append(c)
bound.append(d)
bound = np.array(bound)
(x1, y1) = (bound[:, 0].min(), bound[:, 1].min())
(x2, y2) = (bound[:, 0].max(), bound[:, 1].max())
cv2.drawContours(img, [box], 0, (0, 0, 255), 2)

# Crop the image and create new mask for the final image.
rotated = rotated[y1:y2, x1:x2]
mask_final = np.ones(rotated.shape, np.uint8) * 255

# Remove noise from the final image.
gray_r = cv2.cvtColor(rotated, cv2.COLOR_BGR2GRAY)
_, threshold_r = cv2.threshold(gray_r, 150, 255, cv2.THRESH_BINARY_INV)
contours, hierarchy = cv2.findContours(threshold_r, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
for cnt in contours:
    size = cv2.contourArea(cnt)
    if size < 500:
        cv2.drawContours(threshold_r, [cnt], -1, (0, 0, 0), -1)

# Invert black and white.
final_image = cv2.bitwise_not(threshold_r)

# Display results.
cv2.imshow('final', final_image)
cv2.imshow('rotated', rotated)

# OCR Stage:
# write the image to disk as a temporary file so we can
# apply OCR to it
filename = "{}.png".format(os.getpid())
cv2.imwrite('Final_proc.jpg', final_image)

# load the image as a PIL/Pillow image, apply OCR, and then delete
# the temporary file
text = pytesseract.image_to_string(Image.open('Final_proc.jpg'))
os.remove('Final_proc.jpg')
print("\n" + text)

cv2.waitKey(0)
cv2.destroyAllWindows()
When compiling it now, it gives me this output:
https://i.imgur.com/ImdKSCv.jpg
which is a little different from what you showed and compiled on the Windows machine, but still super close.
Any idea what happened? After that it should be really easy to dissect the code and learn from it.
Again, thank you very much for your time! :D
So, for the Python 3 and OpenCV 3 version of the code, in order to make the image work with Tesseract you need to add a white border of around 20 px to extend the image for some reason (I assume it's because of the convolutional matrix scanning effort), as described in my other post:
pytesseract struggling to recognize clean black and white pictures with font numbers and 7 seg digits(python)
and here's how you'd add the border:
how to add border around an image in opencv python
In one line of code:
outputImage = cv2.copyMakeBorder(
    inputImage,
    topBorderWidth,
    bottomBorderWidth,
    leftBorderWidth,
    rightBorderWidth,
    cv2.BORDER_CONSTANT,
    value=borderColor  # e.g. 255 or (255, 255, 255) for white
)
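For example, a hedged sketch of the 20 px white border mentioned above, applied to the final_image from the earlier script (the exact width is just what worked for me, not a fixed requirement):
import cv2

# pad the OCR input with a 20 px white border on all sides
# (255 is white for a single-channel image)
final_image = cv2.copyMakeBorder(final_image, 20, 20, 20, 20,
                                 cv2.BORDER_CONSTANT, value=255)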

Detect rectangles in a low contrast image using OpenCV in Python for reading by Tesseract

I would like to detect the labels in images like this one for the purpose of extracting the text using Tesseract. I have tried various combinations of thresholding and edge detection, but I can only detect about half of the labels at a time at most. These are a few of the images I've been trying to read the labels from:
All of the labels have the same aspect ratio (the width is 3.5 times the height), so I'm trying to find contours that have a minAreaRect with that same aspect ratio. The hard part is handling the labels on the lighter background. This is the code I have so far:
from PIL import Image
import pytesseract
import numpy as np
import argparse
import cv2
import os
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
    help="path to input image to be OCR'd")
args = vars(ap.parse_args())
# function to crop an image to a minAreaRect
def crop_minAreaRect(img, rect):
    # rotate img
    angle = rect[2]
    rows, cols = img.shape[0], img.shape[1]
    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
    img_rot = cv2.warpAffine(img, M, (cols, rows))
    # rotate bounding box
    rect0 = (rect[0], rect[1], 0.0)
    box = cv2.boxPoints(rect)
    pts = np.int0(cv2.transform(np.array([box]), M))[0]
    pts[pts < 0] = 0
    # crop
    img_crop = img_rot[pts[1][1]:pts[0][1],
                       pts[1][0]:pts[2][0]]
    return img_crop

# load image and apply threshold
image = cv2.imread(args["image"])
bw = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#bw = cv2.threshold(bw, 210, 255, cv2.THRESH_BINARY)[1]
bw = cv2.adaptiveThreshold(bw, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 27, 20)

# do edge detection
v = np.median(bw)
sigma = 0.5
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
bw = cv2.Canny(bw, lower, upper)
kernel = np.ones((5, 5), np.uint8)
bw = cv2.dilate(bw, kernel, iterations=1)

# find contours
image2, contours, hierarchy = cv2.findContours(bw, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
bw = cv2.drawContours(bw, contours, 0, (0, 0, 255), 2)
cv2.imwrite("edge.png", bw)

# test which contours have the correct aspect ratio
largestarea = 0.0
passes = []
for contour in contours:
    (x, y), (w, h), a = cv2.minAreaRect(contour)
    if h > 20 and w > 20:
        if h > w:
            maxdim = h
            mindim = w
        else:
            maxdim = w
            mindim = h
        ratio = maxdim / mindim
        print("ratio: {}".format(ratio))
        if (ratio > 3.4 and ratio < 3.6):
            passes.append(contour)

if not passes:
    print "no passes"
    exit()

passboxes = []
i = 1

# crop out each label and attempt to extract text
for ps in passes:
    rect = cv2.minAreaRect(ps)
    bw = crop_minAreaRect(image, rect)
    cv2.imwrite("{}.png".format(i), bw)
    i += 1
    h, w = bw.shape[:2]
    print str(h) + "x" + str(w)
    if w and h:
        bw = cv2.cvtColor(bw, cv2.COLOR_BGR2GRAY)
        bw = cv2.threshold(bw, 50, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
        cv2.imwrite("output.png", bw)
        im = Image.open("output.png")
        w, h = im.size
        print "W:{} H:{}".format(w, h)
        if h > w:
            print ("rotating")
            im.rotate(90)
            im.save("output.png")
        print pytesseract.image_to_string(Image.open("output.png"))
        im.rotate(180)
        im.save("output.png")
        print pytesseract.image_to_string(Image.open("output.png"))
        box = cv2.boxPoints(cv2.minAreaRect(ps))
        passboxes.append(np.int0(box))
        im.close()

cnts = cv2.drawContours(image, passboxes, 0, (0, 0, 255), 2)
cnts = cv2.drawContours(cnts, contours, -1, (255, 255, 0), 2)
cnts = cv2.drawContours(cnts, passes, -1, (0, 255, 0), 3)
cv2.imwrite("output2.png", image)
I believe the problem could be the parameters for the thresholding, or I could be over-complicating this.
Only the white labels with "A-08337" and such? The following detects all of them on both images:
import numpy as np
import cv2
img = cv2.imread('labels.jpg')
#downscale the image because Canny tends to work better on smaller images
w, h, c = img.shape
resize_coeff = 0.25
img = cv2.resize(img, (int(resize_coeff*h), int(resize_coeff*w)))
#find edges, then contours
canny = cv2.Canny(img, 100, 200)
_, contours, _ = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#draw the contours, do morphological close operation
#to close possible small gaps, then find contours again on the result
w, h, c = img.shape
blank = np.zeros((w, h)).astype(np.uint8)
cv2.drawContours(blank, contours, -1, 1, 1)
blank = cv2.morphologyEx(blank, cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8))
_, contours, _ = cv2.findContours(blank, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#keep only contours of more or less correct area and perimeter
contours = [c for c in contours if 800 < cv2.contourArea(c) < 1600]
contours = [c for c in contours if cv2.arcLength(c, True) < 200]
cv2.drawContours(img, contours, -1, (0, 0, 255), 1)
cv2.imwrite("contours.png", img)
Probably with some additional convexity check you can get rid of the "Verbatim" contours and such (for example, only keep contours with near zero difference between their area and their convex hull's area).
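For instance, a hedged sketch of that convexity check (the 5% tolerance is an assumption to tune):
import cv2

def near_convex(cnt, tol=0.05):
    # keep contours whose area is close to their convex hull's area
    hull_area = cv2.contourArea(cv2.convexHull(cnt))
    return hull_area > 0 and (hull_area - cv2.contourArea(cnt)) / hull_area < tol

contours = [c for c in contours if near_convex(c)]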
