Trying to segment characters using OpenCV - Illumination problem - Python

My code is not producing a good binary image!
import cv2
import matplotlib.pyplot as plt
from matplotlib import gridspec

LpImg = cv2.imread('/content/drive/My Drive/TESTING/Placas_detectadas/CPVL92.png')
if len(LpImg):  # check if there is at least one license image
    # Scales, calculates absolute values, and converts the result to 8-bit.
    plate_image = cv2.convertScaleAbs(LpImg[0], alpha=(255.0))
    plate_image = LpImg  # image_cropped
    # convert to grayscale and blur the image
    gray = cv2.cvtColor(plate_image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (7, 7), 0)
    # apply inverted adaptive threshold (THRESH_BINARY_INV)
    thresh_inv = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 39, 1)
    # binary = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    kernel3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    thre_mor = cv2.morphologyEx(thresh_inv, cv2.MORPH_DILATE, kernel3)
# visualize results
fig = plt.figure(figsize=(12, 7))
plt.rcParams.update({"font.size": 18})
grid = gridspec.GridSpec(ncols=2, nrows=3, figure=fig)
plot_image = [plate_image, gray, blur, thresh_inv, thre_mor]
plot_name = ["plate_image", "gray", "blur", "binary", "dilation"]
for i in range(len(plot_image)):
    fig.add_subplot(grid[i])
    plt.axis(False)
    plt.title(plot_name[i])
    if i == 0:
        plt.imshow(plot_image[i])
    else:
        plt.imshow(plot_image[i], cmap="gray")
This is the image:
With these results:
If I use adaptive thresholding, changing this Otsu threshold line:
binary = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
to this line:
thresh_inv = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 39, 1)
I get this result:
Why is this happening? How can I solve it?
I was thinking of using this:
import cv2
import numpy as np

LpImg = cv2.imread('/content/image.png')
# Set scaling factors and add
gamma1 = 0.3
gamma2 = 1.5
Iout = gamma1*Ioutlow[0:rows,0:cols] + gamma2*Iouthigh[0:rows,0:cols]
# Anti-log then rescale to [0,1]
Ihmf = np.expm1(Iout)
Ihmf = (Ihmf - np.min(Ihmf)) / (np.max(Ihmf) - np.min(Ihmf))
Ihmf2 = np.array(255*Ihmf, dtype="uint8")
# Threshold the image - anything below intensity 65 gets set to white
Ithresh = Ihmf2 < 65
Ithresh = 255*Ithresh.astype("uint8")
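For reference, Ioutlow and Iouthigh in the snippet above are the low-pass and high-pass filtered versions of the log image from homomorphic filtering, which the snippet leaves out. A minimal sketch of those missing steps, assuming a grayscale input and a Gaussian frequency-domain filter (the sigma value is a placeholder to tune):
import cv2
import numpy as np

# read as grayscale and normalize to [0, 1]
img = cv2.imread('/content/image.png', cv2.IMREAD_GRAYSCALE)
img = img.astype(np.float64) / 255.0
rows, cols = img.shape

# log transform (log1p pairs with the np.expm1 anti-log used above)
imgLog = np.log1p(img)

# Gaussian low-pass / high-pass filters in the frequency domain
M, N = 2 * rows + 1, 2 * cols + 1
sigma = 10  # assumption: controls the illumination/reflectance split
X, Y = np.meshgrid(np.arange(N), np.arange(M))
Hlow = np.exp(-((X - N // 2) ** 2 + (Y - M // 2) ** 2) / (2.0 * sigma ** 2))
Hhigh = 1 - Hlow

# filter the log image; ifftshift moves the filter's center to the corner
If = np.fft.fft2(imgLog, (M, N))
Ioutlow = np.real(np.fft.ifft2(If * np.fft.ifftshift(Hlow), (M, N)))
Iouthigh = np.real(np.fft.ifft2(If * np.fft.ifftshift(Hhigh), (M, N)))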
Which gives this result:
But I still want to use these filters:
Grayscale
Blur
Binarization
Segmentation
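For the segmentation step, here is a minimal sketch (not a final solution) that extracts character candidates from the dilated binary image thre_mor produced above, filtering contours by size; the size ratios are assumptions to tune for your plates:
import cv2

# find external contours on the binary image
cnts, _ = cv2.findContours(thre_mor, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
h_img, w_img = thre_mor.shape
chars = []
# sort contours left to right so characters come out in reading order
for c in sorted(cnts, key=lambda c: cv2.boundingRect(c)[0]):
    x, y, w, h = cv2.boundingRect(c)
    # keep only contours whose height looks like a character
    if 0.3 * h_img <= h <= 0.9 * h_img and w >= 5:
        chars.append(thre_mor[y:y + h, x:x + w])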

Another approach is to use division normalization in Python/OpenCV.
Read the input
Convert to gray
Apply morphology dilation
Divide the input by the dilated image
Threshold
Save the results
Input:
import cv2
import numpy as np
# read the image
img = cv2.imread('license_chile.png')
# convert to gray
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# apply morphology
kernel = cv2.getStructuringElement(cv2.MORPH_RECT , (75,75))
smooth = cv2.morphologyEx(gray, cv2.MORPH_DILATE, kernel)
# divide gray by morphology image
division = cv2.divide(gray, smooth, scale=255)
# threshold
result = cv2.threshold(division, 0, 255, cv2.THRESH_OTSU )[1]
# save results
cv2.imwrite('license_chile_thresh.jpg',result)
# show results
cv2.imshow('smooth', smooth)
cv2.imshow('division', division)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:

Related

How to increase the quality of this image for Tesseract?

I have this image:
And, I process it in this way in order to pass it to Tesseract in the best way possible:
os.makedirs(TEMP_FOLDER, exist_ok=True)  # create the folder before writing into it
sharpened = unsharp_mask(img, amount=1.5)
cv2.imwrite(TEMP_FOLDER + 'sharpened.png', sharpened)
thr = cv2.threshold(sharpened, 220, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# im = cv2.resize(thr, None, fx=2, fy=2, interpolation=cv2.INTER_AREA)
cv2.imwrite(TEMP_FOLDER + 'inverted.png', thr)
inverted = cv2.imread(TEMP_FOLDER + 'inverted.png')
filtered_inverted = remove_black_boundaries(inverted)
filtered_inverted = cv2.resize(filtered_inverted, None, fx=2, fy=2, interpolation=cv2.INTER_LINEAR)
# kernel = np.ones((2, 2), np.uint8)
# filtered_inverted = cv2.dilate(filtered_inverted, kernel)
cv2.imwrite(TEMP_FOLDER + 'filtered.png', filtered_inverted)
median = cv2.medianBlur(filtered_inverted, 5)
# median = cv2.cvtColor(median, cv2.COLOR_RGB2GRAY)
# median = cv2.threshold(median, 127, 255, cv2.THRESH_BINARY)[1]
cv2.imwrite(TEMP_FOLDER + 'median.png', median)
The function unsharp_mask is defined as:
from typing import Tuple

def unsharp_mask(image: np.ndarray, kernel_size: Tuple[int, int] = (5, 5),
                 sigma: float = 1.0, amount: float = 1.0, threshold: float = 0) -> np.ndarray:
    """Return a sharpened version of the image, using an unsharp mask."""
    blurred = cv2.GaussianBlur(image, kernel_size, sigma)
    sharpened = float(amount + 1) * image - float(amount) * blurred
    sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
    sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
    sharpened = sharpened.round().astype(np.uint8)
    if threshold > 0:
        # cast to a signed type so the difference does not wrap around on uint8
        low_contrast_mask = np.absolute(image.astype(np.int16) - blurred.astype(np.int16)) < threshold
        np.copyto(sharpened, image, where=low_contrast_mask)
    return sharpened
And, the function remove_black_boundaries (useless in this case, since there are no black boundaries in the image) is defined as:
def remove_black_boundaries(img: np.ndarray) -> np.ndarray:
    hh, ww = img.shape[:2]
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
    thresh = cv2.erode(thresh, np.ones((3, 3), np.uint8))
    contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = contours[0] if len(contours) == 2 else contours[1]
    cnt = max(contours, key=cv2.contourArea)
    # draw white contour on black background as mask
    mask = np.zeros((hh, ww), dtype=np.uint8)
    cv2.drawContours(mask, [cnt], 0, (255, 255, 255), cv2.FILLED)
    # invert mask so shapes are white on black background
    mask_inv = 255 - mask
    # create new (white) background
    bckgnd = np.full_like(img, (255, 255, 255))
    # apply mask to image
    image_masked = cv2.bitwise_and(img, img, mask=mask)
    # apply inverse mask to background
    bckgnd_masked = cv2.bitwise_and(bckgnd, bckgnd, mask=mask_inv)
    # add together
    result = cv2.add(image_masked, bckgnd_masked)
    return result
So I get the sharpened as:
And, I get the inverted (and the filtered) as:
So, the image passed to Tesseract is:
But, what I get from Tesseract is Conteggio: 2900 without the first line. I also tried to resize the image, but I get the same output. Any idea on how I can improve the image sent to Tesseract?
You haven't shown your actual pytesseract code, but without any (pre)processing, I get the correct result solely by switching to page segmentation method 6, which is:
Assume a single uniform block of text.
import cv2
import pytesseract
img = cv2.imread('KQ49Y.png', cv2.IMREAD_GRAYSCALE)
text = pytesseract.image_to_string(img, config='--psm 6')
print(text.replace('\f', ''))
# Facebook
# Conteggio: 2900
----------------------------------------
System information
----------------------------------------
Platform: Windows-10-10.0.19042-SP0
Python: 3.9.6
PyCharm: 2021.2
OpenCV: 4.5.3
pytesseract: 5.0.0-alpha.20201127
----------------------------------------

Flood fill function not producing good results

I applied the floodFill function in OpenCV to extract the foreground from the background, but some of the objects in the image were not recognized by the algorithm, so I would like to know how I can improve my detections and what modifications are necessary.
import cv2
import numpy as np
from skimage.filters import threshold_local

image = cv2.imread(args["image"])
image = cv2.resize(image, (800, 800))
h,w,chn = image.shape
ratio = image.shape[0] / 800.0
orig = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(gray, 75, 200)
# show the original image and the edge detected image
print("STEP 1: Edge Detection")
cv2.imshow("Image", image)
cv2.imshow("Edged", edged)
warped1 = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
T = threshold_local(warped1, 11, offset = 10, method = "gaussian")
warped1 = (warped1 > T).astype("uint8") * 255
print("STEP 3: Apply perspective transform")
seed = (10, 10)
foreground, birdEye = floodFillCustom(image, seed)
cv2.circle(birdEye, seed, 50, (0, 255, 0), -1)
cv2.imshow("originalImg", birdEye)
cv2.circle(birdEye, seed, 100, (0, 255, 0), -1)
cv2.imshow("foreground", foreground)
cv2.imshow("birdEye", birdEye)
gray = cv2.cvtColor(foreground, cv2.COLOR_BGR2GRAY)
cv2.imshow("gray", gray)
cv2.imwrite("gray.jpg", gray)
threshImg = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)[1]
h_threshold,w_threshold = threshImg.shape
area = h_threshold*w_threshold
cv2.imshow("threshImg", threshImg)[![enter image description here][1]][1]
The floodFillCustom function is as follows -
def floodFillCustom(originalImage, seed):
    originalImage = np.maximum(originalImage, 10)
    foreground = originalImage.copy()
    cv2.floodFill(foreground, None, seed, (0, 0, 0),
                  loDiff=(10, 10, 10), upDiff=(10, 10, 10))
    return [foreground, originalImage]
A little bit late, but here's an alternative solution for segmenting the tools. It involves converting the image to the CMYK color space and extracting the K (Key) component. This component can be thresholded to get a nice binary mask of the tools; the procedure is very straightforward:
Convert the image to the CMYK color space
Extract the K (Key) component
Threshold the image via Otsu's thresholding
Apply some morphology (a closing) to clean up the mask
(Optional) Get bounding rectangles of all the tools
Let's see the code:
# Imports
import cv2
import numpy as np
# Read image
imagePath = "C://opencvImages//"
inputImage = cv2.imread(imagePath+"DAxhk.jpg")
# Create deep copy for results:
inputImageCopy = inputImage.copy()
# Convert to float and divide by 255:
imgFloat = inputImage.astype(np.float64) / 255.  # np.float is removed in recent NumPy
# Calculate channel K:
kChannel = 1 - np.max(imgFloat, axis=2)
# Convert back to uint 8:
kChannel = (255*kChannel).astype(np.uint8)
The first step is to convert the BGR image to CMYK. There's no direct conversion in OpenCV for this, so I applied the conversion formula directly. We can get every color space component from that formula, but we are only interested in the K channel. The conversion is easy, but we need to be careful with the data types: we need to operate on float arrays. After getting the K channel, we convert the image back to an unsigned 8-bit array. This is the resulting image:
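For reference, the same formula also yields the C, M and Y channels. A sketch of the full conversion (only K is actually used in this answer; the epsilon guard is an assumption to avoid dividing by zero):
import cv2
import numpy as np

inputImage = cv2.imread("DAxhk.jpg")
imgFloat = inputImage.astype(np.float64) / 255.
# K = 1 - max(R, G, B)
kChannel = 1 - np.max(imgFloat, axis=2)
# guard against division by zero on pure black pixels (K == 1)
denom = np.clip(1 - kChannel, 1e-6, None)
# OpenCV stores channels in B, G, R order
cChannel = (1 - imgFloat[..., 2] - kChannel) / denom
mChannel = (1 - imgFloat[..., 1] - kChannel) / denom
yChannel = (1 - imgFloat[..., 0] - kChannel) / denom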
Let's threshold this image using Otsu's thresholding method:
# Threshold via Otsu:
_, binaryImage = cv2.threshold(kChannel, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
This yields the following binary image:
Looks very nice! Additionally, we can clean it up a little bit (joining the little gaps) using a morphological closing. Let's apply a rectangular structuring element of size 5 x 5 and use 2 iterations:
# Use a little bit of morphology to clean the mask:
# Set kernel (structuring element) size:
kernelSize = 5
# Set morph operation iterations:
opIterations = 2
# Get the structuring element:
morphKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernelSize, kernelSize))
# Perform closing:
binaryImage = cv2.morphologyEx(binaryImage, cv2.MORPH_CLOSE, morphKernel, None, None, opIterations, cv2.BORDER_REFLECT101)
Which results in this:
Very cool. What follows is optional. We can get the bounding rectangles for every tool by looking for the outer (external) contours:
# Find the contours on the binary image:
contours, hierarchy = cv2.findContours(binaryImage, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Look for the outer bounding boxes (no children):
for _, c in enumerate(contours):
    # Get the contour's bounding rectangle:
    boundRect = cv2.boundingRect(c)
    # Get the dimensions of the bounding rectangle:
    rectX = boundRect[0]
    rectY = boundRect[1]
    rectWidth = boundRect[2]
    rectHeight = boundRect[3]
    # Set bounding rectangle:
    color = (0, 0, 255)
    cv2.rectangle(inputImageCopy, (int(rectX), int(rectY)),
                  (int(rectX + rectWidth), int(rectY + rectHeight)), color, 5)
cv2.imshow("Bounding Rectangles", inputImageCopy)
cv2.waitKey(0)
Which produces the final image:

Extract data from the overlapping letters in an image

Input image:
I want to extract the data from the image (OCR).
Code which I tried:
import cv2
import textract
import numpy as np
img = cv2.imread('/home/ajay/Desktop/name.jpg',0)
# img = cv2.imread('path_to_your_image', 0)
_, blackAndWhite = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(blackAndWhite, None, None, None, 8, cv2.CV_32S)
sizes = stats[1:, -1] #get CC_STAT_AREA component
img2 = np.zeros((labels.shape), np.uint8)
for i in range(0, nlabels - 1):
    if sizes[i] >= 50:  # filter small dotted regions
        img2[labels == i + 1] = 255
res = cv2.bitwise_not(img2)
cv2.imwrite('ress.png', res)
a = textract.process('ress.png',method = 'tesseract')
a = a.decode()
print(a)
A simple method is:
Apply a sharpening kernel
Otsu's threshold
Apply slight Gaussian blur
Invert image
OCR
Here's a visualization of the steps:
Input image
Sharpen
Otsu's threshold
Slight Gaussian blur
Invert image
Here are the OCR results using Pytesseract:
DST INTERNATIONAL D-307# 3266 01 Dec 2007. HowellJerde Jan!
2007" 125802AM RafaelaBoyer Keon3#gmnil.com Fhvio Abernathy Sr.
Code
import cv2
import numpy as np
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
image = cv2.imread('1.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
sharpen = cv2.filter2D(gray, -1, kernel)
thresh = cv2.threshold(sharpen, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
blur = cv2.GaussianBlur(thresh, (3,3), 0)
invert = 255 - blur
data = pytesseract.image_to_string(invert, lang='eng',config='--psm 6')
print(data)
cv2.imshow('sharpen', sharpen)
cv2.imshow('thresh', thresh)
cv2.imshow('blur', blur)
cv2.imshow('invert', invert)
cv2.waitKey()

Watershed Segmentation excluding alone object?

Problem
Using this answer to create a segmentation program, I found that it counts the objects incorrectly. I noticed that isolated objects are being ignored, possibly due to poor image acquisition.
I counted 123 objects and the program returns 117, as can be seen below. The objects circled in red seem to be missing:
Using the following image from a 720p webcam:
Code
import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import label
import urllib.request
# https://stackoverflow.com/a/14617359/7690982
def segment_on_dt(a, img):
    border = cv2.dilate(img, None, iterations=5)
    border = border - cv2.erode(border, None)
    dt = cv2.distanceTransform(img, cv2.DIST_L2, 3)
    plt.imshow(dt)
    plt.show()
    dt = ((dt - dt.min()) / (dt.max() - dt.min()) * 255).astype(np.uint8)
    _, dt = cv2.threshold(dt, 140, 255, cv2.THRESH_BINARY)
    lbl, ncc = label(dt)
    lbl = lbl * (255 / (ncc + 1))
    # Completing the markers now.
    lbl[border == 255] = 255
    lbl = lbl.astype(np.int32)
    cv2.watershed(a, lbl)
    print("[INFO] {} unique segments found".format(len(np.unique(lbl)) - 1))
    lbl[lbl == -1] = 0
    lbl = lbl.astype(np.uint8)
    return 255 - lbl
# Open Image
resp = urllib.request.urlopen("https://i.stack.imgur.com/YUgob.jpg")
img = np.asarray(bytearray(resp.read()), dtype="uint8")
img = cv2.imdecode(img, cv2.IMREAD_COLOR)
## Yellow slicer
mask = cv2.inRange(img, (0, 0, 0), (55, 255, 255))
imask = mask > 0
slicer = np.zeros_like(img, np.uint8)
slicer[imask] = img[imask]
# Image Binarization
img_gray = cv2.cvtColor(slicer, cv2.COLOR_BGR2GRAY)
_, img_bin = cv2.threshold(img_gray, 140, 255,
cv2.THRESH_BINARY)
# Morphological Gradient
img_bin = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN,
np.ones((3, 3), dtype=int))
# Segmentation
result = segment_on_dt(img, img_bin)
plt.imshow(np.hstack([result, img_gray]), cmap='Set3')
plt.show()
# Final Picture
result[result != 255] = 0
result = cv2.dilate(result, None)
img[result == 255] = (0, 0, 255)
plt.imshow(result)
plt.show()
Question
How to count the missing objects?
Answering your main question: watershed does not remove single objects. Watershed was functioning fine in your algorithm; it receives the predefined labels and performs segmentation accordingly.
The problem was that the threshold you set for the distance transform was too high, and it removed the weak signal from the single objects, thus preventing those objects from being labeled and sent to the watershed algorithm.
The reason for the weak distance transform signal was the improper segmentation during the color segmentation stage and the difficulty of setting a single threshold to remove noise and extract the signal.
To remedy this, we need to perform proper color segmentation and use an adaptive threshold instead of a single threshold when segmenting the distance transform signal.
Here is the code I modified. I have incorporated the color segmentation method by @user1269942. Extra explanation is in the code.
import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import label
import urllib.request
# https://stackoverflow.com/a/14617359/7690982
def segment_on_dt(a, img, img_gray):
    # Added several elliptical structuring elements for better morphology processing
    struct_big = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    struct_small = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    # increase border size
    border = cv2.dilate(img, struct_big, iterations=5)
    border = border - cv2.erode(img, struct_small)
    dt = cv2.distanceTransform(img, cv2.DIST_L2, 3)
    dt = ((dt - dt.min()) / (dt.max() - dt.min()) * 255).astype(np.uint8)
    # blur the signal lightly to remove noise
    dt = cv2.GaussianBlur(dt, (7, 7), -1)
    # Adaptive threshold to extract local maxima of the distance transform signal
    dt = cv2.adaptiveThreshold(dt, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 21, -9)
    #_, dt = cv2.threshold(dt, 2, 255, cv2.THRESH_BINARY)
    # Morphology operations to clean the thresholded signal
    dt = cv2.erode(dt, struct_small, iterations=1)
    dt = cv2.dilate(dt, struct_big, iterations=10)
    plt.imshow(dt)
    plt.show()
    # Labeling
    lbl, ncc = label(dt)
    lbl = lbl * (255 / (ncc + 1))
    # Completing the markers now.
    lbl[border == 255] = 255
    plt.imshow(lbl)
    plt.show()
    lbl = lbl.astype(np.int32)
    cv2.watershed(a, lbl)
    print("[INFO] {} unique segments found".format(len(np.unique(lbl)) - 1))
    lbl[lbl == -1] = 0
    lbl = lbl.astype(np.uint8)
    return 255 - lbl
# Open Image
resp = urllib.request.urlopen("https://i.stack.imgur.com/YUgob.jpg")
img = np.asarray(bytearray(resp.read()), dtype="uint8")
img = cv2.imdecode(img, cv2.IMREAD_COLOR)
## Yellow slicer
# blur to remove noise
img = cv2.blur(img, (9,9))
# proper color segmentation
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (0, 140, 160), (35, 255, 255))
#mask = cv2.inRange(img, (0, 0, 0), (55, 255, 255))
imask = mask > 0
slicer = np.zeros_like(img, np.uint8)
slicer[imask] = img[imask]
# Image Binarization
img_gray = cv2.cvtColor(slicer, cv2.COLOR_BGR2GRAY)
_, img_bin = cv2.threshold(img_gray, 140, 255,
cv2.THRESH_BINARY)
plt.imshow(img_bin)
plt.show()
# Morphological Gradient
# added
cv2.morphologyEx(img_bin, cv2.MORPH_OPEN,cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)),img_bin,(-1,-1),10)
cv2.morphologyEx(img_bin, cv2.MORPH_ERODE,cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)),img_bin,(-1,-1),3)
plt.imshow(img_bin)
plt.show()
# Segmentation
result = segment_on_dt(img, img_bin, img_gray)
plt.imshow(np.hstack([result, img_gray]), cmap='Set3')
plt.show()
# Final Picture
result[result != 255] = 0
result = cv2.dilate(result, None)
img[result == 255] = (0, 0, 255)
plt.imshow(result)
plt.show()
Final results :
124 unique items found.
An extra item was found because one of the objects was divided in two.
With proper parameter tuning, you might get the exact number you are looking for. But I would suggest getting a better camera.
Looking at your code, it is completely reasonable, so I'm just going to make one small suggestion, and that is to do your inRange using the HSV color space.
opencv docs on color spaces:
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.html
another SO example using inRange with HSV:
How to detect two different colors using `cv2.inRange` in Python-OpenCV?
and a small code edit for you:
img = cv2.blur(img, (5,5)) #new addition just before "##yellow slicer"
## Yellow slicer
#mask = cv2.inRange(img, (0, 0, 0), (55, 255, 255)) #your line: comment out.
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) #new addition...convert to hsv
mask = cv2.inRange(hsv, (0, 120, 120), (35, 255, 255)) #new addition use hsv for inRange and an adjustment to the values.
Improving Accuracy
Detecting missing objects
I've counted 12 missing objects: 2, 7, 8, 11, 65, 77, 78, 84, 92, 95, 96. Edit: 85 too.
117 found, 12 missing, 6 wrong
1st Attempt: Decrease Mask Sensitivity
#mask = cv2.inRange(img, (0, 0, 0), (55, 255, 255))  # current
mask = cv2.inRange(img, (0, 0, 0), (80, 255, 255))   # 1st attempt
inRange documentation
[INFO] 120 unique segments found
120 found, 9 missing, 6 wrong

Remove noise from threshold image opencv python

I am trying to get the corners of the box in the image. Following are example images, their threshold results, and on the right, after the arrow, the results that I need. You might have seen these images before on Slack, because I am using them for my example questions there.
Following is the code that allows me to reach the middle image.
import cv2
import numpy as np
img_file = 'C:/Users/box.jpg'
img = cv2.imread(img_file, cv2.IMREAD_COLOR)
img = cv2.blur(img, (5, 5))
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
thresh0 = cv2.adaptiveThreshold(s, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
thresh1 = cv2.adaptiveThreshold(v, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
thresh2 = cv2.adaptiveThreshold(v, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
thresh = cv2.bitwise_or(thresh0, thresh1)
cv2.imshow('Image-thresh0', thresh0)
cv2.waitKey(0)
cv2.imshow('Image-thresh1', thresh1)
cv2.waitKey(0)
cv2.imshow('Image-thresh2', thresh2)
cv2.waitKey(0)
Is there any method in OpenCV that can do it for me? I tried dilation cv2.dilate() and erosion cv2.erode(), but it doesn't work in my case. Or if not, what could be alternative ways of doing it?
Thanks
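For reference, a minimal sketch of the dilation/erosion attempt mentioned above, applied to the thresholded image thresh from the code; the kernel size and iteration counts are assumptions:
import cv2
import numpy as np

kernel = np.ones((3, 3), np.uint8)
# opening (erode then dilate) removes small white specks from the threshold result
cleaned = cv2.erode(thresh, kernel, iterations=1)
cleaned = cv2.dilate(cleaned, kernel, iterations=1)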
Canny version of the image ... On the left with low threshold and on the right with high threshold
Below is a Python implementation of @dhanushka's approach.
import cv2
import numpy as np
# load color image
im = cv2.imread('input.jpg')
# smooth the image with alternative closing and opening
# with an enlarging kernel
morph = im.copy()
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
morph = cv2.morphologyEx(morph, cv2.MORPH_CLOSE, kernel)
morph = cv2.morphologyEx(morph, cv2.MORPH_OPEN, kernel)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
# take morphological gradient
gradient_image = cv2.morphologyEx(morph, cv2.MORPH_GRADIENT, kernel)
# split the gradient image into channels
image_channels = np.split(np.asarray(gradient_image), 3, axis=2)
channel_height, channel_width, _ = image_channels[0].shape
# apply Otsu threshold to each channel
for i in range(0, 3):
    _, image_channels[i] = cv2.threshold(~image_channels[i], 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY)
    image_channels[i] = np.reshape(image_channels[i], newshape=(channel_height, channel_width, 1))
# merge the channels
image_channels = np.concatenate((image_channels[0], image_channels[1], image_channels[2]), axis=2)
# save the denoised image
cv2.imwrite('output.jpg', image_channels)
The above code doesn't give good results if the images you are dealing with are invoices (or have a large amount of text on a white background).
In order to get good results on such images, remove
gradient_image = cv2.morphologyEx(morph, cv2.MORPH_GRADIENT, kernel)
and pass the morph object to the split function, and remove the ~ symbol inside the for loop, as sketched below.
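A minimal sketch of that modified version (mirroring the script above; the filenames are placeholders):
import cv2
import numpy as np

im = cv2.imread('input.jpg')
morph = im.copy()
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
morph = cv2.morphologyEx(morph, cv2.MORPH_CLOSE, kernel)
morph = cv2.morphologyEx(morph, cv2.MORPH_OPEN, kernel)
# no morphological gradient here: split `morph` directly
image_channels = np.split(np.asarray(morph), 3, axis=2)
channel_height, channel_width, _ = image_channels[0].shape
# Otsu per channel, without the ~ inversion
for i in range(0, 3):
    _, image_channels[i] = cv2.threshold(image_channels[i], 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY)
    image_channels[i] = np.reshape(image_channels[i], newshape=(channel_height, channel_width, 1))
image_channels = np.concatenate(image_channels, axis=2)
cv2.imwrite('output.jpg', image_channels)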
You can smooth the image to some degree by applying alternating morphological closing and opening operations with an enlarging structuring element. Here are the original and smoothed versions.
Then take the morphological gradient of the image.
Then apply the Otsu threshold to each of the channels, and merge those channels.
If your image sizes are different (larger), you might want to either change some of the parameters of the code or resize the images roughly to the sizes used here. The code is in C++, but it won't be difficult to port it to Python (a Python port appears above).
/* load color image */
Mat im = imread(INPUT_FOLDER_PATH + string("2.jpg"));
/*
smooth the image with alternative closing and opening
with an enlarging kernel
*/
Mat morph = im.clone();
for (int r = 1; r < 4; r++)
{
    Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(2*r+1, 2*r+1));
    morphologyEx(morph, morph, CV_MOP_CLOSE, kernel);
    morphologyEx(morph, morph, CV_MOP_OPEN, kernel);
}
/* take morphological gradient */
Mat mgrad;
Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(3, 3));
morphologyEx(morph, mgrad, CV_MOP_GRADIENT, kernel);
Mat ch[3], merged;
/* split the gradient image into channels */
split(mgrad, ch);
/* apply Otsu threshold to each channel */
threshold(ch[0], ch[0], 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
threshold(ch[1], ch[1], 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
threshold(ch[2], ch[2], 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
/* merge the channels */
merge(ch, 3, merged);
Not sure how robust that solution will be, but the idea is pretty simple: the edges of the box should be more pronounced than all the other high frequencies in those images, so some basic preprocessing should allow emphasizing them.
I used your code to make a prototype, but the contour finding doesn't have to be the right path. Also, sorry for the iterative unsharp masking - I didn't have time to adjust the parameters.
import cv2
import numpy as np
def unsharp_mask(img, blur_size=(9, 9), imgWeight=1.5, gaussianWeight=-0.5):
    # use the blur_size parameter (the original hard-coded (5, 5) here)
    gaussian = cv2.GaussianBlur(img, blur_size, 0)
    return cv2.addWeighted(img, imgWeight, gaussian, gaussianWeight, 0)
img_file = 'box.png'
img = cv2.imread(img_file, cv2.IMREAD_COLOR)
img = cv2.blur(img, (5, 5))
img = unsharp_mask(img)
img = unsharp_mask(img)
img = unsharp_mask(img)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
thresh = cv2.adaptiveThreshold(s, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4 returns two values
cnts = sorted(contours, key = cv2.contourArea, reverse = True)
#for cnt in cnts:
canvas_for_contours = thresh.copy()
cv2.drawContours(thresh, cnts[:-1], 0, (0,255,0), 3)
cv2.drawContours(canvas_for_contours, contours, 0, (0,255,0), 3)
cv2.imshow('Result', canvas_for_contours - thresh)
cv2.imwrite("result.jpg", canvas_for_contours - thresh)
cv2.waitKey(0)
Method 1: using AI models
Always try image segmentation models if feasible for your project; robust models will work better on a wider domain than any thresholding technique.
For example Rembg; you can try it online on a Hugging Face space.
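As a sketch (assuming a recent rembg version, where remove() accepts a numpy array and returns one with an alpha channel; the filename is a placeholder):
import cv2
from rembg import remove

im = cv2.imread('1.png')
out = remove(im)       # BGRA result: background pixels get alpha 0
mask = out[:, :, 3]    # the alpha channel is the foreground mask
cv2.imwrite('rembg_mask.png', mask)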
Here are the results:
Method 2:
Almost similar to the other answers, but with another approach: instead of closing and opening to blur the "noise", we use cv2.bilateralFilter, which is similar to Photoshop's surface blur (see the OpenCV docs for more).
import cv2
import numpy as np

im = cv2.imread('1.png')
blur = cv2.bilateralFilter(im, 21, 75, 75)
Use a Sobel filter to find edges:
from skimage.filters import sobel
gray = cv2.cvtColor(blur,cv2.COLOR_BGR2GRAY)
mm = sobel(gray)
mm = ((mm/mm.max())*255).astype('uint8')
Apply thresholding; I use Sauvola thresholding here:
from skimage.filters import threshold_sauvola
mm2 = np.invert(mm)
thresh_sauvola = threshold_sauvola(mm2, window_size=51)
th = mm2 < thresh_sauvola
Dilate and fill holes:
def fill_hole(input_mask):
    h, w = input_mask.shape
    canvas = np.zeros((h + 2, w + 2), np.uint8)
    canvas[1:h + 1, 1:w + 1] = input_mask.copy()
    mask = np.zeros((h + 4, w + 4), np.uint8)
    cv2.floodFill(canvas, mask, (0, 0), 1)
    canvas = canvas[1:h + 1, 1:w + 1].astype(bool)  # np.bool is removed in recent NumPy
    return ~canvas | input_mask

kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
th2 = cv2.morphologyEx((th * 255).astype('uint8'), cv2.MORPH_DILATE, kernel)
filled = fill_hole(th2 == 255)
