Watershed segmentation excluding lone objects? - Python

Problem
Using this answer to create a segmentation program, I found it counts the objects incorrectly. I noticed that lone objects are being ignored, possibly because of poor image acquisition.
I counted 123 objects but the program returns 117, as can be seen below. The objects circled in red seem to be missing:
Using the following image from a 720p webcam:
Code
import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import label
import urllib.request
# https://stackoverflow.com/a/14617359/7690982
def segment_on_dt(a, img):
    border = cv2.dilate(img, None, iterations=5)
    border = border - cv2.erode(border, None)
    dt = cv2.distanceTransform(img, cv2.DIST_L2, 3)
    plt.imshow(dt)
    plt.show()
    dt = ((dt - dt.min()) / (dt.max() - dt.min()) * 255).astype(np.uint8)
    _, dt = cv2.threshold(dt, 140, 255, cv2.THRESH_BINARY)
    lbl, ncc = label(dt)
    lbl = lbl * (255 / (ncc + 1))
    # Completing the markers now.
    lbl[border == 255] = 255
    lbl = lbl.astype(np.int32)
    cv2.watershed(a, lbl)
    print("[INFO] {} unique segments found".format(len(np.unique(lbl)) - 1))
    lbl[lbl == -1] = 0
    lbl = lbl.astype(np.uint8)
    return 255 - lbl
# Open Image
resp = urllib.request.urlopen("https://i.stack.imgur.com/YUgob.jpg")
img = np.asarray(bytearray(resp.read()), dtype="uint8")
img = cv2.imdecode(img, cv2.IMREAD_COLOR)
## Yellow slicer
mask = cv2.inRange(img, (0, 0, 0), (55, 255, 255))
imask = mask > 0
slicer = np.zeros_like(img, np.uint8)
slicer[imask] = img[imask]
# Image Binarization
img_gray = cv2.cvtColor(slicer, cv2.COLOR_BGR2GRAY)
_, img_bin = cv2.threshold(img_gray, 140, 255, cv2.THRESH_BINARY)
# Morphological opening to remove small noise
img_bin = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, np.ones((3, 3), dtype=int))
# Segmentation
result = segment_on_dt(img, img_bin)
plt.imshow(np.hstack([result, img_gray]), cmap='Set3')
plt.show()
# Final Picture
result[result != 255] = 0
result = cv2.dilate(result, None)
img[result == 255] = (0, 0, 255)
plt.imshow(result)
plt.show()
Question
How to count the missing objects?

Answering your main question: watershed does not remove single objects. Watershed was functioning fine in your algorithm; it receives the predefined labels and performs segmentation accordingly.
The problem was that the threshold you set for the distance transform was too high: it removed the weak signal from the lone objects, preventing them from being labeled and passed to the watershed algorithm.
The distance transform signal was weak because of improper segmentation during the color segmentation stage, and because of the difficulty of choosing a single threshold that both removes noise and extracts the signal.
To remedy this, we need to perform proper color segmentation and use an adaptive threshold instead of a single global threshold when segmenting the distance transform signal.
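As a minimal sketch of the difference (assuming img_bin is the binary mask from your code), a global cutoff erases weak peaks entirely, while an adaptive threshold compares each pixel against its local neighborhood:
dt = cv2.distanceTransform(img_bin, cv2.DIST_L2, 3)
dt = ((dt - dt.min()) / (dt.max() - dt.min()) * 255).astype(np.uint8)
# Global threshold: lone objects whose peaks fall below 140 vanish entirely
_, dt_global = cv2.threshold(dt, 140, 255, cv2.THRESH_BINARY)
# Adaptive threshold: each pixel is compared to its local Gaussian-weighted mean,
# so weak local maxima from lone objects still survive
dt_local = cv2.adaptiveThreshold(dt, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 21, -9)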
Here is the code I modified. I have incorporated the color segmentation method by @user1269942. Extra explanation is in the code.
import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import label
import urllib.request
# https://stackoverflow.com/a/14617359/7690982
def segment_on_dt(a, img, img_gray):
    # Added elliptical structuring elements for better morphology processing
    struct_big = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    struct_small = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    # Increase border size
    border = cv2.dilate(img, struct_big, iterations=5)
    border = border - cv2.erode(img, struct_small)
    dt = cv2.distanceTransform(img, cv2.DIST_L2, 3)
    dt = ((dt - dt.min()) / (dt.max() - dt.min()) * 255).astype(np.uint8)
    # Blur the signal lightly to remove noise
    dt = cv2.GaussianBlur(dt, (7, 7), -1)
    # Adaptive threshold to extract local maxima of the distance transform signal
    dt = cv2.adaptiveThreshold(dt, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 21, -9)
    #_, dt = cv2.threshold(dt, 2, 255, cv2.THRESH_BINARY)
    # Morphology operations to clean the thresholded signal
    dt = cv2.erode(dt, struct_small, iterations=1)
    dt = cv2.dilate(dt, struct_big, iterations=10)
    plt.imshow(dt)
    plt.show()
    # Labeling
    lbl, ncc = label(dt)
    lbl = lbl * (255 / (ncc + 1))
    # Completing the markers now.
    lbl[border == 255] = 255
    plt.imshow(lbl)
    plt.show()
    lbl = lbl.astype(np.int32)
    cv2.watershed(a, lbl)
    print("[INFO] {} unique segments found".format(len(np.unique(lbl)) - 1))
    lbl[lbl == -1] = 0
    lbl = lbl.astype(np.uint8)
    return 255 - lbl
# Open Image
resp = urllib.request.urlopen("https://i.stack.imgur.com/YUgob.jpg")
img = np.asarray(bytearray(resp.read()), dtype="uint8")
img = cv2.imdecode(img, cv2.IMREAD_COLOR)
## Yellow slicer
# blur to remove noise
img = cv2.blur(img, (9,9))
# proper color segmentation
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (0, 140, 160), (35, 255, 255))
#mask = cv2.inRange(img, (0, 0, 0), (55, 255, 255))
imask = mask > 0
slicer = np.zeros_like(img, np.uint8)
slicer[imask] = img[imask]
# Image Binarization
img_gray = cv2.cvtColor(slicer, cv2.COLOR_BGR2GRAY)
_, img_bin = cv2.threshold(img_gray, 140, 255, cv2.THRESH_BINARY)
plt.imshow(img_bin)
plt.show()
# Morphological opening and erosion to clean the binary mask (added)
cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), img_bin, (-1, -1), 10)
cv2.morphologyEx(img_bin, cv2.MORPH_ERODE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), img_bin, (-1, -1), 3)
plt.imshow(img_bin)
plt.show()
# Segmentation
result = segment_on_dt(img, img_bin, img_gray)
plt.imshow(np.hstack([result, img_gray]), cmap='Set3')
plt.show()
# Final Picture
result[result != 255] = 0
result = cv2.dilate(result, None)
img[result == 255] = (0, 0, 255)
plt.imshow(result)
plt.show()
Final results:
124 unique items found.
An extra item was found because one of the objects was divided in two.
With proper parameter tuning, you might get the exact number you are looking for. But I would suggest getting a better camera.

Looking at your code, it is completely reasonable, so I'm just going to make one small suggestion: do your inRange in HSV color space.
opencv docs on color spaces:
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.html
another SO example using inRange with HSV:
How to detect two different colors using `cv2.inRange` in Python-OpenCV?
and a small code edit for you:
img = cv2.blur(img, (5,5)) #new addition just before "##yellow slicer"
## Yellow slicer
#mask = cv2.inRange(img, (0, 0, 0), (55, 255, 255)) #your line: comment out.
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) #new addition...convert to hsv
mask = cv2.inRange(hsv, (0, 120, 120), (35, 255, 255)) #new addition use hsv for inRange and an adjustment to the values.
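If you are unsure which HSV bounds to use, one quick way (a sketch; the sample pixel value here is hypothetical) is to convert a representative object color from BGR to HSV and widen a range around its hue (OpenCV hue runs 0-179):
import cv2
import numpy as np
# Hypothetical yellow sample in BGR; replace with a pixel picked from your image
sample_bgr = np.uint8([[[60, 200, 220]]])
h, s, v = cv2.cvtColor(sample_bgr, cv2.COLOR_BGR2HSV)[0, 0]
lower = (max(int(h) - 10, 0), 120, 120)
upper = (min(int(h) + 10, 179), 255, 255)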

Improving Accuracy
Detecting missing objects
[images: im_1, im_2, im_3]
I've counted 12 missing objects: 2, 7, 8, 11, 65, 77, 78, 84, 92, 95, 96, and (edit) 85 too.
117 found, 12 missing, 6 wrong
1st Attempt: Decrease Mask Sensitivity
#mask = cv2.inRange(img, (0, 0, 0), (55, 255, 255))  # current
mask = cv2.inRange(img, (0, 0, 0), (80, 255, 255))  # 1st attempt
inRange documentation
[images: im_4, im_5, im_6, im_7]
[INFO] 120 unique segments found
120 found, 9 missing, 6 wrong
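To judge each attempt before counting, a small sketch (assuming img, cv2, np, and plt from the question's code) that renders the current and candidate masks side by side:
mask_a = cv2.inRange(img, (0, 0, 0), (55, 255, 255))  # current
mask_b = cv2.inRange(img, (0, 0, 0), (80, 255, 255))  # 1st attempt
plt.imshow(np.hstack([mask_a, mask_b]), cmap='gray')
plt.show()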

Related

CV2 Draw ellipse only on masked area

I'm trying to make a circle under the player on soccer field, similar to this:
If I just do a circle around the player's feet, it looks bad:
I'm trying to draw the circle only on the green part of the field (to make it more 3D).
First I masked only the green part on the field, using the following code:
def mask(img):
    ## convert to hsv
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    ## mask of green (36,25,25) ~ (86, 255,255)
    lower = (36, 25, 25)
    upper = (86, 255, 255)
    mask = cv2.inRange(hsv, lower, upper)
    ## slice the green
    imask = mask == 0
    green = np.zeros_like(img, np.uint8)
    green[imask] = img[imask]
    img = green
    return img
This gives the following result:
How can I draw the ellipse only on the green (after the masking - black) part?
P.S. This is the full code:
import cv2
import numpy as np
def mask(img):
    ## convert to hsv
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    ## mask of green (36,25,25) ~ (86, 255,255)
    lower = (36, 25, 25)
    upper = (86, 255, 255)
    mask = cv2.inRange(hsv, lower, upper)
    ## slice the green
    imask = mask == 0
    green = np.zeros_like(img, np.uint8)
    # green.fill(255)
    green[imask] = img[imask]
    img = green
    return img

def player_ellipse(img, player_point):
    axesLength = (45, 20)
    angle = 0
    startAngle = 0
    endAngle = 360
    # Red color in BGR
    color = (0, 0, 255)
    # Line thickness of 5 px
    thickness = 5
    img = cv2.ellipse(img, player_point, axesLength, angle, startAngle, endAngle, color, thickness)
    return img

def main():
    img = cv2.imread('img.png')
    point = (160, 665)
    img = player_ellipse(img, point)
    img = mask(img)
    cv2.imshow("img", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

main()
And this is player without any editing:
You are almost there. All that's left to do is to copy the pixels from the original image (without the ellipse) using the mask you found, as answered by Micka and Christoph Rackwitz in the comments.
Optionally, you may apply some morphological operations to make the mask more appealing.
So the steps are:
Draw an ellipse:
Extract a mask using green color:
[Optional] Apply mask erode:
Copy pixels from original image using the mask:
img_with_ellipse[green_mask] = img[green_mask]
Complete example:
import cv2
import numpy as np
def mask(img):
    ## convert to hsv
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    ## mask of green (36,25,25) ~ (86, 255,255)
    lower = (36, 25, 25)
    upper = (86, 255, 255)
    mask = cv2.inRange(hsv, lower, upper)
    mask = cv2.erode(mask, np.ones((3, 3), np.uint8), iterations=5) == 0
    return mask

def player_ellipse(img, player_point):
    axesLength = (45, 20)
    angle = 0
    startAngle = 0
    endAngle = 360
    # Red color in BGR
    color = (0, 0, 255)
    # Line thickness of 5 px
    thickness = 5
    img = cv2.ellipse(
        img.copy(),
        player_point,
        axesLength,
        angle,
        startAngle,
        endAngle,
        color,
        thickness,
    )
    return img

def main():
    img = cv2.imread("img.png")
    point = (160, 665)
    green_mask = mask(img)
    img_with_ellipse = player_ellipse(img, point)
    img_with_ellipse[green_mask] = img[green_mask]
    cv2.imshow("img", img_with_ellipse)
    cv2.waitKey(0)

main()

How to remove a contoured area from an image?

I have an image with a white background and some colored blocks. I have created multiple filters to find the colored blocks and get their contours for another purpose, and I am able to draw the contours around the colored blocks.
These blocks are connected by some black lines whose contours I would like to get. I was using the original contours to also get the lines, but I was advised to instead remove the colored blocks from the image, leaving only the black lines, which are then easier to contour.
I have created a mask that draws over the contoured blocks, but when I display the final image the contoured blocks are black instead of white.
Is there any way to make the blocks white, like the background, so that only the black lines remain?
From the images, you can see that the mask covers the image and the black lines remain. However, I can't figure out how to make the blocks white instead of black so that only the lines remain (see the sketch after the code below).
import numpy as np
import cv2
from cv2 import approxPolyDP, contourArea, cvtColor, inRange

kernel = (5, 5)
srcImg = cv2.imread('BluePurpleConnected.png', cv2.IMREAD_COLOR)
blur = cv2.GaussianBlur(srcImg, kernel, 0)
hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
grayImg = cv2.cvtColor(srcImg, cv2.COLOR_BGR2GRAY)
rows = int(grayImg.shape[0]/2)
cols = int(grayImg.shape[1]/2)
ker = np.ones((0, 0), 'uint8')
graySize = grayImg.shape
sig = 0.33
cPix = 200
bPix = 255
lPurp = np.array([130, 20, 10])
uPurp = np.array([145, 255, 255])
lBlue = np.array([94, 80, 2])
uBlue = np.array([126, 255, 255])
colArray = np.array([lPurp, uPurp, lBlue, uBlue])
masterStruc = []
maskArray = []
mask = np.ones(srcImg.shape[:2], dtype="uint8") * 255
x = 0
while x <= 1:
    imgMask = cv2.inRange(hsv, colArray[2*x], colArray[2*x+1])
    maskArray.append(imgMask)
    v = np.median(imgMask)
    lower = int(max(0, (1.0 - sig) * v))
    upper = int(min(255, (1.0 + sig) * v))
    bwImg = cv2.Canny(imgMask, lower, upper)
    nbwImg = cv2.dilate(bwImg, kernel, iterations=1)
    cImg = nbwImg
    ret, threshold = cv2.threshold(cImg, cPix, bPix, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnt = 0
    for cnt in contours:
        curve = approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
        vert = len(curve)
        area = cv2.contourArea(curve)
        if area > 1000:
            if vert == 4:
                masterStruc.append([x, curve, area])
                cv2.drawContours(mask, [curve], -1, 0, -1)
    x = x + 1
srcImg = cv2.bitwise_and(srcImg, srcImg, mask=mask)
cv2.imshow('Mask', mask)
cv2.imshow('Image', srcImg)
if cv2.waitKey(0) & 0xFF == ord('q'):
    cv2.destroyAllWindows()
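One way to get white blocks instead of black ones, as mentioned above (a sketch using the same variables as the code: mask is 0 exactly where the block contours were filled): instead of masking the source to black with bitwise_and, paint the masked pixels white.
# mask is 255 everywhere except the filled block contours (0),
# so painting those pixels white leaves only the black connecting lines
srcImg[mask == 0] = (255, 255, 255)
cv2.imshow('Lines only', srcImg)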

I'm getting an error while detecting bottle fill level with OpenCV

I am trying to detect the fill levels of bottles moving on a conveyor belt with OpenCV. Since the bottles are colored, I shine a white light from behind, find the liquid contours, and measure them. I'm getting an error in a certain part of the code. The error:
(contours, areas) = zip(*sorted(zip(contours, areas), key = lambda a:a[1])) ValueError: not enough values to unpack (expected 2, got 0)
Example photograph:
When I increase the threshold value in the code, the program runs. But I want this value to remain constant; the issue is related to the quality of the contours. Thanks in advance.
Source Code:
import cv2
import imutils
# cap is assumed to be an already-opened capture, e.g. cap = cv2.VideoCapture(0)
_, frame = cap.read()
# hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
bottle_gray = cv2.split(frame)[0]
bottle_gray = cv2.GaussianBlur(bottle_gray, (7, 7), 0)
(T, bottle_threshold) = cv2.threshold(bottle_gray, 49.5, 250, cv2.THRESH_BINARY_INV)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
bottle_open = cv2.morphologyEx(bottle_threshold, cv2.MORPH_CLOSE, kernel)
contours = cv2.findContours(bottle_open.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(contours)
bottle_clone = frame.copy()
cv2.drawContours(bottle_clone, contours, 0, (255, 0, 0), 2)
areas = [cv2.contourArea(contour) for contour in contours]
(contours, areas) = zip(*sorted(zip(contours, areas), key=lambda a: a[1]))
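The error itself just means that contours was empty for that frame and threshold, so zip(*...) had nothing to unpack. A minimal guard (sketch):
if contours:
    (contours, areas) = zip(*sorted(zip(contours, areas), key=lambda a: a[1]))
else:
    # nothing detected at this threshold: skip the frame or relax the threshold
    pass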
import sys
import cv2
import numpy as np
# Fix HSV color range
def fixHSVRange(h, s, v):
    return (180 * h / 360, 255 * s / 100, 255 * v / 100)
# Load image
dir = sys.path[0]
im = cv2.imread(dir+'/im.png')
# Convert image to HSV and find empty bottle range
hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
msk = cv2.inRange(hsv, fixHSVRange(0, 50, 60), fixHSVRange(5, 100, 100))
# Smooth noise
msk = cv2.medianBlur(msk, 21)
# Invert colors
msk = ~msk
# Find empty space boundaries
cnts, _ = cv2.findContours(msk, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
x, y, w, h = cv2.boundingRect(cnts[1])
# Draw level line
msk = cv2.cvtColor(msk, cv2.COLOR_GRAY2BGR)
cv2.line(msk, (0, y+h), (im.shape[1], y+h), (0, 255, 0), 2, cv2.LINE_AA)
# Save output
cv2.imwrite(dir+'/im_msk.png', np.hstack((im, msk)))
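One caveat about the code above (an observation, not part of the original answer): cnts[1] depends on the order findContours happens to return, which is not guaranteed to be stable across frames. Sorting the contours by area and inspecting them makes the choice of the empty-space blob explicit:
# Sketch: order contours by area so the chosen blob is explicit
cnts_sorted = sorted(cnts, key=cv2.contourArea, reverse=True)
for i, c in enumerate(cnts_sorted[:3]):
    print(i, cv2.contourArea(c), cv2.boundingRect(c))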

Python OpenCV not detecting obvious contours

Apologies as I'm very new to OpenCV and the world of image processing in general.
I'm using OpenCV in Python to detect contours/boxes in this image.
It almost manages to detect all contours, but for some odd reason it doesn't pick up the last row and column, which are obvious contours. This image shows the bounding boxes for the contours it manages to identify.
Not entirely sure why it's not able to easily pick up the remaining contours. I've researched similar questions but haven't found a suitable answer.
Here's my code.
import numpy as np
import cv2
import math
import matplotlib.pyplot as plt
#load image
img = cv2.imread(path)
#remove noise
img = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)
#convert to gray scale
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#make pixels darker
_, img = cv2.threshold(img, 240, 255, cv2.THRESH_TOZERO)
#thresholding the image to a binary image
thresh, img_bin = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
#inverting the image
img_bin = 255 - img_bin
# countcol(width) of kernel as 100th of total width
kernel_len = np.array(img).shape[1]//100
# Defining a vertical kernel to detect all vertical lines of image
ver_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, kernel_len))
# Defining a horizontal kernel to detect all horizontal lines of image
hor_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_len, 1))
# A kernel of 2x2
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
#Use vertical kernel to detect and save the vertical lines in a jpg
image_1 = cv2.erode(img_bin, ver_kernel, iterations = 3)
vertical_lines = cv2.dilate(image_1, np.ones((10, 4),np.uint8), iterations = 30)
vertical_lines = cv2.erode(vertical_lines, np.ones((10, 4),np.uint8), iterations = 29)
#Use horizontal kernel to detect and save the horizontal lines in a jpg
image_2 = cv2.erode(img_bin, np.ones((1, 5),np.uint8), iterations = 5)
horizontal_lines = cv2.dilate(image_2, np.ones((2, 40),np.uint8), iterations = 20)
horizontal_lines = cv2.erode(horizontal_lines, np.ones((2, 39),np.uint8), iterations = 19)
# Combine horizontal and vertical lines in a new third image, with both having same weight.
img_vh = cv2.addWeighted(vertical_lines, 0.5, horizontal_lines, 0.5, 0.0)
rows, cols = img_vh.shape
#shift image so the enhanced lines overlap with original image
M = np.float32([[1,0,-30],[0,1,-21]])
img_vh = cv2.warpAffine(img_vh ,M,(cols,rows))
#Eroding and thesholding the image
img_vh = cv2.erode(~img_vh, kernel, iterations = 2)
thresh, img_vh = cv2.threshold(img_vh, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
bitxor = cv2.bitwise_xor(img, img_vh)
bitnot = cv2.bitwise_not(bitxor)
#find contours
contours, _ = cv2.findContours(img_vh, cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
#create an empty list to append contours smaller than a specified area
new_contours = []
for contour in contours:
    if cv2.contourArea(contour) < 4000000:
        new_contours.append(contour)
#get bounding boxes
bounding_boxes = [cv2.boundingRect(contour) for contour in new_contours]
#plot detected bounding boxes
img_og = cv2.imread(path)
for bounding_box in bounding_boxes:
    x, y, w, h = bounding_box
    img_plot = cv2.rectangle(img_og, (x, y), (x+w, y+h), (255, 0, 0), 2)
plotting = plt.imshow(img_plot, cmap='gray')
plt.show()
Like @ypnos was suggesting, the dilation and erosion have most likely pushed the last line off the image in the "saving horizontal lines" section, so img_vh wouldn't have the last row when it was searched for contours. I tested this (see the note below) by viewing the image after each of your transformations.
Specifically, the number of iterations had been too high; the kernel size you used was already reasonable. It gave perfect results with iterations = 2 on lines 43 and 44 of your code.
After modifying them to :
horizontal_lines = cv2.dilate(image_2, np.ones((2, 40), np.uint8), iterations=2)
horizontal_lines = cv2.erode(horizontal_lines, np.ones((2, 39), np.uint8), iterations=2)
the bounding box rectangles had shifted off the image a bit. That was fixed by changing line 51 of the code to:
M = np.float32([[1, 0, -30], [0, 1, -5]])
This was the result.
Note:
I test/debug using this function usually.
def test(image, title):
    cv2.imshow(title, image)
    cv2.waitKey(0)
    cv2.destroyWindow(title)
The variable window title and the handy waitKey call calm me down.

Trying to segment characters using opencv - Illumination problem

My code is not producing a good binary image!
LpImg = cv2.imread('/content/drive/My Drive/TESTING/Placas_detectadas/CPVL92.png')
if len(LpImg):  # check if there is at least one license image
    # Scales, calculates absolute values, and converts the result to 8-bit.
    plate_image = cv2.convertScaleAbs(LpImg[0], alpha=(255.0))
    plate_image = LpImg  # image_cropped
    # convert to grayscale and blur the image
    gray = cv2.cvtColor(plate_image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (7, 7), 0)
    # Applied inverted thresh_binary
    thresh_inv = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 39, 1)
    #binary = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    kernel3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    thre_mor = cv2.morphologyEx(thresh_inv, cv2.MORPH_DILATE, kernel3)
    # visualize results
    fig = plt.figure(figsize=(12, 7))
    plt.rcParams.update({"font.size": 18})
    grid = gridspec.GridSpec(ncols=2, nrows=3, figure=fig)
    plot_image = [plate_image, gray, blur, thresh_inv, thre_mor]
    plot_name = ["plate_image", "gray", "blur", "binary", "dilation"]
    for i in range(len(plot_image)):
        fig.add_subplot(grid[i])
        plt.axis(False)
        plt.title(plot_name[i])
        if i == 0:
            plt.imshow(plot_image[i])
        else:
            plt.imshow(plot_image[i], cmap="gray")
This is the image:
With these results:
If I change the adaptive threshold line
thresh_inv = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 39, 1)
to Otsu thresholding
binary = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
I get this result:
Why is this happening? How can I solve it?
I was thinking of using this:
LpImg = cv2.imread('/content/image.png')
# Ioutlow/Iouthigh are the low- and high-frequency components from a
# homomorphic filtering step not shown here; rows, cols are the image size.
# Set scaling factors and add
gamma1 = 0.3
gamma2 = 1.5
Iout = gamma1*Ioutlow[0:rows, 0:cols] + gamma2*Iouthigh[0:rows, 0:cols]
# Anti-log then rescale to [0,1]
Ihmf = np.expm1(Iout)
Ihmf = (Ihmf - np.min(Ihmf)) / (np.max(Ihmf) - np.min(Ihmf))
Ihmf2 = np.array(255*Ihmf, dtype="uint8")
# Threshold the image - anything below intensity 65 gets set to white
Ithresh = Ihmf2 < 65
Ithresh = 255*Ithresh.astype("uint8")
That has this result:
But I still want to use these filters:
Grayscale
Blur
Binarization
Segmentation
Another approach is to use division normalization in Python/OpenCV.
Read the input
Convert to gray
Apply morphology dilation
Divide the input by the dilated image
Threshold
Save the results
Input:
import cv2
import numpy as np
# read the image
img = cv2.imread('license_chile.png')
# convert to gray
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# apply morphology
kernel = cv2.getStructuringElement(cv2.MORPH_RECT , (75,75))
smooth = cv2.morphologyEx(gray, cv2.MORPH_DILATE, kernel)
# divide gray by morphology image
division = cv2.divide(gray, smooth, scale=255)
# threshold
result = cv2.threshold(division, 0, 255, cv2.THRESH_OTSU )[1]
# save results
cv2.imwrite('license_chile_thresh.jpg',result)
# show results
cv2.imshow('smooth', smooth)
cv2.imshow('division', division)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
