I have code that detects sunspots, but I want to count the number of sunspot groups instead of the individual spots, as in my current output ("actual").
Here is my code. How do I get my output to group the sunspots and look like the "desired" image instead?
import os
import cv2 # opencv library
import numpy as np
import matplotlib.pyplot as plt
"""Make the pwd implementation"""
cwd = os.getcwd()
file = "/sunspot1.jpg"
path = cwd + file
image = cv2.imread(path, 0)  # read as grayscale
image_1 = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)  # 3-channel copy for coloured drawing
#plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
#plt.show()
#plot the image in graycolor
#gray = cv2.cvtColor(image,cv2.COLOR_BGR2HSV)
#plt.imshow(gray)
#plt.show()
# perform image thresholding
ret, thresh = cv2.threshold(image, 90, 255, cv2.THRESH_BINARY)
#plt.imshow(thresh, cmap = 'gray')
#plt.show()
#circle = cv2.circle(thresh, (249,249),(238),(0, 255, 0),1)
# plt.imshow(circle)
# plt.show()
# find the taches (spots) contours
contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# keep only the contours that fall inside the solar disk (centre (249, 249), radius 238)
valid_cntrs = []
for i, cntr in enumerate(contours):
    x, y, w, h = cv2.boundingRect(cntr)
    #print("x = ", x, "y = ", y, "w = ", w, "h = ", h)
    if ((x - 249)**2 + (y - 249)**2) <= 238**2:
        valid_cntrs.append(cntr)
"""implement image size detection for the contour LINE 36"""
#count the taches number
taches= len(valid_cntrs);
#sunspot= 1*(10*groups+taches);
# count the number of dicovered sunspots
print("The number of taches is: ",taches)
if taches == 0:
plt.imshow(image_1)
plt.show()
else:
contour_sizes = [(cv2.contourArea(contour), contour) for contour in valid_cntrs]
for i in range(len(valid_cntrs)):
x,y,w,h = cv2.boundingRect(contour_sizes[i][1])
prevtaches = cv2.rectangle(image_1,(x,y),(x+w,y+h),(0,255,0),1)
plt.imshow(prevtaches)
plt.show()
Actual:
Desired:
What you can do is, after thresholding (and before detecting the individual contours), perform a morphological operation like dilation to broaden the white areas so that nearby spots get connected and merge into one big contour. Adjust your kernel size to fit your needs, and you can also play with the iterations argument.
You can refer to the OpenCV documentation on morphological operations for this.
After that, you can draw your contours.
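A minimal sketch of that idea, applied to the thresholded image from the question; the kernel size and iteration count are values you would tune, and it assumes the spots appear white in thresh (if they are dark on a white disk, invert with cv2.bitwise_not first):
import cv2
import numpy as np

# spots are assumed white here; invert the threshold first if they are dark
kernel = np.ones((9, 9), np.uint8)                   # tune the kernel size
dilated = cv2.dilate(thresh, kernel, iterations=2)   # tune the iterations
# nearby spots are now merged, so each external contour is one group
groups, _ = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
print("The number of groups is:", len(groups))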
I want to extract each sticker from a 5x6 grid, 30 stickers in total,
like below. How do I do so?
(expected pic) https://imgur.com/a/C5CiSxM
(original picture) https://imgur.com/a/V0lvqU3
I came up with my code from the link below:
How extract pictures from an big image in python
following the suggestion:
The black pixels along the top are a distraction, so are the black pixels of the QR codes. You are only interested in the white stickers.
So, take a copy of your image and threshold at a high value to give you pure white stickers surrounded by black and with black QR codes within each sticker. Now find white contours and reject black ones.
Apply the contours found on the thresholded image to your original image.
I'm doing the thresholding, expecting pure white stickers surrounded by black, with black QR codes within each sticker:
import numpy as np
import glob
import matplotlib.pyplot as plt
import skimage.io
import skimage.color
import skimage.filters
from PIL import Image
import pytesseract
import cv2 as cv

def custom_blur_demo(image):
    kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)  # sharpen
    dst = cv.filter2D(image, -1, kernel=kernel)
    cv.imwrite("/home/joy/桌面/test_11_4/sharpen_images.png", dst)
    cv.imshow("custom_blur_demo", dst)

src = cv.imread("/home/joy/桌面/test_11_4/original.png")
cv.namedWindow("input image", cv.WINDOW_AUTOSIZE)
cv.imshow("input image", src)
custom_blur_demo(src)
cv.waitKey(0)
cv.destroyAllWindows()
# load the image
image = skimage.io.imread("/home/joy/桌面/test_11_4/sharpen_images.png")[:,:,:3]
# image = imageio.imread(image_name)[:,:,:3]
# img = rgb2gray(image)
fig, ax = plt.subplots()
plt.imshow(image)
# convert the image to grayscale
gray_image = skimage.color.rgb2gray(image)
# blur the image to denoise
blurred_image = skimage.filters.gaussian(gray_image, sigma=1.0)
fig, ax = plt.subplots()
plt.imshow(blurred_image, cmap="gray")
# create a histogram of the blurred grayscale image
histogram, bin_edges = np.histogram(blurred_image, bins=256, range=(0.0, 1.0))
fig, ax = plt.subplots()
plt.plot(bin_edges[0:-1], histogram)
plt.title("Grayscale Histogram")
plt.xlabel("grayscale value")
plt.ylabel("pixels")
plt.xlim(0, 1.0)
# create a mask based on the threshold
t1 = 0.72
t2 = 0.05
binary_mask = blurred_image < t1
fig, ax = plt.subplots()
plt.imshow(binary_mask, cmap="gray")
plt.show()
plt.savefig("/home/joy/桌面/test_11_4/sharpen_images_del_gray_part.png")
img = Image.open('/home/joy/桌面/test_11_4/sharpen_images_del_gray_part.png')
text = pytesseract.image_to_string(img, lang='eng')
print("file name" ,"final output", ".png")
print("size")
print(img.size)
print(text)
Here is the output of my thresholding: https://imgur.com/a/V0lvqU3
The thresholding does produce something, but the text on every sticker looks blurred (I'm going to OCR every single sticker image to text later).
It's not correct yet: I want the sticker part only, without the gray parts.
(pic 5b) in the same link shows how far I've got:
https://imgur.com/a/V0lvqU3
How do I cut them into small pieces of sticker size?
(expected pic) https://imgur.com/a/C5CiSxM
The black pixels along the top are a distraction, so are the black pixels of the QR codes. You are only interested in the white stickers.
So, take a copy of your image and threshold at a high value to give you pure white stickers surrounded by black and with black QR codes within each sticker. Now find white contours and reject black ones.
Apply the contours found on the thresholded image to your original image.
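A minimal sketch of that suggestion, with an assumed input file name and an assumed threshold of 200 that you would tune until only the stickers stay white:
import cv2

img = cv2.imread('original.png')             # assumed input name
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# threshold high so only the white stickers survive
_, th = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
# RETR_EXTERNAL keeps the outer (white) sticker outlines and ignores
# the black QR codes nested inside them
cnts, _ = cv2.findContours(th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
count = 0
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    if w * h > 1000:                         # reject tiny specks; tune this
        cv2.imwrite('sticker_%d.png' % count, img[y:y+h, x:x+w])
        count += 1
print(count, "stickers extracted")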
This effort is probably not scientifically generalizable.
I just wanted to show that it doesn't need contour finding or binarization; plain pixel-value comparison might be enough.
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
orig_image = cv.imread("sample.png")
def show_img(img_bgr):
    fig, ax = plt.subplots(figsize=(5, 5))
    rgb = cv.cvtColor(img_bgr, cv.COLOR_BGR2RGB)
    ax.imshow(rgb)
    return fig
def detect_top_bottom(original_img, erodded_img):
    """Detects only the top and bottom row numbers of each row of QR codes."""
    line_img = original_img.copy()
    height, width = erodded_img.shape
    top = 0
    top_drawn = False
    rows_range = []
    for row in range(erodded_img.shape[0]):
        for col in range(erodded_img.shape[1]):
            if np.mean(erodded_img[row, col]) > 190:
                if row - top > 3 and not top_drawn:
                    cv.line(line_img, (0, row), (width, row), (0, 255, 0), 2)
                    rows_range.append([row, None])
                    top_drawn = True
                top = row
                break
        else:
            # no bright pixel found in this row, so the band has ended
            if top_drawn:
                cv.line(line_img, (0, row), (width, row), (0, 255, 0), 2)
                rows_range[-1][1] = row
                top_drawn = False
    return line_img, rows_range
# make original image grayscale
gray = cv.cvtColor(orig_image, cv.COLOR_BGR2GRAY)
# erode image with 3x3 kernel in order to remove small noises
kernel = np.ones((3,3),np.uint8)
eroded = cv.erode(gray,kernel,iterations = 1)
line_img, rows_range = detect_top_bottom(orig_image, eroded)
# Rotate image 90 degs clockwise in order to use same function for detection
eroded_rotated_90 = cv.rotate(eroded, cv.ROTATE_90_CLOCKWISE)
line_img_rotated_90 = cv.rotate(line_img, cv.ROTATE_90_CLOCKWISE)
line_img, cols_range = detect_top_bottom(line_img_rotated_90, eroded_rotated_90)
# finally rotate 90 deg counter-clockwise to get the original orientation back
line_img= cv.rotate(line_img, cv.ROTATE_90_COUNTERCLOCKWISE)
fig = show_img(line_img)
fig.savefig("original_grid.png")
fig, axs = plt.subplots(len(rows_range), len(cols_range))
for i, row in enumerate(rows_range):
    for j, col in enumerate(cols_range):
        axs[i, j].axis('off')
        # convert to RGB just for the sake of visualization
        # axs[i,j].imshow(orig_image[row[0]:row[1], :][:,col[0]:col[1]]) is probably enough
        orig_sub_channels = cv.cvtColor(orig_image[row[0]:row[1], :][:, col[0]:col[1]], cv.COLOR_BGR2RGB)
        axs[i, j].imshow(orig_sub_channels)
fig.savefig("splitted_grid.png")
I'm looking for a proper solution for counting the particles and measuring their sizes in this image:
In the end I have to obtain lists of the particles' coordinates and areas. After some searching on the internet I realized there are three approaches for particle detection:
blobs
contours
connectedComponentsWithStats
Looking at different projects, I assembled some code from a mix of them.
import pylab
import cv2
import numpy as np
Gaussian blurring and thresholding
original_image = cv2.imread(img_path)
img = original_image
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.GaussianBlur(img, (5, 5), 0)
img = cv2.blur(img, (5, 5))
img = cv2.medianBlur(img, 5)
img = cv2.bilateralFilter(img, 6, 50, 50)
max_value = 255
adaptive_method = cv2.ADAPTIVE_THRESH_GAUSSIAN_C
threshold_type = cv2.THRESH_BINARY
block_size = 11
img_thresholded = cv2.adaptiveThreshold(img, max_value, adaptive_method, threshold_type, block_size, -3)
filter small objects
min_size = 4
# note: the components should be found on the thresholded image, not the grayscale one
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(img_thresholded, connectivity=8)
sizes = stats[1:, -1]
nb_components = nb_components - 1
# for every component in the image, keep it only if it's above min_size
for i in range(0, nb_components):
    if sizes[i] < min_size:
        img_thresholded[output == i + 1] = 0
generation of contours for filling holes and for measurements; pos_list and size_list are what we were looking for
contours, hierarchy = cv2.findContours(img_thresholded, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
pos_list = []
size_list = []
for i in range(len(contours)):
    area = cv2.contourArea(contours[i])
    size_list.append(area)
    (x, y), radius = cv2.minEnclosingCircle(contours[i])
    pos_list.append((int(x), int(y)))
as a self-check, we plot these coordinates over the original image
pts = np.array(pos_list)
pylab.figure(0)
pylab.imshow(original_image)
pylab.scatter(pts[:, 0], pts[:, 1], marker="x", color="green", s=5, linewidths=1)
pylab.show()
We might get something like the following:
And... I'm not really satisfied with the results. Some clearly visible particles are not included while, on the other hand, some dubious fluctuations of intensity have been counted. I'm playing with different filter settings now, but the feeling is that it's wrong.
If someone knows how to improve my solution, please share.
Since the particles are in white and the background in black, we can use Kmeans Color Quantization to segment the image into two groups with clusters=2. This will allow us to easily distinguish between particles and the background. Since the particles may be very tiny, we should try to avoid blurring, dilating, or any morphological operations which may alter the particle contours. Here's an approach:
Kmeans color quantization. We perform Kmeans with two clusters, grayscale, then Otsu's threshold to obtain a binary image.
Filter out super tiny noise. Next we find contours, remove tiny specs of noise using contour area filtering, and collect each particle (x, y) coordinate and its area. We remove tiny particles on the binary mask by "filling in" these contours to effectively erase them.
Apply mask onto original image. Now we bitwise-and the filtered mask onto the original image to highlight the particle clusters.
Kmeans with clusters=2
Result
Number of particles: 204
Average particle size: 30.537
Code
import cv2
import numpy as np
import pylab
# Kmeans
def kmeans_color_quantization(image, clusters=8, rounds=1):
    h, w = image.shape[:2]
    samples = np.zeros([h*w, 3], dtype=np.float32)
    count = 0
    for x in range(h):
        for y in range(w):
            samples[count] = image[x][y]
            count += 1

    compactness, labels, centers = cv2.kmeans(samples,
            clusters,
            None,
            (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10000, 0.0001),
            rounds,
            cv2.KMEANS_RANDOM_CENTERS)

    centers = np.uint8(centers)
    res = centers[labels.flatten()]
    return res.reshape((image.shape))
# Load image
image = cv2.imread('1.png')
original = image.copy()
# Perform kmeans color segmentation, grayscale, Otsu's threshold
kmeans = kmeans_color_quantization(image, clusters=2)
gray = cv2.cvtColor(kmeans, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# Find contours, remove tiny specs using contour area filtering, gather points
points_list = []
size_list = []
cnts, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
AREA_THRESHOLD = 2
for c in cnts:
    area = cv2.contourArea(c)
    if area < AREA_THRESHOLD:
        cv2.drawContours(thresh, [c], -1, 0, -1)
    else:
        (x, y), radius = cv2.minEnclosingCircle(c)
        points_list.append((int(x), int(y)))
        size_list.append(area)
# Apply mask onto original image
result = cv2.bitwise_and(original, original, mask=thresh)
result[thresh==255] = (36,255,12)
# Overlay on original
original[thresh==255] = (36,255,12)
print("Number of particles: {}".format(len(points_list)))
print("Average particle size: {:.3f}".format(sum(size_list)/len(size_list)))
# Display
cv2.imshow('kmeans', kmeans)
cv2.imshow('original', original)
cv2.imshow('thresh', thresh)
cv2.imshow('result', result)
cv2.waitKey()
I have an image like the following, and I want to find the four coordinates (corners) from it.
I have tried the code below:
# dilate thresholded image - merges top/bottom
kernel = np.ones((3,3), np.uint8)
dilated = cv2.dilate(img, kernel, iterations=3)
# Finding contours for the thresholded image
contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
First, I dilated the image so that it fills up the scattered portion, and then tried to find the contours from there. But it gives me the wrong output.
What can I do to find the four corner coordinates?
I found your points by putting a regression line through each of your sides and taking their intersection points.
First I import stuff and find the contour points with OpenCV.
import numpy as np
import cv2
import matplotlib.pyplot as plt
from scipy.stats import linregress
from sympy import solve, symbols
import itertools
img = cv2.imread('ZrSqG.png')
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
threshold, binarized_img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(binarized_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = (contours[1].reshape(-1,2)).T
Now I take a few of the topmost, leftmost, etc. points and put a line through them. Then I calculate their intersections and plot it all.
def interpolate_function(x, y):
    line = interpolate(x, y)
    return lambda x: x*line.slope + line.intercept

def interpolate(x, y):
    idx = np.argsort(x)
    line = linregress(x[idx], y[idx])
    return line

def interception(line1, line2):
    x = symbols('x')
    x = solve(x*line1.slope + line1.intercept - (x*line2.slope + line2.intercept))[0]
    return (x, x*line1.slope + line1.intercept)
idx_x = np.argsort(contours[0])
idx_y = np.argsort(contours[1])
left = [contours[0][idx_x[:30]], contours[1][idx_x[:30]]]
right = contours[0][idx_x[-10:]], contours[1][idx_x[-10:]]
top = contours[0][idx_y[:10]], contours[1][idx_y[:10]]
bottom = contours[0][idx_y[-30:]], contours[1][idx_y[-30:]]
contour_functions = [interpolate_function(*left), interpolate_function(*right),interpolate_function(*top), interpolate_function(*bottom)]
contour_function_eqs = [[interpolate(*left), interpolate(*right)],
[interpolate(*top), interpolate(*bottom)]]
for f in contour_functions:
    t = np.linspace(0, img.shape[1], 10**4)
    t = t[(0 < f(t)) & (f(t) < img.shape[0])]
    plt.plot(t, f(t))

intersections = np.array([interception(line1, line2)
                          for line1, line2 in itertools.product(contour_function_eqs[0], contour_function_eqs[1])])
plt.scatter(intersections[:, 0], intersections[:, 1])
plt.imshow(img, cmap='gray')
And I get
Or if you prefer to follow the bottom left part you just reduce the points in the bottom by replacing
bottom = contours[0][idx_y[-30:]], contours[1][idx_y[-30:]]
with
bottom = contours[0][idx_y[-10:]], contours[1][idx_y[-10:]]
and you get
I've got greyscale images which show particles on a surface. I'd like to write a program which finds the particles, draws a circle around each one, and counts the circles and the pixels inside the circles.
One of the main problems is that the particles overlap. The next problem is that the contrast of the images changes from one image to the next.
Here is my first trial:
import matplotlib.pyplot as plt
import cv2 as cv
import imutils
import numpy as np
from PIL import Image
import os.path

fileref = "test.png"
original = cv.imread(fileref)
img = original
cv.imwrite(os.path.join("inverse_" + fileref[:-4] + ".png"), ~img)
img = cv.medianBlur(img,5)
img_grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
ret,th1 = cv.threshold(img_grey,130,255,cv.THRESH_BINARY)
th2 = cv.adaptiveThreshold(img_grey,255,cv.ADAPTIVE_THRESH_MEAN_C,\
cv.THRESH_BINARY,11,2)
th3 = cv.adaptiveThreshold(img_grey,255,cv.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv.THRESH_BINARY,11,2)
titles = ['Original Image', 'Global Thresholding (v = 127)',
'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']
images = [img, th1]
cv.imwrite( os.path.join("threshhold_"+fileref[:-4]+".jpg"), th1 );
cv.imwrite( os.path.join("adaptivthreshhold-m_"+fileref[:-4]+".jpg"), th2 );
cv.imwrite( os.path.join("adaptivthreshhold-g_"+fileref[:-4]+".jpg"), th3 );
imghsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
imghsv[:,:,2] = [[max(pixel - 25, 0) if pixel < 190 else min(pixel + 25, 255) for pixel in row] for row in imghsv[:,:,2]]
cv.imshow('contrast', cv.cvtColor(imghsv, cv.COLOR_HSV2BGR))
# Setup SimpleBlobDetector parameters.
params = cv.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 0
params.maxThreshold = 150
# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.87
# Filter by Inertia
params.filterByInertia = True
params.minInertiaRatio = 0.08 # 0.08
# Set edge gradient
params.thresholdStep = 0.5
# Filter by Area.
params.filterByArea = True
params.minArea = 300
# Set up the detector with default parameters.
detector = cv.SimpleBlobDetector_create(params)
# Detect blobs.
keypoints = detector.detect(original)
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
im_with_keypoints = cv.drawKeypoints(original, keypoints, np.array([]), (0, 0, 255),
cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
print(len(keypoints))
# Show keypoints
display=cv.resize(im_with_keypoints,None,fx=0.5,fy=0.5)
cv.imshow("Keypoints", display)
cv.waitKey(0)
cv.imwrite( os.path.join("keypoints_"+fileref[:-4]+".jpg"), im_with_keypoints );
It circles most particles, but the parameters need to be changed for each image to get better results, the circles can't overlap, and I don't know how to count the circles or the pixels inside them.
Any help or hints which point me in the right direction are much appreciated.
I added a couple of sample pics.
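On the counting part specifically: len(keypoints) already gives the circle count, and a rough pixel count inside the circles can be had by drawing the detected keypoints onto a mask. A minimal sketch, assuming keypoints and original from the code above (kp.size is the blob diameter):
import cv2 as cv
import numpy as np

mask = np.zeros(original.shape[:2], dtype=np.uint8)
for kp in keypoints:
    x, y = int(kp.pt[0]), int(kp.pt[1])   # keypoint centre
    r = max(int(kp.size / 2), 1)          # kp.size is the diameter
    cv.circle(mask, (x, y), r, 255, -1)   # filled circle
print("circles:", len(keypoints))
print("pixels inside circles:", cv.countNonZero(mask))
Note that pixels covered by overlapping circles are only counted once in the mask.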
This is an alternative approach and may not necessarily give better results than what you already have. You can try out plugging in different values for parameters and see if it gives you acceptable results.
import numpy as np
import cv2
import matplotlib.pyplot as plt
rgb = cv2.imread('/your/image/path/blobs_0002.jpeg')
gray = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
imh, imw = gray.shape
# an alternative that was tried: cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 21, 2)
th = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 15, 15)
contours, hier = cv2.findContours(th.copy(),cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE)
out_img = rgb.copy()
for i in range(len(contours)):
    if hier[0][i][3] != -1:
        continue
    x, y, w, h = cv2.boundingRect(contours[i])
    ar = min(w, h) / max(w, h)
    area = cv2.contourArea(contours[i])
    extent = area / (w*h)
    if 20 < w*h < 1000 and \
            ar > 0.5 and extent > 0.4:
        cv2.circle(out_img, (int(x + w/2), int(y + h/2)), int(max(w, h)/2), (255, 0, 0), 1)
plt.imshow(out_img)
For larger coalesced blobs, you might try running Hough circles to see if partial contours fit the test; just a thought, sketched below. This is just to acknowledge that the images you are dealing with are challenging to solve cleanly.
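If you want to try that, here is a hedged sketch with cv2.HoughCircles; every parameter (minDist, param1, param2, the radius bounds) is a guess you would tune per image:
import cv2
import numpy as np

gray = cv2.cvtColor(cv2.imread('/your/image/path/blobs_0002.jpeg'), cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 5)   # Hough tends to work better on smoothed input
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp=1, minDist=10,
                           param1=100, param2=20, minRadius=5, maxRadius=30)
out = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
if circles is not None:
    for x, y, r in np.round(circles[0]).astype(int):
        cv2.circle(out, (x, y), r, (255, 0, 0), 1)
    print("circles found:", len(circles[0]))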
I am trying to count the number of drops in this image and the coverage percentage of the area covered by those drops.
I tried to convert this image into black and white, but the center color of those drops seems too similar to the background, so I only got something like the second picture.
Is there any way to solve this problem, or any better ideas?
Thanks a lot.
You can fill the holes of your binary image using scipy.ndimage.binary_fill_holes. I also recommend using an automatic thresholding method such as Otsu's (available in scikit-image).
from skimage import io, filters
from scipy import ndimage
import matplotlib.pyplot as plt
im = io.imread('ba3g0.jpg', as_gray=True)  # older scikit-image spelled this as_grey
val = filters.threshold_otsu(im)
drops = ndimage.binary_fill_holes(im < val)
plt.imshow(drops, cmap='gray')
plt.show()
For the number of drops you can use another function of scikit-image
from skimage import measure
labels = measure.label(drops)
print(labels.max())
And for the coverage
print('coverage is %f' %(drops.mean()))
I used the following code to detect the number of contours in the image using OpenCV and python.
import cv2
import numpy as np
img = cv2.imread('ba3g0.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)  # flag 1 == THRESH_BINARY_INV
contours, h = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)  # flags 1, 2 == RETR_LIST, CHAIN_APPROX_SIMPLE
for cnt in contours:
    cv2.drawContours(img, [cnt], 0, (0, 0, 255), 1)
To further remove the contours that lie inside another contour, you need to iterate over the entire list, compare, and remove the internal contours. After that, the length of contours will give you the count; a sketch of this step follows.
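A hedged sketch of that removal step: instead of comparing contour pairs by hand, you can ask findContours for the hierarchy and keep only contours that have no parent (the input file name matches the snippet above):
import cv2

img = cv2.imread('ba3g0.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)
contours, h = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
# h[0][i][3] == -1 means contour i has no parent, i.e. it is an outer contour
outer = [c for i, c in enumerate(contours) if h[0][i][3] == -1]
print("drop count:", len(outer))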
The idea is to isolate the background from the insides of the drops that look like the background.
Therefore I found the connected components of the background and the drop interiors, took the largest connected component, and changed its value to the foreground value, which left me with an image in which the drop interiors have a different value than the background.
Then I used this image to fill in the original thresholded image.
In the end, using the filled image, I calculated the relevant values.
import cv2
import numpy as np
from matplotlib import pyplot as plt
# Read image
I = cv2.imread('drops.jpg', 0)
# Threshold
IThresh = (I>=118).astype(np.uint8)*255
# Remove the biggest connected component (the background) from the image
# Find the area of each connected component
connectedComponentProps = cv2.connectedComponentsWithStats(IThresh, 8, cv2.CV_32S)
IThreshOnlyInsideDrops = connectedComponentProps[1]
stat = connectedComponentProps[2]
maxArea = 0
for label in range(connectedComponentProps[0]):
    cc = stat[label, :]
    if cc[cv2.CC_STAT_AREA] > maxArea:
        maxArea = cc[cv2.CC_STAT_AREA]
        maxIndex = label

# Convert the background value to the foreground value
for label in range(connectedComponentProps[0]):
    cc = stat[label, :]
    if cc[cv2.CC_STAT_AREA] == maxArea:
        IThreshOnlyInsideDrops[IThreshOnlyInsideDrops == label] = 0
    else:
        IThreshOnlyInsideDrops[IThreshOnlyInsideDrops == label] = 255
# Fill in all the IThreshOnlyInsideDrops as 0 in original IThresh
IThreshFill = IThresh
IThreshFill[IThreshOnlyInsideDrops==255] = 0
IThreshFill = np.logical_not(IThreshFill/255).astype(np.uint8)*255
plt.imshow(IThreshFill)
# Get numberof drops and cover precntage
connectedComponentPropsFinal = cv2.connectedComponentsWithStats(IThreshFill, 8, cv2.CV_32S)
NumberOfDrops = connectedComponentPropsFinal[0]
CoverPercentage = float(np.count_nonzero(IThreshFill == 0)) / float(IThreshFill.size)
# Print
print("Number of drops = " + str(NumberOfDrops))
print("Cover percentage = " + str(CoverPercentage))
Solution
import cv2
import numpy as np
import matplotlib.pyplot as plt

image = cv2.imread('image path.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# (thresh, blackAndWhiteImage) = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
plt.imshow(gray, cmap='gray')
blur = cv2.GaussianBlur(gray, (11, 11), 0)
plt.imshow(blur, cmap='gray')
canny = cv2.Canny(blur, 30, 40, 3)
plt.imshow(canny, cmap='gray')
kernel = np.ones((1, 1), np.uint8)  # a proper kernel; the original passed a bare (1, 1) tuple
dilated = cv2.dilate(canny, kernel, iterations=1)
plt.imshow(dilated, cmap='gray')
(cnt, hierarchy) = cv2.findContours(
    dilated.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
cv2.drawContours(rgb, cnt, -1, (0, 255, 0), 2)
plt.imshow(rgb)
print("No of circles: ", len(cnt))