OpenCV python overlapping particles size and number - python

I have greyscale images which show particles on a surface. I would like to write a program which finds the particles, draws a circle around each one, and counts both the circles and the pixels inside the circles.
One of the main problems is that the particles overlap. The next problem is that the contrast changes from one image to the next.
Here is my first trial:
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import os.path
fileref="test.png"
original = cv.imread(fileref)
img = original
cv.imwrite("inverse_" + fileref[:-4] + ".png", ~img)
img = cv.medianBlur(img,5)
img_grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
ret,th1 = cv.threshold(img_grey,130,255,cv.THRESH_BINARY)
th2 = cv.adaptiveThreshold(img_grey,255,cv.ADAPTIVE_THRESH_MEAN_C,\
cv.THRESH_BINARY,11,2)
th3 = cv.adaptiveThreshold(img_grey,255,cv.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv.THRESH_BINARY,11,2)
cv.imwrite("threshhold_" + fileref[:-4] + ".jpg", th1)
cv.imwrite("adaptivthreshhold-m_" + fileref[:-4] + ".jpg", th2)
cv.imwrite("adaptivthreshhold-g_" + fileref[:-4] + ".jpg", th3)
imghsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
# stretch the V channel: darken pixels below 190, brighten the rest (vectorised)
v = imghsv[:,:,2].astype(np.int16)
v = np.where(v < 190, np.maximum(v - 25, 0), np.minimum(v + 25, 255))
imghsv[:,:,2] = v.astype(np.uint8)
cv.imshow('contrast', cv.cvtColor(imghsv, cv.COLOR_HSV2BGR))
# Setup SimpleBlobDetector parameters.
params = cv.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 0
params.maxThreshold = 150
# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.87
# Filter by Inertia
params.filterByInertia = True
params.minInertiaRatio = 0.08 # 0.08
# Step between consecutive binarization thresholds
params.thresholdStep = 0.5
# Filter by Area.
params.filterByArea = True
params.minArea = 300
# Set up the detector with default parameters.
detector = cv.SimpleBlobDetector_create(params)
# Detect blobs.
keypoints = detector.detect(original)
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
im_with_keypoints = cv.drawKeypoints(original, keypoints, np.array([]), (0, 0, 255),
cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
print(len(keypoints))
# Show keypoints
display=cv.resize(im_with_keypoints,None,fx=0.5,fy=0.5)
cv.imshow("Keypoints", display)
cv.waitKey(0)
cv.imwrite("keypoints_" + fileref[:-4] + ".jpg", im_with_keypoints)
It circles most particles, but the parameters need to be changed for each image to get good results, the circles can't overlap, and I don't know how to count the circles or count the pixels inside the circles.
Any help or hints that point me in the right direction are much appreciated.
I added a couple of sample pics.

This is an alternative approach and may not necessarily give better results than what you already have. You can try out plugging in different values for parameters and see if it gives you acceptable results.
import numpy as np
import cv2
import matplotlib.pyplot as plt
rgb = cv2.imread('/your/image/path/blobs_0002.jpeg')
gray = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
imh, imw = gray.shape
# alternative: th = cv2.adaptiveThreshold(gray,255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,21,2)
th = cv2.adaptiveThreshold(gray,255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,15,15)
contours, hier = cv2.findContours(th.copy(),cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE)
out_img = rgb.copy()
for i in range(len(contours)):
    # skip contours that have a parent, i.e. holes inside blobs
    if hier[0][i][3] != -1:
        continue
    x,y,w,h = cv2.boundingRect(contours[i])
    ar = min(w,h)/max(w,h)
    area = cv2.contourArea(contours[i])
    extent = area / (w*h)
    # keep roughly square, reasonably filled boxes of plausible size
    if 20 < w*h < 1000 and ar > 0.5 and extent > 0.4:
        cv2.circle(out_img, (int(x+w/2), int(y+h/2)), int(max(w,h)/2), (255, 0, 0), 1)
plt.imshow(out_img)
For larger coalesced blobs you might try running Hough circles to see if partial contours fit the test. Just a thought, and just to acknowledge that the images you are dealing with are challenging to handle with a clean solution.
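If you want to experiment with that, here is a minimal sketch of the Hough-circle idea; it also covers the counting part of the question (number of circles, plus the pixels they cover, with overlaps counted once). The file path is the one from above, and all radius and accumulator values are guesses that would need tuning per image set:
import cv2
import numpy as np
rgb = cv2.imread('/your/image/path/blobs_0002.jpeg')
gray = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 5)
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp=1, minDist=10,
                           param1=100, param2=15, minRadius=5, maxRadius=30)
if circles is not None:
    circles = np.round(circles[0]).astype(int)
    mask = np.zeros(gray.shape, dtype=np.uint8)
    for cx, cy, r in circles:
        cv2.circle(rgb, (int(cx), int(cy)), int(r), (255, 0, 0), 1)
        cv2.circle(mask, (int(cx), int(cy)), int(r), 255, -1)  # filled circle on the counting mask
    print('circles:', len(circles))
    print('pixels inside circles:', cv2.countNonZero(mask))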

Related

OpenCVs BlobDetector crashes

I'm trying to use OpenCV's SimpleBlobDetector. I'm using a very simple piece of code: I read the image, I create the detector, and I apply it. But when I execute it I get the error "Python has stopped working".
The image is read properly.
It is a 1536x2048 image (too big?).
Could the PNG format be a problem?
import cv2
import numpy as np
# Read image
im = cv2.imread("vlcsnap-2020-02-05-10h54m17s200.png", cv2.IMREAD_GRAYSCALE)
# Set up the detector with default parameters.
detector = cv2.SimpleBlobDetector()
# Detect blobs.
keypoints = detector.detect(im)  # No error if I comment out this line
Does anyone have a clue why this wouldn't work?
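A likely culprit, before worrying about the image size or format: on OpenCV 3 and later the detector is meant to be built with the _create factory, and the bare cv2.SimpleBlobDetector() constructor is a known source of exactly this crash at detect(). A version-safe sketch, using the same check as the last answer on this page:
import cv2
params = cv2.SimpleBlobDetector_Params()
# OpenCV 2.x exposes the plain constructor; 3.x+ needs the _create factory
ver = cv2.__version__.split('.')
if int(ver[0]) < 3:
    detector = cv2.SimpleBlobDetector(params)
else:
    detector = cv2.SimpleBlobDetector_create(params)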
Here is an answer that I posted some time ago in Python/OpenCV for doing blob analysis. Perhaps this will help you.
import numpy as np
import cv2
import math
# read image
img = cv2.imread("particles.jpg")
# convert to grayscale
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# apply Gaussian Blur
smoothed = cv2.GaussianBlur(gray, (0,0), sigmaX=9, sigmaY=9, borderType = cv2.BORDER_DEFAULT)
# do adaptive threshold on gray image
thresh = cv2.adaptiveThreshold(smoothed, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 65, 10)
cv2.imshow("Threshold", thresh)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Set up the SimpleBlobdetector with default parameters.
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 0
params.maxThreshold = 256
# Filter by Area.
params.filterByArea = True
params.minArea = 30
params.maxArea = 10000
# Filter by Color (black=0)
params.filterByColor = True
params.blobColor = 0
# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.5
params.maxCircularity = 1
# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.5
params.maxConvexity = 1
# Filter by InertiaRatio
params.filterByInertia = True
params.minInertiaRatio = 0
params.maxInertiaRatio = 1
# Distance Between Blobs
params.minDistBetweenBlobs = 0
# Do detecting
detector = cv2.SimpleBlobDetector_create(params)
# Get keypoints
keypoints = detector.detect(thresh)
print(len(keypoints))
print('')
# Get keypoint locations and radius
for keypoint in keypoints:
    x = int(keypoint.pt[0])
    y = int(keypoint.pt[1])
    s = keypoint.size
    r = int(math.floor(s/2))
    print(x, y, r)
    #cv2.circle(img, (x, y), r, (0, 0, 255), 2)
# Draw blobs
blobs = cv2.drawKeypoints(thresh, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imshow("Keypoints", blobs)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Save result
cv2.imwrite("particle_blobs.jpg", blobs)

How do I ignore specific shapes in image processing, OpenCV, Python

I am working on cell segmentation and tracking. I have a set of microscopy images. There are some circular noise artifacts caused by lamella, and when I run my algorithm they can cause parts of cells to be lost. I want to tell my program: "hey, look, those circular things are just noise; deny them and work on the real cell membrane." The other issue is micro-noise: there are some points with high or low contrast, and I want to tell my program: "deny a point if its 10x10 pixel neighbourhood has the same contrast as the background."
Work platform: Python 3.7.2, OpenCV 3.4.5
I hope I have clearly described my problem. I am sharing one of those images.
The 4 circles on the left are point noise.
The 2 circles on the right are lamella noise.
import numpy
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('test001.tif')
gg = img.copy()
img_gray = cv.cvtColor(gg, cv.COLOR_BGR2GRAY)
clache = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
img_gray = clache.apply(img_gray)
_, img_bin = cv.threshold(img_gray, 50, 255,
cv.THRESH_OTSU)
img_bin = cv.morphologyEx(img_bin, cv.MORPH_OPEN,
numpy.ones((10, 9), dtype=int))
img_bin = cv.morphologyEx(img_bin, cv.MORPH_DILATE,
numpy.ones((5, 5), dtype=int), iterations= 1)
def segment(im1, img):
    # morphological transformations
    border = cv.dilate(img, None, iterations=10)
    border = border - cv.erode(border, None, iterations=1)
    # invert the image so black becomes white, and vice versa
    img = -img
    # apply distance transform and show visualization
    dt = cv.distanceTransform(img, 2, 3)
    dt = ((dt - dt.min()) / (dt.max() - dt.min()) * 255).astype(numpy.uint8)
    # reapply contrast to strengthen boundaries
    clache = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    dt = clache.apply(dt)
    # rethreshold the image
    _, dt = cv.threshold(dt, 127, 255, cv.THRESH_BINARY)
    ret, markers = cv.connectedComponents(dt)
    markers = markers + 1
    # complete the markers
    markers[border == 255] = 255
    markers = markers.astype(numpy.int32)
    # apply watershed
    cv.watershed(im1, markers)
    markers[markers == -1] = 0
    markers = markers.astype(numpy.uint8)
    # return the image as one list, and the labels as another
    return dt, markers
dt, result = segment(img, img_bin)
cv.imshow('img',img)
cv.imshow('dt',dt)
cv.imshow('img_bin',img_bin)
cv.imshow('res',result)
cv.waitKey(0)
The one below is serving as a guinea pig:
import numpy
import cv2 as cv
from matplotlib import pyplot as plt
img = cv.imread('test001.tif')
gg = img.copy()
img_gray = cv.cvtColor(gg, cv.COLOR_BGR2GRAY)
clache = cv.createCLAHE(clipLimit=2.0, tileGridSize=(20,20))
img_gray = clache.apply(img_gray)
cv.imshow('1img',img)
cv.imshow('2gray',img_gray)
#Threshold
_, img_bin = cv.threshold(img_gray, 127, 255, cv.THRESH_BINARY+cv.THRESH_OTSU)
cv.imshow('3threshold',img_bin)
#MorpClose
img_bin = cv.morphologyEx(img_bin, cv.MORPH_CLOSE,numpy.ones((5,5), dtype=int))
cv.imshow('4morp_close',img_bin)
#MorpErosion
img_bin = cv.erode(img_bin,numpy.ones((3,3),dtype=int),iterations = 1)
cv.imshow('5erosion',img_bin)
#MorpOpen
img_bin = cv.morphologyEx(img_bin, cv.MORPH_OPEN, numpy.ones((2, 2), dtype=int))
#cv.imshow('6morp_open',img_bin)
#MorpDilate
img_bin = cv.morphologyEx(img_bin, cv.MORPH_DILATE,numpy.ones((1, 1), dtype=int), iterations= 1)
#cv.imshow('7morp_dilate',img_bin)
#MorpBlackHat
img_bin = cv.morphologyEx(img_bin, cv.MORPH_BLACKHAT,numpy.ones((4,4),dtype=int))
#cv.imshow('8morpTophat',img_bin)
For those small dots you can try eroding and dilating:
You need to convert the image to grayscale and then process it. I'd create a mask from the eroded and dilated parts to remove those dots, and then use that mask on the original image to delete the dots without compromising the resolution of your initial image.
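A minimal sketch of that mask idea, assuming the question's test001.tif; the Otsu threshold and the kernel size are placeholders to be matched to the dot size:
import cv2
import numpy as np
img = cv2.imread('test001.tif')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, bw = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# opening (erosion followed by dilation) removes structures smaller than the kernel
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
mask = cv2.morphologyEx(bw, cv2.MORPH_OPEN, kernel)
# keep only the pixels that survived the opening; the small dots go to black
cleaned = cv2.bitwise_and(gray, gray, mask=mask)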
For the big blurry blobs, maybe add some noise to the image and compare with the original?
If most of those blobs are circular, you can try cv2.HoughCircles, described here; it does something like this:
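A rough sketch of how those detections could be turned into an ignore mask; every parameter value below is a guess to be tuned on your images:
import cv2
import numpy as np
img = cv2.imread('test001.tif')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 5)
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp=1, minDist=50,
                           param1=100, param2=30, minRadius=20, maxRadius=80)
ignore = np.zeros(gray.shape, dtype=np.uint8)
if circles is not None:
    for cx, cy, r in np.round(circles[0]).astype(int):
        cv2.circle(ignore, (int(cx), int(cy)), int(r), 255, -1)  # mark the noise region
# anything inside 'ignore' can then be excluded from the segmentation
gray[ignore == 255] = 0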
Of course you can tune those parameters to fit what you want and just ignore those parts of the image. Try that, and also the noise idea; that might help reduce the false positives.
Best of luck!

OpenCV GrabCut Remove Background

I have been able to remove about 75% of the background from my original image, but I am struggling to fine tune my python code to remove the last bit.
Original Image
Output Image
As you can see there is one section of the background on the lower half of the image that isn't being removed along with the rest.
import os, time
import numpy as np
import cv2
import matplotlib.pyplot as plt
org_file_name = 'IMG_3237_reduced.jpg'
#Read Image File
img = cv2.imread(org_file_name)
mask = np.zeros(img.shape[:2],np.uint8)
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
rect = (1,1,1008,756)
rect2 = (11,222,975, 517)
# Perform the GrabCut on the Image File
t1 = time.perf_counter()
cv2.grabCut(img,mask,rect2,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_RECT)
t2 = time.perf_counter()
print(t2-t1)
mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
gc_img = img*mask2[:,:,np.newaxis]
# convert to grayscale
gc_img_gray = cv2.cvtColor(gc_img, cv2.COLOR_BGR2GRAY)
_,alpha = cv2.threshold(gc_img_gray,0,255,cv2.THRESH_BINARY)
b, g, r = cv2.split(gc_img)
rgba = [b,g,r, alpha]
gc_split_img = cv2.merge(rgba,4)
# display results
#ax1 = plt.subplot(131); plt.imshow(img)
#ax1.set_title('Original')
#ax2 = plt.subplot(132); plt.imshow(gc_img)
#ax2.set_title('GrabCut')
ax3 = plt.subplot(111); plt.imshow(gc_split_img)
ax3.set_title('GrabCut Split')
plt.show()
I've attached my working code above. I appreciate any help someone can offer. My plan is that once the background is removed, I can do some analysis/statistical modeling on the region of interest for further comparison.
If this is a one time process, I don't think you need to use grabcut. I'd suggest something like this, where you use a combination of spatial and simple code value thresholding:
import cv2
import numpy
# Read Image File
img = cv2.imread('NY3Ne.jpg')
# convert BGR to grayscale image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Otsu's method returns the threshold value and the binarized image
otsu_val, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
rect_x = [15, 990]
rect_y = [220, 530]
y, x = numpy.indices(img.shape[:2])
# zero out the bright pixels selected by Otsu's method (the background)
img[binary == 255] = 0
# set everything outside the rectangle to 0
img[(x < rect_x[0])] = 0
img[(x > rect_x[1])] = 0
img[(y < rect_y[0])] = 0
img[(y > rect_y[1])] = 0
cv2.imshow('masked', img)
cv2.waitKey(0)
But if grabcut is necessary for some other reason, I could combine it with simple thresholding to get the desired result. 150 is somewhat arbitrary based on your image, but you could substitute Otsu's method or any other adaptive binary threshold calculation:
alpha = np.where(gc_img_gray < 150, 255, 0).astype(np.uint8)
gc_img[alpha==0] = 0
More info: https://docs.opencv.org/3.4.0/d7/d4d/tutorial_py_thresholding.html
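If you would rather not hardcode the 150, a small sketch of the Otsu substitution mentioned above, reusing gc_img_gray and gc_img from the question's code:
import cv2
import numpy as np
# let Otsu's method pick the cutoff instead of the hardcoded 150
otsu_val, _ = cv2.threshold(gc_img_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
alpha = np.where(gc_img_gray < otsu_val, 255, 0).astype(np.uint8)
gc_img[alpha == 0] = 0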

How to count objects in image using python?

I am trying to count the number of drops in this image and the coverage percentage of the area covered by those drops.
I tried to convert this image to black and white, but the center color of those drops seems too similar to the background, so I only got something like the second picture.
Is there any way to solve this problem or any better ideas?
Thanks a lot.
You can fill the holes of your binary image using scipy.ndimage.binary_fill_holes. I also recommend using an automatic thresholding method such as Otsu's (avaible in scikit-image).
from skimage import io, filters
from scipy import ndimage
import matplotlib.pyplot as plt
im = io.imread('ba3g0.jpg', as_gray=True)  # older scikit-image spells this as_grey
val = filters.threshold_otsu(im)
drops = ndimage.binary_fill_holes(im < val)
plt.imshow(drops, cmap='gray')
plt.show()
For the number of drops you can use another function of scikit-image
from skimage import measure
labels = measure.label(drops)
print(labels.max())
And for the coverage
print('coverage is %f' %(drops.mean()))
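If you also want per-drop pixel counts rather than just the total coverage, measure.regionprops on the same label image gives them; a short sketch continuing from labels above:
from skimage import measure
props = measure.regionprops(labels)  # one entry per labelled drop
sizes = [p.area for p in props]
print('drops:', len(sizes), 'mean size (px):', sum(sizes) / len(sizes))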
I used the following code to detect the number of contours in the image using OpenCV and python.
import cv2
import numpy as np
img = cv2.imread('ba3g0.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray,127,255,1)
contours,h = cv2.findContours(thresh,1,2)
for cnt in contours:
    cv2.drawContours(img,[cnt],0,(0,0,255),1)
For further removing the contours inside another contour, you need to iterate over the entire list, compare, and remove the internal contours. After that, the length of contours will give you the count.
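One way to avoid that manual pruning is to ask findContours for only the outermost contours in the first place; a sketch of that variant:
import cv2
img = cv2.imread('ba3g0.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)
# RETR_EXTERNAL returns only contours with no parent, so inner contours never appear
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
print('outer contours:', len(contours))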
The idea is to separate the background from the insides of the drops, which look like the background.
Therefore I found the connected components of the background and the drop interiors, took the largest connected component, and changed its value to the foreground value, which left me with an image in which the drop interiors have a different value than the background.
Then I used this image to fill in the original thresholded image.
Finally, using the filled image, I calculated the relevant values.
import cv2
import numpy as np
from matplotlib import pyplot as plt
# Read image
I = cv2.imread('drops.jpg',0)
# Threshold
IThresh = (I>=118).astype(np.uint8)*255
# Remove the biggest connected component from the image
# Find the area of each connected component
connectedComponentProps = cv2.connectedComponentsWithStats(IThresh, 8, cv2.CV_32S)
IThreshOnlyInsideDrops = connectedComponentProps[1].copy()  # the label image
stat = connectedComponentProps[2]
maxArea = 0
for label in range(connectedComponentProps[0]):
    cc = stat[label,:]
    if cc[cv2.CC_STAT_AREA] > maxArea:
        maxArea = cc[cv2.CC_STAT_AREA]
        maxIndex = label
# Convert the background value to the foreground value
for label in range(connectedComponentProps[0]):
    cc = stat[label,:]
    if cc[cv2.CC_STAT_AREA] == maxArea:
        IThreshOnlyInsideDrops[IThreshOnlyInsideDrops == label] = 0
    else:
        IThreshOnlyInsideDrops[IThreshOnlyInsideDrops == label] = 255
# Fill in all the IThreshOnlyInsideDrops as 0 in original IThresh
IThreshFill = IThresh
IThreshFill[IThreshOnlyInsideDrops==255] = 0
IThreshFill = np.logical_not(IThreshFill/255).astype(np.uint8)*255
plt.imshow(IThreshFill)
# Get number of drops and coverage percentage
connectedComponentPropsFinal = cv2.connectedComponentsWithStats(IThreshFill, 8, cv2.CV_32S)
NumberOfDrops = connectedComponentPropsFinal[0]
CoverPresntage = float(np.count_nonzero(IThreshFill==0)/float(IThreshFill.size))
# Print
print "Number of drops = " + str(NumberOfDrops)
print "Cover precntage = " + str(CoverPresntage)
Solution
import cv2
import matplotlib.pyplot as plt

image = cv2.imread('image path.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# (thresh, blackAndWhiteImage) = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
plt.imshow(gray, cmap='gray')
blur = cv2.GaussianBlur(gray, (11, 11), 0)
plt.imshow(blur, cmap='gray')
canny = cv2.Canny(blur, 30, 40, 3)
plt.imshow(canny, cmap='gray')
dilated = cv2.dilate(canny, (1, 1), iterations=0)  # iterations=0 leaves the edges unchanged; increase to thicken them
plt.imshow(dilated, cmap='gray')
(cnt, hierarchy) = cv2.findContours(
    dilated.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
cv2.drawContours(rgb, cnt, -1, (0, 255, 0), 2)
plt.imshow(rgb)
print("No of circles: ", len(cnt))

Opencv unable to get ROI of highest intensity part

I have tried this code.
import sys
import numpy as np
sys.path.append('/usr/local/lib/python2.7/site-packages')
import cv2
from cv2.cv import *
img=cv2.imread("test2.jpg",cv2.IMREAD_COLOR)
gimg = cv2.imread("test2.jpg",cv2.IMREAD_GRAYSCALE)
b,g,r = cv2.split(img)
ret,thresh1 = cv2.threshold(gimg,127,255,cv2.THRESH_BINARY);
numrows = len(thresh1)
numcols = len(thresh1[0])
thresh = 170
im_bw = cv2.threshold(gimg, thresh, 255, cv2.THRESH_BINARY)[1]
trig=0
trigmax=0;
xmax=0
ymax=0
for x in range(0,numrows):
    for y in range(0,numcols):
        if im_bw[x][y] == 1:
            trig = trig + 1
        if trig > 5:
            xmax = x
            ymax = y
            break
print x, y, numrows, numcols, trig
roi=gimg[xmax:xmax+200,ymax-500:ymax]
cv2.imshow("roi",roi)
WaitKey(0)
Here test2.jpg is my input. What I am trying to do is concentrate on the high intensity part of the image (i.e. the circle with high intensity in the image), but my code does not seem to do so.
Can anyone help?
I found the answer to my question here.
Here is my code:
# import the necessary packages
import numpy as np
import cv2
# load the image and convert it to grayscale
image = cv2.imread('test2.jpg')
orig = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# apply a Gaussian blur to the image then find the brightest
# region
gray = cv2.GaussianBlur(gray, (41, 41), 0)
(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(gray)
image = orig.copy()
roi = image[maxLoc[1]-250:maxLoc[1]+250, maxLoc[0]-250:maxLoc[0]+250]
cv2.imshow("Robust", roi)
cv2.waitKey(0)
test2.jpg
ROI
Try checking whether a pixel is not zero - it turns out those pixels have a value of 255 after thresholding, as it is a grayscale image after all.
The threshold seems to be wrong also, but I don't really know what you want to see (display it with imshow; it isn't just the circle). Also, your code matches the number '3' in the bottom left corner, so the ROI matrix indices are invalid in your example.
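As an aside, the nested loops in the question can be replaced by a vectorized lookup; a sketch assuming the question's thresholded image im_bw (values 0 or 255):
import numpy as np
ys, xs = np.nonzero(im_bw)  # row and column indices of all non-zero pixels
if len(xs) > 0:
    print('first bright pixel (row, col):', ys[0], xs[0])
    print('bright pixel count:', len(xs))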
EDIT:
After playing around with the image, I ended up using a different approach. I used the SimpleBlobDetector and did an erosion on the image before, so the region you're interested in remains connected. For the blob detector the program inverts the image first. (You may want to read a SimpleBlobDetector tutorial as I did, parts of the code are based on that page - many thanks to the author!)
The following code displays the procedure step by step:
import cv2
import numpy as np
# Read image
gimg = cv2.imread("test2.jpg", cv2.IMREAD_GRAYSCALE)
# Invert the image
im_inv = 255 - gimg
cv2.imshow("Step 1 - inverted image", im_inv)
cv2.waitKey(0)
# display at a threshold level of 50
thresh = 45
im_bw = cv2.threshold(im_inv, thresh, 255, cv2.THRESH_BINARY)[1]
cv2.imshow("Step 2 - bw threshold", im_bw)
cv2.waitKey(0)
# erosion
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(10,10))
im_bw = cv2.erode(im_bw, kernel, iterations = 1)
cv2.imshow('Step 3 - erosion connects disconnected parts', im_bw)
cv2.waitKey(0)
# Set up the detector with default parameters.
params = cv2.SimpleBlobDetector_Params()
params.filterByInertia = False
params.filterByConvexity = False
params.filterByCircularity = False
params.filterByColor = False
params.minThreshold = 0
params.maxThreshold = 50
params.filterByArea = True
params.minArea = 1000 # you may check with 10 --> finds number '3' also
params.maxArea = 100000 #im_bw.shape[0] * im_bw.shape[1] # max limit: image size
# Create a detector with the parameters
ver = (cv2.__version__).split('.')
if int(ver[0]) < 3:
    detector = cv2.SimpleBlobDetector(params)
else:
    detector = cv2.SimpleBlobDetector_create(params)
# Detect blobs.
keypoints = detector.detect(im_bw)
print "Found", len(keypoints), "blobs:"
for kpt in keypoints:
print "(%.1f, %.1f) diameter: %.1f" % (kpt.pt[0], kpt.pt[1], kpt.size)
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the
# circle corresponds to the size of blob
im_with_keypoints = cv2.drawKeypoints(gimg, keypoints, np.array([]), (0,0,255),
cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Show keypoints
cv2.imshow("Keypoints", im_with_keypoints)
cv2.waitKey(0)
This algorithm finds the coordinate (454, 377) as the center of the blob, but if you reduce minArea to e.g. 10, it will find the number '3' in the bottom corner as well.
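To close the loop on the original goal (an ROI around the bright region), the detected keypoint itself can drive the crop; a sketch continuing from keypoints and gimg above:
if keypoints:
    kpt = max(keypoints, key=lambda k: k.size)  # kpt.size is the blob diameter
    cx, cy = int(kpt.pt[0]), int(kpt.pt[1])
    half = int(kpt.size)  # generous margin around the blob centre
    roi = gimg[max(cy - half, 0):cy + half, max(cx - half, 0):cx + half]
    cv2.imshow("ROI", roi)
    cv2.waitKey(0)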
