Particle detection with Python OpenCV

I'm looking for a proper solution for counting particles and measuring their sizes in this image:
In the end I have to obtain lists of the particles' coordinates and their areas. After some searching on the internet, I realized there are three approaches to particle detection:
blobs
contours
connectedComponentsWithStats
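For reference, the blob route goes through OpenCV's SimpleBlobDetector; a minimal sketch looks like this (the parameter values are purely illustrative, not tuned for this image):
import cv2

img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
params = cv2.SimpleBlobDetector_Params()
params.filterByColor = True
params.blobColor = 255  # bright particles on a dark background
params.filterByArea = True
params.minArea = 4      # ignore specks smaller than 4 px
detector = cv2.SimpleBlobDetector_create(params)
keypoints = detector.detect(img)
positions = [kp.pt for kp in keypoints]    # (x, y) centers
diameters = [kp.size for kp in keypoints]  # approximate blob diameters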
Looking at different projects, I assembled some code that mixes these approaches.
import pylab
import cv2
import numpy as np
Blurring and thresholding
original_image = cv2.imread(img_path)
img = original_image
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.GaussianBlur(img, (5, 5), 0)
img = cv2.blur(img, (5, 5))
img = cv2.medianBlur(img, 5)
img = cv2.bilateralFilter(img, 6, 50, 50)
max_value = 255
adaptive_method = cv2.ADAPTIVE_THRESH_GAUSSIAN_C
threshold_type = cv2.THRESH_BINARY
block_size = 11
img_thresholded = cv2.adaptiveThreshold(img, max_value, adaptive_method, threshold_type, block_size, -3)
Filter small objects
min_size = 4
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(img_thresholded, connectivity=8)
sizes = stats[1:, -1]  # CC_STAT_AREA column; row 0 is the background label
nb_components = nb_components - 1
# for every component in the image, you keep it only if it's above min_size
for i in range(0, nb_components):
    if sizes[i] < min_size:
        img_thresholded[output == i + 1] = 0
Generation of contours for filling holes and for the measurements; pos_list and size_list are what we are looking for
contours, hierarchy = cv2.findContours(img_thresholded, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
pos_list = []
size_list = []
for i in range(len(contours)):
    area = cv2.contourArea(contours[i])
    size_list.append(area)
    (x, y), radius = cv2.minEnclosingCircle(contours[i])
    pos_list.append((int(x), int(y)))
As a self-check, we can plot these coordinates over the original image:
pts = np.array(pos_list)
pylab.figure(0)
pylab.imshow(original_image)
pylab.scatter(pts[:, 0], pts[:, 1], marker="x", color="green", s=5, linewidths=1)
pylab.show()
We might get something like the following:
And... I'm not really satisfied with the results. Some clearly visible particles are missed, while on the other hand some dubious intensity fluctuations get counted. I'm now playing with different filter settings, but it feels like the wrong approach.
If someone knows how to improve my solution, please share.

Since the particles are white and the background is black, we can use Kmeans color quantization to segment the image into two groups with clusters=2. This allows us to easily distinguish between particles and background. Since the particles may be very tiny, we should avoid blurring, dilating, or any morphological operations that may alter the particle contours. Here's an approach:
Kmeans color quantization. We perform Kmeans with two clusters, convert the result to grayscale, then apply Otsu's threshold to obtain a binary image.
Filter out super tiny noise. Next we find contours, remove tiny specks of noise using contour area filtering, and collect each particle's (x, y) coordinate and its area. We remove tiny particles from the binary mask by "filling in" these contours, which effectively erases them.
Apply mask onto original image. Finally we bitwise-and the filtered mask with the original image to highlight the particle clusters.
Kmeans with clusters=2
Result
Number of particles: 204
Average particle size: 30.537
Code
import cv2
import numpy as np
# Kmeans
def kmeans_color_quantization(image, clusters=8, rounds=1):
    h, w = image.shape[:2]
    samples = np.zeros([h*w, 3], dtype=np.float32)
    count = 0
    for x in range(h):
        for y in range(w):
            samples[count] = image[x][y]
            count += 1
    compactness, labels, centers = cv2.kmeans(samples,
                                              clusters,
                                              None,
                                              (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10000, 0.0001),
                                              rounds,
                                              cv2.KMEANS_RANDOM_CENTERS)
    centers = np.uint8(centers)
    res = centers[labels.flatten()]
    return res.reshape((image.shape))
# Load image
image = cv2.imread('1.png')
original = image.copy()
# Perform kmeans color segmentation, grayscale, Otsu's threshold
kmeans = kmeans_color_quantization(image, clusters=2)
gray = cv2.cvtColor(kmeans, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# Find contours, remove tiny specs using contour area filtering, gather points
points_list = []
size_list = []
cnts, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
AREA_THRESHOLD = 2
for c in cnts:
    area = cv2.contourArea(c)
    if area < AREA_THRESHOLD:
        cv2.drawContours(thresh, [c], -1, 0, -1)
    else:
        (x, y), radius = cv2.minEnclosingCircle(c)
        points_list.append((int(x), int(y)))
        size_list.append(area)
# Apply mask onto original image
result = cv2.bitwise_and(original, original, mask=thresh)
result[thresh==255] = (36,255,12)
# Overlay on original
original[thresh==255] = (36,255,12)
print("Number of particles: {}".format(len(points_list)))
print("Average particle size: {:.3f}".format(sum(size_list)/len(size_list)))
# Display
cv2.imshow('kmeans', kmeans)
cv2.imshow('original', original)
cv2.imshow('thresh', thresh)
cv2.imshow('result', result)
cv2.waitKey()
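Side note on the implementation: the per-pixel copy loop inside kmeans_color_quantization can be replaced by a single NumPy reshape, which performs the same row-major sampling much faster (a hypothetical drop-in for the loop, not part of the original code):
samples = image.reshape((-1, 3)).astype(np.float32)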

Related

Attempted to measure the length of worms but found substantial variation. What could be the problem?

I implemented contours to calculate the length of the worms, but found a major difference in the lengths on this image:
I could not find the accurate length. What could be the mistake here?
The contour drawing is almost accurate.
I need to know an accurate way to measure the length.
Is there any other approach to calculate the length of a curved structure?
# Imports required by this snippet
import cv2
import numpy as np
from functools import reduce
from skimage.morphology import thin

# Read Image
image = cv2.imread("M2 Output.jpg")
# Convert to RGB
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Create a binary threshold image
_, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)
# Find the contours from the threshold image
contours, hierarchy = cv2.findContours(binary, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)[-2:]
# Draw each contour on the mask
Arc = []
Perimeter = []
RLength = []
for i in range(len(contours)):
    # Create a mask image that contains the contour
    cimg = np.zeros_like(binary)
    canvas = np.zeros_like(binary)
    # Draw contours, set all values to 255
    cv2.drawContours(cimg, contours, i, color=255, thickness=-1)
    # Apply thinning to each contour image
    thinimage = thin(cimg)
    # Access the non-zero image pixels and create a numpy array
    x, y = np.nonzero(thinimage)
    length = reduce(lambda p1, p2: np.linalg.norm(p1 - p2), x, y)
    arclen = 0
    Points = []
    perimeter = 0
    # Calculate arc length
    for i, j in zip(y, x):
        Length = len(x)
        arclen = np.sqrt((x[1] - x[0])**2 + (y[1] - y[0])**2)
        for k in range(1, Length):
            arclen = arclen + np.sqrt((x[k] - x[k-1])**2 + (y[k] - y[k-1])**2)
        Points.append([i, j])
        curve = cv2.circle(canvas, (i, j), radius=0, color=(255, 0, 255),
                           thickness=-1)
    print('arclen', arclen)
    ctr = np.array(Points).reshape((1, -1, 2)).astype(np.int32)
    perimeter = cv2.arcLength(ctr, False)
    print('Perimeter:', perimeter)
    print('R-Length:', length)
    Arc.append(arclen)
    Perimeter.append(perimeter)
    RLength.append(length)
print('Arc-Length:', Arc)
print('Perimeter:', Perimeter)
print('R-Length', RLength)
for c in contours:
    print('CPerimeter:', cv2.arcLength(c, False))
Output
Arc-Length: [25178.444010880532, 771.7484665460217, 397.2253967444164]
Perimeter: [25177.444003224373, 770.7484695911407, 396.2253956794739]
CPerimeter: 703.043718457222, 738.1147863864899, 806.4213538169861

Difficulty in detecting the outer circle with cv2.HoughCircles

I am trying to detect the outer boundary of the circular object in the images below:
I tried OpenCV's Hough Circle transform, but the code is not working for every image, even after adjusting parameters such as minRadius and maxRadius.
The aim is to detect the object from the image and crop it.
Expected output:
Source code:
import imutils
import cv2
import numpy as np
from matplotlib import pyplot as plt
image = cv2.imread("path to the image i have provided")
r = 600.0 / image.shape[1]
dim = (600, int(image.shape[0] * r))
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
cv2.imwrite("path to were we want to save downscaled image", resized)
image = cv2.imread('path of downscaled image')
image1 = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image2 = cv2.GaussianBlur(image1, (5, 5), 0)
edged = cv2.Canny(image2, 30, 150)
img = cv2.medianBlur(image2,5)
cimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
circles = cv2.HoughCircles(edged, cv2.HOUGH_GRADIENT, 1, 20,
                           param1=50, param2=30, minRadius=200, maxRadius=280)
circles = np.uint16(np.around(circles))
max_circle = max(circles[0,:], key=lambda x:x[2])
# print(max_circle)
# # Create mask
height,width = image1.shape
mask = np.zeros((height,width), np.uint8)
for i in [max_circle]:
    cv2.circle(mask, (i[0], i[1]), i[2], (255, 255, 255), thickness=-1)
masked_data = cv2.bitwise_and(image, image, mask=mask)
_,thresh = cv2.threshold(mask,1,255,cv2.THRESH_BINARY)
# Find Contour
contours = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[0]
x,y,w,h = cv2.boundingRect(contours[0])
# Crop masked_data
crop = masked_data[y:y+h,x:x+w]
#Code to close Window
cv2.imshow('OG',image)
cv2.imshow('Cropped ROI',crop)
cv2.imwrite("path to save roi image", crop)
cv2.waitKey(0)
cv2.destroyAllWindows()
Second answer: an approach based on color segmentation.
While I was editing the question to improve its readability, and was inserting and resizing all the images from the link you shared to make it easier for everyone to visualize what you are trying to do, it occurred to me that this problem might be a better candidate for an approach based on segmentation by color:
This simpler (but clever) approach assumes that the reel appears in pretty much the same location and has more or less the same dimensions every time:
To discover the approximate color of the reel in the image, define a list of Regions of Interest (ROIs) to sample pixels from and determine the min and max colors of those areas in the HSV color space. The location and size of each ROI are derived from the size of the image. In the images below, you can see the ROIs drawn as blue-ish rectangles:
Once the min and max HSV colors have been found, a threshold operation with cv2.inRange() can be executed to segment the reel:
Then, iterate through all the contours in the binary image and assume that the largest one represents the reel. Use this contour and draw it on a separate mask to be able to extract the pixels from the original image:
At this stage, it is also possible to compute a bounding box for the contour and extract its precise location so that a crop operation can be performed later to completely isolate the reel in the image:
This approach works for EVERY image shared in the question.
Source code:
import cv2
import numpy as np
import sys
# initialize global H, S, V values
min_global_h = 179
min_global_s = 255
min_global_v = 255
max_global_h = 0
max_global_s = 0
max_global_v = 0
# load input image from the cmd-line
filename = sys.argv[1]
img = cv2.imread(sys.argv[1])
if (img is None):
    print('!!! Failed imread')
    sys.exit(-1)
# create an auxiliary image for debugging purposes
dbg_img = img.copy()
# initialize a list of Regions of Interest that need to be scanned to identify good HSV values to threshold by color
w = img.shape[1]
h = img.shape[0]
roi_w = int(w * 0.10)
roi_h = int(h * 0.10)
roi_list = []
roi_list.append( (int(w*0.25), int(h*0.15), roi_w, roi_h) )
roi_list.append( (int(w*0.25), int(h*0.60), roi_w, roi_h) )
# convert image to HSV color space
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# iterate through the ROIs to determine the min/max HSV color of the reel
for rect in roi_list:
    x, y, w, h = rect
    x2 = x + w
    y2 = y + h
    print('ROI rect=', rect)
    cropped_hsv_img = hsv_img[y:y+h, x:x+w]
    h, s, v = cv2.split(cropped_hsv_img)
    min_h = np.min(h)
    min_s = np.min(s)
    min_v = np.min(v)
    if (min_h < min_global_h):
        min_global_h = min_h
    if (min_s < min_global_s):
        min_global_s = min_s
    if (min_v < min_global_v):
        min_global_v = min_v
    max_h = np.max(h)
    max_s = np.max(s)
    max_v = np.max(v)
    if (max_h > max_global_h):
        max_global_h = max_h
    if (max_s > max_global_s):
        max_global_s = max_s
    if (max_v > max_global_v):
        max_global_v = max_v
    # debug: draw ROI in original image
    cv2.rectangle(dbg_img, (x, y), (x2, y2), (255, 165, 0), 4)  # blue-ish
cv2.imshow('ROIs', cv2.resize(dbg_img, dsize=(0, 0), fx=0.5, fy=0.5))
#cv2.waitKey(0)
cv2.imwrite(filename[:-4] + '_rois.png', dbg_img)
# define min/max color for threshold
low_hsv = np.array([min_global_h, min_global_s, min_global_v])
max_hsv = np.array([max_global_h, max_global_s, max_global_v])
#print('low_hsv=', low_hsv)
#print('max_hsv=', max_hsv)
# threshold image by color
img_bin = cv2.inRange(hsv_img, low_hsv, max_hsv)
cv2.imshow('binary', cv2.resize(img_bin, dsize=(0, 0), fx=0.5, fy=0.5))
cv2.imwrite(filename[:-4] + '_binary.png', img_bin)
#cv2.imshow('img_bin', cv2.resize(img_bin, dsize=(0, 0), fx=0.5, fy=0.5))
#cv2.waitKey(0)
# create a mask to store the contour of the reel (hopefully)
mask = np.zeros((img_bin.shape[0], img_bin.shape[1]), np.uint8)
crop_x, crop_y, crop_w, crop_h = (0, 0, 0, 0)
# iterate through all the contours in the binary image:
# assume that the first contour with an area larger than 100k belongs to the reel
contours, hierarchy = cv2.findContours(img_bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for contourIdx, cnt in enumerate(contours):
    area = cv2.contourArea(contours[contourIdx])
    print('contourIdx=', contourIdx, 'area=', area)
    # draw potential reel blob on the mask (in white)
    if (area > 100000):
        crop_x, crop_y, crop_w, crop_h = cv2.boundingRect(cnt)
        centers, radius = cv2.minEnclosingCircle(cnt)
        cv2.circle(mask, (int(centers[0]), int(centers[1])), int(radius), (255), -1)  # fill with white
        break
cv2.imshow('mask', cv2.resize(mask, dsize=(0, 0), fx=0.5, fy=0.5))
cv2.imwrite(filename[:-4] + '_mask.png', mask)
# copy just the reel area into its own image
reel_img = cv2.bitwise_and(img, img, mask=mask)
cv2.imshow('reel_img', cv2.resize(reel_img, dsize=(0, 0), fx=0.5, fy=0.5))
cv2.imwrite(filename[:-4] + '_reel.png', reel_img)
# crop the reel to a smaller image
if (crop_w != 0 and crop_h != 0):
    cropped_reel_img = reel_img[crop_y:crop_y+crop_h, crop_x:crop_x+crop_w]
    cv2.imshow('cropped_reel_img', cv2.resize(cropped_reel_img, dsize=(0, 0), fx=0.5, fy=0.5))
    output_filename = filename[:-4] + '_crop.png'
    cv2.imwrite(output_filename, cropped_reel_img)
cv2.waitKey(0)
First answer: an approach based on pre-processing the image and executing an adaptiveThreshold operation.
There might be other ways of solving this problem that are not based on Hough Circles. Here is the result of an approach that is not:
Preprocess the image! Decreasing the size of the image and executing a blur helps with segmentation:
The segmentation method uses cv2.adaptiveThreshold() to create a binary image that preserves the two most important objects: the center of the reel and the external edge of the reel. This is an important step since we are only interested in what exists between these two objects. However, life is not perfect and neither is this segmentation: the shadow of the reel on the table became part of the detected binary objects, and the outer edge is not fully connected, as you can see in the resulting image on the right (look at the top left of the circumference):
To join broken segments, a morphological operation can be executed:
Finally, the entire reel area can be exposed by iterating through the contours of the image above and discarding those whose area is larger than what is expected for a reel. The resulting binary image (on the left) can then be used as a mask to identify the reel location on the original image:
Keep in mind that I'm not trying to find a universal solution for your problem. I'm merely showing that there might be other solutions that don't depend on Hough Circles.
Also, this code might need some adjustments to work on a larger number of cases.
Source code:
import cv2
import numpy as np
import sys
img = cv2.imread("test_images/reel.jpg")
if (img is None):
    print('!!! Failed imread')
    sys.exit(-1)
# create output image
output_img = img.copy()
# 1. Preprocess the image: downscale to speed up processing and execute a blur
SCALE_FACTOR = 0.5
smaller_img = cv2.resize(img, dsize=(0, 0), fx=SCALE_FACTOR, fy=SCALE_FACTOR)
blur_img = cv2.medianBlur(smaller_img, 9)
cv2.imwrite('reel1_blur_img.png', blur_img)
# 2. Segment the image to identify the 2 most important contours: the center of the reel and the outer edge
gray_img = cv2.cvtColor(blur_img, cv2.COLOR_BGR2GRAY)
img_bin = cv2.adaptiveThreshold(gray_img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 19, 4)
cv2.imwrite('reel2_img_bin.png', img_bin)
green_mask = np.zeros((img_bin.shape[0], img_bin.shape[1]), np.uint8)
#green_mask = cv2.cvtColor(img_bin, cv2.COLOR_GRAY2RGB) # debug
contours, hierarchy = cv2.findContours(img_bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for contourIdx, cnt in enumerate(contours):
    x, y, w, h = cv2.boundingRect(cnt)
    area = cv2.contourArea(contours[contourIdx])
    #print('contourIdx=', contourIdx, 'w=', w, 'h=', h, 'area=', area)
    # filter out tiny segments
    if (area < 5000):
        #cv2.fillPoly(green_mask, pts=[cnt], color=(0, 0, 255)) # red
        continue
    # draw green contour (filled)
    #cv2.fillPoly(green_mask, pts=[cnt], color=(0, 255, 0)) # green
    cv2.fillPoly(green_mask, pts=[cnt], color=(255)) # white
    # debug:
    #cv2.imshow('green_mask', green_mask)
    #cv2.waitKey(0)
cv2.imshow('green_mask', green_mask)
cv2.imwrite('reel2_green_mask.png', green_mask)
# 3. Fix mask: join segments nearby
kernel = np.ones((3,3), np.uint8)
img_dilation = cv2.dilate(green_mask, kernel, iterations=1)
green_mask = cv2.erode(img_dilation, kernel, iterations=1)
cv2.imshow('fixed green_mask', green_mask)
cv2.imwrite('reel3_img.png', green_mask)
# 4. Extract the reel area from the green mask
reel_mask = np.zeros((green_mask.shape[0], green_mask.shape[1]), np.uint8)
#reel_mask = cv2.cvtColor(green_mask, cv2.COLOR_GRAY2RGB) # debug
contours, hierarchy = cv2.findContours(green_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for contourIdx, cnt in enumerate(contours):
    x, y, w, h = cv2.boundingRect(cnt)
    area = cv2.contourArea(contours[contourIdx])
    print('contourIdx=', contourIdx, 'w=', w, 'h=', h, 'area=', area)
    # filter out larger segments
    if (area > 110000):
        #cv2.fillPoly(reel_mask, pts=[cnt], color=(0, 0, 255)) # red
        continue
    # draw green contour (filled)
    #cv2.fillPoly(reel_mask, pts=[cnt], color=(0, 255, 0)) # green
    cv2.fillPoly(reel_mask, pts=[cnt], color=(255)) # white
    # debug:
    #cv2.imshow('reel_mask', reel_mask)
    #cv2.waitKey(0)
cv2.imshow('reel_mask', reel_mask)
cv2.imwrite('reel4_reel_mask.png', reel_mask)
# 5. Draw the reel area on the original image
contours, hierarchy = cv2.findContours(reel_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for contourIdx, cnt in enumerate(contours):
    centers, radius = cv2.minEnclosingCircle(cnt)
    # rescale these values back to the original image size
    centers_orig = (centers[0] // SCALE_FACTOR, centers[1] // SCALE_FACTOR)
    radius_orig = radius // SCALE_FACTOR
    print('centers=', centers_orig, 'radius=', radius_orig)
    cv2.circle(output_img, (int(centers_orig[0]), int(centers_orig[1])), int(radius_orig), (128, 0, 255), 5)  # magenta
cv2.imshow('output_img', output_img)
cv2.imwrite('reel5_output.png', output_img)
# display just the pixels from the original image
larger_reel_mask = cv2.resize(reel_mask, (int(img.shape[1]), int(img.shape[0])))
output_reel_img = cv2.bitwise_and(img, img, mask=larger_reel_mask)
cv2.imshow('output_reel_img', output_reel_img)
cv2.imwrite('reel5_output_reel.png', output_reel_img)
cv2.waitKey(0)
At this point, it's possible to use larger_reel_mask and compute a minimal enclosing circle, then draw it over this mask to make it a little more round, which lets us retrieve the area of the reel more accurately:
I leave the few lines of code that achieve this improvement as an exercise for the reader.
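For reference, one possible version of that exercise might look like this (a sketch, assuming larger_reel_mask from the code above):
cnts, _ = cv2.findContours(larger_reel_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
(cx, cy), radius = cv2.minEnclosingCircle(max(cnts, key=cv2.contourArea))  # circle around the largest blob
cv2.circle(larger_reel_mask, (int(cx), int(cy)), int(radius), 255, -1)     # redraw it filled to round out the mask
print('reel area (px):', cv2.countNonZero(larger_reel_mask))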

Detect multiple circles in an image

I am trying to detect the count of water pipes in this picture. For this, I am trying to use OpenCV and Python-based detection. The results I am getting are a little confusing to me because the spread of circles is way too large and inaccurate.
The code
import numpy as np
import argparse
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True, help = "Path to the image")
args = vars(ap.parse_args())
# load the image, clone it for output, and then convert it to grayscale
image = cv2.imread(args["image"])
output = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#detect circles in the image
#circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.2, param1=40,minRadius=10,maxRadius=35)
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 8.5,70,minRadius=0,maxRadius=70)
#print(len(circles[0][0]))
# ensure at least some circles were found
if circles is not None:
    # convert the (x, y) coordinates and radius of the circles to integers
    circles = np.round(circles[0, :]).astype("int")
    # count = count + 1
    # print(count)
    # loop over the (x, y) coordinates and radius of the circles
    for (x, y, r) in circles:
        # draw the circle in the output image, then draw a rectangle
        # corresponding to the center of the circle
        cv2.circle(output, (x, y), r, (0, 255, 0), 4)
        cv2.rectangle(output, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
# show the output image
# cv2.imshow("output", np.hstack([output]))
cv2.imwrite('output.jpg',np.hstack([output]),[cv2.IMWRITE_JPEG_QUALITY, 70])
cv2.waitKey(0)
After I run this, I do see a lot of circles detected, but the results are completely haywire.
My question is: how do I improve this detection? Which parameters specifically need to be optimized in the HoughCircles method to achieve greater accuracy? Or should I take the approach of annotating hundreds of similar images with bounding boxes and training a full-blown CNN like YOLO to perform the detection?
Taking the approach mentioned in answer number 2 from Measuring the diameter pictures of holes in metal parts, photographed with telecentric, monochrome camera with opencv, I got this output. It looks close to producing a count but misses a lot of the actual pipes during the brightness transformation of the image.
The most important parameters for your HoughCircles call are:
param1: because you are using cv2.HOUGH_GRADIENT, param1 is the higher threshold for the edge detection algorithm and param1 / 2 is the lower threshold.
param2: it represents the accumulator threshold, so the lower the value, the more circles will be returned.
minRadius and maxRadius: the blue circles in the example have a diameter of roughly 20 pixels, so using 70 pixels for maxRadius is the reason why so many circles are being returned by the algorithm.
minDist: the minimum distance between the centers of two circles.
The parameterization defined below:
circles = cv2.HoughCircles(gray,
                           cv2.HOUGH_GRADIENT,
                           minDist=6,
                           dp=1.1,
                           param1=150,
                           param2=15,
                           minRadius=6,
                           maxRadius=10)
returns:
You could do an adaptive threshold as preprocessing. This basically looks for areas that are relatively brighter than the neighboring pixels; your global threshold loses some of the pipes, while this keeps them a little better.
import cv2
import matplotlib.pyplot as plt
import numpy as np
img = cv2.imread('a2MTm.jpg')
blur_hor = cv2.filter2D(img[:, :, 0], cv2.CV_32F, kernel=np.ones((11, 1), np.float32)/11.0, borderType=cv2.BORDER_CONSTANT)
blur_vert = cv2.filter2D(img[:, :, 0], cv2.CV_32F, kernel=np.ones((1, 11), np.float32)/11.0, borderType=cv2.BORDER_CONSTANT)
mask = ((img[:, :, 0] > blur_hor*1.2) | (img[:, :, 0] > blur_vert*1.2)).astype(np.uint8)*255
plt.imshow(mask)
You can then carry on with the same post processing steps.
Here are some example processing steps:
circles = cv2.HoughCircles(mask,
                           cv2.HOUGH_GRADIENT,
                           minDist=8,
                           dp=1,
                           param1=150,
                           param2=12,
                           minRadius=4,
                           maxRadius=10)
output = img.copy()
for (x, y, r) in circles[0, :, :]:
    cv2.circle(output, (int(x), int(y)), int(r), (0, 255, 0), 4)
You can adjust the parameters to get what you would like, read about the parameters here.
Instead of using cv2.HoughCircles, another approach is to use contour filtering. We can threshold the image and then filter using the aspect ratio, contour area, and radius of each blob. Here's the result:
Count: 344
Code
import cv2
image = cv2.imread('1.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,27,3)
cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
count = 0
for c in cnts:
    area = cv2.contourArea(c)
    x, y, w, h = cv2.boundingRect(c)
    ratio = w / h
    ((x, y), r) = cv2.minEnclosingCircle(c)
    if ratio > .85 and ratio < 1.20 and area > 50 and area < 120 and r < 7:
        cv2.circle(image, (int(x), int(y)), int(r), (36, 255, 12), -1)
        count += 1
print('Count: {}'.format(count))
cv2.imshow('thresh', thresh)
cv2.imshow('image', image)
cv2.waitKey()

How to optimize circle detection with Python OpenCV?

I have looked at several pages regarding optimizing circle detection using opencv in python. All seem to be specific to the individual circumstances of a given picture. What are some starting points for each of the parameters for cv2.HoughCircles? Since I am not sure what recommended values are, I have attempted looping over ranges but this is not producing any promising results. Why can't I detect any of the circles in this image?
import cv2
import numpy as np
image = cv2.imread('IMG_stack.png')
output = image.copy()
height, width = image.shape[:2]
maxWidth = int(width/10)
minWidth = int(width/20)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.2, 20,param1=50,param2=50,minRadius=minWidth,maxRadius=maxWidth)
if circles is not None:
    # convert the (x, y) coordinates and radius of the circles to integers
    circlesRound = np.round(circles[0, :]).astype("int")
    # loop over the (x, y) coordinates and radius of the circles
    for (x, y, r) in circlesRound:
        cv2.circle(output, (x, y), r, (0, 255, 0), 4)
    cv2.imwrite(filename='test.circleDraw.png', img=output)
    cv2.imwrite(filename='test.circleDrawGray.png', img=gray)
else:
    print('No circles found')
This should be straightforward circle detection, but none of the circles detected are even close.
The main parameters that you should pay attention to are minDist, minRadius and maxRadius.
Analyzing the radius first: you have an image that is 12 circles wide and 8 circles tall, which gives a diameter of roughly width/12 for each circle, or a radius of (width/12)/2. The constraints you used allowed the algorithm to detect circles way bigger or smaller than necessary, so you should use a parameterization that is a better fit for your image. In this case, I have used the interval [0.9 * radius, 1.1 * radius].
As there is no overlapping, you could say that the distance between two circles is at least the diameter, so minDist could be set to something like 2*minRadius.
This implementation is basically the same as yours, just updating those 3 parameters:
%matplotlib inline
import cv2
import numpy as np
import matplotlib.pyplot as plt
image = cv2.imread('data/balls.jpg')
output = image.copy()
height, width = image.shape[:2]
maxRadius = int(1.1*(width/12)/2)
minRadius = int(0.9*(width/12)/2)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
circles = cv2.HoughCircles(image=gray,
                           method=cv2.HOUGH_GRADIENT,
                           dp=1.2,
                           minDist=2*minRadius,
                           param1=50,
                           param2=50,
                           minRadius=minRadius,
                           maxRadius=maxRadius
                           )
if circles is not None:
    # convert the (x, y) coordinates and radius of the circles to integers
    circlesRound = np.round(circles[0, :]).astype("int")
    # loop over the (x, y) coordinates and radius of the circles
    for (x, y, r) in circlesRound:
        cv2.circle(output, (x, y), r, (0, 255, 0), 4)
    plt.imshow(output)
else:
    print('No circles found')
The result is:
Circle detection can normally be done with traditional image processing methods such as thresholding + contour detection, Hough circles, or contour fitting, but since your circles are overlapping/touching, watershed segmentation may work better. Here's a good resource.
import cv2
import numpy as np
from skimage.feature import peak_local_max
from skimage.morphology import watershed
from scipy import ndimage
# Load in image, convert to gray scale, and Otsu's threshold
image = cv2.imread('1.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Remove small noise by filtering using contour area
cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    if cv2.contourArea(c) < 1000:
        cv2.drawContours(thresh, [c], 0, (0, 0, 0), -1)
cv2.imshow('thresh', thresh)
# Compute Euclidean distance from every binary pixel
# to the nearest zero pixel then find peaks
distance_map = ndimage.distance_transform_edt(thresh)
local_max = peak_local_max(distance_map, indices=False, min_distance=20, labels=thresh)
# Perform connected component analysis then apply Watershed
markers = ndimage.label(local_max, structure=np.ones((3, 3)))[0]
labels = watershed(-distance_map, markers, mask=thresh)
# Iterate through unique labels
for label in np.unique(labels):
    if label == 0:
        continue
    # Create a mask
    mask = np.zeros(gray.shape, dtype="uint8")
    mask[labels == label] = 255
    # Find contours and determine contour area
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    c = max(cnts, key=cv2.contourArea)
    cv2.drawContours(image, [c], -1, (36, 255, 12), -1)
cv2.imshow('image', image)
cv2.waitKey()
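If a count is needed, as in the other answers, it can be read directly from the watershed labels computed above; label 0 is the background, so it is excluded:
print('Circles found: {}'.format(len(np.unique(labels)) - 1))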

How to find the center and angle of objects in an image?

I am using python and OpenCV. I am trying to find the center and angle of the batteries:
Image of batteries with random angles:
The code that I have is this:
import cv2
import numpy as np
img = cv2.imread('image/baterias2.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
img2 = cv2.imread('image/baterias4.png',0)
minLineLength = 300
maxLineGap = 5
edges = cv2.Canny(img2,50,200)
cv2.imshow('Canny',edges)
lines = cv2.HoughLinesP(edges, 1, np.pi/180, 80, minLineLength, maxLineGap)
print(lines)
salida = np.zeros((img.shape[0], img.shape[1]))
for x in range(0, len(lines)):
    for x1, y1, x2, y2 in lines[x]:
        cv2.line(salida, (x1, y1), (x2, y2), (125, 125, 125), 0)
cv2.imshow('final',salida)
cv2.imwrite('result/hough.jpg',img)
cv2.waitKey(0)
Any ideas to work it out?
Almost identical to one of my other answers. PCA seems to work fine.
import cv2
import numpy as np
img = cv2.imread("test_images/battery001.png") #load an image of a single battery
img_gs = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #convert to grayscale
#inverted binary threshold: 1 for the battery, 0 for the background
_, thresh = cv2.threshold(img_gs, 250, 1, cv2.THRESH_BINARY_INV)
#From a matrix of pixels to a matrix of coordinates of non-black points.
#(note: mind the col/row order, pixels are accessed as [row, col]
#but when we draw, it's (x, y), so have to swap here or there)
mat = np.argwhere(thresh != 0)
#let's swap here... (e. g. [[row, col], ...] to [[col, row], ...])
mat[:, [0, 1]] = mat[:, [1, 0]]
#or we could've swapped at the end, when drawing
#(e. g. center[0], center[1] = center[1], center[0], same for endpoint1 and endpoint2),
#probably better performance-wise
mat = np.array(mat).astype(np.float32) #have to convert type for PCA
#mean (e. g. the geometrical center)
#and eigenvectors (e. g. directions of principal components)
m, e = cv2.PCACompute(mat, mean = np.array([]))
#now to draw: let's scale our primary axis by 100,
#and the secondary by 50
#drawing functions need integer coordinates
center = tuple(int(v) for v in m[0])
endpoint1 = tuple(int(v) for v in (m[0] + e[0]*100))
endpoint2 = tuple(int(v) for v in (m[0] + e[1]*50))
red_color = (0, 0, 255)
cv2.circle(img, center, 5, red_color)
cv2.line(img, center, endpoint1, red_color)
cv2.line(img, center, endpoint2, red_color)
cv2.imwrite("out.png", img)
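If the numeric angle is needed as well, it can be derived from the primary eigenvector e[0] computed above (a small addition to the snippet; note that the y axis of an image grows downward, so the sign is flipped relative to the usual math convention):
angle = np.degrees(np.arctan2(e[0][1], e[0][0]))  # orientation of the primary axis, in degrees
print('angle:', angle)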
To find the center of an object, you can use moments.
Threshold the image and get the contours of the object with findContours.
Compute the moments with cv2.moments(arr).
As arr you can pass a contour. The coordinates of the center are then computed as x = m10/m00 and y = m01/m00.
To get the orientation, you can fit a minimum-area rectangle around the object and compute the angle between the longer side of the rectangle and a vertical line.
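A minimal sketch of that center-and-angle computation (it assumes a binary image named thresh containing a single object; the variable names are illustrative):
import cv2

cnts, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
c = max(cnts, key=cv2.contourArea)
M = cv2.moments(c)
cx, cy = M['m10'] / M['m00'], M['m01'] / M['m00']  # centroid: x = m10/m00, y = m01/m00
(rcx, rcy), (rw, rh), angle = cv2.minAreaRect(c)   # rotated rectangle around the object
if rw < rh:
    angle += 90                                    # report the angle of the longer side
print('center:', (cx, cy), 'angle:', angle)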
You can reference the code.
import cv2
import imutils
import numpy as np
PIC_PATH = r"E:\temp\Battery.jpg"
image = cv2.imread(PIC_PATH)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(gray, 100, 220)
kernel = np.ones((5,5),np.uint8)
closed = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel)
cnts = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)  # version-safe contour unpacking
cv2.drawContours(image, cnts, -1, (0, 255, 0), 4)
cv2.imshow("Output", image)
cv2.waitKey(0)
The resulting picture is:
