I have this image:
And I process it as follows, to give Tesseract the best possible input:
import os
import cv2
import numpy as np
from typing import Tuple

os.makedirs(TEMP_FOLDER, exist_ok=True)  # create the folder before the first write into it
sharpened = unsharp_mask(img, amount=1.5)
cv2.imwrite(TEMP_FOLDER + 'sharpened.png', sharpened)
thr = cv2.threshold(sharpened, 220, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# im = cv2.resize(thr, None, fx=2, fy=2, interpolation=cv2.INTER_AREA)
cv2.imwrite(TEMP_FOLDER + 'inverted.png', thr)
inverted = cv2.imread(TEMP_FOLDER + 'inverted.png')
filtered_inverted = remove_black_boundaries(inverted)
filtered_inverted = cv2.resize(filtered_inverted, None, fx=2, fy=2, interpolation=cv2.INTER_LINEAR)
# kernel = np.ones((2, 2), np.uint8)
# filtered_inverted = cv2.dilate(filtered_inverted, kernel)
cv2.imwrite(TEMP_FOLDER + 'filtered.png', filtered_inverted)
median = cv2.medianBlur(filtered_inverted, 5)
# median = cv2.cvtColor(median, cv2.COLOR_RGB2GRAY)
# median = cv2.threshold(median, 127, 255, cv2.THRESH_BINARY)[1]
cv2.imwrite(TEMP_FOLDER + 'median.png', median)
The function unsharp_mask is defined as:
def unsharp_mask(image: np.ndarray, kernel_size: Tuple[int, int] = (5, 5),
                 sigma: float = 1.0, amount: float = 1.0, threshold: float = 0) -> np.ndarray:
    """Return a sharpened version of the image, using an unsharp mask."""
    blurred = cv2.GaussianBlur(image, kernel_size, sigma)
    sharpened = float(amount + 1) * image - float(amount) * blurred
    sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
    sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
    sharpened = sharpened.round().astype(np.uint8)
    if threshold > 0:
        # cv2.absdiff avoids the uint8 wrap-around that plain subtraction would cause
        low_contrast_mask = cv2.absdiff(image, blurred) < threshold
        np.copyto(sharpened, image, where=low_contrast_mask)
    return sharpened
And the function remove_black_boundaries (useless in this case, since there are no black boundaries in the image) is defined as:
def remove_black_boundaries(img: np.ndarray) -> np.ndarray:
    hh, ww = img.shape[:2]
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
    thresh = cv2.erode(thresh, np.ones((3, 3), np.uint8))
    contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = contours[0] if len(contours) == 2 else contours[1]
    cnt = max(contours, key=cv2.contourArea)
    # draw white contour on black background as mask
    mask = np.zeros((hh, ww), dtype=np.uint8)
    cv2.drawContours(mask, [cnt], 0, (255, 255, 255), cv2.FILLED)
    # invert mask so shapes are white on black background
    mask_inv = 255 - mask
    # create new (white) background
    bckgnd = np.full_like(img, (255, 255, 255))
    # apply mask to image
    image_masked = cv2.bitwise_and(img, img, mask=mask)
    # apply inverse mask to background
    bckgnd_masked = cv2.bitwise_and(bckgnd, bckgnd, mask=mask_inv)
    # add together
    result = cv2.add(image_masked, bckgnd_masked)
    return result
So I get the sharpened image as:
And I get the inverted (and filtered) image as:
So, the image passed to Tesseract is:
But what I get from Tesseract is Conteggio: 2900, without the first line. I also tried resizing the image, but I get the same output. Any idea how I can improve the image sent to Tesseract?
You haven't shown your actual pytesseract code, but without any (pre)processing, I get the correct result solely by switching to page segmentation method 6, which is:
Assume a single uniform block of text.
import cv2
import pytesseract
img = cv2.imread('KQ49Y.png', cv2.IMREAD_GRAYSCALE)
text = pytesseract.image_to_string(img, config='--psm 6')
print(text.replace('\f', ''))
# Facebook
# Conteggio: 2900
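If other crops still lose lines, it can be worth comparing a few segmentation modes side by side. A hedged sketch (the file name is the one from above; the psm values are just common choices):
import cv2
import pytesseract
img = cv2.imread('KQ49Y.png', cv2.IMREAD_GRAYSCALE)
# try a few common page segmentation modes and compare the output
for psm in (3, 4, 6, 11):
    text = pytesseract.image_to_string(img, config=f'--psm {psm}')
    print(f'--- psm {psm} ---')
    print(text.replace('\f', ''))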
----------------------------------------
System information
----------------------------------------
Platform: Windows-10-10.0.19042-SP0
Python: 3.9.6
PyCharm: 2021.2
OpenCV: 4.5.3
pytesseract: 5.0.0-alpha.20201127
----------------------------------------
I am trying to draw a contour around the leaf to calculate its area using OpenCV, but cv2.findContours() draws contours around the other regions instead of the leaf.
My Code
import cv2
import numpy as np
from google.colab.patches import cv2_imshow

def Contours_Detection(image_path):
    img = cv2.imread(image_path)
    image = cv2.resize(img, (500, 500), interpolation=cv2.INTER_AREA)
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    # store the a-channel
    a_channel = lab[:, :, 1]
    # automate the threshold using Otsu's method
    th = cv2.threshold(a_channel, 127, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    # mask the result with the original image
    masked = cv2.bitwise_and(image, image, mask=th)
    blur = cv2.medianBlur(masked, 7)
    gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
    binary = cv2.threshold(gray, 125, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    MORPH_close = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel, iterations=2)
    result = 255 - MORPH_close
    contours, hierarchy = cv2.findContours(result, mode=cv2.RETR_LIST, method=cv2.CHAIN_APPROX_NONE)
    Num_of_contour = len(contours)
    image_copy = image.copy()
    spot_on_org_img = cv2.drawContours(image_copy, contours, -1, (0, 255, 0), thickness=2)
    black_img = np.zeros(image.shape)
    # draw the contours on the black image
    spot_on_black_img = cv2.drawContours(black_img, contours, -1, (0, 255, 0), 1)
    return spot_on_black_img

image_path = '/content/IMG_3407.jpg'
spot_on_black_img = Contours_Detection(image_path)
cv2_imshow(spot_on_black_img)
Input Image
Output Image
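For what it's worth, a minimal sketch of one way to isolate just the leaf, assuming the leaf is the largest connected region in the a-channel threshold used by the function above:
import cv2
img = cv2.imread('/content/IMG_3407.jpg')
image = cv2.resize(img, (500, 500), interpolation=cv2.INTER_AREA)
a_channel = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)[:, :, 1]
th = cv2.threshold(a_channel, 127, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# RETR_EXTERNAL keeps only outermost contours; the leaf is assumed to be the largest
contours, _ = cv2.findContours(th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
leaf = max(contours, key=cv2.contourArea)
print('leaf area (px):', cv2.contourArea(leaf))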
Problem statement:
Given a fundus image, I need to suppress the blood vessels so that they do not appear in the image passed to the classifier being used.
There were a few thoughts, but the easiest approach was to segment out a rough outline of the vasculature. This is achieved.
Next was to get the colour of the surrounding area and use it to blend into the white area found, then merge it back into the original image.
Any suggestions for identifying the colour and blending it in, then merging it back into the original image so that the vessels are not visible? One thought was to take the contour of each white area, find the corresponding colour outside it, and use that to blend it back.
Any alternate approach is also welcome.
eyepac image - the vessels are visible; I want to make them the colour of their surroundings so that they are not visible.
Here is the code:
import cv2
import numpy as np

def apply_threshold_with_denoising(image):
    image = cv2.adaptiveThreshold(image, 250, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)
    # the second positional argument is the (optional) output array, so pass None before h
    image = cv2.fastNlMeansDenoising(image, None, 1.5, 5, 5)
    return image

def delete_small_components(image, size):
    _, blackAndWhite = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(blackAndWhite, None, None, None, 8, cv2.CV_32S)
    sizes = stats[1:, -1]  # get the CC_STAT_AREA component
    image = np.zeros(labels.shape, np.uint8)
    for i in range(0, nlabels - 1):
        if sizes[i] >= size:  # filter small dotted regions (use the size parameter, not a hardcoded 150)
            image[labels == i + 1] = 255
    return cv2.bitwise_not(image)

def kernel(num1, num2):
    return np.ones((num1, num2), np.uint8)
def resize(img):
    scale_percent = 50  # percent of original size
    width = int(img.shape[1] * scale_percent / 100)
    height = int(img.shape[0] * scale_percent / 100)
    dim = (width, height)
    # resize image
    return cv2.resize(img, dim, interpolation=cv2.INTER_AREA)

def get_large_vessels(image):
    struct_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 4))
    opening = cv2.morphologyEx(image, cv2.MORPH_OPEN, struct_kernel, iterations=1)
    cnts = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        area = cv2.contourArea(c)
        if area < 250:
            cv2.drawContours(opening, [c], -1, (0, 0, 0), -1)
    return opening

def get_small_vessels(both, large):
    large = cv2.dilate(large, kernel(3, 3), iterations=5)
    subtract = cv2.subtract(both, large)
    return subtract

def remove_background(image, mask):
    image = cv2.bitwise_and(cv2.bitwise_not(image), cv2.bitwise_not(image), mask=mask)
    image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel(2, 2))
    return image
img = cv2.imread('2001_left.jpeg')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
l_b = np.array([0, 0, 30])
u_b = np.array([255, 255, 255])
mask = cv2.inRange(hsv, l_b, u_b)
mask = cv2.erode(mask, kernel(2, 2), iterations=5)
grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
threshold = apply_threshold_with_denoising(grayscale)
kernel22 = cv2.dilate(threshold, kernel(2, 2), iterations=2)
remove_small1kernel22 = delete_small_components(kernel22, 5)
cv2.imshow('remove_small1kernel22', remove_small1kernel22)
dilation = cv2.dilate(threshold, kernel(2, 1), iterations=2)
remove_small1 = delete_small_components(dilation, 150)
dilation = cv2.dilate(threshold, kernel(1, 2), iterations=2)
remove_small2 = delete_small_components(dilation, 150)
merge = cv2.addWeighted(remove_small1, 0.5, remove_small2, 0.5, 0)
threshold_merge = apply_threshold_with_denoising(merge)
remove_small3 = delete_small_components(threshold_merge, 150)
large_vessels = get_large_vessels(remove_background(remove_small3, mask))
cv2.imshow('Large blood vasculature', large_vessels)
small_vessels = get_small_vessels(remove_background(remove_small3, mask), large_vessels)
cv2.imshow('small vasculature', small_vessels)
cv2.waitKey(0)  # needed for the imshow windows to render
I tried the above code.
It produces the vessel maps, small and large.
I need a way to use this map to recolour the vessels with a mean colour value taken from outside each contour, so that they blend in when put back.
Is there a way to just identify each segment, colour it, and merge it back?
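One alternative to estimating a mean colour per contour is OpenCV's inpainting, which fills masked pixels from their surroundings. A minimal sketch, assuming the large and small vessel maps from the script above are combined into a single single-channel mask:
import cv2
import numpy as np
vessel_mask = cv2.bitwise_or(large_vessels, small_vessels)
# dilate slightly so the mask also covers the vessel edges
vessel_mask = cv2.dilate(vessel_mask, np.ones((3, 3), np.uint8), iterations=1)
# fill the masked pixels from the surrounding retina colour
recoloured = cv2.inpaint(img, vessel_mask, 5, cv2.INPAINT_TELEA)
cv2.imshow('vessels suppressed', recoloured)
cv2.waitKey(0)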
My code is not producing a good binary image!
import cv2
import matplotlib.pyplot as plt
from matplotlib import gridspec

LpImg = cv2.imread('/content/drive/My Drive/TESTING/Placas_detectadas/CPVL92.png')
if len(LpImg):  # check if there is at least one license image
    # scales, calculates absolute values, and converts the result to 8-bit
    plate_image = cv2.convertScaleAbs(LpImg[0], alpha=(255.0))
    plate_image = LpImg  # image_cropped
    # convert to grayscale and blur the image
    gray = cv2.cvtColor(plate_image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (7, 7), 0)
    # apply inverse adaptive threshold
    thresh_inv = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 39, 1)
    # binary = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    kernel3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    thre_mor = cv2.morphologyEx(thresh_inv, cv2.MORPH_DILATE, kernel3)
    # visualize the results
    fig = plt.figure(figsize=(12, 7))
    plt.rcParams.update({"font.size": 18})
    grid = gridspec.GridSpec(ncols=2, nrows=3, figure=fig)
    plot_image = [plate_image, gray, blur, thresh_inv, thre_mor]
    plot_name = ["plate_image", "gray", "blur", "binary", "dilation"]
    for i in range(len(plot_image)):
        fig.add_subplot(grid[i])
        plt.axis(False)
        plt.title(plot_name[i])
        if i == 0:
            plt.imshow(plot_image[i])
        else:
            plt.imshow(plot_image[i], cmap="gray")
This is the image:
With these results:
If I replace the adaptive threshold line
thresh_inv = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 39, 1)
with Otsu thresholding
binary = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
I get this result:
Why is this happening? How can I solve it?
I was thinking of using this:
LpImg = cv2.imread('/content/image.png')
# Set scaling factors and add
gamma1 = 0.3
gamma2 = 1.5
Iout = gamma1 * Ioutlow[0:rows, 0:cols] + gamma2 * Iouthigh[0:rows, 0:cols]
# Anti-log then rescale to [0, 1]
Ihmf = np.expm1(Iout)
Ihmf = (Ihmf - np.min(Ihmf)) / (np.max(Ihmf) - np.min(Ihmf))
Ihmf2 = np.array(255 * Ihmf, dtype="uint8")
# Threshold the image - anything below intensity 65 gets set to white
Ithresh = Ihmf2 < 65
Ithresh = 255 * Ithresh.astype("uint8")
That gives this result:
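Note that the snippet references Ioutlow, Iouthigh, rows, and cols without defining them; they come from homomorphic filtering steps that were omitted. A hedged reconstruction of those steps (the sigma value and the padded filter size are assumptions) might look like:
import cv2
import numpy as np
img = cv2.imread('/content/image.png', cv2.IMREAD_GRAYSCALE)
rows, cols = img.shape
# log transform (log1p avoids log(0))
imgLog = np.log1p(np.array(img, dtype='float') / 255)
# build Gaussian low-pass / high-pass filters on a padded grid
M, N = 2 * rows + 1, 2 * cols + 1
sigma = 10
X, Y = np.meshgrid(np.linspace(0, N - 1, N), np.linspace(0, M - 1, M))
Xc, Yc = np.ceil(N / 2), np.ceil(M / 2)
gaussian = (X - Xc) ** 2 + (Y - Yc) ** 2
Hlow = np.exp(-gaussian / (2 * sigma * sigma))
Hhigh = 1 - Hlow
# shift the filter origin to match the FFT layout
HlowShift = np.fft.ifftshift(Hlow)
HhighShift = np.fft.ifftshift(Hhigh)
# filter in the frequency domain
If = np.fft.fft2(imgLog, (M, N))
Ioutlow = np.real(np.fft.ifft2(If * HlowShift, (M, N)))
Iouthigh = np.real(np.fft.ifft2(If * HhighShift, (M, N)))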
But I still want to use these filters:
Grayscale
Blur
Binarization
Segmentation
Another approach is to use division normalization in Python/OpenCV.
Read the input
Convert to gray
Apply morphology dilation
Divide the input by the dilated image
Threshold
Save the results
Input:
import cv2
import numpy as np
# read the image
img = cv2.imread('license_chile.png')
# convert to gray
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# apply morphology
kernel = cv2.getStructuringElement(cv2.MORPH_RECT , (75,75))
smooth = cv2.morphologyEx(gray, cv2.MORPH_DILATE, kernel)
# divide gray by morphology image
division = cv2.divide(gray, smooth, scale=255)
# threshold
result = cv2.threshold(division, 0, 255, cv2.THRESH_OTSU )[1]
# save results
cv2.imwrite('license_chile_thresh.jpg',result)
# show results
cv2.imshow('smooth', smooth)
cv2.imshow('division', division)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
I have been working with OpenCV to detect a square obstacle. So far, this is the image I get after applying filters and Canny.
The obstacle I am trying to identify is the horizontal one; the three vertical rectangles are guide lines on the floor. My goal is to keep only the horizontal rectangle, separating it from the others, but after applying findContours I only get a single object that includes all the shapes. This is the code I have been using to find only the biggest rectangle by its area:
# find the biggest contour (bigone) by area
if contours:
    bigone = max(contours, key=cv2.contourArea)
    area = cv2.contourArea(bigone)
    if area > 10000:
        x, y, w, h = cv2.boundingRect(bigone)
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
        # putText needs integer coordinates, so use floor division
        cv2.putText(img, "Obstacle", (x + w // 2, y - 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
        pts = np.array(
            [[[x, y], [x + w, y], [x + w, y + h], [x, y + h]]], dtype=np.int32)
        cv2.fillPoly(mask, pts, (255, 255, 255))
        # values = img[np.where((mask == (255, 255, 255)).all(axis=2))]
        res = cv2.bitwise_and(img, mask)  # view only the obstacle
        obs_area = w * h
        print(obs_area)
        if obs_area <= 168000:
            command_publisher.publish("GO")
            cv2.putText(
                img, "GO", (380, 400), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 1)
        else:
            command_publisher.publish("STOP")
            cv2.putText(img, "STOP", (380, 400),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 1)
else:
    print("Empty")
# show the output image
cv2.imshow("Image", img)
cv2.waitKey(1)
And this is the result I am getting:
Is there a way of separating my obstacle from the lines on the floor with some kind of filter or algorithm?
Here is an example image to work with:
Here is one way to do that using Python/OpenCV.
- Read the input
- Convert to HSV and extract only the saturation channel (black/white/gray have zero saturation)
- Threshold
- Apply morphology open and close to remove the extraneous white regions
- Get the contour and approximate to simple polygon
- Draw the polygon on the input
- Save the results
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('board.png')
# convert to HSV and extract saturation channel
sat = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)[:, :, 1]
# threshold
thresh = cv2.threshold(sat, 90, 255, cv2.THRESH_BINARY)[1]
# apply morphology open to remove extraneous white regions, then close to fill interior regions in the mask
kernel = np.ones((7, 7), np.uint8)
morph = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = np.ones((13, 13), np.uint8)
morph = cv2.morphologyEx(morph, cv2.MORPH_CLOSE, kernel)
# get contours (presumably only 1) and fit to simple polygon (quadrilateral)
cntrs = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cntrs = cntrs[0] if len(cntrs) == 2 else cntrs[1]
c = cntrs[0]
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.05 * peri, True)
# draw polygon on input
result = img.copy()
cv2.polylines(result, [np.int32(approx)], True, (0,0,255), 1, cv2.LINE_AA)
# write result to disk
cv2.imwrite("board_saturation.png", sat)
cv2.imwrite("board_thresh.png", thresh)
cv2.imwrite("board_morph.png", morph)
cv2.imwrite("board_contour.png", result)
# display it
cv2.imshow("IMAGE", img)
cv2.imshow("SAT", sat)
cv2.imshow("THRESH", thresh)
cv2.imshow("MORPH", morph)
cv2.imshow("RESULT", result)
cv2.waitKey(0)
Saturation channel image:
Thresholded image:
Morphology cleaned image:
Contour on input:
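If the goal is to feed the GO/STOP check from the question, the bounding box can be taken straight from the approximated polygon; a small sketch continuing from approx above:
x, y, w, h = cv2.boundingRect(approx)
obs_area = w * h
print('obstacle area:', obs_area)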
In your image, the problem seems to be the white rectangles. My approach is to check each row and, if the row contains many pixels close to white (255,255,255), make that row black.
Here is my code:
import cv2
import numpy as np
import random as rng
img=cv2.imread("/ur/image/directory/obstacle.png")
height, width, channels = img.shape
cv2.imshow('Source',img)
# Check each line and eliminate white rectangles(if line consist white pixels more than limit)
for x in range(0, height):
    white_counter = 0
    for y in range(0, width):
        if img[x, y, 0] >= 180 and img[x, y, 1] >= 180 and img[x, y, 2] >= 180:
            white_counter = white_counter + 1
    if white_counter > 10:
        for y in range(0, width):
            img[x, y, 0] = 0
            img[x, y, 1] = 0
            img[x, y, 2] = 0
cv2.imshow('Elimination White Rectangles', img)
# Find contours and draw rectangle for each
src_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
threshold = 300
canny_output = cv2.Canny(src_gray, threshold, threshold * 2)
contours, _ = cv2.findContours(canny_output, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours_poly = [None]*len(contours)
boundRect = [None]*len(contours)
for i, c in enumerate(contours):
    contours_poly[i] = cv2.approxPolyDP(c, 3, True)
    boundRect[i] = cv2.boundingRect(contours_poly[i])
rng.seed(12345)
drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
for i in range(len(contours)):
    color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256))
    cv2.rectangle(drawing, (int(boundRect[i][0]), int(boundRect[i][1])),
                  (int(boundRect[i][0] + boundRect[i][2]), int(boundRect[i][1] + boundRect[i][3])), color, 2)
cv2.imshow('Output', drawing)
cv2.waitKey(0)
cv2.destroyAllWindows()
Eliminate White Rectangles:
Result:
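As a side note, the per-pixel scan can be vectorized with NumPy, which is much faster on large frames; a hedged equivalent of the elimination loop above:
import cv2
import numpy as np
img = cv2.imread('obstacle.png')
near_white = (img >= 180).all(axis=2)        # True where B, G and R are all >= 180
rows_to_black = near_white.sum(axis=1) > 10  # rows with more than 10 near-white pixels
img[rows_to_black] = 0                       # blacken those rows entirely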
I have many images of specimens with uncontrollable background colours. Some have a black background, some a white background, some a green background, etc.
I would like to remove the background colour of a given image, where the object in the image is just a single specimen. I tried this code, but it does not work as I expect.
def get_holes(image, thresh):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    im_bw = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)[1]
    im_bw_inv = cv2.bitwise_not(im_bw)
    # handle both the OpenCV 3 and OpenCV 4 return signature
    contour = cv2.findContours(im_bw_inv, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
    contour = contour[0] if len(contour) == 2 else contour[1]
    for cnt in contour:
        cv2.drawContours(im_bw_inv, [cnt], 0, 255, -1)
    nt = cv2.bitwise_not(im_bw)
    im_bw_inv = cv2.bitwise_or(im_bw_inv, nt)
    return im_bw_inv

def remove_background(image, thresh, scale_factor=.25, kernel_range=range(1, 15), border=None):
    border = border or kernel_range[-1]
    holes = get_holes(image, thresh)
    small = cv2.resize(holes, None, fx=scale_factor, fy=scale_factor)
    bordered = cv2.copyMakeBorder(small, border, border, border, border, cv2.BORDER_CONSTANT)
    for i in kernel_range:
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2*i+1, 2*i+1))
        bordered = cv2.morphologyEx(bordered, cv2.MORPH_CLOSE, kernel)
    unbordered = bordered[border:-border, border:-border]
    mask = cv2.resize(unbordered, (image.shape[1], image.shape[0]))
    fg = cv2.bitwise_and(image, image, mask=mask)
    return fg

file = your_file_location
img = cv2.imread(file)
nb_img = remove_background(img, 255)
These are some example images
May I have your suggestions?
Here's a simple approach with the assumption that there is only one specimen per image.
Kmeans color quantization. We load the image then perform Kmeans color quantization to segment the image into a specified cluster of colors. For instance with clusters=4, the image will be labeled into four colors.
Obtain binary image. Convert to grayscale, Gaussian blur, adaptive threshold.
Draw largest enclosing circle onto mask. Find contours, sort for largest contour using contour area filtering then draw the largest enclosing circle onto a mask using cv2.minEnclosingCircle.
Bitwise-and. Since we have isolated the desired sections to extract, we simply bitwise-and the mask and the input image.
Input image -> Kmeans -> Binary image
Detected largest enclosing circle -> Mask -> Result
Here's the output for the second image
Input image -> Kmeans -> Binary image
Detected largest enclosing circle -> Mask -> Result
Code
import cv2
import numpy as np
# Kmeans color segmentation
def kmeans_color_quantization(image, clusters=8, rounds=1):
    h, w = image.shape[:2]
    samples = np.zeros([h*w, 3], dtype=np.float32)
    count = 0
    for x in range(h):
        for y in range(w):
            samples[count] = image[x][y]
            count += 1
    compactness, labels, centers = cv2.kmeans(samples,
        clusters,
        None,
        (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10000, 0.0001),
        rounds,
        cv2.KMEANS_RANDOM_CENTERS)
    centers = np.uint8(centers)
    res = centers[labels.flatten()]
    return res.reshape((image.shape))
# Load image and perform kmeans
image = cv2.imread('2.jpg')
original = image.copy()
kmeans = kmeans_color_quantization(image, clusters=4)
# Convert to grayscale, Gaussian blur, adaptive threshold
gray = cv2.cvtColor(kmeans, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (3,3), 0)
thresh = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,21,2)
# Draw largest enclosing circle onto a mask
mask = np.zeros(original.shape[:2], dtype=np.uint8)
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
for c in cnts:
    ((x, y), r) = cv2.minEnclosingCircle(c)
    cv2.circle(image, (int(x), int(y)), int(r), (36, 255, 12), 2)
    cv2.circle(mask, (int(x), int(y)), int(r), 255, -1)
    break
# Bitwise-and for result
result = cv2.bitwise_and(original, original, mask=mask)
result[mask==0] = (255,255,255)
cv2.imshow('thresh', thresh)
cv2.imshow('result', result)
cv2.imshow('mask', mask)
cv2.imshow('kmeans', kmeans)
cv2.imshow('image', image)
cv2.waitKey()
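A small design note: the pixel-copy loop inside kmeans_color_quantization can be replaced with a single reshape, which is equivalent and much faster:
samples = image.reshape(-1, 3).astype(np.float32)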