Extract the floor layout and threshold with OpenCV and Python

I've tried to use SSIM to extract the difference between two images to get only the floor area (image_a is the original and image_b has the painted floor).
The expected output is a threshold mask.
The problem is that thresholding the SSIM difference just didn't work in my case (an example is shown below).
Can someone suggest a better thresholding technique or the theory behind one?
from skimage.measure import compare_ssim  # newer scikit-image: from skimage.metrics import structural_similarity
import cv2
...
image_a = cv2.imread(first)
image_b = cv2.imread(second)
gray_a = cv2.cvtColor(image_a, cv2.COLOR_BGR2GRAY)
gray_b = cv2.cvtColor(image_b, cv2.COLOR_BGR2GRAY)
_, diff = compare_ssim(gray_a, gray_b, full=True, gaussian_weights=True)
diff = (diff * 255).astype("uint8")
thresh = cv2.threshold(diff, 0, 255,
                       cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
contour_sizes = [(cv2.contourArea(contour), contour) for contour in contours]
if len(contour_sizes) > 0:
    largest_contour = max(contour_sizes, key=lambda x: x[0])[1]
    x, y, w, h = cv2.boundingRect(largest_contour)
    cv2.rectangle(image_a, (x, y), (x + w, y + h), (36, 255, 12), 2)
    cv2.rectangle(image_b, (x, y), (x + w, y + h), (36, 255, 12), 2)
cv2.imwrite('image_a.jpg', image_a)
cv2.imwrite('image_b.jpg', image_b)
cv2.imwrite('thresh.jpg', thresh)
image_a with max contour detected
image_b with max contour detected
thresh

A better result can be obtained by thresholding the mean of the difference between the given images.
import cv2
import numpy as np

def get_mask(img1, img2, thresh):
    if img1.shape != img2.shape:
        return
    diff = cv2.absdiff(img1, img2)
    diff = np.mean(diff, axis=2)
    diff[diff <= thresh] = 0
    diff[diff > thresh] = 255
    mask = np.dstack([diff] * 3)
    return mask
Artifacts may appear in the resulting mask; they can be reduced by applying morphological transformations, as sketched below.
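A minimal sketch, assuming the mask returned by get_mask above (the kernel size is an arbitrary choice):
kernel = np.ones((5, 5), np.uint8)
clean = mask.astype(np.uint8)
clean = cv2.morphologyEx(clean, cv2.MORPH_OPEN, kernel)   # opening removes small speckles
clean = cv2.morphologyEx(clean, cv2.MORPH_CLOSE, kernel)  # closing fills small holes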

Related

Separate object contours with OpenCV

I have been working with OpenCV to detect a square obstacle. So far this is the image I get after applying filters and Canny.
The obstacle I am trying to identify is the horizontal one; the three vertical rectangles are guide lines on the floor. My goal is to keep only the horizontal rectangle, separating it from the others, but after applying findContours I get a single object that includes all the shapes. This is the code I have been using to find only the biggest rectangle by area:
# find the biggest contour (c) by area
if contours != 0:
    if not contours:
        print("Empty")
    else:
        bigone = max(contours, key=cv2.contourArea) if max else None
        area = cv2.contourArea(bigone)
        if area > 10000:
            x, y, w, h = cv2.boundingRect(bigone)
            cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), 2)
            cv2.putText(img, "Obstacle", (x + w // 2, y - 20),  # putText needs integer coordinates
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
            pts = np.array(
                [[[x, y], [x+w, y], [x+w, y+h], [x, y+h]]], dtype=np.int32)
            cv2.fillPoly(mask, pts, (255, 255, 255))
            # values = img[np.where((mask == (255, 255, 255)).all(axis=2))]
            res = cv2.bitwise_and(img, mask)  # view only the obstacle
            obs_area = w * h
            print(obs_area)
            if obs_area <= 168000:
                command_publisher.publish("GO")
                cv2.putText(
                    img, "GO", (380, 400), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 1)
            else:
                command_publisher.publish("STOP")
                cv2.putText(img, "STOP", (380, 400),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 1)
# show the output image
cv2.imshow("Image", img)
cv2.waitKey(1)
And this is the result I am getting:
Is there a way of separating my obstacle from the lines on the floor with some kind of filter or algorithm?
Here is an example image to work with:
Here is one way to do that using Python/OpenCV.
- Read the input
- Convert to HSV and extract only the saturation channel (black/white/gray have zero saturation)
- Threshold
- Apply morphology open and close to remove the extraneous white regions
- Get the contour and approximate to simple polygon
- Draw the polygon on the input
- Save the results
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('board.png')
# convert to HSV and extract saturation channel
# (cv2.imread returns BGR, so use COLOR_BGR2HSV)
sat = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)[:,:,1]
# threshold
thresh = cv2.threshold(sat, 90, 255, cv2.THRESH_BINARY)[1]
# apply morphology open to remove noise, then close to fill interior regions in mask
kernel = np.ones((7,7), np.uint8)
morph = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = np.ones((13,13), np.uint8)
morph = cv2.morphologyEx(morph, cv2.MORPH_CLOSE, kernel)
# get contours (presumably only 1) and fit to simple polygon (quadrilateral)
cntrs = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cntrs = cntrs[0] if len(cntrs) == 2 else cntrs[1]
c = cntrs[0]
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.05 * peri, True)
# draw polygon on input
result = img.copy()
cv2.polylines(result, [np.int32(approx)], True, (0,0,255), 1, cv2.LINE_AA)
# write result to disk
cv2.imwrite("board_saturation.png", sat)
cv2.imwrite("board_thresh.png", thresh)
cv2.imwrite("board_morph.png", morph)
cv2.imwrite("board_contour.png", result)
# display it
cv2.imshow("IMAGE", img)
cv2.imshow("SAT", sat)
cv2.imshow("THRESH", thresh)
cv2.imshow("MORPH", morph)
cv2.imshow("RESULT", result)
cv2.waitKey(0)
Saturation channel image:
Thresholded image:
Morphology cleaned image:
Contour on input:
In your image the problem seems to be the white rectangles. My approach is to check each row: if a row contains many pixels close to white (255,255,255), make the whole row black.
Here is my code:
import cv2
import numpy as np
import random as rng
img=cv2.imread("/ur/image/directory/obstacle.png")
height, width, channels = img.shape
cv2.imshow('Source',img)
# Check each row and eliminate white rectangles (if the row contains more white pixels than the limit)
for x in range(0, height):
    white_counter = 0
    for y in range(0, width):
        if img[x, y, 0] >= 180 and img[x, y, 1] >= 180 and img[x, y, 2] >= 180:
            white_counter = white_counter + 1
    if white_counter > 10:
        for y in range(0, width):
            img[x, y, 0] = 0
            img[x, y, 1] = 0
            img[x, y, 2] = 0
cv2.imshow('Elimination White Rectangles', img)
# Find contours and draw rectangle for each
src_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
threshold = 300
canny_output = cv2.Canny(src_gray, threshold, threshold * 2)
contours, _ = cv2.findContours(canny_output, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours_poly = [None]*len(contours)
boundRect = [None]*len(contours)
for i, c in enumerate(contours):
    contours_poly[i] = cv2.approxPolyDP(c, 3, True)
    boundRect[i] = cv2.boundingRect(contours_poly[i])
rng.seed(12345)
drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
for i in range(len(contours)):
    color = (rng.randint(0, 256), rng.randint(0, 256), rng.randint(0, 256))
    cv2.rectangle(drawing, (int(boundRect[i][0]), int(boundRect[i][1])),
                  (int(boundRect[i][0] + boundRect[i][2]), int(boundRect[i][1] + boundRect[i][3])), color, 2)
cv2.imshow('Output', drawing)
cv2.waitKey(0)
cv2.destroyAllWindows()
Eliminate White Rectangles:
Result:

Get white text on black background

I want to identify text in a set of images. There are some images with both white and black colored text.
I used Otsu thresholding to binarize the image.
After contour identification and removal of non-text regions, I identified the required text region.
I need all the text in white. But I don't know how to do it. I thought of using a bitwise operator but couldn't find a method. Can someone help me with this?
Expected output:
import cv2
import numpy as np
def process(img):
    # read image
    img_no = str(img)
    rgb = cv2.imread(img_no + '.jpg')
    # cv2.imshow('original', rgb)
    # convert image to grayscale
    gray = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
    _, bw_copy = cv2.threshold(gray, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # bilateral filter
    blur = cv2.bilateralFilter(gray, 5, 75, 75)
    # cv2.imshow('blur', blur)
    # morphological gradient calculation
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    grad = cv2.morphologyEx(blur, cv2.MORPH_GRADIENT, kernel)
    # cv2.imshow('gradient', grad)
    # binarization
    _, bw = cv2.threshold(grad, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # cv2.imshow('otsu', bw)
    # closing
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 1))
    closed = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)
    # cv2.imshow('closed', closed)
    # finding contours
    contours, hierarchy = cv2.findContours(closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    mask = np.zeros(closed.shape, dtype=np.uint8)
    mask1 = np.zeros(bw_copy.shape, dtype=np.uint8)
    for idx in range(len(contours)):
        x, y, w, h = cv2.boundingRect(contours[idx])
        mask[y:y + h, x:x + w] = 0
        area = cv2.contourArea(contours[idx])
        aspect_ratio = float(w) / h
        cv2.drawContours(mask, contours, idx, (255, 255, 255), -1)
        r = float(cv2.countNonZero(mask[y:y + h, x:x + w])) / (w * h)
        # identify region of interest
        if r > 0.34 and 0.52 < aspect_ratio < 13 and area > 145.0:
            cv2.drawContours(mask1, [contours[idx]], -1, (255, 255, 255), -1)
    result = cv2.bitwise_and(bw_copy, mask1)
    cv2.imshow('result', result)
    print(img_no + " Done")
    cv2.waitKey()
New Image
The accepted answer doesn't work with this picture.
At first glance this looks like a simple question, but it is quite tricky to solve.
However, you already have all the ingredients needed to solve the problem, and your algorithm only requires a slight tweak.
Here is the gist:
What you need is an inverted copy (wb_copy) of your thresholded image (bw_copy).
You have done a great job creating the mask.
Run a bitwise_and operation on both bw_copy and wb_copy with the mask above. You should get the result shown below.
As you can see, the answer you want is partly in each image. All you need to do is, for every font blob, count the non-zero pixels in both images and select the one with the higher count. Doing so will produce the result you wanted.
Here are the modifications I made to the code:
# finding contours
_, contours, _ = cv2.findContours(closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
mask = np.zeros(closed.shape, dtype=np.uint8)
mask1 = np.zeros(bw_copy.shape, dtype=np.uint8)
wb_copy = cv2.bitwise_not(bw_copy)
new_bw = np.zeros(bw_copy.shape, dtype=np.uint8)
for idx in range(len(contours)):
    x, y, w, h = cv2.boundingRect(contours[idx])
    mask[y:y + h, x:x + w] = 0
    area = cv2.contourArea(contours[idx])
    aspect_ratio = float(w) / h
    cv2.drawContours(mask, contours, idx, (255, 255, 255), -1)
    r = float(cv2.countNonZero(mask[y:y + h, x:x + w])) / (w * h)
    # identify region of interest
    if r > 0.34 and 0.52 < aspect_ratio < 13 and area > 145.0:
        cv2.drawContours(mask1, [contours[idx]], -1, (255, 255, 255), -1)
        bw_temp = cv2.bitwise_and(mask1[y:y + h, x:x + w], bw_copy[y:y + h, x:x + w])
        wb_temp = cv2.bitwise_and(mask1[y:y + h, x:x + w], wb_copy[y:y + h, x:x + w])
        bw_count = cv2.countNonZero(bw_temp)
        wb_count = cv2.countNonZero(wb_temp)
        if bw_count > wb_count:
            new_bw[y:y + h, x:x + w] = np.copy(bw_copy[y:y + h, x:x + w])
        else:
            new_bw[y:y + h, x:x + w] = np.copy(wb_copy[y:y + h, x:x + w])
cv2.imshow('new_bw', new_bw)
INPUT IMAGE:
I did the following:
import cv2 as cv
import numpy as np
img_path = '10002.png'
img = cv.imread(img_path)
img = cv.resize(img, None, None, 0.4, 0.4)  # dsize=None so the fx/fy scaling factors are used
img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
img = np.array(img, dtype='float32')
result = np.where(img == 0., 255, 0)
result = np.array(result, dtype='uint8')
result = cv.erode(result, kernel=np.ones(shape=(3, 3)), iterations=1)
cv.imshow('Image', img)
cv.imshow('Result', result)
cv.waitKey(0)
cv.destroyAllWindows()
OUTPUT IMAGE:
You can even invert the image using bitwise_not and get black text on a white background.
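For example, reusing the result image from the code above:
inverted = cv.bitwise_not(result)  # black text on a white background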

How to detect and extract signature from an image with OpenCV?

I am importing the attached image. After importing the image, I want to remove horizontal lines, detect the signature, extract it, create a rectangle around it, then crop the rectangle and save it. I am struggling to identify the entire region of a signature as one contour or a group of contours.
I have already tried findContours and then various ways to detect the signature region. Please refer to the code below.
Python Script:
imagePath
#read image
image = cv2.imread(imagePath,cv2.COLOR_BGR2RGB)
#Convert to greyscale
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) # grayscale
#Apply threshold
ret,thresh1 = cv2.threshold(gray, 0, 255,cv2.THRESH_OTSU|cv2.THRESH_BINARY_INV)
plt.imshow(thresh1,cmap = 'gray')
#preprocessing
rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (15,15))
dilation = cv2.dilate(thresh1, rect_kernel, iterations = 1)
plt.imshow(dilation,cmap = 'gray')
#Detect contours
contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contours[0]
height, width, _ = image.shape
min_x, min_y = width, height
max_x = max_y = 0
for contour, hier in zip(contours, hierarchy):
    (x, y, w, h) = cv2.boundingRect(contour)
    min_x, max_x = min(x, min_x), max(x + w, max_x)
    min_y, max_y = min(y, min_y), max(y + h, max_y)
    if w > 80 and h > 80:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
if max_x - min_x > 0 and max_y - min_y > 0:
    fin = cv2.rectangle(image, (min_x, min_y), (max_x, max_y), (255, 0, 0), 2)
    plt.imshow(fin)
final = cv2.drawContours(image, contours, -1, (0, 0, 255), 6)
plt.imshow(final, cmap='gray')
The final objective is to create a rectangle around the entire signature.
Trying to generalize on the other image:
Instead of removing the horizontal lines, it may be easier to perform HSV color thresholding. The idea is to isolate the signature onto a mask and then extract it. We convert the image to HSV color space, then use a lower/upper color threshold to generate a mask:
lower = np.array([90, 38, 0])
upper = np.array([145, 255, 255])
mask = cv2.inRange(hsv, lower, upper)
Mask
To detect the signature, we can get the combined bounding box for all of the contours with np.concatenate(), then use cv2.boundingRect() to obtain the coordinates.
Now that we have the bounding box coordinates, we can use Numpy slicing to crop and extract the ROI
import numpy as np
import cv2
# Load image and HSV color threshold
image = cv2.imread('1.jpg')
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
lower = np.array([90, 38, 0])
upper = np.array([145, 255, 255])
mask = cv2.inRange(hsv, lower, upper)
result = cv2.bitwise_and(image, image, mask=mask)
result[mask==0] = (255, 255, 255)
# Find contours on extracted mask, combine boxes, and extract ROI
cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = np.concatenate(cnts)
x,y,w,h = cv2.boundingRect(cnts)
cv2.rectangle(image, (x, y), (x + w, y + h), (36,255,12), 2)
ROI = result[y:y+h, x:x+w]
cv2.imshow('result', result)
cv2.imshow('mask', mask)
cv2.imshow('image', image)
cv2.imshow('ROI', ROI)
cv2.waitKey()
Note: The lower/upper color ranges were obtained from "Choosing the correct upper and lower HSV boundaries for color detection with cv::inRange (OpenCV)".
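If you need to tune the ranges for a different image, a small trackbar script is a common way to find them interactively. A minimal sketch (the file name is a placeholder):
import cv2
import numpy as np

def nothing(x):
    pass

image = cv2.imread('1.jpg')  # placeholder path
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
cv2.namedWindow('mask')
# low trackbars start at 0, high trackbars at their maximum
for name, maxval, init in [('H low', 179, 0), ('S low', 255, 0), ('V low', 255, 0),
                           ('H high', 179, 179), ('S high', 255, 255), ('V high', 255, 255)]:
    cv2.createTrackbar(name, 'mask', init, maxval, nothing)
while True:
    lower = np.array([cv2.getTrackbarPos(n, 'mask') for n in ('H low', 'S low', 'V low')])
    upper = np.array([cv2.getTrackbarPos(n, 'mask') for n in ('H high', 'S high', 'V high')])
    cv2.imshow('mask', cv2.inRange(hsv, lower, upper))
    if cv2.waitKey(30) & 0xFF == 27:  # press Esc to quit
        break
cv2.destroyAllWindows()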

OpenCV contour detection

I have been trying to detect contours using OpenCV. I am trying to detect the nuclei of white blood cells. I tested it on other images of mine and it turned out okay, except for images where the nuclei are too far away from each other. This is the result from the program I made:
At the bottom, the nucleus was not detected as one but as two, because the parts are not conjoined or sticking together. How can I make the program detect them as only one nucleus?
Here is my code:
import cv2
import numpy as np
limit_area = 1000
x = 0
y = 0
w = 0
h = 0
nuclei = []
count = 0
number_name = 1
img1 = cv2.imread('7.bmp')
img = cv2.add(img1, 0.70)
img_3 = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask1 = cv2.inRange(img_3, (90,140,0), (255,255,255))
mask2 = cv2.inRange(img_3, (90,90,0), (255,255,255))
mask1 = cv2.equalizeHist(mask1)
mask2 = cv2.equalizeHist(mask2)
mask = mask1 + mask2
kernel = np.ones((1,4),np.uint8)
mask = cv2.dilate(mask,kernel,iterations = 1)
kernel_close = np.ones((3,3),np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel_close)
blur2 = cv2.medianBlur(mask,7)
canny = cv2.Canny(blur2, 100,200)
im2, contours, hierarchy = cv2.findContours(canny,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    if cv2.contourArea(cnt) >= limit_area:
        nuclei.append(cnt)
        print(cv2.contourArea(cnt))
        x, y, w, h = cv2.boundingRect(cnt)
        roi = blur2[y:y+h, x:x+w]
        outfile = '%d.jpg' % number_name
        image_roi = cv2.resize(roi, (128,128), interpolation=cv2.INTER_AREA)
        image_roi = cv2.medianBlur(image_roi, 5)
        (T, thresh) = cv2.threshold(image_roi, 10, 255, cv2.THRESH_BINARY)
        _, contours_roi, hierarchy_roi = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours_roi = [i for i in contours_roi if cv2.contourArea(i) <= 5000]
        cv2.fillPoly(thresh, contours_roi, color=(0,0,0))
        image_roi = thresh
        cv2.imshow(outfile, image_roi)
        cv2.rectangle(img, (x, y), (x+w, y+h), (0,255,0), 7)
        number_name += 1
        count += 1
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Here is the Original Image:
One simple way can be to merge closely detected regions. There is a concept called Intersection over Union (IoU) in image localization, in which two bounding boxes are merged if their IoU score is greater than a certain threshold. Pseudo code for the IoU computation looks like this (a merge sketch follows the snippet):
xi1 = max(box1[0], box2[0])
yi1 = max(box1[1], box2[1])
xi2 = min(box1[2], box2[2])
yi2 = min(box1[3], box2[3])
inter_area = max((xi2 - xi1), 0) * max((yi2 - yi1), 0)
box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1])
box2_area = (box2[2] - box2[0]) * (box2[3] - box2[1])
union_area = box1_area + box2_area - inter_area
iou = inter_area/union_area
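To act on that score, a hedged sketch of the merge step might look like this (boxes are (x1, y1, x2, y2) tuples; the 0.1 threshold is an assumption):
def iou(box1, box2):
    # same computation as the pseudo code above
    xi1, yi1 = max(box1[0], box2[0]), max(box1[1], box2[1])
    xi2, yi2 = min(box1[2], box2[2]), min(box1[3], box2[3])
    inter_area = max(xi2 - xi1, 0) * max(yi2 - yi1, 0)
    box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1])
    box2_area = (box2[2] - box2[0]) * (box2[3] - box2[1])
    return inter_area / float(box1_area + box2_area - inter_area)

def merge_boxes(boxes, iou_thresh=0.1):
    # greedily replace any pair with IoU above the threshold by their union box
    boxes = list(boxes)
    merged = True
    while merged:
        merged = False
        for i in range(len(boxes)):
            for j in range(i + 1, len(boxes)):
                if iou(boxes[i], boxes[j]) > iou_thresh:
                    b1, b2 = boxes[i], boxes[j]
                    boxes[i] = (min(b1[0], b2[0]), min(b1[1], b2[1]),
                                max(b1[2], b2[2]), max(b1[3], b2[3]))
                    del boxes[j]
                    merged = True
                    break
            if merged:
                break
    return boxes
Note that boxes which are close but not overlapping have an IoU of 0, so for nuclei split into disjoint parts you may need to pad the boxes slightly or use a distance criterion instead of IoU.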
Another approach you can try is the flood fill algorithm. I think it should work fine, as the nucleus is one entity after all (fully connected). Try this before the dilation, which is probably the reason your contours are breaking in two. A rough sketch follows.
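A minimal sketch of the flood-fill idea, assuming mask is the binary image from your code before the dilation and that a seed point inside the nucleus is known (the seed below is hypothetical):
h, w = mask.shape[:2]
ff_mask = np.zeros((h + 2, w + 2), np.uint8)  # floodFill needs a mask 2 px larger than the image
seed = (w // 2, h // 2)                       # hypothetical seed inside the nucleus
cv2.floodFill(mask, ff_mask, seed, 255)       # fill the connected region containing the seed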

Find dominant color on contour opencv

I am trying to find the dominant color inside a contour (black or white).
I am using OpenCV to read an image and extract white-on-black text. This is what I got so far:
The green outline is the contour, the blue lines the bounding boxes. So in this instance I am trying to extract the numbers 87575220, but as you can see it also recognizes some random artifacts and, for instance, the letter G. I think the solution would be to find the dominant colour inside each contour, and that colour should be close to white. I don't have any idea how to do this, though.
This is the code I have at the moment:
import argparse
import cv2
import imutils
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument("--image", "-i", required=True, help="Image to detect blobs from")
args = vars(parser.parse_args())
image = cv2.imread(args["image"])
image = imutils.resize(image, width=1200)
grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(grey)
maxval_10 = maxVal * 0.5
ret, threshold = cv2.threshold(grey, maxval_10, 255, cv2.THRESH_BINARY)
canny = cv2.Canny(grey, 200, 250)
lines = cv2.HoughLines(canny, 1, np.pi / 180, 140)
print(maxVal)
theta_min = 60 * np.pi / 180.
theta_max = 120 * np.pi / 180.0
theta_avr = 0
theta_deg = 0
filteredLines = []
for rho, theta in lines[0]:
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a * rho
    y0 = b * rho
    x1 = int(x0 + 1000 * (-b))
    y1 = int(y0 + 1000 * (a))
    x2 = int(x0 - 1000 * (-b))
    y2 = int(y0 - 1000 * (a))
    cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)
    if theta_min <= theta <= theta_max:
        filteredLines.append(theta)
        theta_avr += theta
if len(filteredLines) > 0:
    theta_avr /= len(filteredLines)
    theta_deg = (theta_avr / np.pi * 180) - 90
else:
    print("Failed to detect skew")
image = imutils.rotate(image, theta_deg)
canny = imutils.rotate(canny, theta_deg)
im2, contours, hierarchy = cv2.findContours(canny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# cv2.drawContours(image, contours, -1, (0, 255, 0), 1)
cv2.imshow('Contours', im2)
boundingBoxes = []
filteredContours = []
for cnt in contours:
    (x, y, w, h) = cv2.boundingRect(cnt)
    if h > 20 and h < 90 and w > 5 and w < h:
        if cv2.contourArea(cnt, True) <= 0:
            boundingBoxes.append((x, y, w, h))
            filteredContours.append(cnt)
for x, y, w, h in boundingBoxes:
    cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
cv2.drawContours(image, filteredContours, -1, (0, 255, 0), 1)
cv2.imshow('Image', image)
cv2.imshow('Edges', canny)
cv2.imshow('Threshold', threshold)
cv2.waitKey(0)
cv2.destroyAllWindows()
This is the original picture:
I would try to make an ROI before I start searching for numbers. You have not given the original image, so this example is made with the image you posted (with boxes and contours already drawn). It should also work with the original, though. The steps are written in the example code. Hope it helps. Cheers!
Example code:
import cv2
import numpy as np
# Read the image and make a copy then create a blank mask
img = cv2.imread('dominant.jpg')
img2 = img.copy()
h,w = img.shape[:2]
mask = np.zeros((h,w), np.uint8)
# Transform to gray colorspace and perform histogram equalization
gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
equ = cv2.equalizeHist(gray)
# Transform all pixels above threshold to white
black = np.where(equ > 10)
img2[black[0], black[1], :] = [255, 255, 255]
# Transform to gray colorspace, make a threshold, then dilate the threshold
gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
kernel = np.ones((15,15),np.uint8)
dilation = cv2.dilate(thresh,kernel,iterations = 1)
# Search for contours and select the biggest one and draw it on mask
_, contours, hierarchy = cv2.findContours(dilation,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
cnt = max(contours, key=cv2.contourArea)
cv2.drawContours(mask, [cnt], 0, 255, -1)
# Perform a bitwise operation
res = cv2.bitwise_and(img, img, mask=mask)
# Display the ROI
cv2.imshow('img', res)
Result:
You could create a mask out of each contour:
mask = np.zeros(image.shape[:2], dtype="uint8")  # single-channel mask (cv2.mean requires one)
cv2.drawContours(mask, [cnt], -1, 255, -1)
and then calculate the mean value of all pixels inside of the mask:
mean = cv2.mean(image, mask=mask)
and then check whether the mean is close enough to white.
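Putting the snippets together, a hedged sketch that keeps only the contours whose mean grayscale intensity is near white (grey and filteredContours come from the question's code; the 200 cutoff is an assumption):
white_contours = []
for cnt in filteredContours:
    mask = np.zeros(grey.shape, dtype="uint8")
    cv2.drawContours(mask, [cnt], -1, 255, -1)
    mean = cv2.mean(grey, mask=mask)[0]  # mean intensity inside the contour
    if mean > 200:  # assumed "close to white" cutoff
        white_contours.append(cnt)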
Colors and mean do not match well due to color space properties. I would create a histogram and select the most frequent color (some color down-sampling could be applied too).
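A minimal sketch of that idea, reusing the single-channel contour mask from the previous answer and the grayscale image (256 bins, no down-sampling):
hist = cv2.calcHist([grey], [0], mask, [256], [0, 256])
dominant = int(np.argmax(hist))  # most frequent gray level: 0 = black, 255 = white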
