OpenCV Segmentation of Largest contour in Breast Mammograms - python

This might be a bit too "general" a question, but how do I perform GRAYSCALE image segmentation and keep only the largest contour? I am trying to remove background noise (i.e. labels) from breast mammograms, but I have not been successful. Here is the original image:
First, I applied the AGCWD algorithm (based on the paper "Efficient Contrast Enhancement Using Adaptive Gamma Correction With Weighting Distribution") in order to get better contrast of the image pixels, like so:
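For reference, since AGCWD is not built into OpenCV, a minimal sketch of the paper's transform might look like the following (the weighting exponent alpha=0.5 is an assumption, taken as a commonly used default; this is a sketch, not necessarily the exact implementation used):
import cv2
import numpy as np
def agcwd(gray, alpha=0.5):
    # Sketch of adaptive gamma correction with weighting distribution
    hist = cv2.calcHist([gray], [0], None, [256], [0, 256]).ravel()
    pdf = hist / hist.sum()
    # Weight the distribution to temper dominant histogram bins
    pdf_w = pdf.max() * ((pdf - pdf.min()) / (pdf.max() - pdf.min())) ** alpha
    cdf_w = np.cumsum(pdf_w) / pdf_w.sum()
    # Per-intensity gamma of 1 - weighted CDF lifts dark-dominant images more
    gamma = 1.0 - cdf_w
    lut = np.round(255.0 * (np.arange(256) / 255.0) ** gamma).astype(np.uint8)
    return cv2.LUT(gray, lut)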
Afterwards, I tried executing the following steps:
Image segmentation using OpenCV's KMeans clustering algorithm:
enhanced_image_cpy = enhanced_image.copy()
reshaped_image = np.float32(enhanced_image_cpy.reshape(-1, 1))
number_of_clusters = 10
stop_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.1)
ret, labels, clusters = cv2.kmeans(reshaped_image, number_of_clusters, None, stop_criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
clusters = np.uint8(clusters)
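(For reference, the segmented image itself can be reconstructed by mapping each pixel's label back to its cluster centre, a standard step for visualizing the k-means result:)
segmented_image = clusters[labels.flatten()].reshape(enhanced_image_cpy.shape)
show_images([segmented_image])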
Canny Edge Detection:
removed_cluster = 1
canny_image = np.copy(enhanced_image_cpy).reshape((-1, 1))
canny_image[labels.flatten() == removed_cluster] = [0]
canny_image = cv2.Canny(canny_image,100,200).reshape(enhanced_image_cpy.shape)
show_images([canny_image])
Find and Draw Contours:
initial_contours_image = np.copy(canny_image)
initial_contours_image_bgr = cv2.cvtColor(initial_contours_image, cv2.COLOR_GRAY2BGR)
_, thresh = cv2.threshold(initial_contours_image, 50, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(initial_contours_image_bgr, contours, -1, (255,0,0), 1)  # last argument is line thickness
show_images([initial_contours_image_bgr])
Here is how the image looks after I draw 44004 contours:
I am not sure how I can get one BIG contour instead of 44004 small ones. Any ideas on how to fix my approach, or perhaps an alternative approach to get rid of the label in the top right corner?
Thanks in advance!

Here is one way to do that in Python/OpenCV.
Read the image
Threshold and invert so the borders are black
Remove the borders of the image as follows (so as to make it easier to get the relevant contours later):
Count the number of non-zero pixels in each column and find the first and last column that have counts greater than 0
Count the number of non-zero pixels in each row and find the first and last row that have counts greater than 0
Crop the image to remove the borders
Crop thresh1 and invert to make thresh2
Get the external contours from thresh2
Find the largest contour and draw as white filled on a black background as a mask
Make all pixels in the cropped image black where the mask is black
Save the results
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('xray3.png')
# convert to gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# threshold and invert
thresh1 = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]
thresh1 = 255 - thresh1
# remove borders
# count number of white pixels in columns as new 1D array
count_cols = np.count_nonzero(thresh1, axis=0)
# get first and last x coordinate where white
first_x = np.where(count_cols>0)[0][0]
last_x = np.where(count_cols>0)[0][-1]
print(first_x,last_x)
# count number of white pixels in rows as new 1D array
count_rows = np.count_nonzero(thresh1, axis=1)
# get first and last y coordinate where white
first_y = np.where(count_rows>0)[0][0]
last_y = np.where(count_rows>0)[0][-1]
print(first_y,last_y)
# crop image
crop = img[first_y:last_y+1, first_x:last_x+1]
# crop thresh1 and invert
thresh2 = thresh1[first_y:last_y+1, first_x:last_x+1]
thresh2 = 255 - thresh2
# get external contours and keep largest one
contours = cv2.findContours(thresh2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
big_contour = max(contours, key=cv2.contourArea)
# make mask from contour
mask = np.zeros_like(thresh2 , dtype=np.uint8)
cv2.drawContours(mask, [big_contour], 0, 255, -1)
# make crop black everywhere except where largest contour is white in mask
result = crop.copy()
result[mask==0] = (0,0,0)
# write result to disk
cv2.imwrite("xray3_thresh1.jpg", thresh1)
cv2.imwrite("xray3_crop.jpg", crop)
cv2.imwrite("xray3_thresh2.jpg", thresh2)
cv2.imwrite("xray3_mask.jpg", mask)
cv2.imwrite("xray3_result.png", result)
# display it
cv2.imshow("thresh1", thresh1)
cv2.imshow("crop", crop)
cv2.imshow("thresh2", thresh2)
cv2.imshow("mask", mask)
cv2.imshow("result", result)
cv2.waitKey(0)
Threshold 1 image:
Cropped image:
Threshold 2 image:
Mask image:
Result:

Related

Get the location of all contours present in image using opencv, but skipping text

I want to retrieve all contours of the image below, but ignore text.
Image:
When I try to find the contours of the current image I get the following:
I have no idea how to go about this as I am new to using OpenCV and image processing. I want to ignore the text; how can I achieve this? If ignoring is not possible, then making a single bounding box surrounding the text would be good too.
Edit:
Criteria that I need to match:
The contours may vary in size and shape.
The colors from the image may differ.
The colors and size of the text inside the image may differ.
Here is one way to do that in Python/OpenCV.
Read the input
Convert to grayscale
Get Canny edges
Apply morphology close to ensure they are closed
Get all contour hierarchy
Filter contours to keep only those above threshold in perimeter
Draw contours on input
Draw each contour on a black background
Save results
Input:
import numpy as np
import cv2
# read input
img = cv2.imread('short_title.png')
# convert to gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# get canny edges
edges = cv2.Canny(gray, 1, 50)
# apply morphology close to ensure they are closed
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
edges = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)
# get contours
contours = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
contours = contours[0] if len(contours) == 2 else contours[1]
# filter contours to keep only large ones
result = img.copy()
i = 1
for c in contours:
    perimeter = cv2.arcLength(c, True)
    if perimeter > 500:
        cv2.drawContours(result, [c], -1, (0,0,255), 1)
        contour_img = np.zeros_like(img, dtype=np.uint8)
        cv2.drawContours(contour_img, [c], -1, (0,0,255), 1)
        cv2.imwrite("short_title_contour_{0}.jpg".format(i), contour_img)
        i = i + 1
# save results
cv2.imwrite("short_title_gray.jpg", gray)
cv2.imwrite("short_title_edges.jpg", edges)
cv2.imwrite("short_title_contours.jpg", result)
# show images
cv2.imshow("gray", gray)
cv2.imshow("edges", edges)
cv2.imshow("result", result)
cv2.waitKey(0)
Grayscale:
Edges:
All contours on input:
Contour 1:
Contour 2:
Contour 3:
Contour 4:
Here are two options for erasing the text:
Using pytesseract OCR.
Finding white (and small) connected components.
Both solutions build a mask, dilate the mask, and use cv2.inpaint to erase the text.
Using pytesseract:
Find text boxes using pytesseract.image_to_boxes.
Fill the boxes in the mask with 255.
Code sample:
import cv2
import numpy as np
from pytesseract import pytesseract, Output
# Tesseract path
pytesseract.tesseract_cmd = "C:\\Program Files\\Tesseract-OCR\\tesseract.exe"
img = cv2.imread('ShortAndInteresting.png')
# https://stackoverflow.com/questions/20831612/getting-the-bounding-box-of-the-recognized-words-using-python-tesseract
boxes = pytesseract.image_to_boxes(img, lang='eng', config=' --psm 6') # Run tesseract, returning the bounding boxes
h, w, _ = img.shape # assumes color image
mask = np.zeros((h, w), np.uint8)
# Fill the bounding boxes on the image
for b in boxes.splitlines():
    b = b.split(' ')
    mask = cv2.rectangle(mask, (int(b[1]), h - int(b[2])), (int(b[3]), h - int(b[4])), 255, -1)
mask = cv2.dilate(mask, np.ones((5, 5), np.uint8)) # Dilate the boxes in the mask
clean_img = cv2.inpaint(img, mask, 2, cv2.INPAINT_NS) # Remove the text using inpaint (replace the masked pixels with the neighbor pixels).
# Show mask and clean_img for testing
cv2.imshow('mask', mask)
cv2.imshow('clean_img', clean_img)
cv2.waitKey()
cv2.destroyAllWindows()
Mask:
Finding white (and small) connected components:
Use mask = cv2.inRange(img, (230, 230, 230), (255, 255, 255)) for finding the text (assume the text is white).
Finding connected components in the mask using cv2.connectedComponentsWithStats(mask, 4)
Remove large components from the mask - fill components with large area with zeros.
Code sample:
import cv2
import numpy as np
img = cv2.imread('ShortAndInteresting.png')
mask = cv2.inRange(img, (230, 230, 230), (255, 255, 255))
nlabel, labels, stats, centroids = cv2.connectedComponentsWithStats(mask, 4) # Finding connected components with statistics
# Remove large components from the mask (fill components with large area with zeros).
for i in range(1, nlabel):
    area = stats[i, cv2.CC_STAT_AREA]  # Get area
    if area > 1000:
        mask[labels == i] = 0  # Remove large connected components from the mask (fill with zero)
mask = cv2.dilate(mask, np.ones((5, 5), np.uint8))  # Dilate the text in the mask
cv2.imwrite('mask2.png', mask)
clean_img = cv2.inpaint(img, mask, 2, cv2.INPAINT_NS) # Remove the text using inpaint (replace the masked pixels with the neighbor pixels).
# Show mask and clean_img for testing
cv2.imshow('mask', mask)
cv2.imshow('clean_img', clean_img)
cv2.waitKey()
cv2.destroyAllWindows()
Mask:
Clean image:
Note:
My assumption is that you know how to split the image into contours, and the only issue is the presence of the text.
I would recommend using flood fill: find a seed point for each color region, then flood fill it to ignore the text values within. Hope that helps!
Refer to this example of using floodFill: https://www.programcreek.com/python/example/89425/cv2.floodFill
The example below is copied from the link above:
import cv2 as cv
import numpy as np
def fillhole(input_image):
    '''
    Fill the holes in a gray/binary image by the flood-fill method.
    Note: only holes surrounded by connected regions will be filled.
    :param input_image:
    :return:
    '''
    im_flood_fill = input_image.copy()
    h, w = input_image.shape[:2]
    mask = np.zeros((h + 2, w + 2), np.uint8)
    im_flood_fill = im_flood_fill.astype("uint8")
    cv.floodFill(im_flood_fill, mask, (0, 0), 255)
    im_flood_fill_inv = cv.bitwise_not(im_flood_fill)
    img_out = input_image | im_flood_fill_inv
    return img_out
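And a minimal sketch of the per-region flood fill idea (the input file name, seed point, and loDiff/upDiff tolerances here are assumptions to tune per image; FLOODFILL_MASK_ONLY records the filled region in the mask without repainting the image):
img = cv.imread('shapes.png')  # hypothetical input with colored regions and text
h, w = img.shape[:2]
seed = (50, 50)  # assumed to lie inside one colored region
mask = np.zeros((h + 2, w + 2), np.uint8)
# Tolerance chosen wide enough to swallow the text pixels inside the region
flags = 4 | cv.FLOODFILL_MASK_ONLY | (255 << 8)
cv.floodFill(img, mask, seed, (0, 0, 0), loDiff=(60, 60, 60), upDiff=(60, 60, 60), flags=flags)
region_mask = mask[1:-1, 1:-1]
# Any text holes left inside the region can then be closed with fillhole() above
region_mask = fillhole(region_mask)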

Delete unused shapes in OpenCV

I've got shape detection with OpenCV in Python going on: bolts and nuts. I take a picture, make it binary, and detect edges. Now the white area is always grainy because of dust and grime. My detection uses the largest areas as parts, which works great. But how can I delete the thousands of objects caused by dust?
In short, I want to reduce the array of shapes to only the biggest ones for further processing.
Here is one way to do that as per my comment above using Python/OpenCV.
From your binary image get the contours. Then select the largest contour. Then draw a white filled contour on a black background image the same size as your input as a mask. Then use numpy to blacken everything in your image that is black in your mask.
Input:
import cv2
import numpy as np
# load image
img = cv2.imread("coke_bottle2.png")
hh, ww = img.shape[:2]
# convert to gray
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# threshold to binary
thresh = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY)[1]
# apply morphology closing to fill black holes and smooth outline
# could use opening to remove white spots, but we will use contours
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (25,25))
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# get the largest contour
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
big_contour = max(contours, key=cv2.contourArea)
# draw largest contour as white filled on black background as mask
mask = np.zeros((hh,ww), dtype=np.uint8)
cv2.drawContours(mask, [big_contour], 0, 255, -1)
# use mask to black all but largest contour
result = img.copy()
result[mask==0] = (0,0,0)
# write result to disk
cv2.imwrite("coke_bottle2_threshold.png", thresh)
cv2.imwrite("coke_bottle2_mask.png", mask)
cv2.imwrite("coke_bottle2_background_removed.jpg", result)
# display it
cv2.imshow("thresh", thresh)
cv2.imshow("mask", mask)
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold Image (contains small extraneous white regions):
Mask Image (only the largest filled contour):
Result:

How to detect text on an X-Ray image with OpenCV

I want to detect text on x-ray images. The goal is to extract the oriented bounding boxes as a matrix where each row is a detected bounding box and each row contains the coordinates of all four edges i.e. [x1, x2, y1, y2]. I'm using python 3 and OpenCV 4.2.0.
Here is a sample image:
The string "test word", "a" and "b" should be detected.
I followed this OpenCV tutorial about creating rotated boxes for contours and this stackoverflow answer about detecting a text area in an image.
The resulting boundary boxes should look something like this:
I was able to detect the text, but the result included a lot of boxes without text.
Here is what I tried so far:
img = cv2.imread(file_name)
## Convert the image into grayscale and threshold it to get rid of the noise.
img2gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret, mask = cv2.threshold(img2gray, 180, 255, cv2.THRESH_BINARY)
image_final = cv2.bitwise_and(img2gray, img2gray, mask=mask)
ret, new_img = cv2.threshold(image_final, 180, 255, cv2.THRESH_BINARY) # for black text , cv.THRESH_BINARY_INV
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
dilated = cv2.dilate(new_img, kernel, iterations=6)
canny_output = cv2.Canny(dilated, 100, 100 * 2)
cv2.imshow('Canny', canny_output)
## Finds contours and saves them to the vectors contour and hierarchy.
contours, hierarchy = cv2.findContours(canny_output, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Find the rotated rectangles and ellipses for each contour
minRect = [None] * len(contours)
for i, c in enumerate(contours):
    minRect[i] = cv2.minAreaRect(c)
# Draw contours + rotated rects + ellipses
drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
for i, c in enumerate(contours):
    color = (255, 0, 255)
    # contour
    cv2.drawContours(drawing, contours, i, color)
    # rotated rectangle
    box = cv2.boxPoints(minRect[i])
    box = np.intp(box)  # np.intp: integer used for indexing (same as C ssize_t; normally either int32 or int64)
    cv2.drawContours(img, [box], 0, color)
cv2.imshow('Result', img)
cv2.waitKey()
Do I need to run the results through OCR to make sure whether it is text or not? What other approaches should I try?
PS: I'm quite new to computer vision and not familiar with most concepts yet.
Here's a simple approach:
Obtain binary image. Load image, create blank mask, convert to grayscale, Gaussian blur, then Otsu's threshold
Merge text into a single contour. Since we want to extract the text as one piece, we perform morphological operations to connect individual text contours into a single contour.
Extract text. We find contours then filter using contour area with cv2.contourArea and aspect ratio using cv2.arcLength + cv2.approxPolyDP. If a contour passes the filter, we find the rotated bounding box and draw this onto our mask.
Isolate text. We perform a cv2.bitwise_and operation to extract the text.
Here's a visualization of the process. Using this screenshotted input image (since your provided input image was connected as one image):
Input image -> Binary image
Morph close -> Detected text
Isolated text
Results with the other image
Input image -> Binary image + morph close
Detected text -> Isolated text
Code
import cv2
import numpy as np
# Load image, create mask, grayscale, Gaussian blur, Otsu's threshold
image = cv2.imread('1.png')
original = image.copy()
blank = np.zeros(image.shape[:2], dtype=np.uint8)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# Merge text into a single contour
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
close = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=3)
# Find contours
cnts = cv2.findContours(close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    # Filter using contour area and aspect ratio
    x,y,w,h = cv2.boundingRect(c)
    area = cv2.contourArea(c)
    ar = w / float(h)
    if ((ar > 1.4 and ar < 4) or ar < .85) and area > 10 and area < 500:
        # Find rotated bounding box
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        box = np.intp(box)
        cv2.drawContours(image,[box],0,(36,255,12),2)
        cv2.drawContours(blank,[box],0,(255,255,255),-1)
# Bitwise operations to isolate text
extract = cv2.bitwise_and(thresh, blank)
extract = cv2.bitwise_and(original, original, mask=extract)
cv2.imshow('thresh', thresh)
cv2.imshow('image', image)
cv2.imshow('close', close)
cv2.imshow('extract', extract)
cv2.waitKey()
I removed the text using the following command (after the code above):
gray2 = cv2.cvtColor(extract, cv2.COLOR_BGR2GRAY)
blur2 = cv2.GaussianBlur(gray2, (5,5), 0)
thresh2 = cv2.threshold(blur2, 0, 255, cv2.THRESH_BINARY)[1]
test = cv2.inpaint(original, thresh2, 7, cv2.INPAINT_TELEA)

How to use OpenCV to crop an image based on a certain criteria?

I would like to crop the images like the one below using python's OpenCV library. The area of interest is inside the squiggly lines on the top and bottom, and the lines on the side. The problem is that every image is slightly different. This means that I need some automated way of cropping for the area of interest. I guess the top and the sides would be easy since you could just crop it by 10 pixels or so. But how can I crop out the bottom half of the image where the line is not straight? I have included this example image. The image that follows highlights in pink the area of the image that I am interested in keeping.
Here is one way using Python/OpenCV.
Read input
Get center point (assume it is inside the desired region)
Convert image to grayscale
Floodfill the gray image and set background to black
Get the largest contour and its bounding box
Draw the largest contour as filled on black background as mask
Apply the mask to the input image
Crop the masked input image
Input:
import cv2
import numpy as np
# load image and get dimensions
img = cv2.imread("odd_region.png")
hh, ww, cc = img.shape
# compute center of image (as integer)
wc = ww//2
hc = hh//2
# create grayscale copy of input as basis of mask
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# create zeros mask 2 pixels larger in each dimension
zeros = np.zeros([hh + 2, ww + 2], np.uint8)
# do floodfill at center of image as seed point
ffimg = cv2.floodFill(gray, zeros, (wc,hc), (255), (0), (0), flags=8)[1]
# set rest of ffimg to black
ffimg[ffimg!=255] = 0
# get contours, find largest and its bounding box
contours = cv2.findContours(ffimg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
area_thresh = 0
for cntr in contours:
    area = cv2.contourArea(cntr)
    if area > area_thresh:
        area_thresh = area
        outer_contour = cntr
x,y,w,h = cv2.boundingRect(outer_contour)
# draw the filled contour on a black image
mask = np.full([hh,ww,cc], (0,0,0), np.uint8)
cv2.drawContours(mask,[outer_contour],0,(255,255,255),thickness=cv2.FILLED)
# mask the input
masked_img = img.copy()
masked_img[mask == 0] = 0
#masked_img[mask != 0] = img[mask != 0]
# crop the bounding box region of the masked img
result = masked_img[y:y+h, x:x+w]
# draw the contour outline on a copy of result
result_outline = result.copy()
cv2.drawContours(result_outline,[outer_contour],0,(0,0,255),thickness=1,offset=(-x,-y))
# display it
cv2.imshow("img", img)
cv2.imshow("ffimg", ffimg)
cv2.imshow("mask", mask)
cv2.imshow("masked_img", masked_img)
cv2.imshow("result", result)
cv2.imshow("result_outline", result_outline)
cv2.waitKey(0)
cv2.destroyAllWindows()
# write result to disk
cv2.imwrite("odd_region_cropped.png", result)
cv2.imwrite("odd_region_cropped_outline.png", result_outline)
Result:
Result With Contour Drawn:

Square detection in image

I am trying to detect all the square-shaped dice images so that I can crop them individually and use them for OCR.
Below is the Original image:
Here is the code I have, but it is missing some squares.
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in range(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                _retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            bin, contours, _hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4]) for i in range(4)])
                    #print(cnt)
                    a = (cnt[1][1] - cnt[0][1])
                    if max_cos < 0.1 and a < img.shape[0]*0.8:
                        squares.append(cnt)
    return squares
dice = cv2.imread('img1.png')
squares = find_squares(dice)
cv2.drawContours(dice, squares, -1, (0, 255, 0), 3)
Here are the Output images:
As per my analysis, some squares are missing because Canny edges are dropped along the dice where the intensity transition between die and background is smooth.
Given the constraint that there will always be 25 dice in a square grid pattern (5x5), can we predict the missing square positions based on the recognised squares?
Or can we modify the above square detection algorithm?
Sharpen square edges. Load the image, convert to grayscale, median blur to smooth, and sharpen to enhance edges.
Obtain binary image and remove noise. We threshold to obtain a black/white binary image. Depending on the image, Otsu's thresholding or adaptive thresholding would work. From here we create a rectangular kernel and perform morphological transformations to remove noise and enhance the square contours.
Detect and extract squares. Next we find contours and filter using minimum/maximum threshold area. Any contours that pass our filter will be our squares so to extract each ROI, we obtain the bounding rectangle coordinates, crop using Numpy slicing, and save each square image.
Sharpen the image with cv2.filter2D() using a generic sharpening kernel; other kernels can be found here.
Now threshold to get a binary image
There are little particles of noise, so to remove them we perform morphological operations.
Next find contours and filter using cv2.contourArea() with minimum/maximum threshold values.
We can crop each desired square region using Numpy slicing and save each ROI like this
x,y,w,h = cv2.boundingRect(c)
ROI = image[y:y+h, x:x+w]
cv2.imwrite('ROI_{}.png'.format(image_number), ROI)
import cv2
import numpy as np
# Load image, grayscale, median blur, sharpen image
image = cv2.imread('1.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.medianBlur(gray, 5)
sharpen_kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
sharpen = cv2.filter2D(blur, -1, sharpen_kernel)
# Threshold and morph close
thresh = cv2.threshold(sharpen, 160, 255, cv2.THRESH_BINARY_INV)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
close = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=2)
# Find contours and filter using threshold area
cnts = cv2.findContours(close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
min_area = 100
max_area = 1500
image_number = 0
for c in cnts:
    area = cv2.contourArea(c)
    if area > min_area and area < max_area:
        x,y,w,h = cv2.boundingRect(c)
        ROI = image[y:y+h, x:x+w]
        cv2.imwrite('ROI_{}.png'.format(image_number), ROI)
        cv2.rectangle(image, (x, y), (x + w, y + h), (36,255,12), 2)
        image_number += 1
cv2.imshow('sharpen', sharpen)
cv2.imshow('close', close)
cv2.imshow('thresh', thresh)
cv2.imshow('image', image)
cv2.waitKey()
That extra piece of information is absolutely golden. Yes, given the 5x5 matrix of dice, you can nail the positions quite well. The dice you can identify give you the center, size, and orientation of the dice. Simply continue those patterns along both axes. For your second pass, increase the contrast in each "region of interest" where you expect to find the edge of a douse (never say die!). You know within a few pixels where the edges will be: simply attenuate the image until you identify those edges.
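A minimal sketch of that prediction step (the detected centres below are hypothetical, and it assumes at least one die was found in the outermost rows and columns so the grid span is known):
import numpy as np
# Hypothetical (x, y) centres of the dice that were detected
detected = np.array([(110, 108), (212, 110), (314, 109), (110, 210), (416, 312)], dtype=float)
# Snap each axis to 5 evenly spaced positions spanning the detected range
xs = np.linspace(detected[:, 0].min(), detected[:, 0].max(), 5)
ys = np.linspace(detected[:, 1].min(), detected[:, 1].max(), 5)
# Full 5x5 lattice of expected centres; re-search (with boosted local contrast)
# around any lattice point that has no detected die nearby
expected = [(x, y) for y in ys for x in xs]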
