Increase contour detection accuracy of chess board squares using OpenCV in Python

I want to detect the contours of the black squares of a chess board in the following image.
The code below successfully detects only a few of the black squares; how can I increase the accuracy?
import cv2
import numpy as np

imPath = r" "  # <----- image path

def imageResize(orgImage, resizeFact):
    dim = (int(orgImage.shape[1]*resizeFact),
           int(orgImage.shape[0]*resizeFact))  # w, h
    return cv2.resize(orgImage, dim, interpolation=cv2.INTER_AREA)

img = imageResize(cv2.imread(imPath), 0.5)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.inRange(gray, 135, 155)  # to pick only black squares
# find contours
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cntImg = img.copy()
minArea, maxArea = 3000, 3500
valid_cnts = []
for c in cnts:
    area = cv2.contourArea(c)
    if area > minArea and area < maxArea:
        valid_cnts.append(c)
        # draw centers for troubleshooting
        M = cv2.moments(c)
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
        cv2.circle(cntImg, (cX, cY), 5, (0, 0, 255), -1)
cv2.drawContours(cntImg, valid_cnts, -1, (0, 255, 0), 2)
cv2.imshow('org', img)
cv2.imshow('threshold', thresh)
cv2.imshow('contour', cntImg)
cv2.waitKey(0)
cv2.destroyAllWindows()
Gives threshold and contour -
[0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 7.0, 7.5, 9.5, 3248.5, 3249.0, 6498.0] are the unique contour areas. Typical areas for the desired black squares are 3248.5 and 3249.0. Here's a quick snippet for getting the unique contour areas -
cntAreas = [cv2.contourArea(x) for x in cnts]
print(sorted(set(cntAreas)))
Highly appreciate any help!!

The problem was caused by gaps in the Canny edges, which originated from noise in the grayscale image. Applying a dilate morphological operation reduces the noise and yields well-connected edges that form closed contours.
Full code -
import cv2
import numpy as np

imPath = r" "  # <----- image path

def imageResize(orgImage, resizeFact):
    dim = (int(orgImage.shape[1]*resizeFact),
           int(orgImage.shape[0]*resizeFact))  # w, h
    return cv2.resize(orgImage, dim, interpolation=cv2.INTER_AREA)

img = imageResize(cv2.imread(imPath), 0.5)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))  # <-----
morphed = cv2.dilate(gray, kernel, iterations=1)
thresh = cv2.inRange(morphed, 135, 155)  # to pick only black squares
# find canny edges (for inspection; contours below are found on thresh)
edged_wide = cv2.Canny(thresh, 10, 200, apertureSize=3)
# find contours
contours, hierarchy = cv2.findContours(
    thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # cv2.CHAIN_APPROX_NONE stores all coords, unlike SIMPLE
cntImg = img.copy()
minArea, maxArea = 2000, 4000
valid_cnts = []
for c in contours:
    area = cv2.contourArea(c)
    if area > minArea and area < maxArea:
        valid_cnts.append(c)
        # draw centers
        M = cv2.moments(c)
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
        cv2.circle(cntImg, (cX, cY), 5, (0, 0, 255), -1)
cv2.drawContours(cntImg, valid_cnts, -1, (0, 255, 0), 2)
cv2.imshow('threshold', thresh)
cv2.imshow('morphed', morphed)
cv2.imshow('canny edge', edged_wide)
cv2.imshow('contour', cntImg)
cv2.waitKey(0)
cv2.destroyAllWindows()
Here's the contour plot -
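A note on the fix: instead of dilating the grayscale image, the gaps can also be sealed in the binary mask itself with a morphological close. This is a minimal alternative sketch (not part of the original fix), assuming the gray image from the code above:

import cv2

# Alternative sketch: close small gaps directly in the binary mask so the
# square contours come out closed (assumes `gray` from the code above).
thresh = cv2.inRange(gray, 135, 155)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
contours, _ = cv2.findContours(closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4.x return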

Related

use python open-cv for segmenting newspaper article

I'm using the code below to segment the articles from an image of a newspaper.
import uuid

import cv2
import numpy as np

def segmenter(image_received):
    # Process 1: Lines Detection
    img = image_received
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    edges = cv2.Canny(gray, 75, 150)  # determine edges
    lines = cv2.HoughLinesP(edges, 0.017, np.pi / 180, 60,
                            minLineLength=100, maxLineGap=0.1)  # Hough lines generation
    # drawing Hough lines
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(img, (x1, y1), (x2, y2), (0, 0, 128), 12)  # Hough lines drawn in color (0, 0, 128)
    # Drawing brown border
    bold = cv2.copyMakeBorder(
        img,  # image source
        5,    # top width
        5,    # bottom width
        5,    # left width
        5,    # right width
        cv2.BORDER_CONSTANT,
        value=(0, 0, 128)  # brown color value
    )
    image = bold
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
    detected_lines = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, horizontal_kernel, iterations=2)
    cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        if len(c) >= 10:
            cv2.drawContours(image, [c], 0, (0, 17, 255), 1)
    vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))
    detected_lines = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, vertical_kernel, iterations=2)
    cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        if len(c) >= 10:
            cv2.drawContours(image, [c], 0, (0, 17, 255), 1)
    cv2.imwrite(f'tmp/{uuid.uuid4()}.jpg', image)
For instance, the input image is:
and the output image is:
There are three problems:
- The output rectangles aren't complete in all cases.
- Images inside articles are also segmented as if they were part of the articles. What I need is to segment only the text of the newspaper and crop everything else out. Something like this one:
- Consider the following image: the article indicated by the borders is not rectangular and is much more complicated.
How can I achieve the correct borders using Python OpenCV or other image-processing libraries?
(The question has an answer here for MATLAB, but I need Python code.)
Here is my pipeline. I think it can be optimized.
Initialization
%matplotlib inline
import numpy as np
import cv2
from matplotlib import pyplot as plt
Load image
image_file_name = 'paper.jpg'
image = cv2.imread(image_file_name)
# gray convertion
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
The first important step is to remove the lines, so I search for them:
grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
abs_grad_x = cv2.convertScaleAbs(grad_x)
abs_grad_y = cv2.convertScaleAbs(grad_y)
# threshold
thresh_x = cv2.threshold(abs_grad_x, 0, 255, cv2.THRESH_OTSU)[1]
thresh_y = cv2.threshold(abs_grad_y, 0, 255, cv2.THRESH_OTSU)[1]
# bluring
kernel_size = 3
blur_thresh_x = cv2.GaussianBlur(thresh_x,(kernel_size, kernel_size),0)
blur_thresh_y = cv2.GaussianBlur(thresh_y,(kernel_size, kernel_size),0)
# Run Hough on the edge-detected image
rho = 1  # distance resolution in pixels of the Hough grid
theta = np.pi / 180  # angular resolution in radians of the Hough grid
threshold = 15  # minimum number of votes (intersections in a Hough grid cell)
min_line_length = 200  # minimum number of pixels making up a line
max_line_gap = 1  # maximum gap in pixels between connectable line segments
line_image = np.copy(gray) * 0  # creating a blank to draw lines on
# Vertical lines
vertical_lines = cv2.HoughLinesP(blur_thresh_x, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)
if vertical_lines is not None:
    for line in vertical_lines:
        for x1, y1, x2, y2 in line:
            # here it's possible to add a selection of only vertical lines
            if np.abs(y1 - y2) > 0.1 * np.abs(x1 - x2):
                cv2.line(line_image, (x1, y1), (x2, y2), 255, 5)
# Horizontal lines
horizontal_lines = cv2.HoughLinesP(blur_thresh_y, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)
if horizontal_lines is not None:
    for line in horizontal_lines:
        for x1, y1, x2, y2 in line:
            # here it's possible to add a selection of only horizontal lines
            if np.abs(x1 - x2) > 0.1 * np.abs(y1 - y2):
                cv2.line(line_image, (x1, y1), (x2, y2), 255, 5)
Then I remove the lines from the threshold:
# threshold
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# remove lines
clean_thresh = cv2.subtract(thresh, line_image)
Then I search for the phrases:
# search the phrases
dilatation_type = cv2.MORPH_RECT
horizontal_dilatation = 20 #This is the gap. 20 for the first image, 10 for the second image
vertical_dilatation = 1
element = cv2.getStructuringElement(dilatation_type, (2*horizontal_dilatation + 1, 2*vertical_dilatation+1), (horizontal_dilatation, vertical_dilatation))
dilatation_thresh = cv2.dilate(clean_thresh, element)
# Fill
filled_tresh = dilatation_thresh.copy()
contours, hierarchy = cv2.findContours(dilatation_thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for cnt in contours:
    cv2.drawContours(filled_tresh, [cnt], -1, 255, cv2.FILLED)
Now I detect the bounding boxes
# Draw bounding boxes
bounding_box1 = filled_tresh.copy()
contours, hierarchy = cv2.findContours(bounding_box1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(bounding_box1, (x, y), (x + w, y + h), 255, cv2.FILLED)
# REPEAT: draw bounding boxes and find the mean text width
mean_bb_width = 0  # mean bounding box width
bounding_box2 = bounding_box1.copy()
contours, hierarchy = cv2.findContours(bounding_box2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
num_cnt = 0
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(bounding_box2, (x, y), (x + w, y + h), 255, cv2.FILLED)
    mean_bb_width = mean_bb_width + w
    num_cnt = num_cnt + 1
mean_bb_width = mean_bb_width / num_cnt
Now I separate the titles from the text
# define a title as anything wider than 1.5 * mean width
min_title_width = 1.5 * mean_bb_width
raw_title = np.copy(gray) * 0
raw_text = np.copy(gray) * 0
# separate titles from phrases
contours, hierarchy = cv2.findContours(bounding_box2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    if w >= min_title_width:
        cv2.drawContours(raw_title, [cnt], -1, 255, cv2.FILLED)
    else:
        cv2.drawContours(raw_text, [cnt], -1, 255, cv2.FILLED)
and then the final processing
image_out = image.copy()
# Closing parameters
horizontal_closing = 1
vertical_closing = 20
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (horizontal_closing, vertical_closing))
# Processing titles
# Closing
closing_title = cv2.morphologyEx(raw_title, cv2.MORPH_CLOSE, kernel)
# Find contours
contours, hierarchy = cv2.findContours(closing_title, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Draw bounding boxes
bounding_title = closing_title.copy()
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(image_out, (x, y), (x + w, y + h), (255, 0, 0), 2)
# Processing text
# Closing
closing_text = cv2.morphologyEx(raw_text, cv2.MORPH_CLOSE, kernel)
# Find contours
contours, hierarchy = cv2.findContours(closing_text, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Draw bounding boxes
bounding_text = closing_text.copy()
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(image_out, (x, y), (x + w, y + h), (0, 255, 0), 2)
The result is:
Changing the parameter horizontal_dilatation from 20 to 10, I obtain the following result for the second image (where I removed the red border that you added):
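Since horizontal_dilatation is the one image-dependent knob in the pipeline, a small helper (a sketch using the variables defined above) keeps the tuning in one place:

def merge_phrases(clean_thresh, gap=20):
    # Dilate horizontally so the characters of one phrase merge into a blob.
    # `gap` is the image-dependent horizontal dilation (20 for the first
    # image, 10 for the second).
    element = cv2.getStructuringElement(cv2.MORPH_RECT, (2 * gap + 1, 3), (gap, 1))
    return cv2.dilate(clean_thresh, element)

dilatation_thresh = merge_phrases(clean_thresh, gap=20)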

Finding each centroid of multiple connected objects

I am SUPER new to Python coding and would like some help. I was able to segment each cell outline within a biological tissue (super cool!) and now I am trying to find the centroid of each cell within the tissue. I am using this code:
import cv2
import imutils

img = cv2.imread('/Users/kate/Desktop/SegmenterTest/SegmentedCells/Seg1.png')
image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(image, 60, 255, cv2.THRESH_BINARY)[1]
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# loop over the contours
for c in cnts:
    # compute the center of the contour
    M = cv2.moments(c)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    # draw the contour and center of the shape on the image
    cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
    cv2.circle(image, (cX, cY), 7, (255, 255, 255), -1)
    cv2.putText(image, "center", (cX - 20, cY - 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
# show the image
cv2.imshow("Image", image)
cv2.waitKey(0)
However, when I use this code, it gives me the centroid of the ENTIRE object, and not of each individual object.
I have no idea where to go from here, so a nudge in the right direction would be greatly appreciated!
You can use the function regionprops from the module scikit-image in your case. Here is what I got.
This is the code I used.
import cv2
import matplotlib.pyplot as plt
from skimage import measure
import numpy as np

cells = cv2.imread('cells.png', 0)
ret, thresh = cv2.threshold(cells, 20, 255, cv2.THRESH_BINARY_INV)
labels = measure.label(thresh, background=0)
bg_label = labels[0, 0]
labels[labels == bg_label] = 0  # Assign the background label to 0
props = measure.regionprops(labels)

fig, ax = plt.subplots(1, 1)
plt.axis('off')
ax.imshow(cells, cmap='gray')
centroids = np.zeros(shape=(len(np.unique(labels)), 2))  # Access the coordinates of centroids
for i, prop in enumerate(props):
    my_centroid = prop.centroid
    centroids[i, :] = my_centroid
    ax.plot(my_centroid[1], my_centroid[0], 'r.')
# print(centroids)
# fig.savefig('out.png', bbox_inches='tight', pad_inches=0)
plt.show()
Good luck with your research!
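For reference, OpenCV itself can also produce per-object centroids without scikit-image, via cv2.connectedComponentsWithStats; a sketch, assuming the same thresh image as above:

import cv2

# connectedComponentsWithStats labels each blob and returns its centroid.
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh)
for cx, cy in centroids[1:]:  # row 0 is the background component
    print(cx, cy)             # (x, y) centroid of one cell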
Problem
cv2.findContours uses an algorithm which has a few different 'retrieval modes'. These affect which contours are returned and how they are returned. This is documented here. These are given as the second argument to findContours. Your code uses cv2.RETR_EXTERNAL which means findContours will only return the outermost border of separate objects.
Solution
Changing this argument to cv2.RETR_LIST will give you all the contours in the image (including the one outermost border). This is the simplest solution.
E.g.
import cv2
import imutils

img = cv2.imread('/Users/kate/Desktop/SegmenterTest/SegmentedCells/Seg1.png')
image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(image, 60, 255, cv2.THRESH_BINARY)[1]
cnts = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# loop over the contours
for c in cnts:
    # compute the center of the contour
    M = cv2.moments(c)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    # draw the contour and center of the shape on the image
    cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
    cv2.circle(image, (cX, cY), 7, (255, 255, 255), -1)
    cv2.putText(image, "center", (cX - 20, cY - 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
# show the image
cv2.imshow("Image", image)
cv2.waitKey(0)
Selecting only the innermost objects
To reliably omit the outer contours you can take advantage of the ability of findContours to return a hierarchy of the contours it detects. To do this, you can change the retrieval mode argument once again to RETR_TREE, which will generate a full hierarchy.
The hierarchy is an array containing arrays of 4 values for each contour in the image. Each value is an index of a contour in the contour array. From the docs:
For each i-th contour contours[i], the elements hierarchy[i][0], hierarchy[i][1], hierarchy[i][2], and hierarchy[i][3] are set to 0-based indices in contours of the next and previous contours at the same hierarchical level, the first child contour and the parent contour, respectively. If for the contour i there are no next, previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.
When we say 'innermost', what we mean is contours that have no children (contours inside of them). So we want those contours whose hierarchy entry has a negative third value; that is, contours[i] such that hierarchy[i][2] < 0.
A small wrinkle is that although findContours returns a tuple which includes the hierarchy, imutils.grab_contours discards the hierarchy and returns just the array of contours. All this means is that we have to do the work of grab_contours ourselves if we intend to work with different versions of OpenCV. This is just a simple if/else statement.
res = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# switch for different versions of OpenCV
if len(res) == 3:
    _, cnts, hierarchy = res
else:
    cnts, hierarchy = res
Once you have hierarchy, checking if a contour, cnts[i] is 'innermost' can be done with hierarchy[0][i][2] < 0, which should be False for contours that contain other contours.
A full example based on your question's code:
import cv2
import imutils

img = cv2.imread('/Users/kate/Desktop/SegmenterTest/SegmentedCells/Seg1.png')
image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(image, 60, 255, cv2.THRESH_BINARY)[1]
cnts = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# switch for different versions of OpenCV
if len(cnts) == 3:
    _, cnts, hierarchy = cnts
else:
    cnts, hierarchy = cnts
# loop over the contours
for i, c in enumerate(cnts):
    # check that it is 'innermost'
    if hierarchy[0][i][2] < 0:
        # compute the center of the contour
        M = cv2.moments(c)
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
        # draw the contour and center of the shape on the image
        cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
        cv2.circle(image, (cX, cY), 7, (255, 255, 255), -1)
        cv2.putText(image, "center", (cX - 20, cY - 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
# show the image
cv2.imshow("Image", image)
cv2.waitKey(0)
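Side note: on OpenCV 4.x, findContours returns exactly two values again, so the version switch above collapses to a direct unpack:

# OpenCV 4.x only: findContours returns (contours, hierarchy)
cnts, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)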

Separate object contours with OpenCV

I have been working with OpenCV to detect a squared obstacle. So far, this is the image I get after applying filters and Canny.
The obstacle I am trying to identify is the horizontal one; the three vertical rectangles are guide lines on the floor. My goal is to keep only the horizontal rectangle, separating it from the others, but after applying findContours I get only a single object that includes all the shapes. This is the code I have been using to find only the biggest rectangle by area:
# find the biggest contour (c) by area
if not contours:
    print("Empty")
else:
    bigone = max(contours, key=cv2.contourArea)
    area = cv2.contourArea(bigone)
    if area > 10000:
        x, y, w, h = cv2.boundingRect(bigone)
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
        cv2.putText(img, "Obstacle", (x + w // 2, y - 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
        pts = np.array(
            [[[x, y], [x + w, y], [x + w, y + h], [x, y + h]]], dtype=np.int32)
        cv2.fillPoly(mask, pts, (255, 255, 255))
        # values = img[np.where((mask == (255, 255, 255)).all(axis=2))]
        res = cv2.bitwise_and(img, mask)  # view only the obstacle
        obs_area = w * h
        print(obs_area)
        if obs_area <= 168000:
            command_publisher.publish("GO")
            cv2.putText(
                img, "GO", (380, 400), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 1)
        else:
            command_publisher.publish("STOP")
            cv2.putText(img, "STOP", (380, 400),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255), 1)
# show the output image
cv2.imshow("Image", img)
cv2.waitKey(1)
And this is the result I am getting:
Is there a way of separating my obstacle from the lines on the floor with some kind of filter or algorithm?
Here is an example image to work with:
Here is one way to do that using Python/OpenCV.
- Read the input
- Convert to HSV and extract only the saturation channel (black/white/gray have zero saturation)
- Threshold
- Apply morphology open and close to remove the extraneous white regions
- Get the contour and approximate to simple polygon
- Draw the polygon on the input
- Save the results
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('board.png')
# convert to HSV and extract saturation channel
sat = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)[:,:,1]
# threshold
thresh = cv2.threshold(sat, 90, 255, 0)[1]
# apply morphology close to fill interior regions in mask
kernel = np.ones((7,7), np.uint8)
morph = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = np.ones((13,13), np.uint8)
morph = cv2.morphologyEx(morph, cv2.MORPH_CLOSE, kernel)
# get contours (presumably only 1) and fit to simple polygon (quadrilateral)
cntrs = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cntrs = cntrs[0] if len(cntrs) == 2 else cntrs[1]
c = cntrs[0]
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.05 * peri, True)
# draw polygon on input
result = img.copy()
cv2.polylines(result, [np.int32(approx)], True, (0,0,255), 1, cv2.LINE_AA)
# write result to disk
cv2.imwrite("board_saturation.png", sat)
cv2.imwrite("board_thresh.png", thresh)
cv2.imwrite("board_morph.png", morph)
cv2.imwrite("board_contour.png", result)
# display it
cv2.imshow("IMAGE", img)
cv2.imshow("SAT", sat)
cv2.imshow("THRESH", thresh)
cv2.imshow("MORPH", morph)
cv2.imshow("RESULT", result)
cv2.waitKey(0)
Saturation channel image:
Thresholded image:
Morphology cleaned image:
Contour on input:
In your image the problem seems to be the white rectangles. My approach is to check each row, and if the row contains many pixels close to white (255, 255, 255), make the whole row black.
Here is my code:
import cv2
import numpy as np
import random as rng

img = cv2.imread("/ur/image/directory/obstacle.png")
height, width, channels = img.shape
cv2.imshow('Source', img)
# Check each row and eliminate white rectangles
# (if the row contains more white pixels than the limit)
for x in range(0, height):
    white_counter = 0
    for y in range(0, width):
        if img[x, y, 0] >= 180 and img[x, y, 1] >= 180 and img[x, y, 2] >= 180:
            white_counter = white_counter + 1
    if white_counter > 10:
        for y in range(0, width):
            img[x, y, 0] = 0
            img[x, y, 1] = 0
            img[x, y, 2] = 0
cv2.imshow('Elimination White Rectangles', img)
# Find contours and draw a rectangle for each
src_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
threshold = 300
canny_output = cv2.Canny(src_gray, threshold, threshold * 2)
contours, _ = cv2.findContours(canny_output, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours_poly = [None] * len(contours)
boundRect = [None] * len(contours)
for i, c in enumerate(contours):
    contours_poly[i] = cv2.approxPolyDP(c, 3, True)
    boundRect[i] = cv2.boundingRect(contours_poly[i])
rng.seed(12345)
drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
for i in range(len(contours)):
    color = (rng.randint(0, 255), rng.randint(0, 255), rng.randint(0, 255))
    cv2.rectangle(drawing, (int(boundRect[i][0]), int(boundRect[i][1])),
                  (int(boundRect[i][0] + boundRect[i][2]), int(boundRect[i][1] + boundRect[i][3])), color, 2)
cv2.imshow('Output', drawing)
cv2.waitKey(0)
cv2.destroyAllWindows()
Eliminate White Rectangles:
Result:
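The per-pixel Python loop above can be slow on large images; the same row test can be vectorized with NumPy (a sketch, assuming the same img and thresholds):

import numpy as np

near_white = np.all(img >= 180, axis=2)      # per-pixel: all three channels near white
rows_to_blank = near_white.sum(axis=1) > 10  # rows with more than 10 such pixels
img[rows_to_blank] = 0                       # make those rows black in one pass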

Faster algorithms to detect an orange square platform than cv2.FindContours+convexHull?

I have a drone computer vision project I'm working on with a time limit of .05 seconds. I essentially have to detect an orange square-ish platform with a best-fit square. Right now I'm using OpenCV backprojection + findContours + convexHull. This works, but it's too slow (around .12 seconds). Any other algorithms or better ideas? Should I try implementing the findContours function myself?
Here is my code:
import cv2
import numpy as np
import time
import math

# creates region of interest histogram
roi = cv2.imread("./images/roi.jpeg")
roi_hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
bin_size = 3
roi_hist = cv2.calcHist([roi_hsv], [0, 1], None, [bin_size, bin_size], [0, 256, 0, 256])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# returns mask of image after backprojection + thresholding
def backproject(image, roi_hist):
    image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    mask = cv2.calcBackProject([image_hsv], [0, 1], roi_hist, [0, 256, 0, 256], 1)
    _, mask = cv2.threshold(mask, 254, 255, cv2.THRESH_BINARY)
    return mask

"""
# test image
image = cv2.imread("./images/img3.jpeg")
image = cv2.resize(image, (600, 600))
mask = backproject(image, roi_hist)
_, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea)
cnt = contours[-1]  # selects biggest contour
# hull = cv2.convexHull(cnt)  # creates convex hull
M = cv2.moments(cnt)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
cntX = cnt[:, 0, 0]
cntY = cnt[:, 0, 1]
radii = np.sqrt((cntX - cX) ** 2 + (cntY - cY) ** 2)
min_radius = int(sorted(radii)[0])
print(min_radius)
cv2.circle(image, (cX, cY), 2, (0, 255, 255), 3)
cv2.circle(image, (cX, cY), min_radius, (0, 255, 255), 3)
cv2.drawContours(image, [cnt], 0, (255, 0, 255), 3)
cv2.imshow('image', image)
cv2.waitKey(0)
"""
# for webcam
cap = cv2.VideoCapture(0)
while True:
    start_time = time.time()
    _, frame = cap.read()
    frame = cv2.resize(frame, (800, 600))
    mask = backproject(frame, roi_hist)
    _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea)
    cnt = contours[-1]  # selects biggest contour
    cnt = cv2.convexHull(cnt)
    M = cv2.moments(cnt)
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    cntX = cnt[:, 0, 0]
    cntY = cnt[:, 0, 1]
    radii = np.sqrt((cntX - cX) ** 2 + (cntY - cY) ** 2)
    min_radius = int(sorted(radii)[0])
    cv2.circle(frame, (cX, cY), 2, (0, 255, 255), 3)
    cv2.circle(frame, (cX, cY), min_radius, (0, 255, 255), 3)
    cv2.drawContours(frame, [cnt], 0, (255, 0, 255), 3)
    cv2.imshow('image', frame)
    end_time = time.time()
    print(end_time - start_time)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
Here is a sample image; let me point out that this is clearly not a square. I am currently just trying to get the contour detection and everything tuned and robust. My end goal is to find the xy-position and orientation of the best-fit square inscribed in the platform.
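No answer is recorded for this question above. As an illustrative sketch only (an assumption, not from the thread): if the platform colour is stable, a fixed HSV inRange mask is typically cheaper than per-frame backprojection, and cv2.minAreaRect on the biggest contour yields the best-fit rotated rectangle's position and orientation directly. The orange bounds below are placeholders to tune.

import cv2
import numpy as np

def find_platform(frame):
    # Hypothetical HSV bounds for "orange" -- tune for the real platform.
    lower, upper = np.array([5, 100, 100]), np.array([25, 255, 255])
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower, upper)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4.x
    if not contours:
        return None
    cnt = max(contours, key=cv2.contourArea)
    return cv2.minAreaRect(cnt)  # ((cx, cy), (w, h), angle)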

How to find the colour of contours in OpenCV with Python

I'm working on a requirement where I need to find the colour of the region inside contours. We are using OpenCV with Python; here is my code:
import imutils
import cv2
import numpy as np

path = "multiple_grains_1.jpeg"
img = cv2.imread(path)
resized = imutils.resize(img, width=900)
ratio = img.shape[0] / float(resized.shape[0])
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
(ret, thresh) = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
edge = cv2.Canny(thresh, 100, 200)
(_, cnts, _) = cv2.findContours(edge.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in cnts:
    rect = cv2.minAreaRect(c)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    area = cv2.contourArea(c)
    if area > 1:
        cv2.drawContours(resized, [box], 0, (0, 0, 255), 2)
        cv2.drawContours(resized, [c], -1, (0, 255, 0), 2)
        # print("area : " + str(area))
        pixelpoints = np.transpose(np.nonzero(c))
        # print('pixelpoints: ' + str(pixelpoints))
        # access the center of the contour using moments
        M = cv2.moments(c)
        if M["m00"] != 0:
            cX = int((M["m10"] / M["m00"]) * ratio)
            cY = int((M["m01"] / M["m00"]) * ratio)
            cord = img[int(cX) + 3, int(cY) + 3]  # note: NumPy indexing is img[row, col], i.e. img[y, x]
            print(cord)
cv2.imshow("Output", resized)
cv2.waitKey(0)
exit()
When I check the colour at the centroid of a contour, I can't fetch the correct colour. Does anyone know how to fetch the colour inside a contour using OpenCV and Python?
I simplified your code and was able to get the color of the centroids without using moment.
import imutils
import cv2
import numpy as np
import matplotlib.pyplot as plt

img = cv2.imread("multiplegrains.png")
resized = imutils.resize(img, width=900)
ratio = img.shape[0] / float(resized.shape[0])
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
_, cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# if you only want cv2.contourArea > 1, you can just comment out the line below
cnts = np.array(cnts)[[cv2.contourArea(c) > 10 for c in cnts]]
grains = [np.int0(cv2.boxPoints(cv2.minAreaRect(c))) for c in cnts]
centroids = [(grain[2][1] - (grain[2][1] - grain[0][1]) // 2,
              grain[2][0] - (grain[2][0] - grain[0][0]) // 2) for grain in grains]
colors = [resized[centroid] for centroid in centroids]
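If a single centroid pixel is noisy, the average colour inside each contour can be taken with a mask instead (a sketch, reusing cnts and resized from above):

mean_colors = []
for c in cnts:
    mask = np.zeros(resized.shape[:2], dtype=np.uint8)
    cv2.drawContours(mask, [c], -1, 255, cv2.FILLED)      # fill this contour
    mean_colors.append(cv2.mean(resized, mask=mask)[:3])  # mean BGR under the mask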
