Detect markers in a 2D image [Python/OpenCV]

I would like to create a script in Python (using the OpenCV library) that determines which markers are present in a picture. The markers look something like this:
Markers
After loading the image, the script should print which markers are in the picture (i.e., return the markers' numbers). For example, if I load this picture:
Image with markers
For this image the script should return three numbers: 1, 2 and 3. I have a script that loads an image and recognizes figures (circles, squares, etc.; in the script below, only squares), but I have no idea how to recognize a whole marker, which consists of several figures. Any ideas? I'd appreciate any suggestions about an algorithm or any solution.
import numpy as np
import cv2

img = cv2.imread('B.jpg')
gray = cv2.imread('B.jpg', 0)
ret, thresh = cv2.threshold(gray, 120, 255, 1)
contours, h = cv2.findContours(thresh, 1, 2)  # note: OpenCV 3.x returns three values here

for cnt in contours:
    approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
    print(len(approx))
    if len(approx) == 4:
        print("square")
        cv2.drawContours(img, [cnt], 0, (0, 0, 255))

cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
But obviously this isn't what I need; it only outlines the rectangles.
Thanks for any tips and help.

First I tried feature matching (proposed by user #tfv), but it probably isn't a good solution: in many of my attempts it gave too many matches when the images were completely different, or too few matches when the images were the same.
Now I will try the algorithm suggested by #Micka, but I have a problem with the second and third points. The code in my post above finds the squares. How do I save the found rectangles as new images? (A sketch of this follows below.)
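Regarding saving each detected rectangle as its own image: a minimal sketch (building on the square-finding code above) is to take cv2.boundingRect of each quadrilateral contour and slice the source image with it. The file-name pattern and the minimum-area filter below are illustrative assumptions:

import cv2

img = cv2.imread('B.jpg')
gray = cv2.imread('B.jpg', 0)
ret, thresh = cv2.threshold(gray, 120, 255, 1)
contours, hierarchy = cv2.findContours(thresh, 1, 2)  # OpenCV 4 two-value return assumed

for i, cnt in enumerate(contours):
    approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
    if len(approx) == 4 and cv2.contourArea(cnt) > 1000:  # skip tiny quads (assumed threshold)
        x, y, w, h = cv2.boundingRect(cnt)
        crop = img[y:y + h, x:x + w]  # the detected rectangle as its own image
        cv2.imwrite('marker_{}.png'.format(i), crop)

Each crop can then be fed to whatever per-marker classification you settle on.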

Feature matching will perform poorly here because these markers have very few features (feature matching wants clusters of corners and fine detail).
These markers lend themselves better to a template-matching scheme: the correlation between the input image and a template will be highest for the correct template type (given that the template is properly positioned, oriented, and scaled). The following code works for your example. It includes a fair amount of required preprocessing, but the essence is to find the highest correlation: np.correlate(img.flatten(), templ.flatten()). The code could be improved in many ways to make it more robust against variations in marker position, scale, orientation, noise, etc.
import matplotlib.pyplot as plt
import numpy as np
import cv2

# Detect and store the 6 templates (markers)
fig0, axs0 = plt.subplots(2)
imgs = cv2.imread("markers.png")
imgs_gray = cv2.cvtColor(imgs, cv2.COLOR_BGR2GRAY)
ret, imgs_th = cv2.threshold(imgs_gray, 100, 255, cv2.THRESH_BINARY)
xywh = np.zeros((0, 4), dtype=np.int32)
contours, hierarchies = cv2.findContours(
    imgs_th, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
)
for idx, c in enumerate(contours):
    if hierarchies[0, idx, 3] == 0 and cv2.contourArea(c) > 20000:
        x, y, w, h = cv2.boundingRect(c)
        xywh = np.vstack((xywh, np.array([x, y, w, h])))
axs0[0].set_title("thresholded markers image")
axs0[0].imshow(imgs_th, cmap="gray")
sortx_xywh = xywh[np.argsort(xywh[:, 0])]
sortyx_xywh = sortx_xywh[np.argsort(sortx_xywh[:, 1])]
max_w = np.amax(sortyx_xywh[:, 2])
max_h = np.amax(sortyx_xywh[:, 3])
templates = np.zeros((max_h, max_w, 6))
for i, xy in enumerate(sortyx_xywh[:, 0:2]):
    templates[:, :, i] = imgs_th[xy[1] : xy[1] + max_h, xy[0] : xy[0] + max_w]

# Detect the marker regions in the input image
img_in = cv2.imread("input.jpg")
img_gray = cv2.cvtColor(img_in, cv2.COLOR_BGR2GRAY)
ret, img_th = cv2.threshold(img_gray, 100, 255, cv2.THRESH_BINARY)
xywh = np.zeros((0, 4), dtype=np.int32)
contours, hierarchies = cv2.findContours(img_th, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for idx, c in enumerate(contours):
    if hierarchies[0, idx, 3] == 0 and cv2.contourArea(c) > 20000:
        x, y, w, h = cv2.boundingRect(c)
        xywh = np.vstack((xywh, np.array([x, y, w, h])))
axs0[1].set_title("thresholded input image")
axs0[1].imshow(img_th, cmap="gray")
fig0.show()

# Use simplified template matching (correlation) to determine marker type and orientation
for xy in xywh[:, 0:2]:
    fig1, axs1 = plt.subplots(5, 6)
    img = img_th[xy[1] : xy[1] + max_h, xy[0] : xy[0] + max_w]
    axs1[0, 0].imshow(img, cmap="gray")
    axs1[0, 0].set_title("input image")
    corr = np.zeros((4, 6))
    for t in range(6):  # 6 templates
        templ = templates[:, :, t]
        for o in range(4):  # 4 orientations
            corr[o, t] = np.correlate(img.flatten(), templ.flatten())
            axs1[o + 1, t].imshow(templ, cmap="gray")
            axs1[o + 1, t].set_title("corr = {:.2e}".format(corr[o, t]))
            templ = np.rot90(templ)
    rot, typ = np.unravel_index(np.argmax(corr, axis=None), corr.shape)
    print("Input marker at ({},{}) is type {}, rotated {} degrees.".format(
        xy[0], xy[1], typ + 1, rot * 90))
    fig1.tight_layout(pad=0.001)
    fig1.show()

Related

Particle detection with Python OpenCV

I'm looking for a proper way to count particles and measure their sizes in this image:
In the end I have to obtain lists of the particles' coordinates and areas. After some searching on the internet, I realized there are three approaches to particle detection:
blobs (a minimal sketch follows this list)
contours
connectedComponentsWithStats
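For reference, here is a minimal SimpleBlobDetector sketch for the blob approach mentioned above, assuming white particles on a dark background in a file named particles.png; the parameter values are illustrative, not tuned for this image:

import cv2

img = cv2.imread('particles.png', cv2.IMREAD_GRAYSCALE)

# SimpleBlobDetector looks for dark blobs by default, so ask for light ones.
params = cv2.SimpleBlobDetector_Params()
params.filterByColor = True
params.blobColor = 255  # detect white blobs
params.filterByArea = True
params.minArea = 4      # assumed minimum size in pixels
params.maxArea = 500    # assumed maximum size in pixels

detector = cv2.SimpleBlobDetector_create(params)
keypoints = detector.detect(img)

# Each keypoint carries a center point and an approximate diameter.
for kp in keypoints:
    print("center: ({:.1f}, {:.1f}), diameter: {:.1f}".format(kp.pt[0], kp.pt[1], kp.size))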
Looking at different projects, I assembled some code mixing these approaches.
import pylab
import cv2
import numpy as np
Gaussian blurring and thresholding:
original_image = cv2.imread(img_path)
img = original_image
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.GaussianBlur(img, (5, 5), 0)
img = cv2.blur(img, (5, 5))
img = cv2.medianBlur(img, 5)
img = cv2.bilateralFilter(img, 6, 50, 50)
max_value = 255
adaptive_method = cv2.ADAPTIVE_THRESH_GAUSSIAN_C
threshold_type = cv2.THRESH_BINARY
block_size = 11
img_thresholded = cv2.adaptiveThreshold(img, max_value, adaptive_method, threshold_type, block_size, -3)
Filter small objects:
min_size = 4
# note: the connected-components pass should run on the thresholded image
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(img_thresholded, connectivity=8)
sizes = stats[1:, -1]
nb_components = nb_components - 1
# for every component in the image, you keep it only if it's above min_size
for i in range(0, nb_components):
    if sizes[i] < min_size:
        img_thresholded[output == i + 1] = 0
Generation of contours for filling holes and for the measurements; pos_list and size_list are what we were looking for:
contours, hierarchy = cv2.findContours(img_thresholded, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
pos_list = []
size_list = []
for i in range(len(contours)):
    area = cv2.contourArea(contours[i])
    size_list.append(area)
    (x, y), radius = cv2.minEnclosingCircle(contours[i])
    pos_list.append((int(x), int(y)))
For a self-check, we plot these coordinates over the original image:
pts = np.array(pos_list)
pylab.figure(0)
pylab.imshow(original_image)
pylab.scatter(pts[:, 0], pts[:, 1], marker="x", color="green", s=5, linewidths=1)
pylab.show()
We might get something like the following:
And... I'm not really satisfied with the results. Some clearly visible particles are not included, while on the other hand some dubious intensity fluctuations have been counted. I'm playing with different filter settings now, but it feels wrong.
If someone knows how to improve my solution, please share.
Since the particles are white and the background is black, we can use Kmeans color quantization to segment the image into two groups with clusters=2. This will allow us to easily distinguish between particles and the background. Since the particles may be very tiny, we should avoid blurring, dilating, or any morphological operations that may alter the particle contours. Here's an approach:
Kmeans color quantization. We perform Kmeans with two clusters, grayscale, then Otsu's threshold to obtain a binary image.
Filter out super tiny noise. Next we find contours, remove tiny specs of noise using contour area filtering, and collect each particle (x, y) coordinate and its area. We remove tiny particles on the binary mask by "filling in" these contours to effectively erase them.
Apply mask onto original image. Now we bitwise-and the filtered mask onto the original image to highlight the particle clusters.
Kmeans with clusters=2
Result
Number of particles: 204
Average particle size: 30.537
Code
import cv2
import numpy as np

# Kmeans color quantization
def kmeans_color_quantization(image, clusters=8, rounds=1):
    h, w = image.shape[:2]
    samples = np.zeros([h * w, 3], dtype=np.float32)
    count = 0
    for x in range(h):
        for y in range(w):
            samples[count] = image[x][y]
            count += 1

    compactness, labels, centers = cv2.kmeans(samples,
            clusters,
            None,
            (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10000, 0.0001),
            rounds,
            cv2.KMEANS_RANDOM_CENTERS)

    centers = np.uint8(centers)
    res = centers[labels.flatten()]
    return res.reshape((image.shape))

# Load image
image = cv2.imread('1.png')
original = image.copy()

# Perform kmeans color segmentation, grayscale, Otsu's threshold
kmeans = kmeans_color_quantization(image, clusters=2)
gray = cv2.cvtColor(kmeans, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

# Find contours, remove tiny specs using contour area filtering, gather points
points_list = []
size_list = []
cnts, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
AREA_THRESHOLD = 2
for c in cnts:
    area = cv2.contourArea(c)
    if area < AREA_THRESHOLD:
        cv2.drawContours(thresh, [c], -1, 0, -1)
    else:
        (x, y), radius = cv2.minEnclosingCircle(c)
        points_list.append((int(x), int(y)))
        size_list.append(area)

# Apply mask onto original image
result = cv2.bitwise_and(original, original, mask=thresh)
result[thresh == 255] = (36, 255, 12)

# Overlay on original
original[thresh == 255] = (36, 255, 12)

print("Number of particles: {}".format(len(points_list)))
print("Average particle size: {:.3f}".format(sum(size_list) / len(size_list)))

# Display
cv2.imshow('kmeans', kmeans)
cv2.imshow('original', original)
cv2.imshow('thresh', thresh)
cv2.imshow('result', result)
cv2.waitKey()

Pytesseract-OCR wrong Japanese detection

I installed Tesseract-OCR 5.0 from Here and selected the Japanese language during installation.
It is not detecting Japanese characters correctly.
Note: the same code detects English characters just fine.
Here is my code:
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv

try:
    from PIL import Image
except ImportError:
    import Image
import pytesseract

pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'

#################################################################################################
# Read your file
file = 'removed.png'
img = cv2.imread(file, 0)
img.shape

# thresholding the image to a binary image
thresh, img_bin = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# inverting the image
img_bin = 255 - img_bin

# Plotting the image to see the output
plotting = plt.imshow(img_bin, cmap='gray')
plt.show()

# Define a kernel to detect rectangular boxes
# Length (width) of kernel as 100th of total width
kernel_len = np.array(img).shape[1] // 100
# Defining a vertical kernel to detect all vertical lines of the image
ver_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, kernel_len))
# Defining a horizontal kernel to detect all horizontal lines of the image
hor_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_len, 1))
# A kernel of 2x2
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))

#### VERTICAL LINES ####
# Use vertical kernel to detect and save the vertical lines in a jpg
image_1 = cv2.erode(img_bin, ver_kernel, iterations=5)
vertical_lines = cv2.dilate(image_1, ver_kernel, iterations=45)
# Plot the generated image
plotting = plt.imshow(image_1, cmap='gray')
plt.show()

#### HORIZONTAL LINES ####
# Use horizontal kernel to detect and save the horizontal lines in a jpg
image_2 = cv2.erode(img_bin, hor_kernel, iterations=5)
horizontal_lines = cv2.dilate(image_2, hor_kernel, iterations=45)
# Plot the generated image
plotting = plt.imshow(image_2, cmap='gray')
plt.show()

# Combine horizontal and vertical lines in a new third image, with both having the same weight.
img_vh = cv2.addWeighted(vertical_lines, 0.5, horizontal_lines, 0.5, 0.0)
# Eroding and thresholding the image
img_vh = cv2.erode(~img_vh, kernel, iterations=1)
thresh, img_vh = cv2.threshold(img_vh, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
plotting = plt.imshow(img_vh, cmap='gray')
plt.show()

bitxor = cv2.bitwise_xor(img, img_vh)
bitnot = cv2.bitwise_not(bitxor)
# Plotting the generated image
plotting = plt.imshow(bitnot, cmap='gray')
plt.show()

# Detect contours for following box detection
contours, hierarchy = cv2.findContours(img_vh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

def sort_contours(cnts, method="left-to-right"):
    # initialize the reverse flag and sort index
    reverse = False
    i = 0
    # handle if we need to sort in reverse
    if method == "right-to-left" or method == "bottom-to-top":
        reverse = True
    # handle if we are sorting against the y-coordinate rather than
    # the x-coordinate of the bounding box
    if method == "top-to-bottom" or method == "bottom-to-top":
        i = 1
    # construct the list of bounding boxes and sort them
    boundingBoxes = [cv2.boundingRect(c) for c in cnts]
    (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
                                        key=lambda b: b[1][i], reverse=reverse))
    # return the list of sorted contours and bounding boxes
    return (cnts, boundingBoxes)

# Sort all the contours from bottom to top.
contours, boundingBoxes = sort_contours(contours, method="bottom-to-top")

################
# Creating a list of heights for all detected boxes
heights = [boundingBoxes[i][3] for i in range(len(boundingBoxes))]
# Get mean of heights
mean = np.mean(heights)

# Create list box to store all boxes in
box = []
# Get position (x,y), width and height for every contour and show the contour on the image
for c in contours:
    x, y, w, h = cv2.boundingRect(c)
    if (w > 110 and h < 85):
        image = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        box.append([x, y, w, h])

plotting = plt.imshow(image, cmap="cividis")
plt.show()

# Creating two lists to define the row and column a cell is located in
row = []
column = []
j = 0

# Sorting the boxes into their respective rows and columns
for i in range(len(box)):
    if (i == 0):
        column.append(box[i])
        previous = box[i]
    else:
        if (box[i][1] <= previous[1] + mean / 2):
            column.append(box[i])
            previous = box[i]
            if (i == len(box) - 1):
                row.append(column)
        else:
            row.append(column)
            column = []
            previous = box[i]
            column.append(box[i])

print("Columns:", column)
print("Rows: ", row)

# calculating maximum number of cells
# (the original `if countcol > countcol` comparison could never be true)
countcol = 0
for i in range(len(row)):
    if len(row[i]) > countcol:
        countcol = len(row[i])
print("Max cells: ", countcol)

# Retrieving the center of each column
center = [int(row[i][j][0] + row[i][j][2] / 2) for j in range(len(row[i])) if row[0]]
center = np.array(center)
center.sort()

# Using the distance to the column centers, the boxes are arranged in respective order
finalboxes = []
for i in range(len(row)):
    lis = []
    for k in range(countcol):
        lis.append([])
    for j in range(len(row[i])):
        diff = abs(center - (row[i][j][0] + row[i][j][2] / 4))
        minimum = min(diff)
        indexing = list(diff).index(minimum)
        lis[indexing].append(row[i][j])
    finalboxes.append(lis)

# From every single image-based cell/box, the strings are extracted via pytesseract and stored in a list
outer = []
for i in range(len(finalboxes)):
    for j in range(len(finalboxes[i])):
        inner = ''
        if (len(finalboxes[i][j]) == 0):
            outer.append(' ')
        else:
            for k in range(len(finalboxes[i][j])):
                y, x, w, h = finalboxes[i][j][k][0], finalboxes[i][j][k][1], \
                             finalboxes[i][j][k][2], finalboxes[i][j][k][3]
                finalimg = bitnot[x:x + h, y:y + w]
                kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 1))
                border = cv2.copyMakeBorder(finalimg, 2, 2, 2, 2, cv2.BORDER_CONSTANT, value=[255, 255])
                resizing = cv2.resize(border, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
                dilation = cv2.dilate(resizing, kernel, iterations=1)
                erosion = cv2.erode(dilation, kernel, iterations=1)
                out = pytesseract.image_to_string(erosion)
                if (len(out) == 0):
                    out = pytesseract.image_to_string(erosion, config='--psm 3')
                inner = inner + " " + out
            outer.append(inner)

## Printing all detected cells
for element in outer:
    if len(element) > 10:
        for line in element.splitlines():
            if len(line) > 2:
                print("##", line.strip())
And here is the output for the last segment of code:
## LotNo. |
## fo
## 900-1829-B
## 2021/09/17
## 1, 000
## Wes FE —Cu-ESEIRNI
## LCH2
## 4X17. 05
## TYYY GISA-5AIB
## /448642-2 B
As you can see, it struggles to detect Japanese characters.
Tesseract uses only English by default, and you may have to set other language(s) as a parameter.
At the console you can test it with
tesseract.exe image.png output.txt -l jpn
or even with many languages
tesseract.exe image.png output.txt -l jpn+eng
(instead of output.txt you can use - to display the text directly in the console)
In code it can be
pytesseract.image_to_string('image.png', config='-l jpn')
pytesseract.image_to_string('image.png', config='-l jpn+eng')
or
pytesseract.image_to_string('image.png', lang='jpn')
pytesseract.image_to_string('image.png', lang='jpn+eng')
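If Japanese still isn't recognized, it is worth checking that the jpn traineddata is actually visible to Tesseract. A quick check using pytesseract's language listing (available in pytesseract 0.3.8+; the tesseract_cmd path repeats the assumption from the question):

import pytesseract

pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'

# Lists the languages Tesseract can see; 'jpn' must appear here
# before config='-l jpn' can have any effect.
print(pytesseract.get_languages(config=''))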

Error finding corner points of a rectangle contour with cv2.boundingRect

I'm trying to extract the corner points of a rectangular section containing bubbles from an OMR sheet, so I can later use those points with warpPerspective to get a bird's-eye view of that section, but I am not getting the expected results.
Following is the OMR sheet image: OMRsheet.jpg
Code:
import cv2
import numpy as np

def extract_rect(contours):  # Function to extract rectangular contours above a certain area
    rect_contours = []
    for c in contours:
        if cv2.contourArea(c) > 10000:
            perimeter = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * perimeter, True)  # approximates a curve or polygon with another one with fewer vertices, so that the distance between them is less than or equal to the specified precision; uses the Douglas-Peucker algorithm
            if len(approx) == 4:
                rect_contours.append(c)
    rect_contours = sorted(rect_contours, key=cv2.contourArea, reverse=True)  # Sort the contours by area, from large to small
    return rect_contours

def rect_points(rect_contour):  # Function to find corner points of the contour passed. Something wrong with this; not giving expected results, messing up the warping of the image
    perimeter = cv2.arcLength(rect_contour, True)
    approx = cv2.approxPolyDP(rect_contour, 0.02 * perimeter, True)
    print("APPROX")
    print(type(approx))
    print(approx)
    cv2.drawContours(img, approx, -1, (100, 10, 55), 18)  # Rechecking if the contour passed to this function is the correct one
    cv2.drawContours(img, rect_contour, -1, (100, 10, 55), 1)
    x, y, w, h = cv2.boundingRect(rect_contour)  # I suspect a logical error in this line, as it returns corner points for the outer rectangle instead of the contour passed to it
    print("printing x y w h")
    print(x, y, w, h)
    # Corner points of the rectangle, later used to warp the rectangular section
    point_1 = np.array([x, y])
    point_2 = np.array([x + w, y])
    point_3 = np.array([x, y + h])
    point_4 = np.array([w, h])
    corner_list = np.ndarray(shape=(4, 2), dtype=np.int32)
    np.append(corner_list, point_1)
    np.append(corner_list, point_2)
    np.append(corner_list, point_3)
    np.append(corner_list, point_4)
    print("corners list")
    print(corner_list)
    myPointsNew = np.zeros((4, 1, 2), np.int32)
    add = corner_list.sum(1)
    # print(add)
    # print(np.argmax(add))
    myPointsNew[0] = corner_list[np.argmin(add)]   # [0,0]  Setting up points in a coordinate system
    myPointsNew[3] = corner_list[np.argmax(add)]   # [w,h]
    diff = np.diff(corner_list, axis=1)
    myPointsNew[1] = corner_list[np.argmin(diff)]  # [w,0]
    myPointsNew[2] = corner_list[np.argmax(diff)]  # [h,0]
    print("mypointsnew")
    print(myPointsNew.shape)
    return myPointsNew

img_path = 'OMRsheet.jpg'
img = cv2.imread(img_path)

img_width = 700
img_height = 700
img = cv2.resize(img, (img_width, img_height), interpolation=cv2.INTER_AREA)

img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_blur = cv2.GaussianBlur(img_gray, (5, 5), 0)  # blurred image
img_canny = cv2.Canny(img_blur, 20, 110)  # Edge detection using Canny; binary thresholding could have been an alternative (i.e. if the pixel value is smaller than the threshold it is set to 0, otherwise to a maximum value)

contours, hierarchy = cv2.findContours(img_canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)  # Find contours.
# parameters are (input_image, retrieval_mode, approximation_method)

img_contours = img.copy()
cv2.drawContours(img_contours, contours, -1, (0, 255, 0), 1)  # parameters are (image, contours, contour_idx, contour_color, contour_thickness); contour_idx is -1 for all contours
cv2.imshow('Contours', img_contours)

rect_contours = extract_rect(contours)
cv2.drawContours(img, rect_contours[1], -1, (0, 255, 0), 1)

rect_2 = rect_points(rect_contours[1])
cv2.drawContours(img, rect_2, -1, (0, 0, 255), 12)

warp_img_width = int(img_width / 1.2)
warp_img_height = int(img_height / 1.2)

warp_from = np.float32(rect_2)
warp_to = np.float32([[0, 0], [warp_img_width, 0], [0, warp_img_height], [warp_img_width, warp_img_height]])

transformation_matrix = cv2.getPerspectiveTransform(warp_from, warp_to)
img_warp = cv2.warpPerspective(img, transformation_matrix, (warp_img_height, warp_img_height))
cv2.imshow('Warped Perspective', img_warp)
cv2.imshow('Original', img)
cv2.waitKey(0)
Output for cv2.imshow('Original', img): OMRsheet_contours.jpg
Output for cv2.imshow('Warped Perspective', img_warp): Bird's Eye perspective.jpg
EXPECTED output for cv2.imshow('Warped Perspective', img_warp): Expected Bird's eye.jpg
Instead of getting a warped perspective of the section containing only the bubbles, I am getting a warped perspective of the whole paper, which means either the points returned by the rect_points function or the contour passed to it (i.e., rect_contours[1]) must be wrong. The latter seems fine, as confirmed by drawing the contour lines for the contour passed to rect_points. I suspect x, y, w, h = cv2.boundingRect(rect_contour) is returning incorrect points.
Any idea how I could solve this problem and get the Expected Bird's eye.jpg result? (One observation follows below.)
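As an aside (a sketch of an idea, not a verified fix): cv2.boundingRect always returns an upright box, point_4 = np.array([w, h]) drops the x/y offset, and np.append returns a new array rather than filling corner_list in place, so corner_list stays uninitialized. Since extract_rect already guarantees a 4-vertex approximation, one option is to take the corners directly from approxPolyDP:

import cv2
import numpy as np

def rect_points_from_approx(rect_contour):
    # Approximate the contour with 4 vertices and use them directly as corners.
    perimeter = cv2.arcLength(rect_contour, True)
    approx = cv2.approxPolyDP(rect_contour, 0.02 * perimeter, True)
    corner_list = approx.reshape(4, 2).astype(np.int32)

    # Order as top-left, top-right, bottom-left, bottom-right,
    # using the same sum/diff trick as in the question.
    ordered = np.zeros((4, 1, 2), np.int32)
    add = corner_list.sum(1)
    diff = np.diff(corner_list, axis=1)
    ordered[0] = corner_list[np.argmin(add)]   # top-left
    ordered[3] = corner_list[np.argmax(add)]   # bottom-right
    ordered[1] = corner_list[np.argmin(diff)]  # top-right
    ordered[2] = corner_list[np.argmax(diff)]  # bottom-left
    return ordered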

Python: Sorting items from top left to bottom right with OpenCV

How can I go about ordering the items of a picture from top left to bottom right, as in the image below? I'm currently receiving the following error with the code beneath it.
Error:
a = sorted(keypoints, key=lambda p: (p[0]) + (p[1]))[0]  # find upper left point
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
This question is modelled on: Ordering coordinates from top left to bottom right
def preprocess(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_blur = cv2.GaussianBlur(img_gray, (5, 5), 1)
    img_canny = cv2.Canny(img_blur, 50, 50)
    kernel = np.ones((3, 3))
    img_dilate = cv2.dilate(img_canny, kernel, iterations=2)
    img_erode = cv2.erode(img_dilate, kernel, iterations=1)
    return img_erode

image_final = preprocess(cv2.imread("picture_example.png"))
keypoints, hierarchy = cv2.findContours(image_final, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
points = []

while len(keypoints) > 0:
    a = sorted(keypoints, key=lambda p: (p[0]) + (p[1]))[0]  # find upper left point
    b = sorted(keypoints, key=lambda p: (p[0]) - (p[1]))[-1]  # find upper right point

    cv2.line(image_final, (int(a.pt[0]), int(a.pt[1])), (int(b.pt[0]), int(b.pt[1])), (255, 0, 0), 1)

    # convert opencv keypoint to numpy 3d point
    a = np.array([a.pt[0], a.pt[1], 0])
    b = np.array([b.pt[0], b.pt[1], 0])

    row_points = []
    remaining_points = []
    for k in keypoints:
        p = np.array([k.pt[0], k.pt[1], 0])
        d = k.size  # diameter of the keypoint (might be a threshold)
        dist = np.linalg.norm(np.cross(np.subtract(p, a), np.subtract(b, a))) / np.linalg.norm(b)  # distance between keypoint and line a->b
        if d / 2 > dist:
            row_points.append(k)
        else:
            remaining_points.append(k)

    points.extend(sorted(row_points, key=lambda h: h.pt[0]))
    keypoints = remaining_points
New Picture:
Reference Ordering Picture:
I will use the center of mass to determine the center-point ordering.
The resulting numbering depends on how many rows you want there to be. With the program I will show you how to make, you can specify the number of rows before you run it.
For example, here is the original image:
Here is the numbered image when you specify 4 rows:
Here is the numbered image when you specify 6 rows:
For the other image you provided (with its frame cropped so the frame won't be detected as a shape), you can see there will be 4 rows, so putting 4 into the program will give you:
Let's have a look at the workflow considering 4 rows. The concept I used is to divide the image into 4 segments along the y axis, forming 4 rows. For each segment of the image, find every shape that has its center in that segment. Finally, order the shapes in each segment by their x coordinate.
Import the necessary libraries:
import cv2
import numpy as np
Define a function that will take in an image input and return the image processed to something that will allow python to later retrieve their contours:
def process_img(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_canny = cv2.Canny(img_gray, 100, 100)
    kernel = np.ones((2, 3))
    img_dilate = cv2.dilate(img_canny, kernel, iterations=1)
    img_erode = cv2.erode(img_dilate, kernel, iterations=1)
    return img_erode
Define a function that will return the center of a contour:
def get_centeroid(cnt):
    length = len(cnt)
    sum_x = np.sum(cnt[..., 0])
    sum_y = np.sum(cnt[..., 1])
    return int(sum_x / length), int(sum_y / length)
Define a function that will take in a processed image and return the center points of the shapes found in the image:
def get_centers(img):
    contours, hierarchies = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        if cv2.contourArea(cnt) > 100:
            yield get_centeroid(cnt)
Define a function that will take in an image array, img, an array of coordinates, centers, the number of segments for the image, row_amt, and the height of each segment, row_h, as input. It will return row_amt arrays (sorted by their x coordinates), each containing every point in centers that lies in its corresponding row of the image:
def get_rows(img, centers, row_amt, row_h):
    centers = np.array(centers)
    d = row_h / row_amt
    for i in range(row_amt):
        f = centers[:, 1] - d * i
        a = centers[(f < d) & (f > 0)]
        yield a[a.argsort(0)[:, 0]]
Read in the image, get its processed form using the process_img function defined above, and get the center of each shape in the image using the get_centers function:
img = cv2.imread("shapes.png")
img_processed = process_img(img)
centers = list(get_centers(img_processed))
Get the height of the image to use for the get_rows function defined, and define a count variable, count, to keep track of the numbering:
h, w, c = img.shape
count = 0
Loop through the shape centers divided into 4 rows, drawing the line that connects each row for visualization:
for row in get_rows(img, centers, 4, h):
    cv2.polylines(img, [row], False, (255, 0, 255), 2)
    for x, y in row:
Increment the count variable, and draw the count at the corresponding location on the image from the row array:
        count += 1
        cv2.circle(img, (x, y), 10, (0, 0, 255), -1)
        cv2.putText(img, str(count), (x - 10, y + 5), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255), 2)
Finally, show the image:
cv2.imshow("Ordered", img)
cv2.waitKey(0)
Altogether:
import cv2
import numpy as np

def process_img(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_canny = cv2.Canny(img_gray, 100, 100)
    kernel = np.ones((2, 3))
    img_dilate = cv2.dilate(img_canny, kernel, iterations=1)
    img_erode = cv2.erode(img_dilate, kernel, iterations=1)
    return img_erode

def get_centeroid(cnt):
    length = len(cnt)
    sum_x = np.sum(cnt[..., 0])
    sum_y = np.sum(cnt[..., 1])
    return int(sum_x / length), int(sum_y / length)

def get_centers(img):
    contours, hierarchies = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        if cv2.contourArea(cnt) > 100:
            yield get_centeroid(cnt)

def get_rows(img, centers, row_amt, row_h):
    centers = np.array(centers)
    d = row_h / row_amt
    for i in range(row_amt):
        f = centers[:, 1] - d * i
        a = centers[(f < d) & (f > 0)]
        yield a[a.argsort(0)[:, 0]]

img = cv2.imread("shapes.png")
img_processed = process_img(img)
centers = list(get_centers(img_processed))

h, w, c = img.shape
count = 0

for row in get_rows(img, centers, 4, h):
    cv2.polylines(img, [row], False, (255, 0, 255), 2)
    for x, y in row:
        count += 1
        cv2.circle(img, (x, y), 10, (0, 0, 255), -1)
        cv2.putText(img, str(count), (x - 10, y + 5), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255), 2)

cv2.imshow("Ordered", img)
cv2.waitKey(0)
This is not completely the same task as in the linked question you took the code from:
You have contours, while the other question has keypoints.
You have to come up with a method to sort contours (they might overlap in one dimension, and so on). There are multiple ways to do that, depending on your use case. The easiest might be to use the center of mass of your contour, which can be computed as in: Center of mass in contour (Python, OpenCV). Then you can make an array of objects out of it that contain points, and use the code that you found.
The code that you found assumes that the points are more or less on a grid, so that points 1-5 in your reference image are roughly on a line. In the new picture you posted, this is not really the case. It might be better to go for a clustering approach here: cluster the center points' y coordinates with some approach (maybe one from here), then for each cluster sort the elements by their centers' x coordinate. A minimal sketch of this idea follows below.
As I already said, there are multiple ways of doing this, and it depends heavily on your use case.
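A minimal sketch of that clustering idea, assuming contours have already been found: the centroid comes from cv2.moments, rows are formed with a simple y-distance threshold (an assumed value you would tune), and each row is then sorted by x:

import cv2

def center_of_mass(cnt):
    # Centroid from image moments; assumes a non-degenerate contour (m00 != 0).
    M = cv2.moments(cnt)
    return (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

def sort_top_left_to_bottom_right(contours, row_threshold=20):
    centers = [center_of_mass(c) for c in contours]
    # Cluster into rows: walk the contours in order of y and start a
    # new row whenever the y-gap to the previous center is large.
    order = sorted(range(len(centers)), key=lambda i: centers[i][1])
    rows, current = [], [order[0]]
    for i in order[1:]:
        if centers[i][1] - centers[current[-1]][1] <= row_threshold:
            current.append(i)
        else:
            rows.append(current)
            current = [i]
    rows.append(current)
    # Within each row, sort left to right by x.
    result = []
    for r in rows:
        result.extend(sorted(r, key=lambda i: centers[i][0]))
    return [contours[i] for i in result]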

Getting the co-ordinates of the highest point of the contour

After extracting the contour of the given image, I want to plot the highest point on the contour, indicated by a red circle in the image.
Image is:
My idea was to iterate through all the contour coordinates (x and y), then select the highest y coordinate and its corresponding x coordinate and plot them.
But I am not sure whether this is the correct approach.
Code to plot all the points:
import numpy as np
import matplotlib.pyplot as plt
import cv2

font = cv2.FONT_HERSHEY_COMPLEX

img2 = cv2.imread('Feature Extraction/images/Contour Detection Results/Result 2/Final_Image_with_Contour.jpg', cv2.IMREAD_COLOR)
# Reading same image in another
# variable and converting to gray scale.
img = cv2.imread('Feature Extraction/images/Contour Detection Results/Result 2/Final_Image_with_Contour.jpg', cv2.IMREAD_GRAYSCALE)

# Converting image to a binary image
# (black and white only image).
_, threshold = cv2.threshold(img, 110, 255, cv2.THRESH_BINARY)

# Detecting contours in image.
contours, _ = cv2.findContours(threshold, cv2.RETR_TREE,
                               cv2.CHAIN_APPROX_SIMPLE)

# Going through every contour found in the image.
for cnt in contours:
    approx = cv2.approxPolyDP(cnt, 0.009 * cv2.arcLength(cnt, True), True)
    # draws boundary of contours.
    cv2.drawContours(img2, [approx], 0, (0, 0, 255), 1)

    # Used to flatten the array containing
    # the co-ordinates of the vertices.
    n = approx.ravel()
    print(n)
    i = 0
    for j in n:
        if (i % 2 == 0):
            x = n[i]
            y = n[i + 1]
            # String containing the co-ordinates.
            string = str(x) + " " + str(y)
            if (i == 0):
                # text on topmost co-ordinate.
                cv2.putText(img2, "Arrow tip", (x, y),
                            font, 0.1, (255, 0, 0))
            else:
                # text on remaining co-ordinates.
                cv2.putText(img2, string, (x, y),
                            font, 0.1, (0, 255, 0))
        i = i + 1

# Showing the final image.
cv2.imshow('image2', img2)

# Exiting the window if 'q' is pressed on the keyboard.
if cv2.waitKey(0) & 0xFF == ord('q'):
    cv2.destroyAllWindows()
Then I tried to collect the X and Y coordinates and append them to lists like this:
xList = [x[0][0][0] for x in cnts]
yList = [y[0][0][1] for y in cnts]
When I try to plot this, I see nothing.
How do I modify the code to find the desired point? I don't really understand what I am missing.
Any help is appreciated.
Thank You.
Edit 1:
The red line indicates the approximate curve fitted to the contour.
To get the x and y coordinates, you need to use
xList = [x[0][0] for x in cnts]
yList = [y[0][1] for y in cnts]
x[0] gives you the tuple containing the x, y coordinates of the point, so x[0][0] gives you the x coordinate and x[0][1] gives you the y coordinate.
If you want the 'topmost' coordinate that you've circled, you'll need to do a lot more noise cleanup. Currently, the topmost point is going to be the artifacts at the top edge of the image. A few morphological operations like cv2.dilate() and cv2.erode() could help, but the target edge is quite thin, so they may distort it.
The red lines show the contours that have been detected. Notice that the edge of the image has been selected as a contour because there are non-zero pixels on the image border. You'll need to remove that noise to detect your desired topmost point. There's a lot of work to be done to get to your desired result.
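Once that cleanup is done and you have the single contour you care about, the topmost point can be read straight off the contour array with a standard OpenCV idiom (here cnt is assumed to be that cleaned-up contour):

# Contour points have shape (N, 1, 2) with (x, y) in the last axis;
# in image coordinates the topmost point has the smallest y value.
topmost = tuple(cnt[cnt[:, :, 1].argmin()][0])
print("topmost point (x, y):", topmost)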
Here's the code
import numpy as np
import matplotlib.pyplot as plt
import cv2

font = cv2.FONT_HERSHEY_COMPLEX

img2 = cv2.imread(r"/path/to/image", cv2.IMREAD_COLOR)
# Reading same image in another
# variable and converting to gray scale.
img = cv2.imread(r"/path/to/image", cv2.IMREAD_GRAYSCALE)

# Converting image to a binary image
# (black and white only image).
_, threshold = cv2.threshold(img, 110, 255, cv2.THRESH_BINARY)

# Detecting contours in image.
contours, _ = cv2.findContours(threshold, cv2.RETR_TREE,
                               cv2.CHAIN_APPROX_SIMPLE)

# Going through every contour found in the image.
for cnt in contours:
    approx = cv2.approxPolyDP(cnt, 0.009 * cv2.arcLength(cnt, True), True)
    # draws boundary of contours.
    cv2.drawContours(img2, [approx], 0, (0, 0, 255), 2)

    # Used to flatten the array containing
    # the co-ordinates of the vertices.
    n = approx.ravel()
    print(n)
    i = 0
    for j in n:
        if (i % 2 == 0):
            x = n[i]
            y = n[i + 1]
            # String containing the co-ordinates.
            string = str(x) + " " + str(y)
            # if(i == 0):
            #     # text on topmost co-ordinate.
            #     cv2.putText(img2, "Arrow tip", (x, y),
            #                 font, 1, (255, 0, 0))
            # else:
            #     # text on remaining co-ordinates.
            #     cv2.putText(img2, string, (x, y),
            #                 font, 1, (0, 255, 0))
        i = i + 1

    xList = [x[0][0] for x in cnt]
    yList = [y[0][1] for y in cnt]
    print("x", xList)
    print("y", yList)

# Showing the final image.
cv2.imwrite('image2.png', img2)
