I have an object I need to track. The object moves up and back, mostly in a straight line; however, sometimes it moves to the left. I need to track that left movement and convert the distance of the move into a real-world measurement.
Image 1:
The object moves up and down, however it wanders to the left.
Image 2:
I was thinking of finding an edge I want to track, then drawing a line from that edge to a point on a fixed reference, at a right angle (along the normal). The line is measured in pixels and a real distance is inferred.
My question is: how do I track that edge circle (in my case at (1049, 390)) when it moves up and back, and still keep a right-angle line for the measurement?
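A minimal sketch of the geometry, assuming the reference is the frame's vertical centre line: since the measurement line must stay at a right angle to that vertical reference, it is always horizontal, so its length depends only on the tracked corner's x coordinate and it can simply be redrawn at the corner's current y:
import cv2 as cv

def measure_left_offset(image, corner, x_ref):
    # Draw the vertical reference and a horizontal (right-angle) measurement
    # line from the tracked corner to it; return the length in pixels.
    cx, cy = int(corner[0]), int(corner[1])
    cv.line(image, (x_ref, 0), (x_ref, image.shape[0]), (255, 0, 0), 1)
    cv.line(image, (cx, cy), (x_ref, cy), (0, 255, 255), 1)
    return abs(cx - x_ref)  # independent of how far up or back the object is

# e.g. px_offset = measure_left_offset(image, (1049, 390), x_centre)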
Note: in my case I've had to crop the image because the image quality is poor, with many grainy lines. The cv.cornerHarris() method picks up those grainy lines as edges/contours, of which there are many.
Image 3:
Am I on the right track, or do I need a different approach?
Below is a modification of some code I found on Stack Overflow. I'm not concerned about the distance part, just tracking the movement of the selected edge.
import cv2 as cv
import numpy as np
from scipy.spatial import distance as dist
image = cv.imread('image_mask.jpg')
object_width_known_a = .1445
object_width_known_b = .0849
object_width_known_x = .0849
object_height = 1
img_shape_x = image.shape[1]
img_shape_y = image.shape[0]
x_centre = round(img_shape_x/2)
y_centre = round(img_shape_y/2)
# Find Corners
def find_centroids(dst):
    ret, dst = cv.threshold(dst, 0.02 * dst.max(), 255, 0)
    dst = np.uint8(dst)
    # find centroids
    ret, labels, stats, centroids = cv.connectedComponentsWithStats(dst)
    # define the criteria to stop and refine the corners
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 100, 1)
    # note: uses the global grayscale image 'gray' defined below
    corners = cv.cornerSubPix(gray, np.float32(centroids[1:]), (5, 5), (-1, -1), criteria)
    return corners
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
# gray = cv.GaussianBlur(gray, (3, 3), 1)  # Gaussian filter with a 3 x 3 kernel
gray = np.float32(gray)
dst = cv.cornerHarris(gray, 4, 3, 0.065)
# Get coordinates of the corners.
corners = find_centroids(dst)
for i in range(10, len(corners)):
    print("Pixels found for this object are:", corners[i])
    image[dst > 0.1 * dst.max()] = [0, 0, 255]
    cv.circle(image, (int(corners[i, 0]), int(corners[i, 1])), 5, (0, 255, 0), 1)
for corner in corners:
    image[int(corner[1]), int(corner[0])] = [0, 0, 255]
line_x = cv.line(image, (x_centre, 0), (x_centre, x_centre), (255, 0, 0), 1)
line_y = cv.line(image, (1049, 390), (x_centre, 390), (255, 0, 0), 1)
line_ang = cv.line(image, (1049, 390), (x_centre, x_centre), (0, 0, 255), 1)
d = (dist.euclidean((1049, 390), (x_centre, x_centre)))
y_px_dist = (dist.euclidean((1049, 390), (x_centre, 390)))
print(y_px_dist)
dist_to_edge = d / object_width_known_x
print(dist_to_edge, 'm')
a = (dist.euclidean((657, 540), (720, 540)))
b = (dist.euclidean((650, 400), (687, 400)))
x = (dist.euclidean((1049, 390), (x_centre, 390)))
object_width_a = a
object_width_b = b
object_width_x = x
pixels_per_metric_a = object_width_a / object_width_known_a # y pixels per 500mm
pixels_per_metric_b = object_width_b / object_width_known_b
pixels_per_metric_x = object_width_x / object_width_known_x
print(a, '=', pixels_per_metric_a, 'mm')
print(b, '=', pixels_per_metric_b, 'mm')
print(x, '=', pixels_per_metric_x, 'mm')
a = len(corners)
print("Number of corners found:", a)
# List to store pixel difference.
distance_pixel = []
# List to store mm distance.
distance_mm = []
P1 = corners[0]
P2 = corners[1]
P3 = corners[2]
P4 = corners[3]
P1P2 = cv.norm(P2 - P1)
P1P3 = cv.norm(P3 - P1)
P2P4 = cv.norm(P4 - P2)
P3P4 = cv.norm(P4 - P3)
pixelsPerMetric_width1 = P1P2 / object_width_a
pixelsPerMetric_width2 = P3P4 / object_width_a
pixelsPerMetric_height1 = P1P3 / object_height
pixelsPerMetric_height2 = P2P4 / object_height
# Average of PixelsPerMetric
pixelsPerMetric_avg = pixelsPerMetric_width1 + pixelsPerMetric_width2 + pixelsPerMetric_height1 + pixelsPerMetric_height2
pixelsPerMetric = pixelsPerMetric_avg / 4
# print(pixelsPerMetric)
P1P2_mm = P1P2 / pixelsPerMetric
P1P3_mm = P1P3 / pixelsPerMetric
P2P4_mm = P2P4 / pixelsPerMetric
P3P4_mm = P3P4 / pixelsPerMetric
distance_mm.append(P1P2_mm)
distance_mm.append(P1P3_mm)
distance_mm.append(P2P4_mm)
distance_mm.append(P3P4_mm)
distance_pixel.append(P1P2)
distance_pixel.append(P1P3)
distance_pixel.append(P2P4)
distance_pixel.append(P3P4)
# print(distance_pixel)
# print(distance_mm)
cv.imshow('image', image)
cv.waitKey(0)
EDIT -------------------------------
I've edited the question with another image. I've overlaid the image with the coordinates of the same spot (manually; the left image is the same as the 2nd from the left). As you can see, the coordinates change in each image as the object moves. I was thinking of adding an ID to the cornerHarris() corner coordinates, but then the question is how to find the coordinates of that corner when it moves (one matching idea is sketched after the image below).
Image 4
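One hedged sketch of the ID idea, assuming the corner moves only a modest amount between images (as the answer below notes, this nearest-match assumption is exactly where such techniques can break down): keep the last known coordinate of the tracked corner and pick the newly detected corner closest to it.
import numpy as np

def match_tracked_corner(prev_pt, corners, max_jump=50):
    # corners: output of cv.goodFeaturesToTrack, shape (N, 1, 2).
    # Returns the detected corner nearest the previously tracked point,
    # or None if everything moved farther than max_jump pixels.
    pts = corners.reshape(-1, 2).astype(np.float32)
    d = np.linalg.norm(pts - np.float32(prev_pt), axis=1)
    i = int(np.argmin(d))
    return tuple(pts[i]) if d[i] <= max_jump else None

# e.g. tracked = match_tracked_corner((1049, 390), corners)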
Code for the new image:
image = cv.imread('./image_masks/image_mask.jpg')
object_height = 1
img_shape_x = image.shape[1]
img_shape_y = image.shape[0]
x_centre = round(img_shape_x / 2)
y_centre = round(img_shape_y / 2)
gray_img = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
corners = cv.goodFeaturesToTrack(gray_img, 50, 0.06, 10)
# kernel = np.ones((7,7), np.uint8)
# corners = cv.dilate(corners, kernel, iterations=2)
corners = np.intp(corners)  # np.int0 is an alias removed in newer NumPy
for i in corners:
    x, y = i.ravel()
    cv.circle(image, (x, y), 3, (255, 0, 0), -1)
    print(x, y)
line_x = cv.line(image, (x_centre, 0), (x_centre, x_centre), (255, 0, 0), 1)
line_y2 = cv.line(image, (1028, 490), (x_centre, 390), (0, 255, 255), 1)
d2 = (dist.euclidean((1080, 593), (x_centre, x_centre)))
line_y5 = cv.line(image, (1050, 540), (x_centre, 390), (0, 255, 255), 1)
d5 = (dist.euclidean((1050, 540), (x_centre, x_centre)))
line_y3 = cv.line(image, (1071, 476), (x_centre, 390), (0, 255, 255), 1)
d3 = (dist.euclidean((1071, 476), (x_centre, x_centre)))
line_y6 = cv.line(image, (1071, 451), (x_centre, 390), (0, 255, 255), 1)
d6 = (dist.euclidean((1071, 451), (x_centre, x_centre)))
line_y4 = cv.line(image, (1047, 405), (x_centre, 390), (0, 255, 255), 1)
d4 = (dist.euclidean((1047, 405), (x_centre, x_centre)))
line_y = cv.line(image, (x_centre, 0), (x_centre, x_centre), (255, 0, 0), 1)
#print(d2, d5, d3, d6, d4)
cv.imshow('image', image)
cv.waitKey(0)
All the internet object-tracking solutions assume that the object being tracked is very different from everything else in the frame, e.g. a bright red dot on a piece of paper. They do this to find the largest contours, a reference point, or a reference object. Measuring distances between objects uses similar techniques. This assumption is rarely stated (well, pyimagesearch.com does mention, way down at the end of a blog post, that if there are too many contours, or they are not distinct, then the technique will not work). Real-world problems don't have bright red dots on a piece of paper for a reference.
The centroid tracking method, using indexing (I found), only works if the centroid is very close to the previous one, in the previous frame (of a video). In my situation, using still images, each possibly slightly different from the previous one, the centroid technique did not work.
In my case, the solution was to use a combination of masking, thresholding, and contours. This limited the number of contours found and framed the problem. A minimal sketch of the idea follows.
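A minimal sketch of that combination, with the mask window, threshold value, and area cut-off all placeholder assumptions to tune for your own images:
import cv2 as cv
import numpy as np

image = cv.imread('image_mask.jpg')
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)

# Mask: keep only the region of interest so the grainy background
# never reaches the contour stage (coordinates are placeholders).
mask = np.zeros_like(gray)
mask[300:600, 900:1200] = 255
roi = cv.bitwise_and(gray, mask)

# Threshold: binarise so contours come from the object, not the texture.
_, binary = cv.threshold(roi, 127, 255, cv.THRESH_BINARY)

# Contours: with the frame this constrained, few contours survive.
cnts, _ = cv.findContours(binary, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
cnts = [c for c in cnts if cv.contourArea(c) > 100]  # drop specks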
The lesson (and for anyone else): experimentation is the key, experimentation with as many techniques as possible. There is a solution, or a combination of techniques, for your problem. Internet examples often show a perfect scenario; they generally do not transfer directly to real-world problems.
Experiment!
Related
Consider the image below:
I want to write an OpenCV program to calculate the distance (blue line) in pixels between the midpoint of the table (red dot) and the midpoint of the brown box (blue dot).
I figured I would use cv2.findContours to find the boundaries of the table and the boundaries of the box, get the midpoint of the table and the midpoint of the box, and then probably use dist.euclidean to calculate the distance between the box's midpoint and the table's midpoint. However, I am stuck at this point:
My code (shown below) is drawing contours for the wires and the glare, and I have no interest in them:
import cv2
import numpy as np
import imutils
from imutils import contours
from scipy.spatial import distance as dist

cv2.namedWindow("Object detector", cv2.WINDOW_NORMAL)
image = cv2.imread(PATH_TO_IMAGE)
cv2.resizeWindow('Object detector', 800, 600)
im_bw = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1,2))
morphology_img = cv2.morphologyEx(im_bw, cv2.MORPH_OPEN, kernel,iterations=1)
edged = cv2.Canny(morphology_img, 50, 100)
edged = cv2.dilate(edged, None, iterations=1)
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
(cnts, _) = contours.sort_contours(cnts)
cv2.drawContours(image, cnts, -1, (0,255,0), 3)
orig = image.copy()
cv2.imshow('Object detector', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
I even added the following piece of code to filter out contours of a certain size:
def midpoint(ptA, ptB):
    # helper used below (as in the pyimagesearch examples)
    return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)

for (i, c) in enumerate(cnts):
    # if the contour is not sufficiently large, ignore it
    if cv2.contourArea(c) < 50000:
        continue
    else:
        box = cv2.minAreaRect(c)
        box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
        box = np.array(box, dtype="int")
        cv2.drawContours(image, [box.astype("int")], -1, (0, 255, 0), 1)
        (tl, tr, br, bl) = box
        (tltrX, tltrY) = midpoint(tl, tr)
        (blbrX, blbrY) = midpoint(bl, br)
        (tlblX, tlblY) = midpoint(tl, bl)
        (trbrX, trbrY) = midpoint(tr, br)
        # compute the Euclidean distance between the midpoints
        dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
        dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
        cv2.putText(image, "{:.1f}".format(dA),
                    (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX,
                    0.65, (255, 255, 255), 2)
        cv2.putText(image, "{:.1f}".format(dB),
                    (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,
                    0.65, (255, 255, 255), 2)
But even that didn't help.
How can I accomplish this task of calculating the relative distance of the box from the center of the table?
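For what it's worth, a minimal sketch of the midpoint step, assuming table_cnt and box_cnt are hypothetical, already-isolated contours for the table and the box (the isolation itself, e.g. by colour mask or area filtering, is the part that needs tuning):
import cv2
from scipy.spatial import distance as dist

def centroid(cnt):
    # centre of mass of a contour via image moments
    M = cv2.moments(cnt)
    return (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

# table_cnt and box_cnt are assumed to be already-isolated contours
table_mid = centroid(table_cnt)
box_mid = centroid(box_cnt)
pixel_dist = dist.euclidean(table_mid, box_mid)
cv2.line(image, table_mid, box_mid, (255, 0, 0), 2)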
I'm using the code below to detect concentric circles and measure the distance between them at various angles. The outer circle matrix shape is zero and I'm getting an error: ValueError: operands could not be broadcast together with shapes (0,) (2,). Please help me solve the error. Image attached for your reference.
import cv2
import numpy as np
import shapely.geometry as shapgeo
# Read image, and binarize
img = cv2.imread('/Users/n/Opencv/New_OpenCv/image.jpeg', cv2.IMREAD_GRAYSCALE)
img = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY)[1]
# Find (approximated) contours of inner and outer shape
cnts, hier = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
outer = [cv2.approxPolyDP(cnts[0], 0.1, True)]
inner = [cv2.approxPolyDP(cnts[2], 0.1, True)]
# Just for visualization purposes: Draw contours of inner and outer shape
h, w = img.shape[:2]
vis = np.zeros((h, w, 3), np.uint8)
cv2.drawContours(vis, outer, -1, (255, 0, 0), 1)
cv2.drawContours(vis, inner, -1, (0, 0, 255), 1)
# Squeeze contours for further processing
outer = np.vstack(outer).squeeze()
inner = np.vstack(inner).squeeze()
# Calculate centroid of inner contour
M = cv2.moments(inner)
cx = int(M['m10'] / M['m00'])
cy = int(M['m01'] / M['m00'])
# Calculate maximum needed radius for later line intersections
r_max = np.min([cx, w - cx, cy, h - cy])
# Set up angles (in degrees)
angles = np.arange(0, 360, 4)
# Initialize distances
dists = np.zeros_like(angles)
# Prepare calculating the intersections using Shapely
poly_outer = shapgeo.asLineString(outer)
poly_inner = shapgeo.asLineString(inner)
# Iterate angles and calculate distances between inner and outer shape
for i, angle in enumerate(angles):
    # Convert angle from degrees to radians
    angle = angle / 180 * np.pi
    # Calculate end points of line from centroid in angle's direction
    x = np.cos(angle) * r_max + cx
    y = np.sin(angle) * r_max + cy
    points = [(cx, cy), (x, y)]
    # Calculate intersections using Shapely
    poly_line = shapgeo.LineString(points)
    insec_outer = np.array(poly_outer.intersection(poly_line))
    insec_inner = np.array(poly_inner.intersection(poly_line))
    # Calculate distance between intersections using L2 norm
    dists[i] = np.linalg.norm(insec_outer - insec_inner)
    # Just for visualization purposes: Draw lines for some examples
    if (i == 10) or (i == 40) or (i == 75):
        # Line from centroid to end points
        cv2.line(vis, (cx, cy), (int(x), int(y)), (128, 128, 128), 1)
        # Line between both shapes
        cv2.line(vis,
                 (int(insec_inner[0]), int(insec_inner[1])),
                 (int(insec_outer[0]), int(insec_outer[1])), (0, 255, 0), 2)
        # Distance
        cv2.putText(vis, str(dists[i]), (int(x), int(y)),
                    cv2.FONT_HERSHEY_COMPLEX, 0.75, (0, 255, 0), 2)
# Output angles and distances
print(np.vstack([angles, dists]).T)
print(poly_outer)
print(poly_inner)
print(poly_line)
print(insec_inner)
print(insec_outer)
# Just for visualization purposes: Output image
cv2.imshow('Output', vis)
cv2.waitKey(0)
cv2.destroyAllWindows()
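A hedged reading of the error: the broadcast failure means one of the Shapely intersections came back empty (or as a multi-part geometry), so np.array(...) has shape (0,) while the other operand has shape (2,). A sketch that guards against both cases before taking the norm (as an aside, Shapely 2.0 removed asLineString; shapgeo.LineString(outer) is the replacement):
def nearest_point(geom, origin):
    # Return the intersection point closest to origin as (x, y),
    # or None if the ray did not hit the contour at all.
    if geom.is_empty:
        return None
    if hasattr(geom, "geoms"):  # MultiPoint / GeometryCollection
        pts = [g for g in geom.geoms if g.geom_type == "Point"]
        if not pts:
            return None
        geom = min(pts, key=lambda p: (p.x - origin[0]) ** 2 + (p.y - origin[1]) ** 2)
    if geom.geom_type != "Point":
        return None
    return (geom.x, geom.y)

# inside the angle loop, replacing the two np.array(...) lines:
# p_out = nearest_point(poly_outer.intersection(poly_line), (cx, cy))
# p_in = nearest_point(poly_inner.intersection(poly_line), (cx, cy))
# if p_out is not None and p_in is not None:
#     dists[i] = np.linalg.norm(np.subtract(p_out, p_in))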
I need to calculate the major axis length, minor axis length, and eccentricity. I am using fitEllipse() for the calculation, but for some objects the major axis length gives a higher value than the bounding-box height. I couldn't find how to fix the problem.
Here is the code:
import cv2
import numpy as np
img = cv2.imread('Resources/son1.png')
#input and output image
def getContours(img, imgContour):
    # Find contours and set contour retrieval mode
    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    # cnt is one of the contours for every object in the image
    for cnt in contours:
        # Contour for shape; area and length are found using this contour
        cv2.drawContours(imgContour, cnt, -1, (0, 255, 0), 1)
        # BoundingBox (rectangle): gives the bounding box parameters => (x, y, width, height)
        x, y, w, h = cv2.boundingRect(cnt)
        img = cv2.rectangle(imgContour, (x, y), (x + w, y + h), (0, 255, 0), 2)
        print("bounding_box(x,y,w,h):", x, y, w, h)
        # W: and H: texts (Length)
        cv2.putText(imgContour, "w={},h={}".format(w, h), (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1, 16)
        # give us the specific contour (cnt) area
        area = cv2.contourArea(cnt)
        # Area text
        cv2.putText(imgContour, "Area: " + str(int(area)), (x + w + 20, y + 45), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (255, 0, 0), 1, 16)
        # # Center of objects in picture
        # M = cv2.moments(cnt)
        # print(M)
        # centx = int(M['m10'] / M['m00'])
        # centy = int(M['m01'] / M['m00'])
        # print(centx, centy)

        # MajorAxisLength, MinorAxisLength, Eccentricity
        (x, y), (minorAxisLength, majorAxisLength), angle = cv2.fitEllipse(cnt)
        ellipse = cv2.fitEllipse(cnt)
        cv2.ellipse(img, ellipse, (0, 0, 255), 2)
        # semi-major and semi-minor axes
        a = majorAxisLength / 2
        b = minorAxisLength / 2
        # Formula of eccentricity is:
        Eccentricity = round(np.sqrt(pow(a, 2) - pow(b, 2)) / a, 2)
        x = int(x + w / 2) + 1
        y = int(y + h / 2) + 1
        cv2.putText(imgContour, 'Minor =' + str(round(minorAxisLength, 2)), (x + 10, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1, 16)
        cv2.putText(imgContour, 'Major =' + str(round(majorAxisLength, 2)), (x + 10, y + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1, 16)
        cv2.putText(imgContour, 'Eccentricity =' + str(round(Eccentricity, 3)), (x + 10, y + 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1, 16)
imgContour = img.copy()
imgBlur = cv2.GaussianBlur(img, (7, 7),1)
imgGray = cv2.cvtColor(imgBlur, cv2.COLOR_BGR2GRAY)
ImgThresh = cv2.threshold(imgGray, 100 , 255, cv2.THRESH_BINARY)[1]
imgCanny = cv2.Canny(imgGray,50,50)
getContours(imgCanny,imgContour)
cv2.imshow("Original Image", img)
cv2.imshow("Canny", imgCanny)
cv2.imshow("Contour", imgContour)
cv2.waitKey(0)
cv2.destroyAllWindows()
I've added the original image:
Here is the result image:
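A hedged aside on the symptom: cv2.fitEllipse is a least-squares fit and is not constrained to stay inside the bounding box, so on a noisy or open Canny contour the fitted ellipse (and therefore the major axis) can easily come out longer than the box height. Sorting the two returned axis lengths, rather than trusting their order, at least keeps the eccentricity formula valid; this sketch would sit inside the contour loop:
(ex, ey), (d1, d2), angle = cv2.fitEllipse(cnt)
major, minor = max(d1, d2), min(d1, d2)  # don't rely on the return order
a, b = major / 2, minor / 2              # semi-major and semi-minor axes
eccentricity = np.sqrt(a ** 2 - b ** 2) / a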
I have a binary image and I'm trying to contour elements that are elongated (not circles).
If I try to contour one element, it contours with the correct colour. But how do I do it with a for loop? It only shows the last element with the correct colour.
contour of last element
Here is my code:
import math
import cv2
import numpy as np
from matplotlib import pyplot as plt

# contours and img_mask are assumed to exist from earlier processing
for i in range(len(contours)):
    ctr = contours[i]
    M = cv2.moments(ctr)
    cX = M['m10'] / M['m00']
    cY = M['m01'] / M['m00']
    rgb = cv2.cvtColor(img_mask, cv2.COLOR_GRAY2RGB)
    cv2.drawContours(rgb, contours, i, (255, 0, 0), 2)
    rot_rect = cv2.minAreaRect(ctr)
    box = np.int64(cv2.boxPoints(rot_rect))
    xx1 = (box[0, 0] + box[1, 0]) / 2
    yy1 = (box[0, 1] + box[1, 1]) / 2
    xx2 = (box[2, 0] + box[1, 0]) / 2
    yy2 = (box[2, 1] + box[1, 1]) / 2
    distance1 = math.sqrt(((xx1 - cX) ** 2) + ((yy1 - cY) ** 2))
    distance2 = math.sqrt(((xx2 - cX) ** 2) + ((yy2 - cY) ** 2))
    if (distance1 < 0.5 * distance2) or (0.5 * distance1 > distance2):
        cv2.drawContours(rgb, [box], -1, (0, 255, 0), 2)
        plt.imshow(rgb)
    else:
        cv2.drawContours(rgb, [box], -1, (0, 0, 255), 2)
        plt.imshow(rgb)
This helped me. I saved the relevant contours to relevant arrays (note that they must be arrays and not lists, so we can use them in the contour-drawing method). It also helps that the drawing now happens once, after the loop, instead of on an image that is re-created on every iteration.
the result of contouring specific elements in image
import math
ctr1 = []
ctr2 = []
for i in range(len(contours)):
    ctr = contours[i]
    M = cv2.moments(ctr)
    cX = M['m10'] / M['m00']
    cY = M['m01'] / M['m00']
    rgb = cv2.cvtColor(img_mask, cv2.COLOR_GRAY2RGB)
    rot_rect = cv2.minAreaRect(ctr)
    box = np.int64(cv2.boxPoints(rot_rect))
    xx1 = (box[0, 0] + box[1, 0]) / 2
    yy1 = (box[0, 1] + box[1, 1]) / 2
    xx2 = (box[2, 0] + box[1, 0]) / 2
    yy2 = (box[2, 1] + box[1, 1]) / 2
    distance1 = math.sqrt(((xx1 - cX) ** 2) + ((yy1 - cY) ** 2))
    distance2 = math.sqrt(((xx2 - cX) ** 2) + ((yy2 - cY) ** 2))
    if (distance1 < 0.5 * distance2) or (0.5 * distance1 > distance2):
        ctr1.append(np.asarray(ctr, dtype=np.int32))
    else:
        ctr2.append(np.asarray(ctr, dtype=np.int32))
cv2.drawContours(rgb, ctr1, -1, (255, 0, 0), 5)
cv2.drawContours(rgb, ctr2, -1, (0, 255, 0), 2)
I currently have a program that finds the centroid of a hand. From this center point, 4 lines are drawn to the corners of the frame (top left, top right, bottom right, bottom left), seen here.
My overall goal is to execute a function when the lengths of these 4 lines are not exhibiting too much change. So far my first plan of action was to find the distance of each corner (4) to the centroid; I have been successful at this by utilizing the distance formula. I can see this data updating in real time in a 1D array with 4 elements, seen here. Now my thinking is that the way I will be able to find the current change in the distances is by subtracting the newer distances from the ones before. The outputted difference will then be evaluated against some type of threshold. My main question is: how could I do this subtraction, so that I am able to subtract the newer array from the previous one?
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time
import math
import time, threading
import itertools
def cal_distance(center):
    upper_right_distance = math.sqrt((math.pow(center[0] - 600, 2)) + (math.pow(center[1] - 0, 2)))
    upper_left_distance = math.sqrt((math.pow(center[0] - 0, 2)) + (math.pow(center[1] - 0, 2)))
    lower_left_distance = math.sqrt((math.pow(center[0] - 0, 2)) + (math.pow(center[1] - 600, 2)))
    lower_right_distance = math.sqrt((math.pow(center[0] - 600, 2)) + (math.pow(center[1] - 600, 2)))
    # note: the original list repeated upper_left_distance and dropped upper_right_distance
    distances = [upper_right_distance, upper_left_distance, lower_left_distance, lower_right_distance]
    return distances
skinLower = (0, 58, 50)
skinUpper = (30, 255, 255)
pts = deque(maxlen=2)
cap = cv2.VideoCapture(0)
while True:
    _, frame = cap.read()
    frame = frame[200:500, 550:850]
    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, skinLower, skinUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None
    distance = None
    if len(cnts) > 0:
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        if radius > 10:
            cv2.circle(frame, (int(x), int(y)), int(radius), (255, 255, 255), 2)
            cv2.circle(frame, center, 5, (255, 0, 255), -1)
            cv2.line(frame, center, (0, 0), (255, 0, 255), 3)
            cv2.line(frame, center, (600, 0), (255, 0, 255), 3)
            cv2.line(frame, center, (0, 600), (255, 0, 255), 3)
            cv2.line(frame, center, (600, 600), (255, 0, 255), 3)
            distance = cal_distance(center)
            print("updated distance")
            print(distance)
    cv2.imshow('thing', mask)
    cv2.imshow('Original', frame)
    key = cv2.waitKey(1)
    if key == 27:
        break
cap.release()
cv2.destroyAllWindows()
You can do the task in the following steps:
1) Set a boolean flag or any variable that checks whether it's the first frame (whether the distances are being calculated for the first time or not).
2) If it's the first time, go to step 3; otherwise go to step 4.
3) Read the current frame -> find distances -> store them in an array named (e.g.) "prevDistances" -> update the boolean flag -> break.
4) Read the current frame -> find distances -> store them in an array named (e.g.) "currDistances" -> go to step 5.
5) Now you can compare the distance arrays element by element (e.g. prevDistances[1] - currDistances[1], ...) and check each difference against its respective threshold. If the comparisons/subtractions cross the threshold values, you can trigger the desired functionality. A minimal sketch follows.
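A minimal sketch of steps 3-5, with the threshold value a placeholder assumption; this logic would sit inside the while loop of the question's code, after distance = cal_distance(center):
import numpy as np

THRESHOLD = 15.0  # pixels of allowed change per line; tune for your setup
prev_distances = None

# inside the while loop, after: distance = cal_distance(center)
if distance is not None:
    curr = np.asarray(distance, dtype=np.float32)
    if prev_distances is None:
        prev_distances = curr  # first frame: just store (steps 1-3)
    else:
        change = np.abs(curr - prev_distances)  # element-wise |new - old| (step 4)
        if np.all(change < THRESHOLD):
            pass  # all 4 lines are stable: execute your function here (step 5)
        prev_distances = curr  # current becomes previous for the next frame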