How to determine which rectangle a point is in? - python

Sorry if the title is unclear. Basically, I've written a program that tracks an object of a certain color as it moves around my webcam's FOV. As I move the object around, the computer places a red dot on the center of the object and moves the dot with the object. However, the object's location doesn't really mean anything yet. I want the frame to be divided into four equal parts, with each part outputting a different number. For example, if the object (dot) is in quadrant one, I want the number 1 to appear on the frame. How would I do this? Can anyone nudge me in the right direction? I'm using OpenCV-Python and am grateful for any help.
Here is the code I have so far.
# import the necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=32,
    help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "orange"
# fish in the HSV color space
orangeLower = (5, 50, 50)
orangeUpper = (15, 255, 255)

# initialize the list of tracked points, the frame counter,
# and the coordinate deltas
pts = deque(maxlen=args["buffer"])
counter = 0
(dX, dY) = (0, 0)
direction = ""

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    vs = VideoStream(src=0).start()
# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])

# allow the camera or video file to warm up
time.sleep(2.0)

# keep looping
while True:
    # grab the current frame
    frame = vs.read()

    # handle the frame from VideoCapture or VideoStream
    frame = frame[1] if args.get("video", False) else frame

    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if frame is None:
        break

    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

    # construct a mask for the color "orange", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, orangeLower, orangeUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None

    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius),
                (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
            pts.appendleft(center)

    # loop over the set of tracked points
    for i in np.arange(1, len(pts)):
        # if either of the tracked points are None, ignore
        # them
        if pts[i - 1] is None or pts[i] is None:
            continue

        # check to see if enough points have been accumulated in
        # the buffer
        if counter >= 10 and i == 10 and pts[i - 10] is not None:
            # compute the difference between the x and y
            # coordinates and re-initialize the direction
            # text variables
            dX = pts[i - 10][0] - pts[i][0]
            dY = pts[i - 10][1] - pts[i][1]
            (dirX, dirY) = ("", "")

            # ensure there is significant movement in the
            # x-direction
            if np.abs(dX) > 20:
                dirX = "East" if np.sign(dX) == 1 else "West"

            # ensure there is significant movement in the
            # y-direction
            if np.abs(dY) > 20:
                dirY = "South" if np.sign(dY) == 1 else "North"

            # handle when both directions are non-empty
            if dirX != "" and dirY != "":
                direction = "{}-{}".format(dirY, dirX)
            # otherwise, only one direction is non-empty
            else:
                direction = dirX if dirX != "" else dirY

        # otherwise, compute the thickness of the line and
        # draw the connecting lines
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)

    # show the movement deltas and the direction of movement on
    # the frame
    cv2.putText(frame, direction, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
        0.65, (0, 0, 255), 3)
    cv2.putText(frame, "dx: {}, dy: {}".format(dX, dY),
        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
        0.35, (0, 0, 255), 1)

    # draw the quadrant boundaries, then show the frame and
    # increment the frame counter
    cv2.rectangle(img=frame, pt1=(0, 0), pt2=(300, 225), color=(0, 0, 0), thickness=3, lineType=8, shift=0)
    cv2.rectangle(img=frame, pt1=(300, 1), pt2=(600, 225), color=(0, 0, 0), thickness=3, lineType=8, shift=0)
    cv2.rectangle(img=frame, pt1=(0, 225), pt2=(300, 550), color=(0, 0, 0), thickness=3, lineType=8, shift=0)
    cv2.rectangle(img=frame, pt1=(300, 225), pt2=(600, 550), color=(0, 0, 0), thickness=3, lineType=8, shift=0)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    counter += 1

    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# if we are not using a video file, stop the camera video stream
if not args.get("video", False):
    vs.stop()
# otherwise, release the camera
else:
    vs.release()

# close all windows
cv2.destroyAllWindows()
Here is an image of the frame I get when I run the code.
As you can see, there are lines dividing the image into fourths. These rectangles are where I want the outputs to appear.
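For the quadrant output itself, here is a minimal sketch (the quadrant numbering and the putText placement are my own assumptions, adjust as needed): since the frame is resized to 600 px wide, compare the dot's center against the frame's midpoint.

# a minimal sketch: map a tracked center (x, y) to a quadrant number
# (numbering is an assumption -- renumber to match your layout)
def quadrant_of(center, frame_width, frame_height):
    x, y = center
    mid_x, mid_y = frame_width // 2, frame_height // 2
    if x < mid_x and y < mid_y:
        return 1  # top-left
    elif x >= mid_x and y < mid_y:
        return 2  # top-right
    elif x < mid_x and y >= mid_y:
        return 3  # bottom-left
    return 4      # bottom-right

# inside the main loop, after center is computed:
# if center is not None:
#     q = quadrant_of(center, frame.shape[1], frame.shape[0])
#     cv2.putText(frame, str(q), (10, 60), cv2.FONT_HERSHEY_SIMPLEX,
#         0.65, (0, 0, 255), 2)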

Related

Comparing continuous pairs of arrays

I currently have a program that finds the centroid of a hand. From this center point, 4 lines are drawn to the corners of the frame: top left, top right, bottom right, and bottom left, as seen here.
My overall goal is to execute a function when the lengths of these 4 lines stop changing by very much. So far, my first plan of action was to find the distance from each of the 4 corners to the centroid; I have been successful at this by using the distance formula. I can see this data updating in real time as a 1D array with 4 elements, as seen here. My thinking is that the way to find the current change in the distances is to subtract the newer distances from the ones before, and then evaluate the differences against some kind of threshold. My main question is: how can I subtract the newer array from the previous one?
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time
import math
import threading
import itertools

def cal_distance(center):
    upper_right_distance = math.sqrt((math.pow(center[0] - 600, 2)) + (math.pow(center[1] - 0, 2)))
    upper_left_distance = math.sqrt((math.pow(center[0] - 0, 2)) + (math.pow(center[1] - 0, 2)))
    lower_left_distance = math.sqrt((math.pow(center[0] - 0, 2)) + (math.pow(center[1] - 600, 2)))
    lower_right_distance = math.sqrt((math.pow(center[0] - 600, 2)) + (math.pow(center[1] - 600, 2)))
    # note: the original listed upper_left_distance twice and never used
    # upper_right_distance; fixed here
    distances = [upper_left_distance, upper_right_distance, lower_left_distance, lower_right_distance]
    return distances

skinLower = (0, 58, 50)
skinUpper = (30, 255, 255)
pts = deque(maxlen=2)
cap = cv2.VideoCapture(0)

while True:
    _, frame = cap.read()
    frame = frame[200:500, 550:850]
    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, skinLower, skinUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None
    distance = None
    if len(cnts) > 0:
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        if radius > 10:
            cv2.circle(frame, (int(x), int(y)), int(radius), (255, 255, 255), 2)
            cv2.circle(frame, center, 5, (255, 0, 255), -1)
            cv2.line(frame, center, (0, 0), (255, 0, 255), 3)
            cv2.line(frame, center, (600, 0), (255, 0, 255), 3)
            cv2.line(frame, center, (0, 600), (255, 0, 255), 3)
            cv2.line(frame, center, (600, 600), (255, 0, 255), 3)
            distance = cal_distance(center)
            print("updated distance")
            print(distance)
    cv2.imshow('thing', mask)
    cv2.imshow('Original', frame)
    key = cv2.waitKey(1)
    if key == 27:
        break

cap.release()
cv2.destroyAllWindows()
You can do the task in the following steps:
1) Set a boolean flag (or any variable) that records whether this is the first frame, i.e. whether the distances are being calculated for the first time.
2) If it is the first time, go to step 3; otherwise go to step 4.
3) Read the current frame -> find the distances -> store them in an array named (for example) "prevDistances" -> update the boolean flag -> move on to the next frame.
4) Read the current frame -> find the distances -> store them in an array named (for example) "currDistances" -> go to step 5.
5) Now you can compare the distance arrays element by element (e.g. prevDistances[1] - currDistances[1], ...) and check each difference against its respective threshold. If the comparisons/subtractions cross the threshold values, you can trigger the desired functionality. A minimal sketch of this comparison follows.
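A sketch of that element-wise prev/curr comparison, assuming cal_distance returns a list of four floats; THRESHOLD and execute_function are hypothetical names:

import numpy as np

THRESHOLD = 5.0  # max allowed per-corner change in pixels (an assumption; tune it)
prevDistances = None

def distances_stable(currDistances):
    # return True when no corner distance changed by more than THRESHOLD
    global prevDistances
    curr = np.asarray(currDistances, dtype=float)
    if prevDistances is None:
        prevDistances = curr  # first frame: nothing to compare against yet
        return False
    deltas = np.abs(prevDistances - curr)  # element-wise |prev - curr|
    prevDistances = curr
    return bool(np.all(deltas < THRESHOLD))

# inside the loop, after distance = cal_distance(center):
# if distances_stable(distance):
#     execute_function()  # hypothetical action when the hand is steady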

opencv, not sure what pts output means

I am a newbie using the pyimagesearch code for ball tracking using python 2.7 and opencv.
https://www.pyimagesearch.com/2015/09/14/ball-tracking-with-opencv/
I am trying to write the x,y coordinates of a tracked object into a .csv file. I am converting pts to a string and then writing to a .csv file. I get a set of numbers like this: (255 386) (266 399) Are these x,y coordinates? And if so, what do they mean in relation to the image?
#import the necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time
import csv

#construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
    help="max buffer size")
args = vars(ap.parse_args())

#define the lower and upper boundaries of the "green"
#ball in the HSV color space, then initialize the
#list of tracked points
greenLower = (0, 0, 0)
greenUpper = (180, 255, 40)
pts = deque(maxlen=args["buffer"])

#if a video path was not supplied, grab the reference
#to the webcam
if not args.get("video", False):
    vs = VideoStream(src=0).start()
#otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])

#allow the camera or video file to warm up
time.sleep(2.0)

#keep looping
while True:
    #grab the current frame
    frame = vs.read()

    #handle the frame from VideoCapture or VideoStream
    frame = frame[1] if args.get("video", False) else frame

    #if we are viewing a video and we did not grab a frame,
    #then we have reached the end of the video
    if frame is None:
        break

    #resize the frame, blur it, and convert it to the HSV
    #color space
    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

    #construct a mask for the color "green", then perform
    #a series of dilations and erosions to remove any small
    #blobs left in the mask
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    #find contours in the mask and initialize the current
    #(x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    center = None

    #only proceed if at least one contour was found
    if len(cnts) > 0:
        #find the largest contour in the mask, then use
        #it to compute the minimum enclosing circle and
        #centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        #only proceed if the radius meets a minimum size
        if radius > 10:
            #draw the circle and centroid on the frame,
            #then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 225), -1)

    #update the points queue
    pts.appendleft(center)

    #loop over the set of tracked points
    for i in range(1, len(pts)):
        #if either of the tracked points are None, ignore
        #them
        if pts[i - 1] is None or pts[i] is None:
            continue
        #otherwise, compute the thickness of the line and
        #draw the connecting lines
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 225), thickness)

    #write info to file
    f = open("foo11.csv", "w+")
    s = str(pts)
    f.write(s)
    f.close()

    #show the frame to our screen
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    #if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

#if we are not using a video file, stop the camera video stream
if not args.get("video", False):
    vs.stop()
#otherwise, release the camera
else:
    vs.release()

#close all windows
cv2.destroyAllWindows()
Consider the following grid to be an image:
This image has a shape of (7, 7): 7 pixels in height (along y) and 7 pixels in width (along x). The image therefore has 49 pixels, which is its size.
The origin (0, 0) is at the top-left corner; it is the top-leftmost pixel of the image.
Now, as the centroid of the contour (ball) moves, it is always located in one of these 49 pixels.
As a result, the .csv file is storing those pixel coordinates as (x, y) tuples.
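Incidentally, str(pts) dumps the entire deque as one string on every frame. If the goal is one (x, y) row per tracked centroid, a sketch using the csv module (file name kept from the question; where to hook it in is an assumption):

import csv

# open once, before the while loop
csv_file = open("foo11.csv", "w", newline="")
writer = csv.writer(csv_file)
writer.writerow(["x", "y"])  # header row

# inside the while loop, after center is computed:
# if center is not None:
#     writer.writerow(center)  # writes e.g. 255,386

# after the loop exits:
# csv_file.close()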

No output from Kalman filter in 2D object tracking

I'm trying to apply a Kalman filter with OpenCV in Python for tracking the position of a ball. I can already detect it, but there is still some noise I want to eliminate. There are two variables I measure - the x and y position - and four variables I would like to get - the x and y position plus the x and y velocity - but I get none of them. When I display x0, y0, vy and vx on the screen I get "[.0]".
Another problem is that I cannot apply the control matrix to the kalman.predict() function, because I get the following error:
OpenCV Error: Assertion failed (a_size.width == len) in gemm, file /tmp/opencv3-20170518-8732-1bjq2j7/opencv-3.2.0/modules/core/src/matmul.cpp, line 1537
Traceback (most recent call last):
File "kalman.py", line 128, in <module>
kalmanout = kalman.predict(kalman.controlMatrix)
cv2.error: /tmp/opencv3-20170518-8732-1bjq2j7/opencv-3.2.0/modules/core/src/matmul.cpp:1537: error: (-215) a_size.width == len in function ge
This is the piece of code I'm using for the Kalman filter (for the control matrix application I use the line kalmanout = kalman.predict(kalman.controlMatrix) near the end):
# import the necessary packages
from collections import deque
import numpy as np
import argparse
import imutils
import cv2
import time

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=10,
    help="max buffer size")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "blue"
# ball in the HSV color space, then initialize the
# list of tracked points
greenLower = (48, 62, 88)
greenUpper = (151, 238, 255)
pts = deque(maxlen=args["buffer"])
tintervals = deque(maxlen=args["buffer"])
tPrev = 0
pRad = 0
mapix = 0
mspeed = 0

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    camera = cv2.VideoCapture(0)
# otherwise, grab a reference to the video file
else:
    camera = cv2.VideoCapture(args["video"])

# initialize background subtraction
fgbg = cv2.createBackgroundSubtractorMOG2()

# keep looping
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()
    displayx = 0

    # start counting time
    tPrev = time.time()

    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if args.get("video") and not grabbed:
        break

    # resize the frame and apply background subtraction
    frame = imutils.resize(frame, width=500)
    mask = fgbg.apply(frame)
    res = cv2.bitwise_and(frame, frame, mask=mask)

    # blur the frame and convert it to the HSV
    blurred = cv2.GaussianBlur(res, (11, 11), 0)
    hsv = cv2.cvtColor(res, cv2.COLOR_BGR2HSV)

    # construct a mask for the color "blue", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None

    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        pRad = radius
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius),
                (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)

    # update time intervals queue
    tintervals.appendleft(time.time() - tPrev)

    # update the points queue
    pts.appendleft(center)

    # predict position of the ball
    if pRad > 0 and len(pts) > 5:
        if pts[0] is not None and pts[1] is not None:
            apix = 98.1 / (0.032 / pRad)
            mapix = apix
            y0 = pts[0][1]
            x0 = pts[0][0]
            kalmanin = np.array((2, 1), np.float32)  # measurement
            kalmanout = np.zeros((4, 1), np.float32)  # tracked / prediction
            kalmanin = np.array([[np.float32(x0)], [np.float32(y0)]])
            tkalman = 0.01
            kalman = cv2.KalmanFilter(4, 2)
            kalman.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
            kalman.transitionMatrix = np.array([[1, 0, tkalman, 0], [0, 1, 0, tkalman], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)
            kalman.controlMatrix = np.array([[0], [0.5 * (tkalman ** 2.0)], [0], [tkalman]], np.float32) * mapix
            kalman.processNoiseCov = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32) * 0.03
            kalman.measurementNoiseCov = np.array([[1, 0], [0, 1]], np.float32) * 0.00009
            kalman.correct(kalmanin)
            kalmanout = kalman.predict(kalman.controlMatrix)
            x0 = kalmanout[0]
            y0 = kalmanout[1]
            vx = kalmanout[2]
            vy = kalmanout[3]
            displayx = x0
            listX = []
            listY = []
            for i in range(1, 11):
                t = 0.01 * i
                y = y0 + vy * t + (apix * (t ** 2)) / 2
                x = x0 + vx * t
                listX.append(int(x))
                listY.append(int(y))
            mspeed = vy
            for i in range(0, 9):
                cv2.line(frame, (listX[i], listY[i]), (listX[i + 1], listY[i + 1]), (255, 0, 0), 4)

    # loop over the set of tracked points
    for i in xrange(1, len(pts)):
        # if either of the tracked points are None, ignore
        # them
        if pts[i - 1] is None or pts[i] is None:
            continue
        # otherwise, compute the thickness of the line and
        # draw the connecting lines
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)

    cv2.putText(frame, "y axis speed: {}".format(displayx),
        (120, frame.shape[0] - 70), cv2.FONT_HERSHEY_SIMPLEX,
        0.5, (0, 0, 255), 1)
    cv2.putText(frame, "radius in px: {}".format(pRad),
        (120, frame.shape[0] - 30), cv2.FONT_HERSHEY_SIMPLEX,
        0.5, (0, 0, 255), 1)
    cv2.putText(frame, "apix: {}".format(mapix),
        (120, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
        0.5, (0, 0, 255), 1)
    if mapix != 0:
        cv2.putText(frame, "radius in meters: {}".format((9.81 * pRad) / mapix),
            (120, frame.shape[0] - 50), cv2.FONT_HERSHEY_SIMPLEX,
            0.5, (0, 0, 255), 1)

    # show the x, y position (newest entry in pts)
    cv2.putText(frame, "x, y: {}".format(pts[0]),
        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
        0.35, (0, 0, 255), 1)

    # show the frame to our screen
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
First of all, I would move the initialization of the Kalman filter outside the loop. The main issue with your code is that you have set the control matrix. If I understand your task correctly, you are only observing the system, not controlling it, so just skip the kalman.controlMatrix initialization or set it to a zero matrix. In the loop you then simply use
kalmanout = kalman.predict()
kalman.correct(kalmanin)
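A minimal sketch of that restructuring, with the matrix values carried over from the question (treat them as tuning assumptions):

import numpy as np
import cv2

dt = 0.01  # time step, carried over from the question's tkalman

# build the filter once, before the tracking loop:
# 4 state variables (x, y, vx, vy), 2 measured (x, y)
kalman = cv2.KalmanFilter(4, 2)
kalman.measurementMatrix = np.array([[1, 0, 0, 0],
                                     [0, 1, 0, 0]], np.float32)
kalman.transitionMatrix = np.array([[1, 0, dt, 0],
                                    [0, 1, 0, dt],
                                    [0, 0, 1, 0],
                                    [0, 0, 0, 1]], np.float32)
kalman.processNoiseCov = np.eye(4, dtype=np.float32) * 0.03
kalman.measurementNoiseCov = np.eye(2, dtype=np.float32) * 0.00009
# no controlMatrix: the ball is only observed, never driven

# inside the tracking loop, once a centroid (x0, y0) is measured:
# kalmanin = np.array([[np.float32(x0)], [np.float32(y0)]])
# kalmanout = kalman.predict()      # no argument: no control input
# kalman.correct(kalmanin)
# x, y, vx, vy = kalmanout.ravel()  # position and velocity estimates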

OpenCV python - How to constantly update the time output and distance output?

I am using a Raspberry Pi B+ running Raspbian Wheezy and sporting a USB webcam. My goal is to measure the distance between an object and the camera in realtime.
I am following a guide on how to do so with still images.
This is the code I am currently running:
# import the necessary packages
import numpy as np
import cv2
import datetime
import time

def find_marker(frame):
    # convert the image to grayscale, blur it, and detect edges
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 35, 125)
    # find the contours in the edged image and keep the largest one;
    # we'll assume that this is our piece of paper in the image
    (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    c = max(cnts, key=cv2.contourArea)
    # compute the bounding box of the paper region and return it
    return cv2.minAreaRect(c)

def distance_to_camera(knownWidth, focalLength, perWidth):
    # compute and return the distance from the marker to the camera
    return (knownWidth * focalLength) / perWidth

# initialize the known distance from the camera to the object (in inches)
KNOWN_DISTANCE = 11.811

# initialize the known object width (in inches)
KNOWN_WIDTH = 2.3622

# initialize the list of images that we'll be using
#IMAGE_PATHS = ["images/2ft.png", "images/3ft.png", "images/4ft.png"]

# load the first image that contains an object that is KNOWN TO BE 2 feet
# from our camera, then find the paper marker in the image, and initialize
# the focal length
#image = cv2.imread(IMAGE_PATHS[0])
#marker = find_marker(image)
#focalLength = (marker[1][0] * KNOWN_DISTANCE) / KNOWN_WIDTH

cap = cv2.VideoCapture(0)
timestamp = datetime.datetime.now()

while(1):
    (grabbed, frame) = cap.read()
    marker = find_marker(frame)
    # for () LOOP THIS TO GET DISTANCE CALCULATION FULLY WORKING!
    focalLength = (marker[1][0] * KNOWN_DISTANCE) / KNOWN_WIDTH
    inches = distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])
    # draw a bounding box around the image and display it
    box = np.int0(cv2.cv.BoxPoints(marker))
    cv2.drawContours(frame, [box], -1, (0, 255, 0), 2)
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(frame, "%.2fft" % (inches / 12),
        (frame.shape[1] - 200, frame.shape[0] - 20), cv2.FONT_HERSHEY_SIMPLEX,
        2.0, (0, 255, 0), 3)
    cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
        0.35, (0, 0, 255), 1)
    # Write to textfile here and send
    # for () LOOP End
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()

# loop over the images
#for imagePath in IMAGE_PATHS:
    # load the image, find the marker in the image, then compute the
    # distance to the marker from the camera
#    image = cv2.imread(imagePath)
#    marker = find_marker(image)
#    inches = distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])
    # draw a bounding box around the image and display it
#    box = np.int0(cv2.cv.BoxPoints(marker))
#    cv2.drawContours(image, [box], -1, (0, 255, 0), 2)
#    cv2.putText(image, "%.2fft" % (inches / 12),
#        (image.shape[1] - 200, image.shape[0] - 20), cv2.FONT_HERSHEY_SIMPLEX,
#        2.0, (0, 255, 0), 3)
#    cv2.imshow("image", image)
#    cv2.waitKey(0)
Here is my output:
However, the time (the red text on the bottom left) and the distance detected don't change as the program runs. Is there any way of getting these two values to update until the program ends?
This is why both values are not being updated:

timestamp

timestamp is set outside the while loop:

timestamp = datetime.datetime.now()
while(1):

It should be:

while(1):
    timestamp = datetime.datetime.now()

distance

distance_to_camera, with these parameters, will always produce a constant output:

# assuming:
# a = KNOWN_WIDTH
# b = focalLength
# c = marker[1][0]
# d = KNOWN_DISTANCE
def distance_to_camera(x, y, z):
    return (x * y) / z

b = c * d / a
inches = distance_to_camera(a, b, c)  # => a*b/c
# inches = a*b/c, with b = c*d/a
# inches = (a*c*d) / (a*c)
# inches = d  << constant output

which is equal to KNOWN_DISTANCE. If you do the math, KNOWN_DISTANCE / 12 = 0.98425 ft is exactly the distance you are getting.
Edit:
I just read the tutorial, and it looks like you should do the focalLength calculation just once, outside the while loop.
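Putting both fixes together, a minimal sketch reusing the question's find_marker, distance_to_camera and constants:

# calibrate once, outside the loop; the first frame must show the
# marker at KNOWN_DISTANCE for the calibration to be meaningful
(grabbed, frame) = cap.read()
marker = find_marker(frame)
focalLength = (marker[1][0] * KNOWN_DISTANCE) / KNOWN_WIDTH

while True:
    (grabbed, frame) = cap.read()
    # refresh the timestamp every iteration so the overlay updates
    timestamp = datetime.datetime.now()
    marker = find_marker(frame)
    # focalLength is now fixed, so the distance varies with the
    # marker's apparent width in pixels
    inches = distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])
    # ... draw the box, overlay the text and show the frame as before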

Ball speed measuring with OpenCV in Python

I need to measure the speed of a ping pong ball when it is shot out of a ping pong ball machine.
We decided to measure its speed with video motion tracking.
With Python and OpenCV we got to the point that we could track the ball.
The next step is to measure its speed. But we have no clue how to do it.
# import the necessary packages
from collections import deque
import numpy as np
import argparse
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
    help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "green"
# ball in the HSV color space, then initialize the
# list of tracked points
greenLower = (51, 60, 60)
greenUpper = (64, 255, 255)
pts = deque(maxlen=args["buffer"])

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    camera = cv2.VideoCapture(0)
# otherwise, grab a reference to the video file
else:
    camera = cv2.VideoCapture(args["video"])

# keep looping
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()

    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if args.get("video") and not grabbed:
        break

    # blur the frame and convert it to the HSV color space
    # (note: the original blurred the frame but then converted the
    # unblurred frame; fixed here to use the blurred one)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

    # construct a mask for the color "green", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None

    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius),
                (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)

    # show the frame to our screen
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
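A common starting point for the speed measurement (a sketch under stated assumptions, not from the original thread): take the displacement of the tracked centroid between consecutive frames, convert pixels to meters with a calibrated scale, and divide by the elapsed time. PIXELS_PER_METER below is a placeholder you would calibrate against a known length visible in the frame.

import time

PIXELS_PER_METER = 1000.0  # calibration assumption; measure it for your setup
prevCenter = None
prevTime = None

def estimate_speed(center):
    # return speed in m/s from consecutive centroids, or None on the first call
    global prevCenter, prevTime
    now = time.time()
    speed = None
    if prevCenter is not None and center is not None:
        dx = center[0] - prevCenter[0]
        dy = center[1] - prevCenter[1]
        pixels = (dx ** 2 + dy ** 2) ** 0.5  # displacement in pixels
        speed = (pixels / PIXELS_PER_METER) / (now - prevTime)
    prevCenter, prevTime = center, now
    return speed

# inside the loop, after center is computed:
# v = estimate_speed(center)
# if v is not None:
#     print("speed: {:.2f} m/s".format(v))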
