OpenCV, not sure what pts output means - Python

I am a newbie, using the pyimagesearch ball tracking code with Python 2.7 and OpenCV.
https://www.pyimagesearch.com/2015/09/14/ball-tracking-with-opencv/
I am trying to write the x,y coordinates of a tracked object into a .csv file. I am converting pts to a string and then writing it to a .csv file. I get a set of numbers like this: (255 386) (266 399). Are these x,y coordinates? And if so, what do they mean in relation to the image?
# import the necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time
import csv

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
    help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "green"
# ball in the HSV color space, then initialize the
# list of tracked points
greenLower = (0, 0, 0)
greenUpper = (180, 255, 40)
pts = deque(maxlen=args["buffer"])

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    vs = VideoStream(src=0).start()
# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])

# allow the camera or video file to warm up
time.sleep(2.0)

# keep looping
while True:
    # grab the current frame
    frame = vs.read()
    # handle the frame from VideoCapture or VideoStream
    frame = frame[1] if args.get("video", False) else frame
    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if frame is None:
        break
    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    # construct a mask for the color "green", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    center = None
    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 225), -1)
    # update the points queue
    pts.appendleft(center)
    # loop over the set of tracked points
    for i in range(1, len(pts)):
        # if either of the tracked points are None, ignore
        # them
        if pts[i - 1] is None or pts[i] is None:
            continue
        # otherwise, compute the thickness of the line and
        # draw the connecting lines
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 225), thickness)
    # write info to file (note: mode "w+" truncates the file,
    # so this overwrites foo11.csv on every frame)
    f = open("foo11.csv", "w+")
    s = str(pts)
    f.write(s)
    f.close()
    # show the frame to our screen
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# if we are not using a video file, stop the camera video stream
if not args.get("video", False):
    vs.stop()
# otherwise, release the camera
else:
    vs.release()

# close all windows
cv2.destroyAllWindows()

Consider the following grid to be an image:
This image has a shape of (7, 7): 7 pixels in height (along y) and 7 pixels in width (along x). It is therefore said to have 49 pixels, which is the size of the image.
The origin (0, 0) is at the top-left corner; this is the top-leftmost pixel of the image.
Now, as the centroid of the contour (the ball) moves, it is present in one of these 49 pixels.
As a result, the .csv file is storing these pixel coordinates as (x, y) tuples.
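If the goal is a log of one coordinate pair per frame rather than a snapshot of the whole deque, a csv.writer opened once before the loop is the usual approach. A minimal sketch under that assumption, where center is the centroid tuple computed in the loop above (the filename is arbitrary):

import csv

# open once, before the while loop, so rows accumulate instead of
# being overwritten on every frame
f = open("coordinates.csv", "w")
writer = csv.writer(f)
writer.writerow(["x", "y"])

# inside the while loop, after center has been computed:
if center is not None:
    writer.writerow(center)  # one (x, y) row per frame

# after the loop ends:
f.close()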

Related

How to determine which rectangle a point is in?

Sorry if the title is unclear. Basically, I've written a program that tracks an object of a certain color as it moves around my webcam's FOV. As I move the object around, the computer places a red dot on the center of the object and moves the dot with the object. However, the object's location doesn't really mean anything yet. I want the frame to be divided into four equal parts, with each part outputting a different number. For example, if the object (dot) is in quadrant one, I want the number 1 to appear on the frame. How would I do this? Can anyone nudge me in the right direction? I'm using OpenCV-Python and am grateful for any help.
Here is the code I have so far.
# import the necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=32,
    help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "orange"
# fish in the HSV color space
orangeLower = (5, 50, 50)
orangeUpper = (15, 255, 255)

# initialize the list of tracked points, the frame counter,
# and the coordinate deltas
pts = deque(maxlen=args["buffer"])
counter = 0
(dX, dY) = (0, 0)
direction = ""

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    vs = VideoStream(src=0).start()
# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])

# allow the camera or video file to warm up
time.sleep(2.0)

# keep looping
while True:
    # grab the current frame
    frame = vs.read()
    # handle the frame from VideoCapture or VideoStream
    frame = frame[1] if args.get("video", False) else frame
    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if frame is None:
        break
    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    # construct a mask for the color "orange", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, orangeLower, orangeUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None
    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius),
                (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
            pts.appendleft(center)
    # loop over the set of tracked points
    for i in np.arange(1, len(pts)):
        # if either of the tracked points are None, ignore
        # them
        if pts[i - 1] is None or pts[i] is None:
            continue
        # check to see if enough points have been accumulated in
        # the buffer
        if counter >= 10 and i == 10 and pts[i - 10] is not None:
            # compute the difference between the x and y
            # coordinates and re-initialize the direction
            # text variables
            dX = pts[i - 10][0] - pts[i][0]
            dY = pts[i - 10][1] - pts[i][1]
            (dirX, dirY) = ("", "")
            # ensure there is significant movement in the
            # x-direction
            if np.abs(dX) > 20:
                dirX = "East" if np.sign(dX) == 1 else "West"
            # ensure there is significant movement in the
            # y-direction
            if np.abs(dY) > 20:
                dirY = "South" if np.sign(dY) == 1 else "North"
            # handle when both directions are non-empty
            if dirX != "" and dirY != "":
                direction = "{}-{}".format(dirY, dirX)
            # otherwise, only one direction is non-empty
            else:
                direction = dirX if dirX != "" else dirY
        # otherwise, compute the thickness of the line and
        # draw the connecting lines
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)
    # show the movement deltas and the direction of movement on
    # the frame
    cv2.putText(frame, direction, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
        0.65, (0, 0, 255), 3)
    cv2.putText(frame, "dx: {}, dy: {}".format(dX, dY),
        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
        0.35, (0, 0, 255), 1)
    # show the frame to the screen and increment the frame counter
    cv2.imshow("Frame", frame)
    cv2.rectangle(img=frame, pt1=(0, 0), pt2=(300, 225), color=(0, 0, 0), thickness=3, lineType=8, shift=0)
    cv2.rectangle(img=frame, pt1=(300, 1), pt2=(600, 225), color=(0, 0, 0), thickness=3, lineType=8, shift=0)
    cv2.rectangle(img=frame, pt1=(0, 225), pt2=(300, 550), color=(0, 0, 0), thickness=3, lineType=8, shift=0)
    cv2.rectangle(img=frame, pt1=(300, 225), pt2=(600, 550), color=(0, 0, 0), thickness=3, lineType=8, shift=0)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    counter += 1
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# if we are not using a video file, stop the camera video stream
if not args.get("video", False):
    vs.stop()
# otherwise, release the camera
else:
    vs.release()

# close all windows
cv2.destroyAllWindows()
Here is an image of the frame I get when I run the code.
As you can see, there are lines dividing the image in fourths. These rectangles are where I want the outputs to be.
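Since the frame is resized to a width of 600 (a 4:3 webcam frame then comes out around 600x450), the quadrant of a point reduces to two comparisons against the frame midlines. A minimal sketch under that assumption, where center is the tracked (x, y) tuple from the code above:

def quadrant_of(point, frame_w, frame_h):
    # 1 = top-left, 2 = top-right, 3 = bottom-left, 4 = bottom-right
    x, y = point
    mid_x, mid_y = frame_w // 2, frame_h // 2
    if y < mid_y:
        return 1 if x < mid_x else 2
    return 3 if x < mid_x else 4

# inside the tracking loop, after center has been computed:
if center is not None:
    q = quadrant_of(center, frame.shape[1], frame.shape[0])
    cv2.putText(frame, str(q), (10, 60),
        cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)

Note that frame.shape gives (height, width, channels), so the width is shape[1] and the height is shape[0]; using the real shape also keeps the divider rectangles inside the frame.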

Reading from Kinect Camera rather than the default webcam to detect objects in the frame

I have found two sample codes that accomplish separate tasks that I am trying to get to work in tandem.
The first code opens the webcam on my laptop and reads the video stream to detect a certain colored object within the frame. It then outlines the circular object and draws a colored trail of its previous locations as it moves in real time.
The only issue is that I am trying to use an Xbox 360 Kinect as the webcam rather than the built-in webcam on my laptop. (In the future I plan on using the depth camera as well, which is why I would like to use the Kinect camera.)
The second code shows how to open and view the Kinect camera's video stream.
I have found that src=0 in VideoStream(src=0).start() selects the default camera. If I were to change that value to 1, 2, 3, or whatever, it should read the next available camera. However, when I print all available cameras, only the webcam is listed.
I have deleted, reinstalled, and installed all the correct drivers and packages that should be needed for this to work by just putting a 1 in that line of code, but have had no luck. There must be a different approach that will solve this issue.
---------The First Code------------------------------------------------
# import the necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
    help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "green"
# ball in the HSV color space, then initialize the
# list of tracked points
greenLower = (53, 36, 124)
greenUpper = (200, 200, 242)
pts = deque(maxlen=args["buffer"])

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):  # if no video file was given
    vs = VideoStream(src=0).start()  # access the webcam here
# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])

# allow the camera or video file to warm up
time.sleep(2.0)

# keep looping
while True:
    # grab the current frame
    frame = vs.read()
    # handle the frame from VideoCapture or VideoStream
    frame = frame[1] if args.get("video", False) else frame
    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if frame is None:
        break
    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    # construct a mask for the color "green", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None
    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius),
                (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
    # update the points queue
    pts.appendleft(center)
    # loop over the set of tracked points
    for i in range(1, len(pts)):
        # if either of the tracked points are None, ignore
        # them
        if pts[i - 1] is None or pts[i] is None:
            continue
        # otherwise, compute the thickness of the line and
        # draw the connecting lines
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)
    # show the frame to our screen
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        cv2.destroyAllWindows()
        break

# close all windows
cv2.destroyAllWindows()
#----------End of the First code-----------------------------------
#------------Second Code-------------------------------------------
from pykinect import nui
import numpy
import cv2

def video_handler_function(frame):
    video = numpy.empty((480, 640, 4), numpy.uint8)
    frame.image.copy_bits(video.ctypes.data)
    cv2.imshow('KINECT Video Stream', video)

kinect = nui.Runtime()
kinect.video_frame_ready += video_handler_function
kinect.video_stream.open(nui.ImageStreamType.Video, 2,
    nui.ImageResolution.Resolution640x480, nui.ImageType.Color)
cv2.namedWindow('KINECT Video Stream', cv2.WINDOW_AUTOSIZE)

while True:
    key = cv2.waitKey(1)
    if key == 27:
        break

kinect.close()
cv2.destroyAllWindows()
#----------end of the second code---------------------------------
When I change the value to 1, which is the port the Kinect is connected to, it should open the video stream and give the same results as the first code, but the Python app just closes.
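Since VideoStream/VideoCapture never enumerates the Kinect here, one workaround is to skip them entirely: let the pykinect callback stash the newest frame in a module-level variable and run the tracking loop on that instead of vs.read(). A rough sketch, assuming pykinect delivers 640x480 BGRA frames as in the second code:

from pykinect import nui
import numpy
import cv2

latest_frame = None

def video_handler_function(frame):
    # copy the Kinect's BGRA buffer into a numpy array and keep only
    # the BGR channels so the OpenCV tracking code can use it directly
    global latest_frame
    video = numpy.empty((480, 640, 4), numpy.uint8)
    frame.image.copy_bits(video.ctypes.data)
    latest_frame = video[:, :, :3].copy()

kinect = nui.Runtime()
kinect.video_frame_ready += video_handler_function
kinect.video_stream.open(nui.ImageStreamType.Video, 2,
    nui.ImageResolution.Resolution640x480, nui.ImageType.Color)

# the loop from the first code would then start from latest_frame
# instead of vs.read()
while True:
    if latest_frame is None:
        continue
    frame = latest_frame
    # ... resize / mask / contour / drawing steps as in the first code ...
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

kinect.close()
cv2.destroyAllWindows()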

How can I make my motion detector in OpenCV less sensitive to lighting conditions?

I tried to make a motion detector, but as I found, it is very sensitive to lighting conditions. I would like to detect whether there was a change on a dart board (so when somebody throws a dart, detect it).
This is how I tried it:
"""
Vecsei Gabor
"""
import cv2
#Initialize the first frame in the video stream
prevFrame = None
#Area of the detected contour, below this value it's not counted as detected
dontCare = 500
#Capture from webcam
cap = cv2.VideoCapture(0)
#Limit the FPS to 10 (For this task the lower the better)
cap.set(cv2.cv.CV_CAP_PROP_FPS, 15)
#counter for the detection
i = 0
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
#Blur for better results
output = cv2.GaussianBlur(frame, (21, 21), 0)
#If the first frame is None, initialize it
if prevFrame is None:
prevFrame = output
continue
#Compute the absolute difference between the current frame and
#First frame
frameDelta = cv2.absdiff(prevFrame, output)
#Convert to gray to detect contours
frameDelta = cv2.cvtColor(frameDelta, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(frameDelta, 21, 255, cv2.THRESH_BINARY)[1]
#Dilate the thresholded image to fill in holes, then find contours
#on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
cnts, hier = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts_sorted = sorted(cnts, key = cv2.contourArea, reverse = True)[:1]
#Loop over the contours
for c in cnts_sorted:
#If the contour is too small, ignore it
if cv2.contourArea(c) < dontCare:
continue
# compute the bounding box for the contour, draw it on the frame,
# and update the text
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
i+=1
print "Detected something!" + str(i)
print "Area: " + str(cv2.contourArea(c))
prevFrame = output
cv2.imshow('Webcam ',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
Basically I just watch for the differences between two frames, and if there was a change on the frame, I save it as our new reference image, so we can detect new changes.
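One common way to make frame differencing less sensitive to lighting is to difference against a slowly updated running average of the scene (and to work in grayscale, which discards chroma noise), so gradual brightness changes are absorbed into the background model while a newly arrived dart still stands out. A minimal sketch of that idea using cv2.accumulateWeighted; the alpha of 0.05 is an assumption to tune:

import cv2

cap = cv2.VideoCapture(0)
avg = None  # running-average background model

while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if avg is None:
        avg = gray.astype("float")
        continue
    # blend the current frame into the background model; a small
    # alpha makes slow lighting drift disappear into the average
    cv2.accumulateWeighted(gray, avg, 0.05)
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    cv2.imshow("Delta", thresh)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

The contour-finding and dontCare-area filtering from the code above would then run on thresh exactly as before.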

Draw an arrow instead of the rectangle in the camshift algorithm with Python

I want to measure the distance of the tracked object from the centre of the image. I'm not really good with Python, and I don't know how to get the x, y, w, h out of the roiBox object.
As you can see in the following code, the algorithm prints the roiBox output: the x, y of the top-left corner and the w, h of the rectangle.
I need to:
Get those numbers as ints
Calculate the centre of the rectangle
Then draw an arrow from the centre of the rectangle to the centre of the frame.
The code:
# USAGE
# python track.py --video video/sample.mov

# import the necessary packages
import numpy as np
import argparse
import cv2

# initialize the current frame of the video, along with the list of
# ROI points, along with whether or not this is input mode
frame = None
roiPts = []
inputMode = False

def select_ROI(event, x, y, flags, param):
    # grab the reference to the current frame, the list of ROI points,
    # and whether or not it is ROI selection mode
    global frame, roiPts, inputMode
    # if we are in ROI selection mode, the mouse was clicked, and we do
    # not already have four points, then update the list of ROI points
    # with the (x, y) location of the click and draw the circle
    if inputMode and event == cv2.EVENT_LBUTTONDOWN and len(roiPts) < 4:
        roiPts.append((x, y))
        cv2.circle(frame, (x, y), 4, (0, 255, 0), 2)
        cv2.imshow("frame", frame)

def determine_ROI_for_first_time():
    global inputMode, roiBox, roiPts, roiHist
    # indicate that we are in input mode and clone the frame
    inputMode = True
    orig = frame.copy()
    # keep looping until 4 reference ROI points have been selected;
    # press any key to exit ROI selection mode once 4 points have
    # been selected
    while len(roiPts) < 4:
        cv2.imshow("frame", frame)
        cv2.waitKey(0)
    # determine the top-left and bottom-right points
    roiPts = np.array(roiPts)
    s = roiPts.sum(axis=1)
    tl = roiPts[np.argmin(s)]
    br = roiPts[np.argmax(s)]
    # grab the ROI for the bounding box and convert it to the HSV
    # color space
    roi = orig[tl[1]:br[1], tl[0]:br[0]]
    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    #roi = cv2.cvtColor(roi, cv2.COLOR_BGR2LAB)
    # compute a HSV histogram for the ROI and store the bounding box
    roiHist = cv2.calcHist([roi], [0], None, [16], [0, 180])
    roiHist = cv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX)
    return (tl[0], tl[1], br[0], br[1])

def do_camshift():
    global frame, roiBox
    # convert the current frame to the HSV color space and perform
    # mean shift
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    backProj = cv2.calcBackProject([hsv], [0], roiHist, [0, 180], 1)
    # apply cam shift to the back projection, convert the points to a
    # bounding box, and then draw them
    (r, roiBox) = cv2.CamShift(backProj, roiBox, termination)
    pts = np.int0(cv2.boxPoints(r))
    cv2.polylines(frame, [pts], True, (0, 255, 0), 2)

def main():
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--video",
        help="path to the (optional) video file")
    args = vars(ap.parse_args())
    # grab the reference to the current frame, the list of ROI points,
    # and whether or not it is ROI selection mode
    global frame, roiPts, inputMode, roiBox, termination
    # if the video path was not supplied, grab the reference to the camera
    if not args.get("video", False):
        camera = cv2.VideoCapture(0)
        # camera = cv2.VideoCapture("/home/idan/Desktop/b.mp4")
    # otherwise, load the video
    else:
        camera = cv2.VideoCapture(args["video"])
    # setup the mouse callback
    cv2.namedWindow("frame")
    cv2.setMouseCallback("frame", select_ROI)
    # initialize the termination criteria for cam shift, indicating a
    # maximum of ten iterations or movement by at least one pixel, along
    # with the bounding box of the ROI
    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    roiBox = None
    # keep looping over the frames
    while True:
        # grab the current frame
        (grabbed, frame) = camera.read()
        # check to see if we have reached the end of the video
        if not grabbed:
            break
        # if the 'i' key is pressed, go into ROI selection mode
        key = cv2.waitKey(1) & 0xFF
        if key == ord("i") and len(roiPts) < 4:
            roiBox = determine_ROI_for_first_time()
        # check to see if the ROI has been computed
        print roiBox
        if roiBox is not None:
            do_camshift()
        # show the frame and record if the user presses a key
        cv2.imshow("frame", frame)
        # wait, if the 'q' key is pressed, stop the loop
        if key == ord("q"):
            break
    # cleanup the camera and close any open windows
    camera.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
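Assuming roiBox holds the (x, y, w, h) track window that cv2.CamShift returns, unpacking it and drawing the arrow takes only a few lines. A rough sketch (cv2.arrowedLine needs OpenCV 3+; on older builds cv2.line works the same way minus the arrowhead):

def draw_arrow_to_centre(frame, roiBox):
    # unpack the track window as ints and find the centre of the box
    (x, y, w, h) = [int(v) for v in roiBox]
    box_centre = (x + w // 2, y + h // 2)
    # frame.shape is (height, width, channels)
    frame_centre = (frame.shape[1] // 2, frame.shape[0] // 2)
    cv2.arrowedLine(frame, box_centre, frame_centre, (255, 0, 0), 2)
    # the requested distance is just the arrow's length
    dx = frame_centre[0] - box_centre[0]
    dy = frame_centre[1] - box_centre[1]
    return (dx ** 2 + dy ** 2) ** 0.5

This could be called from the main loop right after do_camshift(), e.g. dist = draw_arrow_to_centre(frame, roiBox), before the frame is shown.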

Ball speed measuring with OpenCV in Python

I need to measure the ball speed of a ping pong ball when it is shot out of a ping pong ball shoot machine.
We decided to measure its speed with video motion tracking.
With Python and OpenCV we got to the point that we could track the ball.
The next step is to measure its speed. But we have no clue how to do it.
# import the necessary packages
from collections import deque
import numpy as np
import argparse
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
    help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "green"
# ball in the HSV color space, then initialize the
# list of tracked points
greenLower = (51, 60, 60)
greenUpper = (64, 255, 255)
pts = deque(maxlen=args["buffer"])

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    camera = cv2.VideoCapture(0)
# otherwise, grab a reference to the video file
else:
    camera = cv2.VideoCapture(args["video"])

# keep looping
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()
    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if args.get("video") and not grabbed:
        break
    # blur the frame and convert it to the HSV color space
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    # construct a mask for the color "green", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None
    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius),
                (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
    # show the frame to our screen
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
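A common way to get speed from this tracker: the centroid moves some number of pixels between consecutive frames, and pixels-per-frame times the frame rate is pixels-per-second; a known real-world length in the scene then converts that to metres. The ball itself is a convenient reference, since a ping pong ball is 40 mm across and its pixel diameter is already available as 2 * radius. A rough sketch of the per-frame calculation (the CAP_PROP_FPS name assumes OpenCV 3+, and the fallback frame rate is a guess):

import math

# before the while loop:
fps = camera.get(cv2.CAP_PROP_FPS) or 30.0  # get() returns 0.0 when unknown
BALL_DIAMETER_M = 0.04                      # a ping pong ball is 40 mm
prevCenter = None

# inside the while loop, after center and radius are computed:
if center is not None and prevCenter is not None:
    # pixel displacement of the centroid between consecutive frames
    dpx = math.hypot(center[0] - prevCenter[0], center[1] - prevCenter[1])
    # metres per pixel, from the ball's known size vs. its pixel diameter
    m_per_px = BALL_DIAMETER_M / (2 * radius)
    speed = dpx * m_per_px * fps            # metres per second
    print("speed: {:.2f} m/s".format(speed))
prevCenter = center

For a ball shot out of a machine, averaging this estimate over several frame pairs (or using the video file's known frame rate) will be far more stable than a single pair of frames.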
