OpenCV (Python) Drawing on Videos, Tracking

# libraries that will be needed
import numpy as np  # numpy
import cv2  # opencv
import imutils  # allows video editing
import random
from imutils.object_detection import non_max_suppression
from imutils import paths
#default HOG
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
# function to track people
def tracker(cap):
    while True:
        ret, img = cap.read()
        # if the video stopped playing, quit
        if ret == False:
            break
        # resize frame
        img = imutils.resize(img, width = 400)
        # convert to grayscale and equalize
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        # detect people
        rects, weights = hog.detectMultiScale(gray, winStride = (8, 8), padding = (8, 8), scale = 1.25)
        # store detected people in an array
        rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
        # suppress overlapping rectangles to avoid detecting
        # the same person several times
        biggest = non_max_suppression(rects, probs = None, overlapThresh = 0.65)
        # draw the remaining rectangles
        for (xA, yA, xB, yB) in biggest:
            # create a random color
            color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
            cv2.rectangle(img, (xA, yA), (xB, yB), color, 2)
        # show image
        cv2.imshow('Image', img)
        k = cv2.waitKey(30) & 0xFF
        if k == 27:
            break
# run video
cap = cv2.VideoCapture('NYC.mp4')
tracker(cap)
# release the capture and destroy windows
cap.release()
cv2.destroyAllWindows()
I am attempting to track multiple people at once using OpenCV. Once a person is detected, I draw a rectangle around them. I am having trouble giving each person a random/different colored box while keeping the same colored box for that person after they have been detected.
Currently, a person is detected and a box is drawn. In the next frame, if they are still detected, a box in a new color is drawn, but I would like to keep the original color.
I am also open to tips/tricks that will improve my code and tracking, since I am very new to this.

You need a way to say, with good confidence, that a detection in the next frame is the same person if you want to draw the same colored box for them. There are many ways to do it.
One way is to use tracking. Try CamShift/meanshift tracking (of course there are many tracking algorithms, and I can't be sure which one suits you best since I don't know your dataset).
Once a person is detected, initialise a tracker and assign a box color to that person. If the person leaves the image, have a way to remove them from the list.
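For example, here is a minimal sketch of that idea using simple centroid matching instead of a full tracker (the names PersonTrack and update_tracks, and the distance/miss thresholds, are my own illustration, not from the question):
import random
import numpy as np

class PersonTrack:
    """One tracked person: their latest box and a color that never changes."""
    def __init__(self, box):
        self.box = box                      # (xA, yA, xB, yB)
        self.color = (random.randint(0, 255),
                      random.randint(0, 255),
                      random.randint(0, 255))
        self.missed = 0                     # frames since the last match

def centroid(box):
    xA, yA, xB, yB = box
    return np.array([(xA + xB) / 2.0, (yA + yB) / 2.0])

def update_tracks(tracks, detections, max_dist=60.0, max_missed=10):
    """Greedily match each existing track to the nearest new detection."""
    unmatched = [tuple(d) for d in detections]
    for track in tracks:
        if not unmatched:
            track.missed += 1
            continue
        dists = [np.linalg.norm(centroid(track.box) - centroid(d)) for d in unmatched]
        i = int(np.argmin(dists))
        if dists[i] < max_dist:
            track.box = unmatched.pop(i)
            track.missed = 0
        else:
            track.missed += 1
    # unmatched detections become new people with new colors
    tracks.extend(PersonTrack(d) for d in unmatched)
    # drop people who have been gone for too long
    return [t for t in tracks if t.missed <= max_missed]
In your loop you would keep a tracks = [] list outside the while loop, call tracks = update_tracks(tracks, biggest) after the non-max suppression, and then draw each track.box with track.color instead of generating a new random color every frame.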
Hope this helps :)

Your if ret == False: is not indented correctly to match the while True: loop. Current Code:
def tracker(cap):
    while True:
        ret, img = cap.read()
    # if the video stopped playing, quit
    if ret == False:
        break
    # resize frame
    img = imutils.resize(img, width = 400)
    # convert to grayscale and equalize
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)
Fixed indentation:
def tracker(cap):
    while True:
        ret, img = cap.read()
        # if the video stopped playing, quit
        if ret == False:
            break
        # resize frame
        img = imutils.resize(img, width = 400)
        # convert to grayscale and equalize
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)

Related

Capture what is in a rectangle of a numpy array

This might sound obvious to some of you but I was trying to figure out how to capture only what is in a rectangle.
The code below uses dlib face detection and draws a bounding box around a detected face.
cv2.rectangle(img,(det.left(), det.top()), (det.right(), det.bottom()), color_green, line_width)
What I am struggling with is how to capture/view only what is within the rectangle.
So in this example I only want to see what is in (det.left(), det.top()), (det.right(), det.bottom())
import sys
import dlib
import cv2

detector = dlib.get_frontal_face_detector()
cam = cv2.VideoCapture(1)
color_green = (0, 255, 0)
line_width = 3
while True:
    ret_val, img = cam.read()
    rgb_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    dets = detector(rgb_image)
    for det in dets:
        cv2.rectangle(img, (det.left(), det.top()), (det.right(), det.bottom()), color_green, line_width)
    cv2.imshow('my webcam', img)
    if cv2.waitKey(1) == 27:
        break  # esc to quit
cv2.destroyAllWindows()
You can try to implement cropping with numpy slicing.
For a rectangle drawn by:
cv2.rectangle(img,(left,top), (right,bottom), color_green, line_width)
You can try cropping as:
new_img = img[top:bottom,left:right,:]
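One caveat worth adding (my note, not part of the original answer): face boxes near the frame border can extend past the image, so you may want to clamp the coordinates before slicing, roughly like this:
# clamp the detection box to the image bounds before cropping (sketch)
h, w = img.shape[:2]
top, bottom = max(0, det.top()), min(h, det.bottom())
left, right = max(0, det.left()), min(w, det.right())
new_img = img[top:bottom, left:right]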
To show only the parts that are within the detected boxes, you'd want to either black out what's outside the dets (which I find almost impossible) or simply draw the desired parts onto a black canvas instead.
This is why I start with a black canvas here:
import sys
import dlib
import cv2
import numpy as np

detector = dlib.get_frontal_face_detector()
cam = cv2.VideoCapture(1)
color_green = (0, 255, 0)
line_width = 3
while True:
    ret_val, img = cam.read()
    # get image dims to create a black canvas of the same size
    # img shape is (rows, cols, c) or (h, w, c)
    img_h, img_w, _ = img.shape  # c = 3 assuming it's a color image
    # this will act as our black background / canvas
    black_tmp = np.zeros((img_h, img_w, 3), dtype=np.uint8)
    rgb_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    dets = detector(rgb_image)
    for det in dets:
        # det x, y -> left, top | x2, y2 -> right, bottom
        x, y, x2, y2 = det.left(), det.top(), det.right(), det.bottom()
        # copy the pixels in the desired area onto the black background
        black_tmp[y:y2, x:x2, :] = img[y:y2, x:x2, :]
        # maybe draw the rectangle on the new image too
        cv2.rectangle(black_tmp, (det.left(), det.top()), (det.right(), det.bottom()), color_green, line_width)
    cv2.imshow('my webcam', black_tmp)
    if cv2.waitKey(1) == 27:
        break  # esc to quit
cv2.destroyAllWindows()
I've tried to only minimally edit your code
Here are the np.zeros docs in case you need them.
I hope this helps; if I missed any detail you need, please feel free to point it out.

Set background color outside ROI in OpenCV

I successfully displayed the video feed and am trying to change the background color of the area outside the ROI from black to blue, but the screen still shows a black background. Please help me solve the problem. Any help would be greatly appreciated.
Original code
import numpy as np
from cv2 import cv2
'''
ML object detection algo (haarcascade) used to identify objects.
The XML file consists of trained Haar Cascade models.
'''
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# initialize video from the webcam
video = cv2.VideoCapture(1)
while True:
    # ret tells if the camera works properly. Frame is an actual frame from the video feed
    ret, frame = video.read()
    # print(cv2.VideoCapture(0).isOpened())
    # make sure the port is working and read the image
    if frame is not None and video.isOpened():
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Detect the faces within the subregions of the image in scales
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=6)
        # Draw the rectangle around each face
        for (x, y, w, h) in faces:
            # Use the coordinates to find the center of the face and from that point draw a rectangle of radius w/2 or h/2.
            center_coordinates = x + w // 2, y + h // 2
            radius = w // 2  # or can be h / 2 or can be anything based on your requirements
            # background color (black)
            mask = np.zeros(frame.shape[:2], dtype="uint8")
            # Draw the desired region to crop out in white
            cv2.circle(mask, center_coordinates, radius, (255, 255, 255), -1)
            masked = cv2.bitwise_and(frame, frame, mask=mask)
            cv2.imshow('mask applied', masked)
    if cv2.waitKey(30) & 0xff == 27:
        break
video.release()
cv2.destroyAllWindows()
The above code detects and displays the face in the circular mask on the black background. But as mentioned above, the background color outside the circular ROI should be blue.
I tried replacing mask=np.zeros(frame.shape[:2], dtype="uint8") with the code below and it fails. frame.shape[0:2] doesn't even include the channel dimension, and I can't figure out how to change the color in the first place.
mask=np.ones(frame.shape[0:2], dtype="uint8")
mask[:,:,0]=255
mask[:,:,1]=0
mask[:,:,2]=0
I also tried creating a circular masked image and then placing it on another image, only to find that it results in the same problem.
import numpy as np
from cv2 import cv2
'''
ML object detection algo (haarcascade) used to identify objects.
The XML file consists of trained Haar Cascade models.
'''
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# initialize video from the webcam
video = cv2.VideoCapture(1)
print(cv2.VideoCapture(1).isOpened())
while True:
    # ret tells if the camera works properly. Frame is an actual frame from the video feed
    ret, frame = video.read()
    # print(cv2.VideoCapture(0).isOpened())
    # make sure the port is working and read the image
    if frame is not None and video.isOpened():
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Detect the faces within the subregions of the image in scales
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=6)
        # Draw the rectangle around each face
        for (x, y, w, h) in faces:
            # Use the coordinates to find the center of the face and from that point draw a rectangle of radius w/2 or h/2.
            center_coordinates = x + w // 2, y + h // 2
            radius = w // 2  # or can be h / 2 or can be anything based on your requirements
            # background color (black)
            mask = np.zeros(frame.shape[:2], dtype="uint8")
            # create blue colored background
            color = np.full_like(frame, (255, 0, 0))
            # Draw the desired region to crop out in white
            roi = cv2.circle(mask, center_coordinates, radius, (255, 255, 255), -1)
            masked = cv2.bitwise_and(frame, frame, mask=mask)
            mask_blue = cv2.bitwise_and(color, color, mask=mask - roi)
            # combine the two masked images
            result = cv2.add(masked, mask_blue)
            cv2.imshow('result', result)
    if cv2.waitKey(30) & 0xff == 27:
        break
video.release()
cv2.destroyAllWindows()
I have changed your code as follows according to your requirement. Here I have added one extra line,
masked[np.where((masked==[0,0,0]).all(axis=2))]=[255,0,0]
which changes the pixel values of the black region to any specific color.
import numpy as np
import cv2
'''
ML object detection algo (haarcascade) used to identify objects.
The XML file consists of trained Haar Cascade models.
'''
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
# initialize video from the webcam
video = cv2.VideoCapture(0)
print(cv2.VideoCapture(0).isOpened())
while True:
    # ret tells if the camera works properly. Frame is an actual frame from the video feed
    ret, frame = video.read()
    # print(cv2.VideoCapture(0).isOpened())
    # make sure the port is working and read the image
    if frame is not None and video.isOpened():
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Detect the faces within the subregions of the image in scales
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=6)
        # Draw the rectangle around each face
        for (x, y, w, h) in faces:
            # Use the coordinates to find the center of the face and from that point draw a rectangle of radius w/2 or h/2.
            center_coordinates = x + w // 2, y + h // 2
            radius = w // 2  # or can be h / 2 or can be anything based on your requirements
            # background color (black)
            mask = np.zeros(frame.shape[:2], dtype="uint8")
            # Draw the desired region to crop out in white
            roi = cv2.circle(mask, center_coordinates, radius, (255, 255, 255), -1)
            masked = cv2.bitwise_and(frame, frame, mask=mask)
            masked[np.where((masked == [0, 0, 0]).all(axis=2))] = [255, 0, 0]
            cv2.imshow('result', masked)
    if cv2.waitKey(30) & 0xff == 27:
        break
video.release()
cv2.destroyAllWindows()
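A small variant you could also try (my suggestion, not part of the answer above): instead of matching on the color [0,0,0], which would also recolor genuinely black pixels inside the face circle, reuse the circle mask itself to select the outside region:
# color everything outside the circular mask blue (BGR), leaving the ROI untouched
masked = cv2.bitwise_and(frame, frame, mask=mask)
masked[mask == 0] = (255, 0, 0)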

Object tracking program does not show tracked points in the output image

I need to track an object in a video frame but to do so I have to initialize good features to track my object.
So, I have an .mp4 file; I read the first frame and retained its blue channel. I then created my binary mask by extracting my region of interest from the first frame; it was specified that my ROI lies within [300, 400] on the y-axis and [460, 550] on the x-axis (which is the front side of the bus).
I then initialized 10 corner points using cv2.goodFeaturesToTrack with a quality level of 0.01 and a minimum allowable distance of 10 pixels between corner points. I then tried to display these points on the RGB image, however I am not getting any points, and I do not know why.
This is shown here:
import numpy as np
import cv2
import matplotlib.pyplot as plt

vid = cv2.VideoCapture('Bus.mp4')
ret, frame = vid.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
blue_ch = frame[:, :, 2]
mask = blue_ch[300:400, 460:550]
prev_pts = cv2.goodFeaturesToTrack(image=mask, maxCorners=10, qualityLevel=0.01, minDistance=10, blockSize=10)
blue_ch = np.array(blue_ch)
for i in prev_pts:
    x, y = i.ravel()
    cv2.circle(blue_ch, (x, y), 3, 255, -1)
plt.imshow(blue_ch)
It creates small yellow dots in the top left corner. They are similar to the background color, so you may not see them.
When I draw on the RGB frame I get red dots, which you can see in the image.
If I convert blue_ch to RBG before drawing
blue_ch = cv2.cvtColor(blue_ch, cv2.COLOR_GRAY2RGB)
then I see them.
You get mask = blue_ch[300:400, 460:550], so when I add
y += 300
x += 460
then I get the dots in the correct place.
My code:
import numpy as np
import cv2
import matplotlib.pyplot as plt

vid = cv2.VideoCapture('Bus.mp4')
ret, frame = vid.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
blue_ch = frame[:, :, 2]
mask = blue_ch[300:400, 460:550]
prev_pts = cv2.goodFeaturesToTrack(image=mask, maxCorners=10, qualityLevel=0.01, minDistance=10, blockSize=10)
blue_ch = cv2.cvtColor(blue_ch, cv2.COLOR_GRAY2RGB)
#blue_ch = np.array(blue_ch)
for i in prev_pts:
    x, y = i.ravel()
    y += 300
    x += 460
    #print(x, y)
    cv2.circle(frame, (int(x), int(y)), 3, 255, -1)
    cv2.circle(blue_ch, (int(x), int(y)), 3, 255, -1)

# --- display with matplotlib ---
plt.imshow(frame)
plt.show()
plt.imshow(blue_ch)
plt.show()

# --- OR display with `cv2` ---
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
blue_ch = cv2.cvtColor(blue_ch, cv2.COLOR_BGR2RGB)
cv2.imshow('frame', frame)
cv2.imshow('blue_ch', blue_ch)
print("Press ESC to exit")
try:
    while cv2.waitKey(1) != 27:  # ESC
        pass
except:
    cv2.destroyAllWindows()
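Another option (my addition, not part of the answer above) is to skip the crop entirely and pass a full-size mask to cv2.goodFeaturesToTrack, so the returned corners are already in full-image coordinates and no offset correction is needed; a sketch, assuming blue_ch is still the single-channel image:
# restrict corner detection to the ROI with a mask instead of cropping
roi_mask = np.zeros(blue_ch.shape[:2], dtype=np.uint8)
roi_mask[300:400, 460:550] = 255
prev_pts = cv2.goodFeaturesToTrack(blue_ch, maxCorners=10, qualityLevel=0.01,
                                   minDistance=10, blockSize=10, mask=roi_mask)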

Reading from Kinect Camera rather than the default webcam to detect objects in the frame

I have found two sample codes that accomplish separate tasks that I am trying to get to work in tandem.
The first code opens my laptop's webcam and reads the video stream to detect a certain colored object within the frame. It then draws an outline of the circular object and a colored trail of its previous locations as it moves in real time.
The only issue is that I am trying to use an Xbox 360 Kinect as the webcam rather than the built-in webcam on my laptop. (In the future I plan on using the depth camera as well, which is why I would like to use the Kinect camera.)
The second code shows how to open and view the Kinect camera's video stream.
I have found that setting src=0 in VideoStream(src=0).start() selects the default camera. If I change that value to, say, 1, 2, 3, or whatever, it should read the next available camera. However, when I print all available cameras, only the webcam is listed.
I have deleted, reinstalled, and installed all the correct drivers and packages that I should need for this to work by just putting a 1 in that line of code, but have had no luck. There must be a different approach that will solve this issue.
---------The First Code------------------------------------------------
# import the necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
    help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "green"
# ball in the HSV color space, then initialize the
# list of tracked points
greenLower = (53, 36, 124)
greenUpper = (200, 200, 242)
pts = deque(maxlen=args["buffer"])

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):  # if no video file was given
    vs = VideoStream(src=0).start()  # access the webcam here
# otherwise, grab a reference to the video file
else:
    vs = cv2.VideoCapture(args["video"])

# allow the camera or video file to warm up
time.sleep(2.0)

# keep looping
while True:
    # grab the current frame
    frame = vs.read()
    # handle the frame from VideoCapture or VideoStream
    frame = frame[1] if args.get("video", False) else frame
    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if frame is None:
        break
    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    # construct a mask for the color "green", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None
    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius),
                (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
    # update the points queue
    pts.appendleft(center)
    # loop over the set of tracked points
    for i in range(1, len(pts)):
        # if either of the tracked points are None, ignore
        # them
        if pts[i - 1] is None or pts[i] is None:
            continue
        # otherwise, compute the thickness of the line and
        # draw the connecting lines
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)
    # show the frame to our screen
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        cv2.destroyAllWindows()
        break

# close all windows
cv2.destroyAllWindows()
#----------End of the First code-----------------------------------
#------------Second Code-------------------------------------------
from pykinect import nui
import numpy
import cv2

def video_handler_function(frame):
    video = numpy.empty((480, 640, 4), numpy.uint8)
    frame.image.copy_bits(video.ctypes.data)
    cv2.imshow('KINECT Video Stream', video)

kinect = nui.Runtime()
kinect.video_frame_ready += video_handler_function
kinect.video_stream.open(nui.ImageStreamType.Video, 2, nui.ImageResolution.Resolution640x480, nui.ImageType.Color)
cv2.namedWindow('KINECT Video Stream', cv2.WINDOW_AUTOSIZE)

while True:
    key = cv2.waitKey(1)
    if key == 27:
        break

kinect.close()
cv2.destroyAllWindows()
#----------end of the second code---------------------------------
When I change the value to 1, which is the port the Kinect is connected to, it should open the video stream and give the same results as the first code, but it just closes the Python app.
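One possible bridge (a sketch only, built from the pykinect API already shown in the second code, with a hypothetical process() placeholder standing in for the first code's tracking logic): convert the 32-bit BGRA buffer the Kinect handler fills into a BGR frame and hand it to the same processing the first code applies to VideoStream frames:
import numpy as np
import cv2
from pykinect import nui

def process(bgr_frame):
    # stand-in for the ball-tracking logic from the first code
    cv2.imshow("Frame", bgr_frame)

def video_handler_function(frame):
    video = np.empty((480, 640, 4), np.uint8)
    frame.image.copy_bits(video.ctypes.data)
    bgr = cv2.cvtColor(video, cv2.COLOR_BGRA2BGR)  # drop the alpha channel
    process(bgr)

kinect = nui.Runtime()
kinect.video_frame_ready += video_handler_function
kinect.video_stream.open(nui.ImageStreamType.Video, 2,
                         nui.ImageResolution.Resolution640x480, nui.ImageType.Color)
while cv2.waitKey(1) != 27:  # ESC to quit
    pass
kinect.close()
cv2.destroyAllWindows()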

Draw an arrow instead of the rectangle at camshift algorithm with python

I want to measure the distance of the tracked object from the centre of the image. I'm not really good with Python and I don't know how to get the x, y, w, h out of the roiBox object.
As you can see in the following code, the algorithm prints the roiBox output: x, y of the top left corner and the w, h of the rectangle.
I need to:
Get those numbers as ints
Calculate the centre of the rectangle
Then draw an arrow from the centre of the rectangle to the centre of the frame.
The code:
# USAGE
# python track.py --video video/sample.mov

# import the necessary packages
import numpy as np
import argparse
import cv2

# initialize the current frame of the video, along with the list of ROI points along with whether or not this is input mode
frame = None
roiPts = []
inputMode = False

def select_ROI(event, x, y, flags, param):
    # grab the reference to the current frame, list of ROI points and whether or not it is ROI selection mode
    global frame, roiPts, inputMode
    # if we are in ROI selection mode, the mouse was clicked, and we do not already have four points, then update the list of ROI points with the (x, y) location of the click and draw the circle
    if inputMode and event == cv2.EVENT_LBUTTONDOWN and len(roiPts) < 4:
        roiPts.append((x, y))
        cv2.circle(frame, (x, y), 4, (0, 255, 0), 2)
        cv2.imshow("frame", frame)

def determine_ROI_for_first_time():
    global inputMode, roiBox, roiPts, roiHist
    # indicate that we are in input mode and clone the frame
    inputMode = True
    orig = frame.copy()
    # keep looping until 4 reference ROI points have been selected; press any key to exit ROI selection mode once 4 points have been selected
    while len(roiPts) < 4:
        cv2.imshow("frame", frame)
        cv2.waitKey(0)
    # determine the top-left and bottom-right points
    roiPts = np.array(roiPts)
    s = roiPts.sum(axis = 1)
    tl = roiPts[np.argmin(s)]
    br = roiPts[np.argmax(s)]
    # grab the ROI for the bounding box and convert it to the HSV color space
    roi = orig[tl[1]:br[1], tl[0]:br[0]]
    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    #roi = cv2.cvtColor(roi, cv2.COLOR_BGR2LAB)
    # compute a HSV histogram for the ROI and store the bounding box
    roiHist = cv2.calcHist([roi], [0], None, [16], [0, 180])
    roiHist = cv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX)
    return (tl[0], tl[1], br[0], br[1])

def do_camshift():
    global frame, roiBox
    # convert the current frame to the HSV color space and perform mean shift
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    backProj = cv2.calcBackProject([hsv], [0], roiHist, [0, 180], 1)
    # apply cam shift to the back projection, convert the points to a bounding box, and then draw them
    (r, roiBox) = cv2.CamShift(backProj, roiBox, termination)
    pts = np.int0(cv2.boxPoints(r))
    cv2.polylines(frame, [pts], True, (0, 255, 0), 2)

def main():
    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--video",
        help = "path to the (optional) video file")
    args = vars(ap.parse_args())
    # grab the reference to the current frame, list of ROI points and whether or not it is ROI selection mode
    global frame, roiPts, inputMode, roiBox, termination
    # if the video path was not supplied, grab the reference to the camera
    if not args.get("video", False):
        camera = cv2.VideoCapture(0)
        # camera = cv2.VideoCapture("/home/idan/Desktop/b.mp4")
    # otherwise, load the video
    else:
        camera = cv2.VideoCapture(args["video"])
    # setup the mouse callback
    cv2.namedWindow("frame")
    cv2.setMouseCallback("frame", select_ROI)
    # initialize the termination criteria for cam shift, indicating a maximum of ten iterations or movement by at least one pixel along with the bounding box of the ROI
    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    roiBox = None
    # keep looping over the frames
    while True:
        # grab the current frame
        (grabbed, frame) = camera.read()
        # check to see if we have reached the end of the video
        if not grabbed:
            break
        # handle if the 'i' key is pressed, then go into ROI selection mode
        key = cv2.waitKey(1) & 0xFF
        if key == ord("i") and len(roiPts) < 4:
            roiBox = determine_ROI_for_first_time()
        # see if the ROI has been computed
        print(roiBox)
        if roiBox is not None:
            do_camshift()
        # show the frame and record if the user presses a key
        cv2.imshow("frame", frame)
        # wait, if the 'q' key is pressed, stop the loop
        if key == ord("q"):
            break
    # cleanup the camera and close any open windows
    camera.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
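In case it helps, here is a hedged sketch of the three steps asked for, assuming roiBox holds (x, y, w, h) as the question describes (draw_arrow_to_center is my own helper name, not from the post):
def draw_arrow_to_center(frame, roiBox):
    # 1. get the box values as ints
    x, y, w, h = (int(v) for v in roiBox)
    # 2. centre of the rectangle and centre of the frame
    box_center = (x + w // 2, y + h // 2)
    frame_h, frame_w = frame.shape[:2]
    frame_center = (frame_w // 2, frame_h // 2)
    # 3. arrow from the rectangle centre to the frame centre
    cv2.arrowedLine(frame, box_center, frame_center, (0, 0, 255), 2)
    # distance of the tracked object from the image centre
    return np.hypot(box_center[0] - frame_center[0], box_center[1] - frame_center[1])
You could call this inside the while loop right after do_camshift(), before cv2.imshow("frame", frame), to draw the arrow and get the distance each frame.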
