Motion tracker is detecting the entirety of the screen - Python

I'm trying to get basic motion tracking working, to be used later in a Raspberry Pi/Arduino project. I don't know much Python yet, but I can wrap my head around the logic of what's going on fairly well. I've been using some examples to try to get it working with my laptop's built-in camera, but it seems to be tracking the entire image even when I'm outside the first frame. My guess is that the low resolution (640x480) and frame rate (6 fps) cause jitter, and the frame-to-frame differences from that jitter are what it's attempting to track. From what I've read, the Gaussian blur is supposed to take care of this, but it isn't. The code runs without errors, I can see the different processing stages in multiple windows, and there is some motion detection going on, but it's very inconsistent and I can't work out what's going wrong.
import cv2

first_frame = None
video = cv2.VideoCapture(0)
a = 1  # frame counter

while True:
    a = a + 1
    check, frame = video.read()
    print(frame)  # debug: dumps the raw frame array each iteration

    # Grayscale + heavy blur to suppress per-pixel sensor noise
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # Use the very first frame as the static background reference
    if first_frame is None:
        first_frame = gray
        continue

    # Difference against the reference, then threshold and fill holes
    delta_frame = cv2.absdiff(first_frame, gray)
    thresh_delta = cv2.threshold(delta_frame, 25, 255, cv2.THRESH_BINARY)[1]
    thresh_delta = cv2.dilate(thresh_delta, None, iterations=2)

    # OpenCV 3.x signature; OpenCV 4.x returns (contours, hierarchy) only
    (_, cnts, _) = cv2.findContours(thresh_delta.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    for contour in cnts:
        # Ignore small contours (noise)
        if cv2.contourArea(contour) < 1000:
            continue
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow('captureFrame', frame)
    cv2.imshow('captureGrey', gray)
    cv2.imshow('delta', delta_frame)
    cv2.imshow('thresh', thresh_delta)

    key = cv2.waitKey(1)
    if key == ord('q'):
        break

print(a)
video.release()
cv2.destroyAllWindows()
EDIT: it seems to have been a hardware problem, possibly related to auto-lighting? I can't confirm. But buying a cheap Microsoft LifeCam VX-2000 seems to have resolved the issue.
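A software-side mitigation that may also help here (a minimal sketch, not from the original post, under the assumption that the jitter comes from slow exposure/auto-lighting drift): maintain a running-average background with cv2.accumulateWeighted instead of diffing against a fixed first frame, so gradual global changes are absorbed into the reference rather than flagged as motion.

import cv2

video = cv2.VideoCapture(0)
background = None  # running-average background, float32

while True:
    check, frame = video.read()
    if not check:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    if background is None:
        background = gray.astype('float32')
        continue

    # Fold the current frame into the background; a small alpha means
    # slow changes (auto-exposure drift) are absorbed, fast ones are not
    cv2.accumulateWeighted(gray, background, 0.05)
    delta = cv2.absdiff(gray, cv2.convertScaleAbs(background))

    thresh = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]
    cv2.imshow('thresh', thresh)
    if cv2.waitKey(1) == ord('q'):
        break

video.release()
cv2.destroyAllWindows()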

Related

OpenCV detecting hundreds of cars in footage with one car

I've written a very simple script for detecting cars in a given piece of footage:
import cv2 as cv

cap = cv.VideoCapture(1)
car_cascade = cv.CascadeClassifier('assets/cars.xml')

while True:
    ret, frame = cap.read()
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    # Positional args: scaleFactor=1.1, minNeighbors=1
    cars = car_cascade.detectMultiScale(gray, 1.1, 1)
    for (x, y, w, h) in cars:
        cv.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
    # Display the resulting frame
    cv.imshow('frame', frame)
    if cv.waitKey(1) == ord('q'):
        break

# When everything is done, release the capture
cap.release()
cv.destroyAllWindows()
I'm using the following file for my cars.xml: https://github.com/Aman-Preet-Singh-Gulati/Vehicle-count-detect/blob/main/Required%20Files/cars.xml. I've seen several projects that use this same cascade file as well.
My problem is that when I spin up the video, hundreds of elements in it are categorized as "cars" by the detectMultiScale function. I've been struggling to find anything on why this might be occurring.
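One parameter worth checking (a hedged suggestion rather than a confirmed fix): the third positional argument to detectMultiScale is minNeighbors, and a value of 1 accepts nearly every candidate window the cascade produces. Raising it, and constraining minSize, usually prunes most spurious detections. The exact values below are illustrative:

# Sketch: stricter detectMultiScale parameters (values are illustrative)
cars = car_cascade.detectMultiScale(
    gray,
    scaleFactor=1.1,    # how much the image is shrunk at each pyramid level
    minNeighbors=5,     # require several overlapping candidates per detection
    minSize=(40, 40),   # ignore implausibly small "cars"
)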

Detect speed of moving object with Python and OpenCV

I wrote some code that marks moving objects in the video with rectangles. This is the video that I used:
https://www.youtube.com/watch?v=PTy2vx8Ejas&t=47s&ab_channel=macman400
The code works very well:
import cv2

# Open the video file
cap = cv2.VideoCapture('test_video.mp4')

# Read two consecutive frames
ret, frame1 = cap.read()
ret, frame2 = cap.read()

while cap.isOpened():
    # Get the difference between the two frames
    diff = cv2.absdiff(frame1, frame2)
    # Convert the difference to grayscale
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    # Threshold, then dilate to get solid motion blobs
    _, thresh = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=5)
    # Find contours of the motion blobs
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        cnt_area = cv2.contourArea(contour)
        if cnt_area < 1000 or cnt_area > 4000:
            continue
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 0, 255))
        print(cnt_area)
    # cv2.drawContours(frame1, contours, -1, (0, 0, 255), 1)

    # Show the annotated frame, then advance one frame
    cv2.imshow('frame', frame1)
    frame1 = frame2
    ret, frame2 = cap.read()
    if not ret:  # stop at the end of the video
        break
    if cv2.waitKey(60) == ord('q'):
        break

cv2.destroyAllWindows()
cap.release()
Is it possible to get the speed of each rectangle?
Basically, I want to detect only fast/rapid/sudden movements, so I want to set some kind of speed threshold for each object/rectangle.
By speed I do not mean strictly m/s or km/h; I mean some other metric that can be interpreted as speed. In this case, I want to detect only rapid movements, i.e. only the people who are fighting.
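One rough way to attach such a metric to each rectangle (a sketch assuming boxes can be matched to the previous frame by nearest centroid; the helper names are made up for illustration): use each box's centroid displacement in pixels per frame as the speed proxy, and threshold on that.

import math

def centroid(box):
    # Center point of an (x, y, w, h) bounding box
    x, y, w, h = box
    return (x + w / 2.0, y + h / 2.0)

def pixel_speed(box, prev_centroids, max_dist=100):
    # Distance (pixels/frame) to the nearest centroid from the previous
    # frame; None if no previous box was plausibly the same object
    cx, cy = centroid(box)
    best = None
    for (px, py) in prev_centroids:
        d = math.hypot(cx - px, cy - py)
        if d < max_dist and (best is None or d < best):
            best = d
    return best

# Inside the per-frame contour loop, after boundingRect:
#     speed = pixel_speed((x, y, w, h), prev_centroids)
#     if speed is not None and speed > 15:  # tune: pixels/frame for "rapid"
#         cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 0, 255), 2)
# After the loop, remember this frame's centroids:
#     prev_centroids = [centroid(b) for b in current_boxes]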

Unable to perform real-time video processing on a Raspberry Pi using a webcam

I wrote code to detect white color using a webcam (Logitech C310 HD Webcam) on a Raspberry Pi 3B+.
The code performs the following functions:
Capture the video using cv2.VideoCapture(0).
Grab each frame of the video and look for white objects in that frame.
If a white object is present in a frame, encircle it and print "White".
Show the real video and the processed video using cv2.imshow('frame',frame1) and cv2.imshow('res',res1).
Code is shown below:
import cv2
import numpy as np

cap1 = cv2.VideoCapture(0)
cap1.set(3, 640)  # frame width
cap1.set(4, 480)  # frame height
cap1.set(5, 15)   # FPS

while True:
    _, frame1 = cap1.read()
    # hsv = cv2.cvtColor(frame1, cv2.COLOR_BGR2HSV)

    # Define the range of white; change it according to your needs.
    # Note that the HSV conversion above is commented out, so this
    # thresholds the BGR frame directly.
    lower_white = np.array([150, 150, 150], dtype=np.uint8)
    upper_white = np.array([255, 255, 255], dtype=np.uint8)

    # Threshold the frame to keep only white pixels
    mask1 = cv2.inRange(frame1, lower_white, upper_white)
    kernal = np.ones((5, 5), "uint8")

    # Find contours of the white regions (OpenCV 3.x signature)
    (_, contours1, hierarchy) = cv2.findContours(mask1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for pic, contour1 in enumerate(contours1):
        area1 = cv2.contourArea(contour1)
        if area1 > 1200:
            print('white in cam 1')
            # Fit and draw an ellipse around the white object
            ellipse1 = cv2.fitEllipse(contour1)
            cv2.ellipse(frame1, ellipse1, (0, 255, 0), 2)

    red = cv2.dilate(mask1, kernal)
    # Bitwise-AND mask and original image
    res1 = cv2.bitwise_and(frame1, frame1, mask=mask1)
    cv2.imshow('frame', frame1)
    cv2.imshow('res', res1)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:  # Esc to quit
        break

cap1.release()
cv2.destroyAllWindows()
My problem is that a few months back this code was working fine in real time, but now when I run the same code the video doesn't play in real time and there is a huge lag. For example, when a white object is held in front of the camera, it takes a few minutes for the code to report it as white. In the past the video window appeared without any lag, but now it appears only after a few minutes.
Please explain how I can solve this problem. Why did the same code work before but not now? Is it a problem with Python 2 vs. Python 3, or with the Raspberry Pi?
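One possible cause (a hedged sketch, not a confirmed diagnosis): if per-frame processing is slower than the capture rate, the driver's frame buffer fills up and every read() returns an increasingly stale frame, which looks exactly like a minutes-long lag. Shrinking the buffer and hoisting loop-invariant work out of the loop may help; note that CAP_PROP_BUFFERSIZE is only a best-effort hint that not every backend honors.

import cv2
import numpy as np

cap1 = cv2.VideoCapture(0)
# Ask the backend to keep at most one buffered frame (best-effort hint)
cap1.set(cv2.CAP_PROP_BUFFERSIZE, 1)

# Hoist loop-invariant constants out of the loop
lower_white = np.array([150, 150, 150], dtype=np.uint8)
upper_white = np.array([255, 255, 255], dtype=np.uint8)

while True:
    # Discard stale buffered frames, then decode only the newest one
    for _ in range(2):
        cap1.grab()
    ret, frame1 = cap1.retrieve()
    if not ret:
        break
    mask1 = cv2.inRange(frame1, lower_white, upper_white)
    cv2.imshow('mask', mask1)
    if cv2.waitKey(1) & 0xFF == 27:
        break

cap1.release()
cv2.destroyAllWindows()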

OpenCV: Drawing boundary over detected motion

I am a newbie to OpenCV and decided to learn it by writing a small program that detects motion and draws a bounding box around the object.
I started with a rather simple method that calculated the difference between two frames, found contours, and drew a simple rectangle around the object. This served its purpose for a while, but it was incapable of autonomously tracking multiple objects as they popped up; I had to manually change my parameters for it to track n objects.
So I decided to change my method and instead use BackgroundSubtractorMOG. This method is better suited to what I am trying to accomplish, but the one issue I'm still having is how to draw an outline of the detected object(s) after applying BackgroundSubtractorMOG. I don't want rectangles anymore; instead, I want it to draw along the object's border.
import cv2

def getImageDifference(first, second):
    return cv2.absdiff(first, second)

def drawRectangle(contour, frame):
    (x, y, w, h) = cv2.boundingRect(contour)
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

previousFrame = None
camera = cv2.VideoCapture(0)
# OpenCV 2.4 API; OpenCV 3+ uses cv2.createBackgroundSubtractorMOG2()
backgroundSubtractor = cv2.BackgroundSubtractorMOG()

while True:
    grabbed, frame = camera.read()
    if not grabbed:
        break

    fgMask = backgroundSubtractor.apply(frame, learningRate=1.0 / 10)
    output = cv2.GaussianBlur(fgMask, (21, 21), 0)

    if previousFrame is None:
        previousFrame = fgMask
        continue

    frameDelta = getImageDifference(previousFrame, output)
    maskRGB = cv2.cvtColor(fgMask, cv2.COLOR_GRAY2BGR)
    frameDelta = maskRGB

    threshold = cv2.threshold(fgMask, 21, 255, cv2.THRESH_BINARY)[1]
    threshold = cv2.dilate(threshold, None, iterations=2)
    contours, hierarchy = cv2.findContours(threshold.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # This tracks two objects simultaneously; to track more, change this value
    sortedContours = sorted(contours, key=cv2.contourArea, reverse=True)[:2]
    for contour in sortedContours:
        drawRectangle(contour, frame)

    previousFrame = output
    draw = frame & maskRGB
    cv2.imshow('Main', frame)
    cv2.imshow('Background Subtraction', fgMask)
    cv2.imshow('Background Subtraction with color', draw)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

camera.release()
cv2.destroyAllWindows()
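For the outline itself, cv2.drawContours draws a contour's actual border instead of a bounding box. A minimal sketch of that idea (using cv2.createBackgroundSubtractorMOG2, the OpenCV 3+ factory function, and OpenCV 4's two-value findContours return):

import cv2

camera = cv2.VideoCapture(0)
subtractor = cv2.createBackgroundSubtractorMOG2(detectShadows=False)

while True:
    grabbed, frame = camera.read()
    if not grabbed:
        break
    fgMask = subtractor.apply(frame)
    # Clean up the mask before contouring
    fgMask = cv2.threshold(fgMask, 127, 255, cv2.THRESH_BINARY)[1]
    contours, _ = cv2.findContours(fgMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        if cv2.contourArea(contour) < 1000:
            continue
        # Draw each large object's border in green (no bounding box)
        cv2.drawContours(frame, [contour], -1, (0, 255, 0), 2)
    cv2.imshow('Outlines', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

camera.release()
cv2.destroyAllWindows()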

How can I make my motion detector in OpenCV less sensitive to light conditions?

I tried to make a motion detector, but as I saw, it is very sensitive to lighting conditions. I would like to detect whether there has been a change on a dartboard (so when somebody throws a dart, detect it).
This is how I tried it:
"""
Vecsei Gabor
"""
import cv2
#Initialize the first frame in the video stream
prevFrame = None
#Area of the detected contour, below this value it's not counted as detected
dontCare = 500
#Capture from webcam
cap = cv2.VideoCapture(0)
#Limit the FPS to 10 (For this task the lower the better)
cap.set(cv2.cv.CV_CAP_PROP_FPS, 15)
#counter for the detection
i = 0
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
#Blur for better results
output = cv2.GaussianBlur(frame, (21, 21), 0)
#If the first frame is None, initialize it
if prevFrame is None:
prevFrame = output
continue
#Compute the absolute difference between the current frame and
#First frame
frameDelta = cv2.absdiff(prevFrame, output)
#Convert to gray to detect contours
frameDelta = cv2.cvtColor(frameDelta, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(frameDelta, 21, 255, cv2.THRESH_BINARY)[1]
#Dilate the thresholded image to fill in holes, then find contours
#on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
cnts, hier = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts_sorted = sorted(cnts, key = cv2.contourArea, reverse = True)[:1]
#Loop over the contours
for c in cnts_sorted:
#If the contour is too small, ignore it
if cv2.contourArea(c) < dontCare:
continue
# compute the bounding box for the contour, draw it on the frame,
# and update the text
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
i+=1
print "Detected something!" + str(i)
print "Area: " + str(cv2.contourArea(c))
prevFrame = output
cv2.imshow('Webcam ',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
Basically, I just watch for the differences between two frames; if there was a change in the frame, I save it as the new reference image so we can detect further changes.
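One way to make the comparison less sensitive to lighting (a sketch, not a guaranteed fix): normalize the global brightness of each frame before diffing, for example by subtracting the frame's mean gray level, so a uniform exposure shift across the whole board cancels out instead of registering as change.

import cv2
import numpy as np

def normalized_gray(frame):
    # Grayscale + blur, then remove the global brightness level so a
    # uniform lighting change across the frame cancels out in the diff
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    return np.clip(gray.astype(np.float32) - gray.mean(), 0, 255).astype(np.uint8)

# Usage inside the loop, replacing the raw-frame comparison:
#     output = normalized_gray(frame)
#     frameDelta = cv2.absdiff(prevFrame, output)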
