Detect speed of moving object with Python and OpenCV - python

I wrote code that marks moving objects in a video with rectangles. This is the video I used:
https://www.youtube.com/watch?v=PTy2vx8Ejas&t=47s&ab_channel=macman400
The code works very well:
import cv2
import numpy as np

# open the video
cap = cv2.VideoCapture('test_video.mp4')

# read two consecutive frames
ret, frame1 = cap.read()
ret, frame2 = cap.read()

while cap.isOpened():
    # get the difference between the two frames
    diff = cv2.absdiff(frame1, frame2)
    # convert the difference to grayscale
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    # threshold
    _, thresh = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=5)
    # find contours
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        cnt_area = cv2.contourArea(contour)
        if cnt_area < 1000 or cnt_area > 4000:
            continue
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 0, 255))
        print(cnt_area)
    #cv2.drawContours(frame1, contours, -1, (0, 0, 255), 1)
    # show frames
    cv2.imshow('frame', frame1)
    frame1 = frame2
    ret, frame2 = cap.read()
    if not ret:  # stop when the video ends (avoids crashing on a None frame)
        break
    if cv2.waitKey(60) == 60:
        break

cv2.destroyAllWindows()
cap.release()
Is it possible to get the speed of each rectangle?
Basically, I want to detect only fast/rapid/sudden movements, so I want to set some kind of threshold on the speed of each object/rectangle.
By speed I do not mean strictly m/s or km/h, but some other metric that can be interpreted as speed. I want to detect only rapid movements; in this case, only the persons who are in the fight.
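One rough, camera-relative metric is displacement in pixels per frame: match each rectangle's centroid to the nearest centroid from the previous frame and treat that distance as the rectangle's speed. Below is a minimal sketch of that idea; the nearest-neighbour matching, the helper name fast_boxes, and the speed_thresh value are my assumptions, not an established recipe:

import math

def fast_boxes(boxes, prev_centroids, speed_thresh=15.0):
    # Keep only the boxes whose centroid moved more than speed_thresh
    # pixels since the previous frame (a crude pixels-per-frame "speed").
    centroids = [(x + w / 2.0, y + h / 2.0) for (x, y, w, h) in boxes]
    fast = []
    for box, (cx, cy) in zip(boxes, centroids):
        if prev_centroids:
            # naive nearest-neighbour match against the previous frame
            dist = min(math.hypot(cx - px, cy - py) for (px, py) in prev_centroids)
            if dist > speed_thresh:
                fast.append(box)
    return fast, centroids

In the loop above you would collect the (x, y, w, h) tuples that pass the area filter, call fast_boxes with the centroids returned from the previous iteration, and draw only the returned boxes. Note that nearest-neighbour matching gets confused when boxes appear, vanish, or cross paths; a proper centroid tracker with persistent IDs would be more reliable.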


detect moving object with opencv and python

I found a very interesting article about detection of moving objects; here is the corresponding link: Detection of moving object
and also the corresponding article: Article about object detection
I followed the code and tried to implement it myself; here is the corresponding code:
import cv2
import numpy as np
import matplotlib.pyplot as plt
from Background_Image_Creation import get_background

cap = cv2.VideoCapture("video_1.mp4")
#print(cap.get(cv2.CAP_PROP_FRAME_COUNT))
#print(cap.get(cv2.CAP_PROP_FPS))
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
save_name = "Result.mp4"
# define codec and create VideoWriter object
out = cv2.VideoWriter(save_name, cv2.VideoWriter_fourcc(*'mp4v'), 10, (frame_width, frame_height))
background_frame = get_background("video_1.mp4")
background = cv2.cvtColor(background_frame, cv2.COLOR_BGR2GRAY)
print(background.shape)
frame_count = 0
consecutive_frame = 8
#frame_diff_list = []

while cap.isOpened():
    ret, frame = cap.read()
    print(ret)
    print(frame.shape)
    if ret == True:
        frame_count += 1
        orig_frame = frame.copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if frame_count % consecutive_frame == 0 or frame_count == 1:
            frame_diff_list = []
        frame_diff = cv2.absdiff(gray, background)
        ret, thresh = cv2.threshold(frame_diff, 50, 255, cv2.THRESH_BINARY)
        dilate_frame = cv2.dilate(thresh, None, iterations=2)
        frame_diff_list.append(dilate_frame)
        print(frame_diff_list)
        if len(frame_diff_list) == consecutive_frame:
            # add all the frames in the `frame_diff_list`
            sum_frames = sum(frame_diff_list)
            print(sum_frames)
            # find the contours around the white segmented areas
            contours, hierarchy = cv2.findContours(sum_frames, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # draw the contours, not strictly necessary
            for i, cnt in enumerate(contours):
                cv2.drawContours(frame, contours, i, (0, 0, 255), 3)
            for contour in contours:
                # skip the contour if its area is less than 500...
                # ...this helps in removing noise detections
                if cv2.contourArea(contour) < 500:
                    continue
                # get the xmin, ymin, width, and height coordinates from the contours
                (x, y, w, h) = cv2.boundingRect(contour)
                # draw the bounding boxes
                cv2.rectangle(orig_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.imshow('Detected Objects', orig_frame)
            out.write(orig_frame)
            if cv2.waitKey(100) & 0xFF == ord('q'):
                break
    else:
        break

cap.release()
cv2.destroyAllWindows()
The code for background frame creation is also presented:
import numpy as np
import cv2
import matplotlib.pyplot as plt

def get_background(path):
    cap = cv2.VideoCapture(path)
    # pick 50 random frame indices from the video
    frame_indices = cap.get(cv2.CAP_PROP_FRAME_COUNT) * np.random.uniform(size=50)
    frames = []
    for idx in frame_indices:
        cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
        ret, frame = cap.read()
        frames.append(frame)
    # the per-pixel median of the sampled frames approximates the static background
    median_frame = np.median(frames, axis=0).astype(np.uint8)
    return median_frame

#median_frame = get_background("video_1.mp4")
#cv2.imshow("Median_Background", median_frame)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#plt.show()
The code runs fine, but the output video does not contain anything; it is just 1 KB in size. One thing I am wondering about is that this fragment
frame_diff_list.append(dilate_frame)
is highlighted in yellow in my editor (screenshot omitted).
Also, when I try to print frame_diff_list, it prints just one output.
I was even more surprised when I tested
print(ret)
print(frame.shape)
at the beginning of the loop, and they too printed just one output:
True
(360, 640, 3)
It seems that the loop does not cover all frames, right? Could you please help me figure out what is wrong with my code?
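Since the symptom suggests the loop body runs only once, a quick way to rule out a decoding problem is to count how many frames OpenCV can actually read from the file. A minimal sketch, assuming video_1.mp4 is in the working directory:

import cv2

cap = cv2.VideoCapture("video_1.mp4")
n = 0
while True:
    ret, frame = cap.read()
    if not ret:
        # cap.read() returns False once no more frames can be decoded
        break
    n += 1
cap.release()
print(n)  # compare against cap.get(cv2.CAP_PROP_FRAME_COUNT)

If the count matches the video's frame count, the file decodes fine, and the problem is more likely an indentation slip in the main loop (for example, the processing block not sitting inside the if ret == True: branch).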

How to fix ValueError: not enough values to unpack (expected 3, got 2) in python

This is an OpenCV project for motion detection using Python (by #dhruvmarathe).
I have been having this problem for many days; please help me solve it.
This is my code:
import cv2
import time

first_frame = None
video = cv2.VideoCapture(0)

while True:
    check, frame = video.read()
    print(frame)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if first_frame is None:
        first_frame = gray
        continue
    delta_frame = cv2.absdiff(first_frame, gray)
    thresh_delta = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]
    thresh_delta = cv2.dilate(thresh_delta, None, iterations=0)
    (_, cnts, _) = cv2.findContours(thresh_delta.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
    for contour in cnts:
        if cv2.contourArea(contour) < 1000:
            continue
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
    cv2.imshow('frame', frame)
    cv2.imshow('Capturing', gray)
    cv2.imshow('delta', delta_frame)
    cv2.imshow('thresh', thresh_delta)
    key = cv2.waitKey(1)
    if key == ord('q'):
        break

video.release()
The problem is this line:

(_, cnts, _) = cv2.findContours(thresh_delta.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)

findContours returns a tuple of two objects (contours and hierarchy), but you are trying to unpack three values (even if you discard the first and third). OpenCV 3.x returned three values (image, contours, hierarchy); OpenCV 4.x returns only two, which is why code copied from older tutorials raises this error. I imagine from the name you've used, you want:

cnts, _ = cv2.findContours(thresh_delta.copy(), cv2.RETR_EXTERNAL,
                           cv2.CHAIN_APPROX_SIMPLE)
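As a side note, if the script has to run on both OpenCV 3.x and 4.x, a small version-agnostic helper (a sketch, not part of the standard API) avoids hard-coding either unpacking:

def grab_contours(res):
    # OpenCV 4.x returns (contours, hierarchy);
    # OpenCV 3.x returned (image, contours, hierarchy)
    return res[0] if len(res) == 2 else res[1]

cnts = grab_contours(cv2.findContours(thresh_delta.copy(), cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE))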

Can we use Haar Classifier to detect Iris of an eye as done for face and eyes?

I am currently trying to detect the iris, and subsequently the pupil, within a given image. All approaches seen online detect the face, then the eyes, and then use thresholding, Hough transforms, and contouring to finally detect the pupil.
If we already have a large dataset of pupils, can't we train a Haar classifier, as is done for faces and eyes, to detect the iris of an eye? Is there any reason this shouldn't work?
Currently, I have a partially working solution using the non-haar based approach.
Here is an example of the code that partially works:
import math
import cv2

eye_cascade = cv2.CascadeClassifier('./cascade_files/haarcascade_eye.xml')
if eye_cascade.empty():
    raise IOError('Unable to load the eye cascade classifier xml file')

cap = cv2.VideoCapture(0)
ds_factor = 0.5
ret, frame = cap.read()
contours = []

while True:
    ret, frame = cap.read()
    frame = cv2.resize(frame, None, fx=ds_factor, fy=ds_factor, interpolation=cv2.INTER_AREA)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    eyes = eye_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=1)
    for (x_eye, y_eye, w_eye, h_eye) in eyes:
        pupil_frame = gray[y_eye:y_eye + h_eye, x_eye:x_eye + w_eye]
        ret, thresh = cv2.threshold(pupil_frame, 80, 255, cv2.THRESH_BINARY)
        cv2.imshow("threshold", thresh)
        im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        print(contours)
        for contour in contours:
            area = cv2.contourArea(contour)
            rect = cv2.boundingRect(contour)
            x, y, w, h = rect
            radius = 0.15 * (w + h)
            area_condition = (100 <= area <= 200)
            symmetry_condition = (abs(1 - float(w) / float(h)) <= 0.2)
            fill_condition = (abs(1 - (area / (math.pi * math.pow(radius, 2.0)))) <= 0.4)
            if area_condition and symmetry_condition and fill_condition:
                cv2.circle(frame, (int(x_eye + x + radius), int(y_eye + y + radius)),
                           int(1.3 * radius), (0, 180, 0), -1)
    cv2.imshow('Pupil Detector', frame)
    c = cv2.waitKey(1)
    if c == 27:
        break

cap.release()
cv2.destroyAllWindows()
Example output:
Yes, you could train a pupil detector directly on sample images with a Haar cascade classifier. And if you aren't constrained to small hardware at high throughput, I would recommend training a deep-learning detector on the same dataset; you could expect better results.
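For illustration, once such a cascade has been trained, using it looks just like the eye cascade above. A sketch, where haarcascade_pupil.xml is a hypothetical file you would have produced yourself with OpenCV's cascade-training tools:

import cv2

# hypothetical cascade trained on pupil samples (not shipped with OpenCV)
pupil_cascade = cv2.CascadeClassifier('haarcascade_pupil.xml')
if pupil_cascade.empty():
    raise IOError('Unable to load the pupil cascade classifier xml file')

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detectMultiScale behaves the same regardless of what the cascade was trained on
    pupils = pupil_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    for (x, y, w, h) in pupils:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 180, 0), 2)
    cv2.imshow('Pupil Cascade', frame)
    if cv2.waitKey(1) == 27:
        break
cap.release()
cv2.destroyAllWindows()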

Motion Tracker is detecting entirety of screen

I'm trying to get basic motion tracking working, to be used later in a Raspberry Pi/Arduino project. I don't know very much Python yet, but I can wrap my head around the logic of what's going on pretty well. I've been using some examples to try to get it working with my laptop's built-in camera, but it seems to be tracking the entirety of the image even when I'm outside the first frame. My guess is that the low resolution (640x480) and frame rate (6 fps) cause jitter, and the frame-to-frame differences from that jitter are what it's attempting to track. From what I've read, the Gaussian blur is supposed to take care of this, but it's not. The code runs; I can see the multiple types of processing taking place in multiple windows, and there is some motion detection going on, but it's very inconsistent and I can't troubleshoot what's going wrong.
import cv2
import time

first_frame = None
video = cv2.VideoCapture(0)
a = 1

while True:
    a = a + 1
    check, frame = video.read()
    print(frame)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if first_frame is None:
        first_frame = gray
        continue
    delta_frame = cv2.absdiff(first_frame, gray)
    thresh_delta = cv2.threshold(delta_frame, 25, 255, cv2.THRESH_BINARY)[1]
    thresh_delta = cv2.dilate(thresh_delta, None, iterations=2)
    (_, cnts, _) = cv2.findContours(thresh_delta.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in cnts:
        if cv2.contourArea(contour) < 1000:
            continue
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('captureFrame', frame)
    cv2.imshow('captureGrey', gray)
    cv2.imshow('delta', delta_frame)
    cv2.imshow('thresh', thresh_delta)
    key = cv2.waitKey(1)
    if key == ord('q'):
        break

print(a)
video.release()
cv2.destroyAllWindows()
EDIT: it seems to have been a hardware problem, possibly related to automatic exposure/lighting adjustment; I cannot confirm. But buying a cheap Microsoft LifeCam VX-2000 seems to have resolved the issue.
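If changing hardware were not an option, one common way to tame sensor jitter and slow lighting drift is to compare each frame against a running average of recent frames instead of a single fixed first frame. A minimal sketch of that idea (the 0.05 blending factor is an assumption to tune):

import cv2

video = cv2.VideoCapture(0)
avg = None  # running-average background, kept as float32

while True:
    check, frame = video.read()
    if not check:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if avg is None:
        avg = gray.astype("float32")
        continue
    # blend the current frame into the background; a larger factor adapts faster
    cv2.accumulateWeighted(gray, avg, 0.05)
    delta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
    thresh = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]
    cv2.imshow('delta', delta)
    cv2.imshow('thresh', thresh)
    if cv2.waitKey(1) == ord('q'):
        break

video.release()
cv2.destroyAllWindows()

Because the background keeps adapting, slow changes (lighting, auto-exposure) fade out of the delta image, while genuinely fast motion still shows up.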

How can I make my motion detector in OpenCV less sensitive for light conditions?

I tried to make a motion detector, but as I saw, it is very sensitive to lighting conditions. I would like to detect whether there was a change on a dart board (so when somebody throws a dart, detect it).
This is how I tried it:
"""
Vecsei Gabor
"""
import cv2
#Initialize the first frame in the video stream
prevFrame = None
#Area of the detected contour, below this value it's not counted as detected
dontCare = 500
#Capture from webcam
cap = cv2.VideoCapture(0)
#Limit the FPS to 10 (For this task the lower the better)
cap.set(cv2.cv.CV_CAP_PROP_FPS, 15)
#counter for the detection
i = 0
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
#Blur for better results
output = cv2.GaussianBlur(frame, (21, 21), 0)
#If the first frame is None, initialize it
if prevFrame is None:
prevFrame = output
continue
#Compute the absolute difference between the current frame and
#First frame
frameDelta = cv2.absdiff(prevFrame, output)
#Convert to gray to detect contours
frameDelta = cv2.cvtColor(frameDelta, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(frameDelta, 21, 255, cv2.THRESH_BINARY)[1]
#Dilate the thresholded image to fill in holes, then find contours
#on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
cnts, hier = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts_sorted = sorted(cnts, key = cv2.contourArea, reverse = True)[:1]
#Loop over the contours
for c in cnts_sorted:
#If the contour is too small, ignore it
if cv2.contourArea(c) < dontCare:
continue
# compute the bounding box for the contour, draw it on the frame,
# and update the text
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
i+=1
print "Detected something!" + str(i)
print "Area: " + str(cv2.contourArea(c))
prevFrame = output
cv2.imshow('Webcam ',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
Basically, I just watch for the differences between two frames, and if there was a change on the frame, I save it as the new reference image, so we can detect new changes.
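One option that is noticeably more robust to gradual lighting changes than raw frame differencing is OpenCV's built-in background subtractor. A minimal sketch (the history and varThreshold values are assumptions to tune for the dart-board scene):

import cv2

cap = cv2.VideoCapture(0)
# MOG2 maintains a per-pixel Gaussian-mixture background model that
# adapts to slow illumination changes; detectShadows marks shadows as gray (127)
subtractor = cv2.createBackgroundSubtractorMOG2(history=200, varThreshold=25,
                                                detectShadows=True)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    mask = subtractor.apply(frame)
    # drop shadow pixels (value 127) so only confident foreground (255) remains
    mask = cv2.threshold(mask, 254, 255, cv2.THRESH_BINARY)[1]
    cv2.imshow('Foreground mask', mask)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

The contour filtering from the original code can then be run on this mask instead of the thresholded frame delta.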
