Detect moving object with OpenCV and Python

I found a very interesting article about detection of moving objects; here is the corresponding link: Detection of moving object
and the corresponding article: Article about object detection
I followed the code and tried to implement it myself; here is my code:
import cv2
import numpy as np
import matplotlib.pyplot as plt
from Background_Image_Creation import get_background

cap = cv2.VideoCapture("video_1.mp4")
#print(cap.get(cv2.CAP_PROP_FRAME_COUNT))
#print(cap.get(cv2.CAP_PROP_FPS))
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
save_name = "Result.mp4"
# define codec and create VideoWriter object
out = cv2.VideoWriter(save_name, cv2.VideoWriter_fourcc(*'mp4v'), 10, (frame_width, frame_height))
background_frame = get_background("video_1.mp4")
background = cv2.cvtColor(background_frame, cv2.COLOR_BGR2GRAY)
print(background.shape)
frame_count = 0
consecutive_frame = 8
#frame_diff_list = []
while cap.isOpened():
    ret, frame = cap.read()
    print(ret)
    print(frame.shape)
    if ret == True:
        frame_count += 1
        orig_frame = frame.copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if frame_count % consecutive_frame == 0 or frame_count == 1:
            frame_diff_list = []
        frame_diff = cv2.absdiff(gray, background)
        ret, thresh = cv2.threshold(frame_diff, 50, 255, cv2.THRESH_BINARY)
        dilate_frame = cv2.dilate(thresh, None, iterations=2)
        frame_diff_list.append(dilate_frame)
        print(frame_diff_list)
        if len(frame_diff_list) == consecutive_frame:
            # add all the frames in the `frame_diff_list`
            sum_frames = sum(frame_diff_list)
            print(sum_frames)
            # find the contours around the white segmented areas
            contours, hierarchy = cv2.findContours(sum_frames, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # draw the contours, not strictly necessary
            for i, cnt in enumerate(contours):
                cv2.drawContours(frame, contours, i, (0, 0, 255), 3)
            for contour in contours:
                # continue through the loop if contour area is less than 500...
                # ... helps in removing noise detection
                if cv2.contourArea(contour) < 500:
                    continue
                # get the xmin, ymin, width, and height coordinates from the contours
                (x, y, w, h) = cv2.boundingRect(contour)
                # draw the bounding boxes
                cv2.rectangle(orig_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.imshow('Detected Objects', orig_frame)
            out.write(orig_frame)
            if cv2.waitKey(100) & 0xFF == ord('q'):
                break
    else:
        break
cap.release()
cv2.destroyAllWindows()
The code for creating the background frame is also presented:
import numpy as np
import cv2
import matplotlib.pyplot as plt

def get_background(path):
    cap = cv2.VideoCapture(path)
    frame_indices = cap.get(cv2.CAP_PROP_FRAME_COUNT) * np.random.uniform(size=50)
    frames = []
    for idx in frame_indices:
        cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
        ret, frame = cap.read()
        frames.append(frame)
    median_frame = np.median(frames, axis=0).astype(np.uint8)
    return median_frame

#median_frame = get_background("video_1.mp4")
#cv2.imshow("Median_Background", median_frame)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#plt.show()
The code runs without errors, but the output video does not contain anything; it is just 1 KB in size. One thing I am thinking about is that this fragment

frame_diff_list.append(dilate_frame)

is highlighted in yellow; here is a screenshot:

Also, when I try

print(frame_diff_list)

it printed just one output:

I was even more surprised when I tested

print(ret)
print(frame.shape)

at the beginning of the loop, and it also printed just one output:

True
(360, 640, 3)

It seems that the loop does not cover all the frames, right? Could you please help me figure out what is wrong with my code?
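As a minimal sanity check (a sketch, assuming the same video_1.mp4 file), the read loop can be isolated from all processing: ret is checked before touching frame, since frame is None when a read fails, and the number of frames cap.read() actually delivers is counted:

import cv2

cap = cv2.VideoCapture("video_1.mp4")
frames_read = 0
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        # frame is None here; calling frame.shape would raise an AttributeError
        break
    frames_read += 1
cap.release()
print("frames read:", frames_read)

If this prints the full frame count, the capture itself is fine and the problem lies in the processing logic rather than in the loop.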

Related

Detect speed of moving object with Python and OpenCV

I wrote code that marks moving objects in the video with rectangles. This is the video that I used:
https://www.youtube.com/watch?v=PTy2vx8Ejas&t=47s&ab_channel=macman400
The code works very well:
import cv2
import numpy as np
import copy

# upload video
cap = cv2.VideoCapture('test_video.mp4')
# read two frames
ret, frame1 = cap.read()
ret, frame2 = cap.read()
while cap.isOpened():
    # get the difference between the two frames
    diff = cv2.absdiff(frame1, frame2)
    # convert the difference to gray
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    # threshold
    _, thresh = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=5)
    # find contours
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        cnt_area = cv2.contourArea(contour)
        if cnt_area < 1000 or cnt_area > 4000:
            continue
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 0, 255))
        print(cnt_area)
    #cv2.drawContours(frame1, contours, -1, (0,0,255), 1)
    # show frames
    cv2.imshow('frame', frame1)
    frame1 = frame2
    ret, frame2 = cap.read()
    if cv2.waitKey(60) == 60:
        break
cv2.destroyAllWindows()
cap.release()
Is it possible to get the speed of each rectangle?
Basically, I want to detect only fast / rapid / sudden movements, so I want to set some kind of threshold for the speed of each object/rectangle.
By speed I do not mean strictly m/s or km/h; I mean some other metric that can be interpreted as speed. I want to detect only rapid movements, in this case only the persons that are fighting.
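One possible metric (a sketch, not a full tracker): match each rectangle to the nearest rectangle from the previous frame by centroid distance, and treat that pixel displacement per frame as its "speed". The helper names and the max_match_dist threshold below are illustrative assumptions:

import math

def centroid(box):
    # box = (x, y, w, h); return the box center
    x, y, w, h = box
    return (x + w / 2.0, y + h / 2.0)

def box_speeds(prev_boxes, curr_boxes, max_match_dist=80.0):
    # For each current box, find the nearest previous box by centroid
    # distance; that displacement in pixels per frame acts as a rough speed.
    speeds = []
    for box in curr_boxes:
        cx, cy = centroid(box)
        best = None
        for pbox in prev_boxes:
            px, py = centroid(pbox)
            d = math.hypot(cx - px, cy - py)
            if best is None or d < best:
                best = d
        if best is not None and best <= max_match_dist:
            speeds.append((box, best))  # larger value = faster movement
    return speeds

Inside the loop you would collect the (x, y, w, h) boxes of each frame, call box_speeds(prev_boxes, curr_boxes), and draw only the boxes whose speed exceeds a chosen threshold; multiplying by the video's FPS converts pixels per frame into pixels per second.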

Unable to perform real time video processing in RaspberryPi using Webcam

I wrote code to detect the white color using a webcam (Logitech C310 HD Webcam) on a Raspberry Pi 3B+.
The code performs the following functions:
Capture the video using cv2.VideoCapture(0)
Grab each frame of the video and look for white objects in that frame.
If a white object is present in that frame, the code will encircle it and print White.
The real video and the processed video are shown using cv2.imshow('frame', frame1) and cv2.imshow('res', res1).
The code is shown below:
import cv2
import numpy as np
from time import sleep

cap1 = cv2.VideoCapture(0)
cap1.set(3, 640)
cap1.set(4, 480)
cap1.set(5, 15)
while(1):
    _, frame1 = cap1.read()
    #hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of white color in HSV
    # change it according to your need !
    lower_white = np.array([150, 150, 150], dtype=np.uint8)
    upper_white = np.array([255, 255, 255], dtype=np.uint8)
    # Threshold the HSV image to get only white colors
    mask1 = cv2.inRange(frame1, lower_white, upper_white)
    kernal = np.ones((5, 5), "uint8")
    # Tracking the Red Color
    (_, contours1, hierarchy) = cv2.findContours(mask1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for pic, contour1 in enumerate(contours1):
        area1 = cv2.contourArea(contour1)
        #start = time.time()
        if (area1 > 1200):
            print 'white in cam 1'
            #x, y, w, h = cv2.boundingRect(contour)
            #img = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
            ellipse1 = cv2.fitEllipse(contour1)
            cv2.ellipse(frame1, ellipse1, (0, 255, 0), 2)
            #cv2.putText(frame, "RED color", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255))
    red = cv2.dilate(mask1, kernal)
    # Bitwise-AND mask and original image
    res1 = cv2.bitwise_and(frame1, frame1, mask=mask1)
    cv2.imshow('frame', frame1)
    cv2.imshow('res', res1)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
cap1.release()
cv2.destroyAllWindows()
My problem is that a few months back this code was working fine in real time, but now, when I run the same code again, the video no longer runs in real time; there is a huge lag. For example, when there is a white object in front of the camera, it takes a few minutes to show it as white. In the past, the video frame window appeared without any lag, but now it appears only after a few minutes.
Please explain how I can solve this problem. Why did the same code work in the past but not now? Is it a problem of Python 2 vs. Python 3, or of the Raspberry Pi?
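If the lag comes from stale frames queuing up in the capture buffer (a common cause when processing is slower than the camera's frame rate), one hedged workaround is to shrink the buffer and, where the backend ignores that property, grab and discard queued frames so processing always sees a fresh one. A sketch under those assumptions:

import cv2

cap = cv2.VideoCapture(0)
# Ask the backend to keep at most one buffered frame; not every
# backend honors CAP_PROP_BUFFERSIZE, so this is best-effort.
cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)

while True:
    # Drain a few queued frames so the retrieved frame is recent.
    for _ in range(4):
        cap.grab()
    ret, frame = cap.retrieve()
    if not ret:
        break
    # ... run the white-color detection on `frame` here ...
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == 27:
        break
cap.release()
cv2.destroyAllWindows()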

How to give numbers to the rectangles that I drew for counting people?

I used the Mixture of Gaussians algorithm, then thresholding to clear shadows. Then I used contours to find white objects and drew rectangles around them. Now I want to give numbers to these rectangles, and I want the numbers to track the rectangles for counting people.
Here is the code I used. If you can give me some idea about counting, it would be awesome.
import numpy as np
import cv2
import sys

video_path = 'video.avi'
cv2.ocl.setUseOpenCL(False)
version = cv2.__version__.split('.')[0]
print(version)
cap = cv2.VideoCapture(video_path)
if version == '2':
    fgbg = cv2.BackgroundSubtractorMOG2()
if version == '3':
    fgbg = cv2.createBackgroundSubtractorMOG2()
while cap.isOpened():
    ret, frame = cap.read()
    if ret == True:
        fgmask = fgbg.apply(frame)
        ret1, th1 = cv2.threshold(fgmask, 150, 200, cv2.THRESH_BINARY)
        if version == '2':
            (contours, hierarchy) = cv2.findContours(th1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if version == '3':
            (im2, contours, hierarchy) = cv2.findContours(th1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for c in contours:
            if cv2.contourArea(c) < 500:
                continue
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imshow('foreground and background', th1)
        cv2.imshow('rgb', frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
cap.release()
cv2.destroyAllWindows()
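A sketch of one common way to number the rectangles: keep a dictionary of tracked centroids, match each new rectangle to the nearest remembered centroid, and assign a fresh ID when nothing is close enough. All names and the 50-pixel threshold here are illustrative assumptions, not part of the original code:

import math

next_id = 0
tracked = {}  # id -> last known centroid (cx, cy)

def assign_ids(boxes, max_dist=50.0):
    # Match each box to the nearest tracked centroid; give a new ID otherwise.
    global next_id, tracked
    updated = {}
    labeled = []
    for (x, y, w, h) in boxes:
        cx, cy = x + w / 2.0, y + h / 2.0
        best_id, best_d = None, max_dist
        for tid, (tx, ty) in tracked.items():
            d = math.hypot(cx - tx, cy - ty)
            if d < best_d and tid not in updated:
                best_id, best_d = tid, d
        if best_id is None:
            best_id = next_id
            next_id += 1
        updated[best_id] = (cx, cy)
        labeled.append((best_id, (x, y, w, h)))
    tracked = updated
    return labeled

In the main loop you would collect the (x, y, w, h) boxes, call assign_ids, and draw each ID next to its rectangle with cv2.putText; people can then be counted as the number of distinct IDs that cross a chosen line.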

OpenCV: Drawing boundary over detected motion

I am a newbie to OpenCV and decided to learn it by writing a small program that detects motion and draws a bounding box around the object.
I started with a rather simple method that calculated the difference between two frames, found contours, and drew a simple rectangle around the object. This served its purpose for a while, but it was incapable of autonomously tracking multiple objects as they popped up; I had to manually change my parameters for it to track n objects.
So I decided to change my method and instead use BackgroundSubtractorMOG. This method is more favorable for what I am trying to accomplish, but the one issue I am currently having is how to draw an outline of the detected object(s) after applying BackgroundSubtractorMOG. I don't want rectangles anymore; instead, I want it to draw along the object's border.
import sys
import cv2

def getImageDifference(first, second):
    return cv2.absdiff(first, second)

def drawRectangle(contour, frame):
    (x, y, w, h) = cv2.boundingRect(contour)
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

previousFrame = None
backgroundSubtractor = None
camera = cv2.VideoCapture(0)
backgroundSubtractor = cv2.BackgroundSubtractorMOG()
while True:
    grabbed, frame = camera.read()
    if not grabbed:
        break
    fgMask = backgroundSubtractor.apply(frame, learningRate=1.0/10)
    output = cv2.GaussianBlur(fgMask, (21, 21), 0)
    if previousFrame is None:
        previousFrame = fgMask
        continue
    frameDelta = getImageDifference(previousFrame, output)
    maskRGB = cv2.cvtColor(fgMask, cv2.COLOR_GRAY2BGR)
    frameDelta = maskRGB
    # frameDelta = cv2.cvtColor(fgMask, cv2.COLOR_BGR2GRAY)
    threshold = cv2.threshold(fgMask, 21, 255, cv2.THRESH_BINARY)[1]
    threshold = cv2.dilate(threshold, None, iterations=2)
    contours, hierarchy = cv2.findContours(threshold.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # this will track two objects simultaneously; if I want more,
    # I'd have to come and change this value to whatever I want
    sortedContours = sorted(contours, key=cv2.contourArea, reverse=True)[:2]
    for contour in sortedContours:
        drawRectangle(contour, frame)
    previousFrame = output
    draw = frame & maskRGB
    cv2.imshow('Main', frame)
    cv2.imshow('Background Subtraction', fgMask)
    cv2.imshow('Background Subtraction with color', draw)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
camera.release()
cv2.destroyAllWindows()
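Since findContours already returns the borders of the foreground blobs, a sketch of a replacement for drawRectangle that traces the outline instead; the smoothing via approxPolyDP is an optional assumption:

import cv2

def drawOutline(contour, frame):
    # Optionally smooth the contour a little before drawing it.
    epsilon = 0.005 * cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, epsilon, True)
    # -1 draws the single contour passed in the list.
    cv2.drawContours(frame, [approx], -1, (0, 255, 0), 2)

Calling drawOutline(contour, frame) in place of drawRectangle(contour, frame) inside the loop draws along the detected object's border rather than a bounding box.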

How can I make my motion detector in OpenCV less sensitive to light conditions?

I tried to make a motion detector, but as I saw, it is very sensitive to lighting conditions. I would like to detect whether there was a change on a dart board (so when somebody throws a dart, detect it).
This is how I tried it:
"""
Vecsei Gabor
"""
import cv2
#Initialize the first frame in the video stream
prevFrame = None
#Area of the detected contour, below this value it's not counted as detected
dontCare = 500
#Capture from webcam
cap = cv2.VideoCapture(0)
#Limit the FPS to 10 (For this task the lower the better)
cap.set(cv2.cv.CV_CAP_PROP_FPS, 15)
#counter for the detection
i = 0
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
#Blur for better results
output = cv2.GaussianBlur(frame, (21, 21), 0)
#If the first frame is None, initialize it
if prevFrame is None:
prevFrame = output
continue
#Compute the absolute difference between the current frame and
#First frame
frameDelta = cv2.absdiff(prevFrame, output)
#Convert to gray to detect contours
frameDelta = cv2.cvtColor(frameDelta, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(frameDelta, 21, 255, cv2.THRESH_BINARY)[1]
#Dilate the thresholded image to fill in holes, then find contours
#on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
cnts, hier = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts_sorted = sorted(cnts, key = cv2.contourArea, reverse = True)[:1]
#Loop over the contours
for c in cnts_sorted:
#If the contour is too small, ignore it
if cv2.contourArea(c) < dontCare:
continue
# compute the bounding box for the contour, draw it on the frame,
# and update the text
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
i+=1
print "Detected something!" + str(i)
print "Area: " + str(cv2.contourArea(c))
prevFrame = output
cv2.imshow('Webcam ',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
Basically, I just watch for differences between two frames, and if there was a change on the frame, I save it as our new reference image, so we can detect new changes.
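One hedged way to reduce lighting sensitivity (a sketch, not the asker's method): maintain a running-average background with cv2.accumulateWeighted so gradual illumination changes get absorbed into the model, and difference each frame against that average instead of against the single previous frame:

import cv2

cap = cv2.VideoCapture(0)
avg = None
while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if avg is None:
        avg = gray.astype("float")
        continue
    # Slowly blend the current frame into the background model;
    # a small alpha lets gradual lighting drift get absorbed.
    cv2.accumulateWeighted(gray, avg, 0.05)
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    cv2.imshow('delta', thresh)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()

A sudden change such as a dart appearing still produces a strong delta, while slow brightness shifts are tracked by the average and mostly cancel out.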
