OpenCV: Drawing boundary over detected motion - python

I am a newbie to OpenCV and decided to learn it by writing a small program that detects motion and draws a bounding box around the object.
I started with a rather simple method that calculated the difference between two frames, found contours, and drew a rectangle around the object. This served its purpose for a while, but it was incapable of autonomously tracking multiple objects as they popped up; I had to manually change my parameters for it to track n objects.
So I decided to switch to BackgroundSubtractorMOG instead. This method is better suited to what I am trying to accomplish, but the one issue I am currently having is how to draw an outline of the detected object(s) after applying BackgroundSubtractorMOG. I don't want rectangles anymore; I want it to draw around the object's border.
import sys
import cv2

def getImageDifference(first, second):
    return cv2.absdiff(first, second)

def drawRectangle(contour, frame):
    (x, y, w, h) = cv2.boundingRect(contour)
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

previousFrame = None
camera = cv2.VideoCapture(0)
backgroundSubtractor = cv2.BackgroundSubtractorMOG()

while True:
    grabbed, frame = camera.read()
    if not grabbed:
        break
    fgMask = backgroundSubtractor.apply(frame, learningRate = 1.0/10)
    output = cv2.GaussianBlur(fgMask, (21, 21), 0)
    if previousFrame is None:
        previousFrame = fgMask
        continue
    frameDelta = getImageDifference(previousFrame, output)
    maskRGB = cv2.cvtColor(fgMask, cv2.COLOR_GRAY2BGR)
    frameDelta = maskRGB
    # frameDelta = cv2.cvtColor(fgMask, cv2.COLOR_BGR2GRAY)
    threshold = cv2.threshold(fgMask, 21, 255, cv2.THRESH_BINARY)[1]
    threshold = cv2.dilate(threshold, None, iterations = 2)
    contours, hierarchy = cv2.findContours(threshold.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # this tracks two objects simultaneously; to track more, I have to come back and change this value
    sortedContours = sorted(contours, key = cv2.contourArea, reverse = True)[:2]
    for contour in sortedContours:
        drawRectangle(contour, frame)
    previousFrame = output
    draw = frame & maskRGB
    cv2.imshow('Main', frame)
    cv2.imshow('Background Subtraction', fgMask)
    cv2.imshow('Background Subtraction with color', draw)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

camera.release()
cv2.destroyAllWindows()
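
To draw the object's actual border instead of a bounding box, cv2.drawContours can render each contour outline directly onto the frame. A minimal sketch of a replacement for drawRectangle above (the helper name drawOutline is my own):

def drawOutline(contour, frame):
    # a positive thickness draws just the outline; thickness=-1 would fill the shape
    cv2.drawContours(frame, [contour], -1, (0, 255, 0), 2)

Calling drawOutline(contour, frame) in the existing loop over sortedContours should trace each detected blob's boundary rather than boxing it.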

Related

detect moving object with opencv and python

I found a very interesting article about the detection of moving objects; here is the corresponding link: Detection of moving object
and the corresponding article: Article about object detection
I followed the code and tried to implement it myself; here is the corresponding code:
import cv2
import numpy as np
import matplotlib.pyplot as plt
from Background_Image_Creation import get_background

cap = cv2.VideoCapture("video_1.mp4")
#print(cap.get(cv2.CAP_PROP_FRAME_COUNT))
#print(cap.get(cv2.CAP_PROP_FPS))
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
save_name = "Result.mp4"
# define codec and create VideoWriter object
out = cv2.VideoWriter(save_name, cv2.VideoWriter_fourcc(*'mp4v'), 10, (frame_width, frame_height))
background_frame = get_background("video_1.mp4")
background = cv2.cvtColor(background_frame, cv2.COLOR_BGR2GRAY)
print(background.shape)
frame_count = 0
consecutive_frame = 8
#frame_diff_list = []
while cap.isOpened():
    ret, frame = cap.read()
    print(ret)
    print(frame.shape)
    if ret == True:
        frame_count += 1
        orig_frame = frame.copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if frame_count % consecutive_frame == 0 or frame_count == 1:
            frame_diff_list = []
        frame_diff = cv2.absdiff(gray, background)
        ret, thresh = cv2.threshold(frame_diff, 50, 255, cv2.THRESH_BINARY)
        dilate_frame = cv2.dilate(thresh, None, iterations=2)
        frame_diff_list.append(dilate_frame)
        print(frame_diff_list)
        if len(frame_diff_list) == consecutive_frame:
            # add all the frames in the `frame_diff_list`
            sum_frames = sum(frame_diff_list)
            print(sum_frames)
            # find the contours around the white segmented areas
            contours, hierarchy = cv2.findContours(sum_frames, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # draw the contours, not strictly necessary
            for i, cnt in enumerate(contours):
                cv2.drawContours(frame, contours, i, (0, 0, 255), 3)
            for contour in contours:
                # continue through the loop if contour area is less than 500...
                # ... helps in removing noise detection
                if cv2.contourArea(contour) < 500:
                    continue
                # get the xmin, ymin, width, and height coordinates from the contours
                (x, y, w, h) = cv2.boundingRect(contour)
                # draw the bounding boxes
                cv2.rectangle(orig_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.imshow('Detected Objects', orig_frame)
            out.write(orig_frame)
            if cv2.waitKey(100) & 0xFF == ord('q'):
                break
    else:
        break
cap.release()
cv2.destroyAllWindows()
The code for background frame creation is also presented:
import numpy as np
import cv2
import matplotlib.pyplot as plt

def get_background(path):
    cap = cv2.VideoCapture(path)
    frame_indices = cap.get(cv2.CAP_PROP_FRAME_COUNT) * np.random.uniform(size=50)
    frames = []
    for idx in frame_indices:
        cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
        ret, frame = cap.read()
        frames.append(frame)
    median_frame = np.median(frames, axis=0).astype(np.uint8)
    return median_frame

#median_frame = get_background("video_1.mp4")
#cv2.imshow("Median_Background", median_frame)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#plt.show()
The code runs without errors, but the output video does not contain anything; it is just 1 KB in size. One thing I am wondering about is that this fragment
frame_diff_list.append(dilate_frame)
is highlighted in yellow in my editor. Also, when I try print(frame_diff_list), it prints just one output. I was even more surprised when I tested
print(ret)
print(frame.shape)
at the beginning of the loop, and it also printed just one output:
True
(360, 640, 3)
It seems the loop does not cover all the frames, right? Could you help me figure out what is wrong with my code?
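
One concrete issue, separate from the loop question: the script never calls out.release(), and an unfinalized VideoWriter commonly leaves a tiny, unplayable MP4. A minimal sketch of the teardown, using the variables from the script above:

# release the writer as well as the capture so the MP4
# container is finalized and becomes playable
cap.release()
out.release()
cv2.destroyAllWindows()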

Eliminate or Ignore all small or overlapping contours or rectangles inside a big contours/rectangle opencv

I want to ignore all rectangles or contours that are overlapping or inside a big rectangle. I found many solutions, but none worked in my case.
import numpy as np
import cv2
import imutils

cap = cv2.VideoCapture('rtsp://admin:admin#192.168.1.72')
#read the first frame from camera for our background
_, first_frame = cap.read()
#we'll also convert the image to grayscale since color has no bearing on our motion detection
first_gray = cv2.cvtColor(first_frame, cv2.COLOR_BGR2GRAY)
#due to tiny variations in the digital camera sensors, no two frames will be 100% the same; to account for this we apply Gaussian smoothing
first_gray = cv2.GaussianBlur(first_gray, (21, 21), 0)
open('/tmp/test.txt', 'w').close()
while(1):
    _, frame = cap.read()
    #we'll also convert the image to grayscale since color has no bearing on our motion detection
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #due to tiny variations in the digital camera sensors, no two frames will be 100% the same; to account for this we apply Gaussian smoothing
    blurFrame = cv2.GaussianBlur(gray_frame, (21, 21), 0)
    #computing the difference between two frames is a simple subtraction
    diff = cv2.absdiff(first_gray, blurFrame)
    _, thresh = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)
    #dilate the thresholded image to fill in holes
    thresh = cv2.dilate(thresh, None, iterations=2)
    #find contours on thresholded image
    contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    pixelList = []
    for contour in contours:
        if cv2.contourArea(contour) > 100:
            (x, y, w, h) = cv2.boundingRect(contour)
            pixelList.append(list((x, y, w, h)))
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    if len(pixelList) != 0:
        with open("/tmp/test.txt", "a") as myfile:
            myfile.write(str(pixelList) + '\n')
    orgFrame = cv2.resize(frame, (600, 600))
    diffFrame = cv2.resize(diff, (300, 300))
    cv2.imshow('diffFrameBlur', diff)
    cv2.imshow('frameBlur', frame)
    k = cv2.waitKey(1) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
Please look at the attached image: you will find lots of contours detected inside a big contour. I really want to eliminate all the small contours that are inside a big contour, or equivalently inside the big rectangle I draw after calculating the area.
Compare the top-left and bottom-right points of each rectangle against the other rectangles, and eliminate any rectangle that is contained in another.
Use the function below to check whether a point lies inside a rectangle.
def rectContains(rect, pt):
    inside = rect[0] < pt[0] < rect[0] + rect[2] and rect[1] < pt[1] < rect[1] + rect[3]
    return inside
Call this function for just the top-left and bottom-right points of each rectangle, and if both are contained inside another rectangle, eliminate it.
If you want to make this faster, reduce the number of comparisons.
Sort all the detected contours in order of size,
cntsSorted = sorted(cnts, key=lambda x: cv2.contourArea(x))
then start from the smallest and compare each one against the largest rectangles: basically the first element against the last element, and so on.
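
A minimal sketch of that elimination step, assuming the (x, y, w, h) lists collected in pixelList above (the helper name filterNestedRects is my own):

def filterNestedRects(rects):
    # sort by area so a rectangle can only be contained
    # by one that comes later in the list
    rects = sorted(rects, key=lambda r: r[2] * r[3])
    kept = []
    for i, (x, y, w, h) in enumerate(rects):
        contained = any(
            rectContains(big, (x, y)) and rectContains(big, (x + w, y + h))
            for big in rects[i + 1:]
        )
        if not contained:
            kept.append((x, y, w, h))
    return kept

Calling pixelList = filterNestedRects(pixelList) before drawing should leave only the outermost rectangles.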

Motion Tracker is detecting entirety of screen

I'm trying to get basic motion tracking working, to be used later in a Raspberry Pi/Arduino project. I don't know very much Python yet, but I can wrap my head around the logic of what's going on pretty well. I've been using some examples to try to get it working with my laptop's built-in camera, but it seems to be tracking the entirety of the image even when I'm outside the first frame. My guess is that the low resolution (640x480) and frame rate (6 fps) cause jitter, and the frame-to-frame differences from that jitter are what it's attempting to track. From what I've read, the Gaussian blur is supposed to take care of this, but it's not. The code runs, I can see the multiple types of processing taking place in multiple windows, and there is some motion detection going on, but it's very inconsistent and I can't troubleshoot what's going wrong.
import cv2, time

first_frame = None
video = cv2.VideoCapture(0)
a = 1
while True:
    a = a + 1
    check, frame = video.read()
    print(frame)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if first_frame is None:
        first_frame = gray
        continue
    delta_frame = cv2.absdiff(first_frame, gray)
    thresh_delta = cv2.threshold(delta_frame, 25, 255, cv2.THRESH_BINARY)[1]
    thresh_delta = cv2.dilate(thresh_delta, None, iterations=2)
    (_, cnts, _) = cv2.findContours(thresh_delta.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in cnts:
        if cv2.contourArea(contour) < 1000:
            continue
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('captureFrame', frame)
    cv2.imshow('captureGrey', gray)
    cv2.imshow('delta', delta_frame)
    cv2.imshow('thresh', thresh_delta)
    key = cv2.waitKey(1)
    if key == ord('q'):
        break
print(a)
video.release()
cv2.destroyAllWindows()
EDIT: it seems to have been a hardware problem, possibly related to automatic lighting adjustment; I cannot confirm. But buying a cheap Microsoft LifeCam VX-2000 seems to have resolved the issue.
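
Independent of the camera swap, a common software mitigation for jitter against a fixed first frame is a running-average background via cv2.accumulateWeighted, so slow drift gets absorbed into the reference instead of being detected as motion. A minimal sketch of that substitution (variable names are my own):

import cv2

video = cv2.VideoCapture(0)
avg = None  # float32 running average of the blurred grayscale frames

while True:
    check, frame = video.read()
    if not check:
        break
    gray = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (21, 21), 0)
    if avg is None:
        avg = gray.astype("float")
        continue
    # blend the current frame into the background; a small alpha
    # makes the reference adapt slowly, absorbing jitter and drift
    cv2.accumulateWeighted(gray, avg, 0.05)
    delta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
    cv2.imshow('delta', delta)
    if cv2.waitKey(1) == ord('q'):
        break

video.release()
cv2.destroyAllWindows()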

How can I make my motion detector in OpenCV less sensitive for light conditions?

I tried to make a motion detector, but as I found, it is very sensitive to lighting conditions. I would like to detect whether there was a change on a dartboard (so when somebody throws a dart, detect it).
This is how I tried it:
"""
Vecsei Gabor
"""
import cv2
#Initialize the first frame in the video stream
prevFrame = None
#Area of the detected contour, below this value it's not counted as detected
dontCare = 500
#Capture from webcam
cap = cv2.VideoCapture(0)
#Limit the FPS to 10 (For this task the lower the better)
cap.set(cv2.cv.CV_CAP_PROP_FPS, 15)
#counter for the detection
i = 0
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
#Blur for better results
output = cv2.GaussianBlur(frame, (21, 21), 0)
#If the first frame is None, initialize it
if prevFrame is None:
prevFrame = output
continue
#Compute the absolute difference between the current frame and
#First frame
frameDelta = cv2.absdiff(prevFrame, output)
#Convert to gray to detect contours
frameDelta = cv2.cvtColor(frameDelta, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(frameDelta, 21, 255, cv2.THRESH_BINARY)[1]
#Dilate the thresholded image to fill in holes, then find contours
#on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
cnts, hier = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts_sorted = sorted(cnts, key = cv2.contourArea, reverse = True)[:1]
#Loop over the contours
for c in cnts_sorted:
#If the contour is too small, ignore it
if cv2.contourArea(c) < dontCare:
continue
# compute the bounding box for the contour, draw it on the frame,
# and update the text
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
i+=1
print "Detected something!" + str(i)
print "Area: " + str(cv2.contourArea(c))
prevFrame = output
cv2.imshow('Webcam ',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
Basically I just watch for the differences between two frames, and if there was a change on the frame I save it as our new reference image, so we can detect new changes.
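
One standard way to get more robustness to lighting, not in the original post, is to replace plain frame differencing with cv2.createBackgroundSubtractorMOG2, whose shadow detection labels illumination changes as gray (value 127) so they can be thresholded away. A minimal sketch, assuming a webcam at index 0:

import cv2

cap = cv2.VideoCapture(0)
subtractor = cv2.createBackgroundSubtractorMOG2(detectShadows=True)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    mask = subtractor.apply(frame)
    # shadows are marked as 127; keep only confident foreground (255)
    # so lighting changes do not register as motion
    mask = cv2.threshold(mask, 200, 255, cv2.THRESH_BINARY)[1]
    cv2.imshow('Foreground', mask)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()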

object left but it shows a stable initial footprint along with the moving object tracker

I have written code that detects a moving object against a stable background and returns a dilated binary spot whose position can be tracked in terms of x,y coordinates using the cv2.findContours method in real time. My problem is that when I run this code it shows two spots: one is a stable spot showing the exact initial position of the object, while the other moves continuously and shows the current position in real time. I just want to show the real-time position rather than the stable spot.
import scipy.misc
import cv2
import time

cam = cv2.VideoCapture("VID_20150401_191129.3gp")
r, f1 = cam.read()
f1 = scipy.misc.imresize(f1, 0.4)
while(1):
    r2, f2 = cam.read()
    f2 = scipy.misc.imresize(f2, 0.4)
    frameDelta = cv2.absdiff(f2, f1)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=4)
    cv2.imshow('im', thresh)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
What this code does is set the initial frame (read with r, f1 = cam.read(), where f1 is the frame) as the background frame, and each frame read afterwards as the current frame. You are subtracting the first frame from all the rest. To get only the moving objects you can use another function, called backgroundUpdate,
like this:
def backgroundUpdate():
    # backgroundFrame, currentFrame and previousFrame are shared globals
    global backgroundFrame
    backgroundFrame = np.uint8((0.1 * currentFrame) + (0.9 * previousFrame))
Here the current frame is the frame just read and the previous frame is the one read before it.
So your code can be changed like this:
import numpy as np
import scipy.misc
import cv2

backgroundFrame = None
cam = cv2.VideoCapture("VID_20150401_191129.3gp")
while(1):
    r, currentFrame = cam.read()
    currentFrame = scipy.misc.imresize(currentFrame, 0.4)
    if backgroundFrame is None:
        previousFrame = currentFrame
        backgroundUpdate()
    else:
        backgroundUpdate()
        previousFrame = currentFrame
    frameDelta = cv2.absdiff(backgroundFrame, currentFrame)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=4)
    cv2.imshow('im', thresh)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
The backgroundUpdate function updates backgroundFrame throughout the capture. This gives a good result and also neglects small motions. Make sure both functions can access the shared variables; for that you can use global.
For a more robust solution you can also apply a grayscale conversion and a blur after each capture. Here is the code for that:
currentFrame = cv2.cvtColor(currentFrame, cv2.COLOR_BGR2GRAY)
currentFrame = cv2.GaussianBlur(currentFrame, (25, 25), 0)
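
Putting the pieces together, a minimal self-contained sketch of the running-background approach described above (my own variable names; the 0.1/0.9 blend follows the answer):

import cv2
import numpy as np

cam = cv2.VideoCapture("VID_20150401_191129.3gp")
background = None

while True:
    ret, frame = cam.read()
    if not ret:
        break
    # grayscale + blur makes the difference less sensitive to noise
    gray = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (25, 25), 0)
    if background is None:
        background = gray.copy()
        continue
    # blend the current frame into the background so the initial
    # footprint fades out instead of persisting forever
    background = np.uint8(0.1 * gray + 0.9 * background)
    frameDelta = cv2.absdiff(background, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=4)
    cv2.imshow('im', thresh)
    if cv2.waitKey(30) & 0xff == 27:
        break

cam.release()
cv2.destroyAllWindows()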
