Threshold + Contours (video) - python

My program detects moving objects in a video scene using thresholding. I want to add contour detection to it. I built this code from scratch, and I'm now stuck on the 'findContours' and 'drawContours' part.
Problems:
Can't draw contours on the detected objects (fixed)
ValueError: too many values to unpack (findContours)
This is my code:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
history = 150
varThreshold = 18
fgbg = cv2.createBackgroundSubtractorMOG2()

while(1):
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
    img_, contours, hierarchy = cv2.findContours(fgmask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
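For reference, cv2.findContours returns three values in OpenCV 3.x but only two in 4.x, which is the usual cause of this unpacking error. A minimal sketch of a version-agnostic loop body, assuming the goal is to draw the contours on the colour frame:

# Take only the last two returned values so this works on both OpenCV 3.x and 4.x
contours, hierarchy = cv2.findContours(fgmask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
# Draw the detected contours on the colour frame rather than on the mask
cv2.drawContours(frame, contours, -1, (0, 255, 0), 2)
cv2.imshow('frame', frame)
cv2.imshow('mask', fgmask)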

Related

How do I add an image overlay to my live video using cv2?

This is my code. I've looked at some tutorials but can't find what I'm looking for.
I want to overlay the Frame.png image on my webcam feed. I tried adding the image directly, but that didn't work either. If possible, is there a way to add an image, not as an overlay, but to keep it at a fixed coordinate in the live webcam window?
import cv2
import numpy as np
import time

def detect_and_save():
    alpha = 0.2
    beta = 1 - alpha
    cap = cv2.VideoCapture(0)
    sciframe = cv2.imread('Frame.png')
    classifier = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    while True:
        ret, frame = cap.read()
        if not ret:
            continue
        overlay = frame.copy()
        output = frame.copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = classifier.detectMultiScale(gray, 1.5, 5)
        cv2.putText(output, "HUD Test", (175, 50), cv2.FONT_HERSHEY_SIMPLEX,
                    2, (255, 50, 50), 3)
        for face in faces:
            x, y, w, h = face
            cv2.rectangle(overlay, (x, y), (x + w, y + h), (255, 200, 0), -1)
            cv2.rectangle(overlay, (x, y), (x + w, y + h), (255, 0, 0), 1)
            cv2.rectangle(overlay, (x, y - 20), (x + w, y), (25, 20, 0), -1)
            cv2.addWeighted(overlay, alpha, output, beta, 0, output)
            cv2.putText(output, "Human", (x + 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                        0.35, (0, 0, 255), 1)
        cv2.imshow("HUD", output)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('s'):
            cv2.imwrite('./images/CID_{}.png'.format(time.strftime('%d%m%y_%H_%M_%S')), output)
    cap.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    detect_and_save()
You can directly place one image on top of another at any coordinate in OpenCV.
cap = cv2.VideoCapture(0)
im_height = 50  # define your top image size here
im_width = 50
im = cv2.resize(cv2.imread("Frame.png"), (im_width, im_height))

while True:
    ret, frame = cap.read()
    # top-left corner: 0:50 and 0:50 for my image; select your own region here, e.g. 200:250
    frame[0:im_height, 0:im_width] = im
    cv2.imshow("live camera", frame)
    if cv2.waitKey(1) == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
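If a semi-transparent overlay is wanted instead of a hard paste, one option (not part of the original answer, just a sketch) is to blend the image into the same region with cv2.addWeighted:

# Inside the capture loop: blend the small image into the top-left region instead of overwriting it
roi = frame[0:im_height, 0:im_width]
blended = cv2.addWeighted(roi, 0.5, im, 0.5, 0)  # 50/50 blend; tune the weights as needed
frame[0:im_height, 0:im_width] = blended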

Avoid foreground becoming background with the OpenCV background subtractor

I want to make a hand detector using OpenCV. I've created a background subtractor using the following code:
import cv2
import numpy as np

global fgMask
camera = cv2.VideoCapture(0)
backSub = cv2.createBackgroundSubtractorMOG2(detectShadows=False)
firstImage = True
crop_width = 300
crop_height = 300
camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1024)
camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 1024)

while True:
    try:
        ret, image = camera.read()
        image = cv2.flip(image, 1)
        roi = image[0:crop_height, 0:crop_width]
        if firstImage:
            fgMask = backSub.apply(roi)
            firstImage = False
        else:
            fgMask = backSub.apply(roi, None, 0)
        cv2.imshow("Original", image)
        cv2.imshow("Mask", fgMask)
        cv2.imshow("Roi", roi)
        k = cv2.waitKey(10)
        if k == 27:  # press ESC to exit
            camera.release()
            cv2.destroyAllWindows()
            break
    except Exception as ex:
        print(ex)
I feed the model with frames for a few seconds using the apply method so that it learns the background, and the mask it produces is black (everything is OK at this point).
When I put my hand in, the mask is fine,
but after a while the hand begins to disappear.
I have read that you can set the learningRate parameter to 0 so that the model stops training on new frames, but I get the same result (the hand disappears after a while). I've tried different learning rates but the result is always the same.
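For reference, a minimal sketch of the warm-up-then-freeze approach described above, using the learningRate keyword instead of the positional None, 0; the fixed warm-up length is an assumption for illustration:

WARMUP_FRAMES = 60  # hypothetical warm-up length; adjust as needed

# Let the model learn the background with the default learning rate
for i in range(WARMUP_FRAMES):
    ret, image = camera.read()
    roi = cv2.flip(image, 1)[0:crop_height, 0:crop_width]
    backSub.apply(roi)

# Then freeze the model: learningRate=0 means no further updates
while True:
    ret, image = camera.read()
    roi = cv2.flip(image, 1)[0:crop_height, 0:crop_width]
    fgMask = backSub.apply(roi, learningRate=0)
    cv2.imshow("Mask", fgMask)
    if cv2.waitKey(10) == 27:
        break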

cv2 issue for face-detection algorithm

I have a face detection program.
I tried to run the code but it's not working.
import cv2
import numpy as np

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainer/trainer.yml")
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
cam = cv2.VideoCapture(0)
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
fontColor = (255, 255, 255)

while True:
    ret, im = cam.read()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.2, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x + w, y + h), (225, 0, 0), 2)
        Id, conf = recognizer.predict(gray[y:y + h, x:x + w])
        if conf < 50:
            if Id == 1:
                Id = "chandra"
            elif Id == 2:
                Id = "vamsi"
        else:
            Id = "Unknown"
        cv2.putText(im, str(Id), (x, y + h), fontFace, fontScale, fontColor)
    cv2.imshow('im', im)
    if cv2.waitKey(10) == ord('q'):
        break

cam.release()
cv2.destroyAllWindows()
I got this error:
(-215:Assertion failed) !empty() in function 'cv::CascadeClassifier::detectMultiScale'
I'm using OpenCV (cv2) and Python 3.7.
Maybe this will help:
try:
    ret, im = cam.read()
except:
    continue
Please use the code below to check whether you are getting any images from your camera:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Display the resulting frame
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
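For reference, the '!empty()' assertion in the question usually means the cascade XML could not be found at the given path, so detectMultiScale runs on an empty classifier. A minimal check; the cv2.data.haarcascades path assumes the opencv-python package is installed:

import cv2

# Load the cascade from the data directory bundled with opencv-python (assumption)
cascadePath = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)

# If the XML was not found, the classifier is silently empty; check before using it
if faceCascade.empty():
    raise IOError("Could not load cascade file: " + cascadePath)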

OpenCV - Coloring a specific area during video capture?

Here is my code:
import cv2
import numpy as np

cap = cv2.VideoCapture(1)
fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
framesize = (640, 480)
out = cv2.VideoWriter('dump.avi', fourcc, 60.0, framesize)
font = cv2.FONT_HERSHEY_SIMPLEX

while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame', frame)
    cv2.rectangle(gray, (0, 0), (640, 480), (255, 255, 255), 3)
    cv2.putText(gray, "gray", (0, 130), font, 5, (255, 255, 255), 2, cv2.LINE_AA)
    cv2.imshow('fr', gray)

    # Here I am trying to color a specific square area on the live image feed
    #gray[100:105,110:115] = [255,255,255]
    io = gray[37:111, 107:194]

    # Here I am cloning an area into another
    gray[200:200, 270:283] = io

    out.write(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()
How can I color a specific area? My attempt above does not work.
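Two things stand out in the attempt: gray is single-channel, so a region of it cannot be assigned a BGR triplet (assign a scalar, or work on the colour frame instead), and gray[200:200, 270:283] is an empty slice whose shape cannot match io. A minimal sketch of colouring and cloning regions inside the capture loop; the coordinates are only examples:

# Paint a filled square white on the colour frame (rows 100:150, cols 110:160)
frame[100:150, 110:160] = (255, 255, 255)

# For the grayscale image, assign a scalar intensity instead of a BGR triplet
gray[100:150, 110:160] = 255

# Clone one region into another: source and destination must have the same shape
patch = frame[37:111, 107:194].copy()   # 74 x 87 pixels
frame[200:274, 270:357] = patch         # also 74 x 87 pixels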

Creating Bounding Box across an object in a video

Opencv with Python!
I am trying to create bounding boxes around objects in a video. I have already used the background subtraction function, and I am using the findContours function. The code detects the edges of the 'bus' in the video and creates a bounding box, but it also detects the edges of the bus windows and creates a bounding box for each window. I just need a single bounding box around the bus.
import numpy as np
import cv2

cap = cv2.VideoCapture("C:\\Python27\\clip1.avi")
fgbg = cv2.BackgroundSubtractorMOG()

while(1):
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    # res,thresh = cv2.threshold(fgmask,127,255,0)
    kernel = np.ones((10, 10), np.uint8)
    dilation = cv2.dilate(fgmask, kernel, iterations=1)
    erosion = cv2.erode(fgmask, kernel, iterations=1)
    contours, hierarchy = cv2.findContours(fgmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for i in range(0, len(contours)):
        if (i % 1 == 0):
            cnt = contours[i]
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.drawContours(fgmask, contours, -1, (255, 255, 0), 3)
            cv2.rectangle(fgmask, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.imshow('frame', fgmask)
    cv2.imshow("original", frame)
    if cv2.waitKey(30) == ord('a'):
        break

cap.release()
cv2.destroyAllWindows()
import cv2
import numpy as np

# img.png is the fgmask
img = cv2.imread('img.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, th1 = cv2.threshold(gray, 25, 255, cv2.THRESH_BINARY)
_, contours, hierarchy = cv2.findContours(th1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

cv2.imshow('image1', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
RESULTS
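If smaller contours (such as the windows) still produce boxes, one common refinement, not part of the original answer, is to keep only the largest external contour, assuming the bus is the biggest foreground blob:

# After findContours: keep only the largest external contour and box it
if contours:
    largest = max(contours, key=cv2.contourArea)
    x, y, w, h = cv2.boundingRect(largest)
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)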
