import cv2
import sys

faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
video_capture = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=2.0,
        minNeighbors=5,
        minSize=(30, 30),
        flags=0
        #flags=cv2.cv.CV_HAAR_SCALE_IMAGE
    )

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

    # Display the resulting frame
    cv2.imshow('Video', frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        # Write frame to file
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.imwrite('only_face.jpg', frame)
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
The above code detects faces from the live camera feed, but it only saves an image of one face. I want to save images of all the faces that are detected in the feed.
See what I have included here:
import cv2
import sys

faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
video_capture = cv2.VideoCapture(0)

increment = 0  #--- Declare a variable that will increment for every image saved

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray, scaleFactor=2.0, minNeighbors=5, minSize=(30, 30),
        flags=0
        #flags=cv2.cv.CV_HAAR_SCALE_IMAGE
    )

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

    # Display the resulting frame
    cv2.imshow('Video', frame)

    increment = increment + 1  #--- Increment the counter on every iteration

    if cv2.waitKey(1) & 0xFF == ord('q'):
        # Write frame to file
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.imwrite('only_face' + str(increment) + '.jpg', frame)  #--- The increment variable goes into the filename, so every saved image gets a distinct name and is not overwritten.
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
I have added the increment variable, which is bumped on every iteration, so each captured image is written to its own file instead of overwriting the previous one.
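If the goal is one image file per detected face rather than the whole frame, a minimal sketch would crop each detection before saving. This assumes the frame and faces variables from the loop above; the face_<n>.jpg filenames are made up for illustration:

# Sketch: save each detected face region as its own file.
# Assumes `frame` and `faces` from the capture loop; filenames are hypothetical.
for i, (x, y, w, h) in enumerate(faces):
    face_roi = frame[y:y+h, x:x+w]                     # crop the face region
    cv2.imwrite('face_' + str(i) + '.jpg', face_roi)   # one file per face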
I'm currently practicing OpenCV and tried the face recognition code, and it's working fine. I'd like to get notified if I'm away from the screen for more than 2 minutes: play an audio file while I'm away and stop it when I'm back.
import cv2 as cv
import sys
import time
import pygame

cascPath = sys.argv[1]
faceCascade = cv.CascadeClassifier(cascPath)
video_capture = cv.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv.CASCADE_SCALE_IMAGE
    )

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

    # Display the resulting frame
    cv.imshow('Video', frame)

    # check if the tuple faces is empty
    if len(faces) == 0:
        start_time = time.time()
        while len(faces) == 0:
            print('person is away for ', time.time() - start_time)
            d_time = time.time()
            if d_time - start_time > 120:
                pygame.mixer.init()
                sound = pygame.mixer.Sound("Recording.mp3")
                sound.play(5)

    if cv.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv.destroyAllWindows()
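One issue with the snippet above is that the inner while loop never re-reads the camera, so faces can never change and the loop spins forever. A minimal sketch of an alternative keeps a last_seen timestamp inside the single capture loop instead. This assumes pygame is installed and uses a hypothetical away.wav file (pygame's Sound loads WAV/OGG, not MP3):

import time
import cv2 as cv
import pygame

faceCascade = cv.CascadeClassifier('haarcascade_frontalface_default.xml')
video_capture = cv.VideoCapture(0)

pygame.mixer.init()
alarm = pygame.mixer.Sound('away.wav')   # hypothetical audio file

last_seen = time.time()   # last moment a face was visible
alarm_playing = False

while True:
    ret, frame = video_capture.read()
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5,
                                         minSize=(30, 30), flags=cv.CASCADE_SCALE_IMAGE)

    if len(faces) > 0:
        last_seen = time.time()       # reset the timer whenever a face is seen
        if alarm_playing:
            alarm.stop()              # person is back: stop the sound
            alarm_playing = False
    elif time.time() - last_seen > 120 and not alarm_playing:
        alarm.play(loops=-1)          # away for over 2 minutes: loop the sound
        alarm_playing = True

    cv.imshow('Video', frame)
    if cv.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv.destroyAllWindows()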
The last time I ran my code, the window size was normal, but now the window has become small. Does anybody know how to get it back to normal?
Here's my code:
import cv2 as cv

face_cascade = cv.CascadeClassifier('haarcascade_frontalface_default.xml')
eyeglasses_cascade = cv.CascadeClassifier('haarcascade_eye_tree_eyeglasses.xml')
cap = cv.VideoCapture(0)

while cap.isOpened():
    _, frame = cap.read()
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)

    for (x, y, w, h) in faces:
        cv.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 3)
        cv.putText(frame, 'Face', (x, y+h+30), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        glasses = eyeglasses_cascade.detectMultiScale(roi_gray)
        for (gx, gy, gw, gh) in glasses:
            cv.rectangle(roi_color, (gx, gy), (gx+gw, gy+gh), (0, 255, 0), 2)

    cv.imshow("img", frame)
    if cv.waitKey(1) & 0xFF == ord('x'):
        break

cap.release()
You have to tell OpenCV what size to use with the capture device:
cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_FRAME_WIDTH, 1920)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, 1080)
Note that your camera may only support certain resolutions, so it's important to check what it actually accepted.
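A quick way to check is to read the properties back after setting them, for example:

# Read back the resolution the camera actually agreed to use
actual_w = cap.get(cv.CAP_PROP_FRAME_WIDTH)
actual_h = cap.get(cv.CAP_PROP_FRAME_HEIGHT)
print('capture resolution:', int(actual_w), 'x', int(actual_h))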
I restarted my PC and, weirdly enough, it works. The window size is now back to normal.
I am trying to detect objects from an mp4 video file using OpenCV in Python. I am able to detect the objects in the video.
I would like to get the timestamp of the position at which the object was detected in the video file and write it to a text file.
Here is my code so far:
import cv2

my_cascade = cv2.CascadeClassifier('toy.xml')

def detect(gray, frame):
    toys = my_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in toys:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        # Logic to write time stamp to file goes here
    return frame

video_capture = cv2.VideoCapture('home.mp4')
cv2.startWindowThread()

while True:
    _, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    canvas = detect(gray, frame)
    cv2.imshow('Video', canvas)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
I tried to use VideoCapture's get() method with a property identifier, like this:
video_capture.get(CV_CAP_PROP_POS_MSEC)
but I get the error name 'CV_CAP_PROP_POS_MSEC' is not defined.
Seems like cv2 does not have this method or property identifier implemented.
Is there any other way in cv2 to implement what I want?
Please help.
SOLVED
This prints the timestamp of the position every time the object is detected in the video.
Here is the working code:
import cv2

my_cascade = cv2.CascadeClassifier('toy.xml')

def detect(gray, frame):
    toys = my_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in toys:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        print("Toy Detected at: " + str(video_capture.get(cv2.CAP_PROP_POS_MSEC)))
    return frame

video_capture = cv2.VideoCapture('home.mp4')
cv2.startWindowThread()

while True:
    _, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    canvas = detect(gray, frame)
    cv2.imshow('Video', canvas)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
cv2 does have the property implemented, but the full name needs to be used.
This should work
import cv2

my_cascade = cv2.CascadeClassifier('toy.xml')

def detect(gray, frame):
    toys = my_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in toys:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        # Logic to write time stamp to file goes here
    return frame

video_capture = cv2.VideoCapture('home.mp4')
cv2.startWindowThread()

while True:
    _, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    canvas = detect(gray, frame)
    print(video_capture.get(cv2.CAP_PROP_POS_MSEC))
    cv2.imshow('Video', canvas)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
That is, you want the property of the instance video_capture, not of the generic class VideoCapture, and the identifier needs the module prefix cv2. The property name is either CAP_PROP_POS_MSEC or CV_CAP_PROP_POS_MSEC, depending on the OpenCV version (see "Can't get VideoCapture property as the property identifiers are not defined").
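Since the original goal was to write the timestamp to a text file rather than print it, here is a minimal sketch of that step; the timestamps.txt filename is hypothetical, and the rest reuses the cascade and video from the question:

import cv2

my_cascade = cv2.CascadeClassifier('toy.xml')
video_capture = cv2.VideoCapture('home.mp4')

# Sketch: append the position (in milliseconds) of every detection to a text file.
with open('timestamps.txt', 'a') as log:
    while True:
        ok, frame = video_capture.read()
        if not ok:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        toys = my_cascade.detectMultiScale(gray, 1.3, 5)
        if len(toys) > 0:
            msec = video_capture.get(cv2.CAP_PROP_POS_MSEC)
            log.write('Toy detected at %.0f ms\n' % msec)

video_capture.release()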
I have a Raspberry Pi 3 with Raspbian Jessie installed, along with OpenCV 3 and Python 3. I got a Python sample that detects faces. I need to write some text on the screen, but it is not being drawn. I also want the text written once instead of repeated on top of each face. Below is the code:
import cv2
import sys
from tkinter import *
from tkinter import messagebox

faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
video_capture = cv2.VideoCapture(0)
video_capture.set(3, 500)
video_capture.set(4, 300)
video_capture.set(12, 0.1)

frameWidth = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
frameHeight = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        #cv2.putText(frame, 'How are you', (x - 1, y - 1), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0))
        # If putText is placed here, it writes the text on top of each detected face,
        # but I only need the text to appear once.

    # Display the resulting frame
    cv2.imshow('Video', frame)

    if len(faces) > 0:
        cv2.putText(img=frame, text='How are you',
                    org=(int(frameWidth/2 - 20), int(frameHeight/2)),
                    fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=3,
                    color=(0, 255, 0))
        #print(int(frameWidth/2 - 20), int(frameHeight/2))
        #print('Found ' + str(len(faces)) + ' face(s)')

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
You are calling imshow before putText, so you'll never see the result of putText. Move the imshow call to just before waitKey.
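Concretely, the end of the loop would look something like this (a sketch of the reordering, using the same variables as the question):

    # Draw the text first, then show the frame that now contains it
    if len(faces) > 0:
        cv2.putText(img=frame, text='How are you',
                    org=(int(frameWidth/2 - 20), int(frameHeight/2)),
                    fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=3,
                    color=(0, 255, 0))

    cv2.imshow('Video', frame)   # imshow moved after putText, just before waitKey

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break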
Using OpenCV 2, I tried to detect human faces from the webcam with the code below:
import cv2
import sys
cascPath = sys.argv[1]
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv2.VideoCapture(0)
while True:
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray,scaleFactor=1.1,minNeighbors=5,
minSize=(30, 30), flags=cv2.cv.CV_HAAR_SCALE_IMAGE)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
I am unable to execute the complete code, as I got an error at the step below:
ret, frame = video_capture.read()   # error msg: expected an indented block
Thanks,
breeze
Indentation is very important in Python; it forms the structure of the code. Your script needs to be indented something like the following:
import cv2
import sys

cascPath = sys.argv[1]
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv2.VideoCapture(0)

while True:
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1,
                                         minNeighbors=5, minSize=(30, 30),
                                         flags=cv2.cv.CV_HAAR_SCALE_IMAGE)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
To run this script for face recognition:
python myscript.py C:\opencv\build\etc\haarcascades\haarcascade_frontalface_default.xml