using opencv2, I tried to detect the human faces from webcam using the below code:-
import cv2
import sys
cascPath = sys.argv[1]
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv2.VideoCapture(0)
while True:
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray,scaleFactor=1.1,minNeighbors=5,
minSize=(30, 30), flags=cv2.cv.CV_HAAR_SCALE_IMAGE)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
I am unable to execute the complete code as i got an error at the below step:-
ret, frame = video_capture.read() //error msg **expected an intended block**
Thanks,
breeze
Indenting is very important in Python, it forms the structure of the code. Your script would need to be indented something like as follows:
import cv2
import sys
cascPath = sys.argv[1]
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv2.VideoCapture(0)
while True:
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1,
minNeighbors=5, minSize=(30, 30),
flags= cv2.cv.CV_HAAR_SCALE_IMAGE)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
To run this script for face recognition:
python myscript.py C:\opencv\build\etc\haarcascades\haarcascade_frontalface_default.xml
Related
I'm currently practicing opencv and tried the face recognition code and it's working fine. I'd like to get notified if I'm away from the screen for more than 2 mins. I'm trying to play an audio file when I'm away and stop it when I'm back.
import cv2 as cv
import sys
cascPath = sys.argv[1]
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv.VideoCapture(0)
while True:
# Capture frame-by-frame
ret, frame = video_capture.read()
gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv.CASCADE_SCALE_IMAGE
)
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
# Display the resulting frame
cv.imshow('Video', frame)
# check if the tuple faces is empty
if len(faces) == 0:
start_time = time.time()
while len(faces) == 0:
print('person is away for ',time.time()-start_time)
d_time = time.time()
if d_time-start_time > 120:
pygame.mixer.init()
sound = pygame.mixer.Sound("Recording.mp3")
sound.play(5)
if cv.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
I am trying to detect objects from an mp4 video file using OpenCV in Python. I am able to detect the objects in the video.
I would like to get the timestamp of the position at which the object was detected in the video file and write it to a text file.
Here is my code so far:
import cv2
my_cascade = cv2.CascadeClassifier('toy.xml')
def detect(gray, frame):
toys= my_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in toys:
cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
#Logic to write time stamp to file goes here
return frame
video_capture = cv2.VideoCapture('home.mp4')
cv2.startWindowThread()
while True:
_, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
canvas = detect(gray, frame)
cv2.imshow('Video', canvas)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
I tried to use VideoCapture's method get() using property identifier like this:
video_capture.get(CV_CAP_PROP_POS_MSEC)
but get an error name 'CV_CAP_PROP_POS_MSEC' is not defined
Seems like cv2 does not have this method or property identifier implemented.
Is there any other way in cv2 to implement what I want?
Please help.
SOLVED
This will print timestamp of the position of object everytime it is detected in the video.
Here is the working code:
import cv2
my_cascade = cv2.CascadeClassifier('toy.xml')
def detect(gray, frame):
toys= my_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in toys:
cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
print("Toy Detected at: "+str(video_capture.get(cv2.CAP_PROP_POS_MSEC)))
return frame
video_capture = cv2.VideoCapture('home.mp4')
cv2.startWindowThread()
while True:
_, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
canvas = detect(gray, frame)
cv2.imshow('Video', canvas)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
cv2 does have the property implemented, but the full name needs to be used.
This should work
import cv2
my_cascade = cv2.CascadeClassifier('toy.xml')
def detect(gray, frame):
toys= my_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in toys:
cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
#Logic to write time stamp to file goes here
return frame
video_capture = cv2.VideoCapture('home.mp4')
cv2.startWindowThread()
while True:
_, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
canvas = detect(gray, frame)
print (video_capture.get(cv2.CAP_PROP_POS_MSEC))
cv2.imshow('Video', canvas)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv2.destroyAllWindows()
i.e. you want the property of the instance video_capture not the generic class VideoCapture and the property name is prefixed by the class cv2.. The property name is either CAP_PROP_POS_MSECor CV_CAP_PROP_MSEC - depending on the OpenCV version (see Can't get VideoCapture property as the property identifier are not defined ).
import cv2
import sys
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
video_capture = cv2.VideoCapture(0)
while True:
# Capture frame-by-frame
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=2.0,
minNeighbors=5,A minSize=(30, 30),
flags = 0
#flags=cv2.cv.CV_HAAR_SCALE_IMAGE
)
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
# Display the resulting frame
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
# Write frame in file
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imwrite('only_face.jpg')
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
The above code is detecting faces from live feed of camera and saving image of only one face. i want to save image of multiple faces that are detected from the feed. It only saves image of one face
See what I have included here:
import cv2
import sys
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
video_capture = cv2.VideoCapture(0)
increment = 0 #---Declare a variable that will increment for every image saved
while True:
# Capture frame-by-frame
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, scaleFactor=2.0, minNeighbors=5,A minSize=(30, 30), flags = 0
#flags=cv2.cv.CV_HAAR_SCALE_IMAGE
)
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
# Display the resulting frame
cv2.imshow('Video', frame)
increment = increment + 1 #--- Initiate the increment variable
if cv2.waitKey(1) & 0xFF == ord('q'):
# Write frame in file
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imwrite('only_face' + str(increment) + '.jpg') #--- Here is where the increment variable is placed. It will be incremented for every face and thus saving every face that gets detected.
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
I have included the increment variable, which will be incremented for every face detected thus saving every image separately.
I need to understand cv2.waitkey() in python with cv2
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv2.VideoCapture(0)
while True:
# Capture frame-by-frame
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=8,
minSize=(40, 40),
#flags=cv2.cv.CV_HAAR_SCALE_IMAGE
flags = 0
)
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(gray, (x, y), (x+w, y+h), (0, 255, 0), 2)
# Display the resulting frame
cv2.imshow('Video', gray)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if cv2.waitKey(1) & 0xFF == ord('b'):
cv2.imwrite('example.png',gray)
cv2.waitKey() when I press b it doesn't save picture but press q works . Please help!
You are calling waitKey() twice. With your code, press any key but q then press b and it will save the image.
Only call waitKey(1) once, and save the result in a variable, then test that variable, e.g.:
keypress = cv2.waitKey(1)
if keypress & 0xFF == ord('q'):
break
if keypress & 0xFF == ord('b'):
cv2.imwrite('example.png',gray)
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
video_capture = cv2.VideoCapture(0)
while True:
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
frame,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.cv.CV_HAAR_SCALE_IMAGE
)
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
# Display the resulting frame
cv2.imshow('Video', frame)
if cv2.waitKey(1)==ord('e'):
break
code gives this error, please help me
Traceback (most recent call last): File
"C:\Users\yavuz\Desktop\sor\sor2.pyw", line 15, in
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) error: ........\opencv\modules\imgproc\src\color.cpp:3737: error: (-215)
scn == 3 || scn == 4 in function cv::cvtColor
You need to check if "ret == True' before going on with cvtColor. The code should look like this:
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
video_capture = cv2.VideoCapture(0)
while True:
ret, frame = video_capture.read()
if ret == True:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
frame,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.cv.CV_HAAR_SCALE_IMAGE
)
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
# Display the resulting frame
cv2.imshow('Video', frame)
if cv2.waitKey(1)==ord('e'):
break