I have a Raspberry Pi 3 with Raspbian Jessie installed, along with OpenCV 3 and Python 3. I got a Python sample that detects faces. I need to write some text on the screen, but it doesn't get written. I want the text to appear once instead of being repeated on top of each detected face. Below is the code:
import cv2
import sys
from tkinter import *
from tkinter import messagebox

faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
video_capture = cv2.VideoCapture(0)
video_capture.set(3, 500)
video_capture.set(4, 300)
video_capture.set(12, 0.1)
frameWidth = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
frameHeight = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        #cv2.putText(frame, 'How are you', (x - 1, y - 1), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0))
        # If putText is placed here, it writes the text on top of each detected face,
        # but I only need the text to appear once.
    # Display the resulting frame
    cv2.imshow('Video', frame)
    if len(faces) > 0:
        cv2.putText(img=frame, text='How are you', org=(int(frameWidth/2 - 20), int(frameHeight/2)),
                    fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=3, color=(0, 255, 0))
        #print(int(frameWidth/2 - 20), int(frameHeight/2))
        #print('Found ' + str(len(faces)) + ' face(s)')
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
You are calling imshow before putText, so you will never see the result of putText. Move the imshow call to just before waitKey.
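For example, the tail of the loop could be reordered like this (just a sketch, reusing the variable names from the question):

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    # Write the text once per frame, before the frame is shown
    if len(faces) > 0:
        cv2.putText(frame, 'How are you', (int(frameWidth/2 - 20), int(frameHeight/2)),
                    cv2.FONT_HERSHEY_DUPLEX, 3, (0, 255, 0))
    # Display the frame only after all drawing is done
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break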
I'm currently practicing OpenCV and tried the face detection code, and it's working fine. I'd like to get notified if I'm away from the screen for more than 2 minutes. I'm trying to play an audio file while I'm away and stop it when I'm back.
import cv2 as cv
import sys
import time
import pygame

cascPath = sys.argv[1]
faceCascade = cv.CascadeClassifier(cascPath)
video_capture = cv.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv.CASCADE_SCALE_IMAGE
    )
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    # Display the resulting frame
    cv.imshow('Video', frame)
    # check if the tuple faces is empty
    if len(faces) == 0:
        start_time = time.time()
        while len(faces) == 0:
            print('person is away for ', time.time() - start_time)
            d_time = time.time()
            if d_time - start_time > 120:
                pygame.mixer.init()
                sound = pygame.mixer.Sound("Recording.mp3")
                sound.play(5)
    if cv.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv.destroyAllWindows()
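One non-blocking way to approach this (not taken from the question, just a sketch assuming pygame is installed and that the "Recording.mp3" file mentioned above exists) is to remember when a face was last seen and compare that timestamp against the current time on every iteration, instead of entering an inner loop:

import sys
import time
import cv2 as cv
import pygame

cascPath = sys.argv[1]
faceCascade = cv.CascadeClassifier(cascPath)
video_capture = cv.VideoCapture(0)

pygame.mixer.init()
pygame.mixer.music.load("Recording.mp3")  # audio file name taken from the question

last_seen = time.time()   # time a face was last detected
alarm_playing = False

while True:
    ret, frame = video_capture.read()
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5,
                                         minSize=(30, 30), flags=cv.CASCADE_SCALE_IMAGE)
    for (x, y, w, h) in faces:
        cv.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    cv.imshow('Video', frame)

    if len(faces) > 0:
        last_seen = time.time()
        if alarm_playing:
            pygame.mixer.music.stop()   # person is back, stop the audio
            alarm_playing = False
    elif time.time() - last_seen > 120 and not alarm_playing:
        pygame.mixer.music.play(-1)     # away for more than 2 minutes, loop the audio
        alarm_playing = True

    if cv.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv.destroyAllWindows()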
Last time I checked my code, the window size was normal, but when I run it now the window has become small. Does anybody know how to get it back to normal?
Here's my code:
import cv2 as cv

face_cascade = cv.CascadeClassifier('haarcascade_frontalface_default.xml')
eyeglasses_cascade = cv.CascadeClassifier('haarcascade_eye_tree_eyeglasses.xml')
cap = cv.VideoCapture(0)

while cap.isOpened():
    _, frame = cap.read()
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    for (x, y, w, h) in faces:
        cv.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 3)
        cv.putText(frame, 'Face', (x, y+h+30), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        glasses = eyeglasses_cascade.detectMultiScale(roi_gray)
        for (gx, gy, gw, gh) in glasses:
            cv.rectangle(roi_color, (gx, gy), (gx+gw, gy+gh), (0, 255, 0), 2)
    cv.imshow("img", frame)
    if cv.waitKey(1) & 0xFF == ord('x'):
        break

cap.release()
You have to tell OpenCV what size to use with the capture device:
cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_FRAME_WIDTH, 1920)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, 1080)
Note, your camera may only support certain resolutions, so it's important to check that.
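As a quick sanity check, you can read the properties back after setting them to see which resolution the driver actually accepted, for example:

cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_FRAME_WIDTH, 1920)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, 1080)
# The driver silently falls back to a supported mode if the request
# cannot be met, so read the values back to see what you really got.
actual_w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
actual_h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
print('capture resolution:', actual_w, 'x', actual_h)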
I restarted my PC and, weirdly enough, it works. The window size is now back to normal.
I ran the following code with Python, but no video display window appears. The camera light is on, but the video screen is not visible.
Operating system: Windows 10 x64
Python: 3.9.1
Source: https://github.com/GangYuanFan/Closed-Eye-Detection-with-opencv/blob/master/cv_close_eye_detect.py
import cv2

eye_cascPath = 'haarcascade_eye_tree_eyeglasses.xml'
face_cascPath = 'haarcascade_frontalface_alt.xml'
faceCascade = cv2.CascadeClassifier(face_cascPath)
eyeCascade = cv2.CascadeClassifier(eye_cascPath)

cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
while 1:
    ret, img = cap.read()
    if ret:
        frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Detect faces in the image
        faces = faceCascade.detectMultiScale(
            frame,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            # flags = cv2.CV_HAAR_SCALE_IMAGE
        )
        #print("Found {0} faces!".format(len(faces)))
        if len(faces) > 0:
            # Draw a rectangle around the faces
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            frame_tmp = img[faces[0][1]:faces[0][1] + faces[0][3], faces[0][0]:faces[0][0] + faces[0][2]:1, :]
            frame = frame[faces[0][1]:faces[0][1] + faces[0][3], faces[0][0]:faces[0][0] + faces[0][2]:1]
            eyes = eyeCascade.detectMultiScale(
                frame,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                # flags = cv2.CV_HAAR_SCALE_IMAGE
            )
            if len(eyes) == 0:
                print('no eyes!!!')
            else:
                print('eyes!!!')
            frame_tmp = cv2.resize(frame_tmp, (400, 400), interpolation=cv2.INTER_LINEAR)
            cv2.imshow('Face Recognition', frame_tmp)
    waitkey = cv2.waitKey(1)
    if waitkey == ord('q') or waitkey == ord('Q'):
        cv2.destroyAllWindows()
        break
It looks like your only cv2.imshow() call is inside the if len(faces) > 0: condition. Try placing it inside the if ret: condition instead; you should then at least see the camera window pop up.
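For example (only a sketch; just the placement of imshow changes, everything else inside the loop stays as in the question):

while 1:
    ret, img = cap.read()
    if ret:
        frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(frame, scaleFactor=1.1,
                                             minNeighbors=5, minSize=(30, 30))
        if len(faces) > 0:
            # ... face and eye handling from the question ...
            pass
        # Show the full frame on every successful read so the window always appears
        cv2.imshow('Face Recognition', img)
    waitkey = cv2.waitKey(1)
    if waitkey == ord('q') or waitkey == ord('Q'):
        cv2.destroyAllWindows()
        break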
I am working on a face detection use case and want to print a message when no face is detected in the frame, and draw the bounding box when a face is detected. I am using OpenCV for this.
Here is my code so far; please let me know what changes need to be made.
import cv2

cascPath = 'haarcascade_frontalface_dataset.xml'
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv2.VideoCapture(0)

while True:
    _, frame = video_capture.read(0)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # cv2.imshow("face detection", frame)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        if w == 0:
            cv2.putText(frame, 'No face', (0, 130), font, 1, (200, 255, 155))
        else:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # Display the resulting frame in browser
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
I figured out the solution; have a look if you want. Just add this if statement before the for loop. It writes 'No face' in the window if no face is detected:
if len(faces) == 0:
    cv2.putText(frame, 'No face', (0, 130), 4, 1, (200, 255, 155))
This guide on GeeksforGeeks can help you. You can also follow the link on tproger; it is in Russian, so I recommend using Google Translate.
https://www.geeksforgeeks.org/python-opencv-cv2-puttext-method/
https://tproger.ru/translations/opencv-python-guide/#text
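In short, cv2.putText takes the image, the text, the bottom-left corner of the text, a font face, a font scale, a colour, and optionally a thickness; a minimal standalone example:

import cv2
import numpy as np

img = np.zeros((200, 400, 3), dtype=np.uint8)  # blank image, just for illustration
cv2.putText(img, 'Hello', (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
cv2.imshow('putText demo', img)
cv2.waitKey(0)
cv2.destroyAllWindows()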
import cv2
import sys

faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
video_capture = cv2.VideoCapture(0)
img_counter = 0

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    k = cv2.waitKey(1)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.5,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    # Display the resulting frame
    cv2.imshow('FaceDetection', frame)
    if k % 256 == 27:  # ESC pressed
        break
    elif k % 256 == 32:
        # SPACE pressed
        img_name = "facedetect_webcam_{}.png".format(img_counter)
        cv2.imwrite(img_name, frame)
        print("{} written!".format(img_name))
        img_counter += 1

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
After running the code in Python, the image from my webcam turns into this:
Your code works for me without any problems.
The output image looks like there is a problem with the data type of the input image.
Something similar was discussed here:
https://answers.opencv.org/question/174027/c-ycbcr422-to-rgb-convert-from-raw-data-file/
Could you also post your input frame (the grayscale image)?
What camera are you using?
Try adding this code after video_capture.read() and post the output:
print(frame.dtype)
print(frame.shape)
Mine is: dtype - uint8, shape - (480, 640, 3).
You may have a problem with the number of channels (for example RGBA) and the rectangle function.
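If the camera really does deliver four channels, a sketch of a conversion right after the read (using the video_capture from the code above) would be:

ret, frame = video_capture.read()
print(frame.dtype, frame.shape)
# If the frame has 4 channels (e.g. BGRA), drop the alpha channel,
# since the rest of the code expects a 3-channel BGR image.
if frame is not None and len(frame.shape) == 3 and frame.shape[2] == 4:
    frame = cv2.cvtColor(frame, cv2.COLOR_BGRA2BGR)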
These are just my ideas of what could be wrong. But for me, your code works fine.
br Jozef