Real-time face_recognition can't identify the right face - Python

I want to build a real-time attendance system with face_recognition. To test the code, I created a folder containing 20 pictures of some famous people. However, the results are bad: the program can't reliably identify the right face. What should I do to improve the code so that it identifies the right faces?
I hope to get some advice from you. Thanks a lot.
# -*- coding: utf-8 -*-
# I have installed these packages successfully
import face_recognition
import cv2
import datetime
import glob2 as gb
import numpy as np

video_capture = cv2.VideoCapture(0)

# There are 20 pictures in my local folder
img_path = gb.glob(r'F:\liuzhenya\photo\*.jpg')
# Store the name of each picture
known_face_names = []
# Store the encoding computed from each picture
known_face_encodings = []

for i in img_path:
    picture_name = i.replace('F:\\liuzhenya\\photo\\', '')
    picture_newname = picture_name.replace('.jpg', '')
    obama_img = face_recognition.load_image_file(i)
    obama_face_encoding = face_recognition.face_encodings(obama_img)[0]
    known_face_names.append(picture_newname)
    known_face_encodings.append(obama_face_encoding)

face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

while True:
    ret, frame = video_capture.read()
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    rgb_small_frame = small_frame[:, :, ::-1]
    if process_this_frame:
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            match = face_recognition.compare_faces(known_face_encodings, face_encoding)
            if True in match:
                match_index = match.index(True)
                name = "match"
                # To print name and time
                cute_clock = datetime.datetime.now()
                print(known_face_names[match_index] + ':' + str(cute_clock))
            else:
                name = "unknown"
            face_names.append(name)
    process_this_frame = not process_this_frame
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale locations back up, since detection ran on a 1/4-size frame
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), 2)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
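A note on why the matches look wrong: compare_faces returns True for every known face whose distance to the probe is within the default tolerance of 0.6, so taking the first True (and labelling the box with the literal string "match") will often pick the wrong person once several of the 20 known faces are loosely similar. A common improvement is to compute face_distance against all known encodings and keep only the closest one, ideally with a stricter tolerance. Below is a minimal sketch of such a matching helper, reusing the known_face_encodings and known_face_names built above; the 0.5 tolerance is an assumption you would need to tune:

import numpy as np
import face_recognition

def best_match(face_encoding, known_face_encodings, known_face_names, tolerance=0.5):
    # Return the closest known name, or "unknown" if nothing is close enough.
    if len(known_face_encodings) == 0:
        return "unknown"
    # Euclidean distance to every known encoding; smaller means more similar
    distances = face_recognition.face_distance(known_face_encodings, face_encoding)
    best_index = np.argmin(distances)
    if distances[best_index] <= tolerance:
        return known_face_names[best_index]
    return "unknown"

Inside the per-frame loop, name = best_match(face_encoding, known_face_encodings, known_face_names) then replaces the whole compare_faces block. Beyond thresholds, enrolling several photos per person (keeping all of their encodings under the same name) usually improves accuracy more than any single tweak.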

Related

How to video-stream on an Anvil server

@anvil.server.callable
def track_criminal(track_face_encoding):
    face_locations = []
    face_encodings = []
    face_names = []
    video_capture = cv2.VideoCapture(0)
    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()
        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces([track_face_encoding], face_encoding)
            name = "Unknown"
            face_distances = face_recognition.face_distance(track_face_encoding, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = "Detected"
            face_names.append(name)
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (255, 153, 153), 2)
            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (255, 153, 153), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (53, 255, 153), 1)
        cv2.imwrite("image.jpeg", frame)
        media_objects = anvil.media.from_file("image.jpeg", 'image/jpeg')
        yield (media_objects)
# The code above runs in the Jupyter notebook

def track_button_click(self, **event_args):
    track_image = anvil.server.call("track_encoding", self.file_loader_1.file)
    self.image_2.source = anvil.server.call("track_criminal", track_image)
# The code above runs on Anvil
I am trying to make a face detection app on Anvil, but I am not able to video-stream using my webcam.
The error I am receiving is:
anvil.server.SerializationError: Cannot serialize return value from function. Cannot serialize <class 'generator'> object at msg['response']
at track, line 22
Can anyone tell me a way to video-stream on Anvil? I tried using return earlier, with a while loop on the Anvil side that repeatedly called self.image_2.source = anvil.server.call("track_criminal", track_image) to load images until some condition was met, but I faced two problems. First, the frames were not returned continuously; it lagged and took time to shift from one image to the next. Second, after loading two or three images an axis error appeared: "AxisError: axis 1 is out of bounds for array of dimension 1".
Can anyone help me do face recognition via live streaming on Anvil?
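Two separate problems are visible here. First, an Anvil server function cannot return (or yield from) a generator: every anvil.server.call must return a serializable value, which is exactly what the SerializationError says. The usual workaround is to return one annotated frame per call and poll from the client with a Timer component. Second, the AxisError comes from passing a bare encoding to face_recognition.face_distance, which expects a list of encodings as its first argument. A minimal sketch of the server side under those assumptions (track_criminal_frame and the client-side self.track_image are hypothetical names):

import anvil.server
import anvil.media
import cv2
import face_recognition

video_capture = cv2.VideoCapture(0)  # open the camera once, not per call

@anvil.server.callable
def track_criminal_frame(track_face_encoding):
    # Return a single annotated frame as an Anvil Media object (or None).
    ret, frame = video_capture.read()
    if not ret:
        return None
    rgb_frame = frame[:, :, ::-1]
    for face_encoding in face_recognition.face_encodings(rgb_frame):
        # face_distance expects a LIST of known encodings; passing a bare
        # 128-d vector is what raises "AxisError: axis 1 is out of bounds"
        distance = face_recognition.face_distance([track_face_encoding], face_encoding)[0]
        if distance < 0.6:
            cv2.putText(frame, "Detected", (30, 30),
                        cv2.FONT_HERSHEY_DUPLEX, 1.0, (53, 255, 153), 1)
    cv2.imwrite("image.jpeg", frame)
    return anvil.media.from_file("image.jpeg", "image/jpeg")

On the Anvil side, add a Timer to the form with a short interval (say 0.2 s) and set self.image_2.source = anvil.server.call("track_criminal_frame", self.track_image) in its tick handler. Expect a few frames per second at best; Anvil's HTTP round-trips are not designed for real video streaming.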

IndexError: list index out of range with face recognition and opencv

import numpy as np
import cv2
import face_recognition as fr

cap = cv2.VideoCapture(0)
if not cap.isOpened():
    print("Could not open webcam")

person1_image = fr.load_image_file("person1.JPG")
person2_image = fr.load_image_file("person2.jpg")
person1_image_encoding = fr.face_encodings(person1_image)
person2_image_encoding = fr.face_encodings(person2_image)
known_face_encodings = [person1_image_encoding, person2_image_encoding]
known_face_names = ["person1", "person2"]

while True:
    ret, frame = cap.read()
    rgb_frame = frame[:, :, ::-1]
    face_locations = fr.face_locations(rgb_frame)
    face_encodings = fr.face_encodings(rgb_frame, face_locations)
    for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
        matches = fr.compare_faces(known_face_encodings, face_encoding)
        name = "Unknown"
        face_distances = fr.face_distance(known_face_encodings, face_encoding)
        best_match_index = np.argmin(face_distances)
        print(best_match_index)
        if matches[best_match_index]:
            name = known_face_names[best_match_index]
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    cv2.imshow('Webcam_facerecognition', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
This works until it detects a face, at which point it fails with:
if matches[best_match_index]:
IndexError: list index out of range
I printed out the match index. The values are numbers much larger than 2, like 62 or 120.
Why don't you try, instead of best_match_index = np.argmin(face_distances), something like:
best_match_index = face_distances.index(min(face_distances))
?
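That suggestion won't help as written: face_distance returns a NumPy array, which has no .index method. The root cause is earlier in the question's code: fr.face_encodings returns a list of encodings, one per face found in the image, so known_face_encodings becomes a list of lists. The distances are then computed over a flattened 2x128 structure, which is why argmin yields indices like 62 or 120, far beyond the two known people. Keeping only the first encoding from each image fixes it:

# face_encodings() returns a LIST of 128-d encodings, one per detected face;
# index [0] keeps the first, so known_face_encodings is a flat list of vectors
person1_image_encoding = fr.face_encodings(person1_image)[0]
person2_image_encoding = fr.face_encodings(person2_image)[0]
known_face_encodings = [person1_image_encoding, person2_image_encoding]

With flat encodings, compare_faces and face_distance each return one entry per known person, and best_match_index stays in range.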

How to execute an if/else statement in a for loop only once inside a while loop

Good morning. I am a beginner in Python, and I would like to ask a question about some Python code. FYI, I am currently working on face recognition with voice. My problem is that when I call the get_frame() function, the speak.tts("your name"+name,lang) line executes repeatedly and non-stop. My question is how to execute this only once when I call the function from my app.py, so it does not produce the voice repeatedly. I share my code below; if you don't understand it, let me know and I will do my best to explain, and I can add more detailed code. I hope someone can help. Thanks.
app.py
def gen(camera):
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
camera.py
class VideoCamera:
    def __init__(self, app):
        self.known_encoding_faces = aface.known_encoding_faces
        self.user_id = aface.face_user_keys
        self.faces = []
        self.test = []
        self.video_capture = cv2.VideoCapture(0)
        self.face_user_keys = {}
        self.name_face()

    def get_frame(self):
        face_locations = []
        face_encodings = []
        face_names = []
        process_this_frame = True
        success, frame = self.video_capture.read()
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        rgb_small_frame = small_frame[:, :, ::-1]
        flag = False
        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame, number_of_times_to_upsample=2)
            #print(face_locations)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
            #print(face_encodings)
            if len(face_encodings) > 0:
                face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)[0]
            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(self.known_encoding_faces, face_encodings, tolerance=0.6)
                #print(matches)
                name = "Unknown"
                # If a match was found in known_face_encodings, just use the first one.
                if True in matches:
                    first_match_index = matches.index(True)
                    name = self.faces[first_match_index]['name']
                face_names.append(name)
                #print(face_names)
        process_this_frame = not process_this_frame
        # Display the results
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            # description = ', '.join(name)
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
            # tts = gTTS(name, lang='en')
            # tts.save('tts.mp3')
            # tts = AudioSegment.from_mp3("tts.mp3")
            # subprocess.call(["ffplay", "-nodisp", "-autoexit", "tts.mp3"])
            if (val == 9):
                speak.tts("your name" + name, lang)
                break
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def __del__(self):
        self.video_capture.release()
The best approach seems to be to call get_frame() outside the loop.
If you want get_frame() to be called only once per call to gen(camera), you should not put the call inside the loop, as the loop repeatedly executes its instructions.
def gen(camera):
    frame = camera.get_frame()
    while True:
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
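Note that calling get_frame() once before the loop also freezes the stream on a single frame forever. If the goal is live video that speaks each person's name only once, another option is to remember which names have already been announced. A minimal sketch, assuming the question's speak.tts helper and lang variable are available (maybe_announce is a hypothetical name):

announced_names = set()  # names already spoken this session

def maybe_announce(name):
    # Speak a recognised name only the first time it is seen.
    if name != "Unknown" and name not in announced_names:
        announced_names.add(name)
        speak.tts("your name " + name, lang)

Calling maybe_announce(name) inside get_frame(), where the question currently calls speak.tts, keeps the video streaming while each name is spoken at most once.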

small_frame = cv2.resize(frame, (128,128)) cv2.error: OpenCV(3.4.5) error: (-215:Assertion failed) !ssize.empty() in function 'cv::resize'

I'm using Python 3.6 and I get this error:
Traceback (most recent call last):
File "C:\Users\mchaf\Music\face\facerec_from_webcam_faster.py", line 49, in
small_frame = cv2.resize(frame, (128,128))
cv2.error: OpenCV(3.4.5) C:\projects\opencv-python\opencv\modules\imgproc\src\resize.cpp:3784: error: (-215:Assertion failed) !ssize.empty() in function 'cv::resize'
What should I do to fix this issue?
Here is my code:
from distutils.core import setup
import face_recognition
from cv2 import *
import subprocess
import time

video_capture = cv2.VideoCapture(0)
obama_image = face_recognition.load_image_file("obama.jpg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
biden_image = face_recognition.load_image_file("biden.jpg")
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]
known_face_encodings = [
    obama_face_encoding,
    biden_face_encoding
]
known_face_names = [
    "Barack Obama",
    "Joe Biden"
]
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

while True:
    ret, frame = video_capture.read()
    small_frame = cv2.resize(frame, (128, 128))
    rgb_small_frame = small_frame[:, :, ::-1]
    if process_this_frame:
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"
            time.sleep(5)
            imshow("Operator", frame)
            video_capture.release()
            cv2.destroyAllWindows()
            subprocess.call([r'C:\Users\mchaf\Desktop\run.bat'])
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]
            face_names.append(name)
    process_this_frame = not process_this_frame
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
Try putting the frame processing inside this condition:
if ret:
    cv2.resize(...)
And delete the lines with
video_capture.release()
cv2.destroyAllWindows()
inside the loop. It looks like your frame is empty; that's why you can't resize it.
The reason is that you release the capture (a kind of disconnection from your webcam) inside the loop. After releasing it, you try to read a new frame, but it is empty.
Your code is obviously copied together from different places. Even if you correct these issues, your code won't work as intended. For example:
...
name = "Unknown"
time.sleep(5)  # Why sleep?
imshow("Operator", frame)  # the cv2. prefix is missing
video_capture.release()  # Why release the capture here? - the main error
cv2.destroyAllWindows()  # Why destroy the window if you want to see the result in the next loop iteration?
subprocess.call([r'C:\Users\mchaf\Desktop\run.bat'])
...
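Putting both answers together, a minimal corrected loop might look like this, assuming the per-iteration release/destroy/subprocess lines were pasted in by mistake: check ret before processing, and release the camera only after the loop ends.

while True:
    ret, frame = video_capture.read()
    if not ret:  # the camera returned no frame; skip this iteration
        continue
    small_frame = cv2.resize(frame, (128, 128))
    rgb_small_frame = small_frame[:, :, ::-1]
    # ... face detection and matching as above ...
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()  # release the camera once, after the loop
cv2.destroyAllWindows()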

OpenCV waitKey in Python doesn't work on Mac

I'm writing face recognition code in Python and I'm using OpenCV on a Mac (PyCharm).
I don't understand why:
if cv2.waitKey(1) & 0xFF == ord('q'):
    break
doesn't work on the Mac, although the same code works on Windows.
In particular, pressing q doesn't trigger the if.
I tried changing it to:
k = cv2.waitKey(0)
if k == 27:
    break
My code is below:
def run(self):
    video_capture = cv2.VideoCapture(0)
    try:
        while self.active:
            # Grab a single frame of video
            ret, frame = video_capture.read()
            # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
            rgb_frame = frame[:, :, ::-1]
            # Find all the faces and face encodings in the frame of video
            face_locations = face_recognition.face_locations(rgb_frame)
            face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
            # Loop through each face in this frame of video
            for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(self.known_face_encodings, face_encoding)
                name = "Unknown"
                # If a match was found in known_face_encodings, just use the first one.
                if True in matches:
                    first_match_index = matches.index(True)
                    name = self.known_face_names[first_match_index]
                # Draw a box around the face
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                # Draw a label with a name below the face
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
                if (name != "Unknown"):
                    self.reco.emit(name)
                    self.deactivate()
                    video_capture.release()
                    cv2.destroyAllWindows()
                else:
                    self.unreco.emit()
                    self.deactivate()
                    video_capture.release()
                    cv2.destroyAllWindows()
            # Display the resulting image
            # cv2.imshow('Video', frame)
            # Hit 'q' on the keyboard to quit!
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    except:
        print("error while recognize")
