@anvil.server.callable
def track_criminal(track_face_encoding):
    face_locations = []
    face_encodings = []
    face_names = []
    video_capture = cv2.VideoCapture(0)
    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()
        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces([track_face_encoding], face_encoding)
            name = "Unknown"
            # face_distance expects a list of known encodings; wrapping the single
            # encoding in a list avoids the AxisError described below
            face_distances = face_recognition.face_distance([track_face_encoding], face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = "Detected"
            face_names.append(name)
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            # Scale coordinates back up since the frame was resized to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (255, 153, 153), 2)
            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (255, 153, 153), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (53, 255, 153), 1)
        cv2.imwrite("image.jpeg", frame)
        media_objects = anvil.media.from_file("image.jpeg", 'image/jpeg')
        yield media_objects
# above code is on the Jupyter notebook

def track_button_click(self, **event_args):
    track_image = anvil.server.call("track_encoding", self.file_loader_1.file)
    self.image_2.source = anvil.server.call("track_criminal", track_image)
# above code is on Anvil
I am trying to make a face detection app on Anvil, but I am not able to videostream using my webcam.
The error I am receiving is:
anvil.server.SerializationError: Cannot serialize return value from function. Cannot serialize <class 'generator'> object at msg['response']
at track, line 22
Can anyone tell me a way to videostream on Anvil? I tried using return earlier, with a while loop on the Anvil side that repeatedly calls self.image_2.source = anvil.server.call("track_criminal", track_image) to load an image until some specified condition, but I faced two problems. First, the frames were not returned continuously; it lagged and took time to shift from one image to the next. Second, after loading 2-3 images an axis error appeared: "AxisError: axis 1 is out of bounds for array of dimension 1".
Can anyone help me do face recognition via livestreaming on Anvil?
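An Anvil server function cannot return a generator, which is exactly what the SerializationError reports. A minimal sketch of one workaround, assuming a hypothetical single-shot server function get_tracked_frame and a Timer component timer_1 on the form (untested, for illustration only):

# Jupyter side: do one capture/detect/draw pass per call instead of yielding
@anvil.server.callable
def get_tracked_frame(track_face_encoding):
    ret, frame = video_capture.read()  # video_capture opened once at module level
    # ... same detection and drawing code as in track_criminal above ...
    cv2.imwrite("image.jpeg", frame)
    return anvil.media.from_file("image.jpeg", 'image/jpeg')

# Anvil side: the Timer's tick event (e.g. interval=0.5) pulls a fresh frame;
# store track_image on self in track_button_click first
def timer_1_tick(self, **event_args):
    self.image_2.source = anvil.server.call("get_tracked_frame", self.track_image)

Each call then serializes one Media object, so nothing of type generator ever crosses the server boundary.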
Traceback (most recent call last):
  File "c:\Users\user\Desktop\face_recognition\face_recog.py", line 103, in <module>
    frame = face_recog.get_frame()
  File "c:\Users\user\Desktop\face_recognition\face_recog.py", line 41, in get_frame
    frame = self.capture.get_frame(self)
AttributeError: 'cv2.VideoCapture' object has no attribute 'get_frame'

def get_frame(self):
    # Grab a single frame of video
    frame = self.capture.get_frame()
    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]
    # Only process every other frame of video to save time
    if self.process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        self.face_locations = face_recognition.face_locations(rgb_small_frame)
        self.face_encodings = face_recognition.face_encodings(rgb_small_frame, self.face_locations)
        self.face_names = []
        for face_encoding in self.face_encodings:
            # See if the face is a match for the known face(s)
            distances = face_recognition.face_distance(self.known_face_encodings, face_encoding)
            min_value = min(distances)
            # tolerance: How much distance between faces to consider it a match. Lower is more strict.
            # 0.6 is typical best performance.
            name = "Unknown"
            if min_value < 0.6:
                index = np.argmin(distances)
                name = self.known_face_names[index]
            self.face_names.append(name)
    self.process_this_frame = not self.process_this_frame
    # Display the results
    for (top, right, bottom, left), name in zip(self.face_locations, self.face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    return frame

def get_jpg_bytes(self):
    frame = self.get_frame()
    # We are using Motion JPEG, but OpenCV defaults to capture raw images,
    # so we must encode it into JPEG in order to correctly display the
    # video stream.
    ret, jpg = cv2.imencode('.jpg', frame)
    return jpg.tobytes()
# show the frame
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
    break
# do a bit of cleanup
cv2.destroyAllWindows()
print('finish')
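For reference, cv2.VideoCapture provides read(), not get_frame(), which is what the AttributeError above points at; read() returns a (ret, frame) pair. A one-line sketch of the corrected grab inside get_frame():

# read() returns a success flag and the frame itself
ret, frame = self.capture.read()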
I'm working on a project that involves object detection + SORT tracking.
I have scripts to work with videos and a camera using OpenCV on the Coral dev board.
The main issue arises when making use of the VideoWriter to save the output of the detections.
For the camera script, using it drops the fps rate from 11 to 2.3, and for the video script from 6-7 to 2.
Is there a way to solve or optimize this issue?
Here is the portion of my code that grabs frames, detects and tracks, and then writes.
# Read frames
while(video.isOpened()):
    # Acquire frame and resize to expected shape [1xHxWx3]
    ret, frame = video.read()
    if not ret:
        break
    # Debug info
    frame_count += 1
    print("[INFO] Processing frame: {}".format(frame_count))
    if FLIP:
        frame = cv2.flip(frame, 1)
    if ROTATE != 0:
        frame = cv2.rotate(frame, ROTATE)  # Rotate image on given angle
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # Convert to RGB
    frame = cv2.resize(frame, (VIDEO_WIDTH, VIDEO_HEIGHT))  # resize frame to output dims
    frame_resized = cv2.resize(frame_rgb, (width, height))  # resize to fit tf model dims
    input_data = np.expand_dims(frame_resized, axis=0)
    # Normalize pixel values if using a floating model (i.e. if model is non-quantized)
    if floating_model:
        input_data = (np.float32(input_data) - input_mean) / input_std
    # Initialize writer
    if (writer is None) and (SAVE_VIDEO):
        writer = cv2.VideoWriter(VIDEO_OUTPUT, cv2.VideoWriter_fourcc(*'XVID'), args.fps, (VIDEO_WIDTH, VIDEO_HEIGHT))
    # Perform the actual detection by running the model with the image as input
    #s_detection_time = time.time()
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    #e_detection_time = time.time()
    #print("[INFO] Detection time took: {} seconds".format(e_detection_time-s_detection_time))
    # Retrieve detection results
    boxes = interpreter.get_tensor(output_details[0]['index'])[0]    # Bounding box coordinates of detected objects
    classes = interpreter.get_tensor(output_details[1]['index'])[0]  # Class index of detected objects
    scores = interpreter.get_tensor(output_details[2]['index'])[0]   # Confidence of detected objects
    #num = interpreter.get_tensor(output_details[3]['index'])[0]    # Total number of detected objects (inaccurate and not needed)
    #print("[INFO] Boxes: {}".format(boxes))
    detections = np.array([[]])
    #s_detections_loop = time.time()
    # Loop over all detections and draw detection box if confidence is above minimum threshold
    for i in range(len(scores)):
        if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):
            #print("[INFO] Box ", i, ": ", boxes[i])
            # Get bounding box coordinates and draw box
            # Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
            ymin = int(max(1, (boxes[i][0] * VIDEO_HEIGHT)))
            xmin = int(max(1, (boxes[i][1] * VIDEO_WIDTH)))
            ymax = int(min(VIDEO_HEIGHT, (boxes[i][2] * VIDEO_HEIGHT)))
            xmax = int(min(VIDEO_WIDTH, (boxes[i][3] * VIDEO_WIDTH)))
            # Calculate centroid of bounding box
            #centroid_x = int((xmin + xmax) / 2)
            #centroid_y = int((ymin + ymax) / 2)
            # Format detection for sort and append to current detections
            detection = np.array([[xmin, ymin, xmax, ymax]])
            #f.write("Box {}: {}\n".format(i, detection[:4]))
            #print("[INFO] Size of detections: ", detections.size)
            if detections.size == 0:
                detections = detection
            else:
                detections = np.append(detections, detection, axis=0)
            # Draw a circle indicating centroid
            #print("[INFO] Centroid of box ", i, ": ", (centroid_x, centroid_y))
            #cv2.circle(frame, (centroid_x, centroid_y), 6, (0, 0, 204), -1)
            # Calculate area of rectangle
            #obj_height = (ymin + ymax)
            #print("[INFO] Object height: ", obj_height)
            # Check if centroid passes ROI
            # Draw the bounding box
            #cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (0, 0, 255), 4)
            #print("[INFO] Object passing ROI")
            #print("[INFO] Object height: ", obj_height)
            #counter += 1
            #print("[INFO] Object out of ROI")
            # Draw the bounding box
            #cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 4)
            #print("[INFO] Total objects counted: ", counter)
            # Draw label
            """object_name = labels[int(classes[i])] # Look up object name from "labels" array using class index
            label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
            labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
            label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
            cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
            cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
            """
    #f.write("\n")
    #e_detection_loop = time.time()
    #print("[INFO] Detection loop time took {} seconds".format(e_detection_loop-s_detections_loop))
    #s_tracker_update = time.time()
    # Update sort tracker
    print("[INFO] Current Detections: ", detections.astype(int))
    objects_tracked = tracker.update(detections.astype(int))
    #e_tracker_update = time.time()
    #print("[INFO] Updating trackers state took {} seconds".format(e_tracker_update-s_tracker_update))
    #s_draw_tracked = time.time()
    # Process every tracked object
    for object_tracked in objects_tracked:
        if object_tracked.active:
            bbox_color = (0, 128, 255)
        else:
            bbox_color = (10, 255, 0)
        bbox = object_tracked.get_state().astype(int)
        # Draw the bbox rectangle
        cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), bbox_color, 4)
        # Calculate centroid of bounding box
        centroid = (object_tracked.last_centroid[0], object_tracked.last_centroid[1])
        # Draw the centroid
        cv2.circle(frame, centroid, 6, (0, 0, 204), -1)
        label = '{} [{}]'.format(OBJECT_NAME, object_tracked.id)  # Example: 'object [1]'
        labelSize, baseLine = cv2.getTextSize(label, FONT, 0.7, 2)  # Get font size
        label_ymin = max(bbox[1], labelSize[1] + 10)  # Make sure not to draw label too close to top of window
        cv2.rectangle(frame, (bbox[0], label_ymin-labelSize[1]-10), (bbox[0]+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED)  # Draw white box to put label text in
        cv2.putText(frame, label, (bbox[0], label_ymin-7), FONT, 0.7, (0, 0, 0), 2)  # Draw label text
    #e_draw_tracked = time.time()
    #print("[INFO] Drawing tracked objects took {} seconds".format(e_draw_tracked-s_draw_tracked))
    # Update fps count
    fps.update()
    fps.stop()
    # Prepare fps display
    fps_label = "FPS: {0:.2f}".format(fps.fps())
    cv2.rectangle(frame, (0, 0), (int(VIDEO_WIDTH*0.6), int(VIDEO_HEIGHT*0.07)), (255, 255, 255), cv2.FILLED)
    cv2.putText(frame, fps_label, (int(VIDEO_WIDTH*0.01), int(VIDEO_HEIGHT*0.05)), FONT, 1.5, (10, 255, 0), 3)
    # Prepare total and active objects count display
    total_objects_text = "TOTAL {}S: {}".format(OBJECT_NAME, tracker.total_trackers)
    active_objects_text = "ACTIVE {}S: {}".format(OBJECT_NAME, tracker.active_trackers)
    cv2.putText(frame, total_objects_text, (int(VIDEO_WIDTH*0.1+VIDEO_WIDTH*0.06), int(VIDEO_HEIGHT*0.05)), FONT, 1.5, (0, 0, 255), 3)  # Draw label text
    cv2.putText(frame, active_objects_text, (int(VIDEO_WIDTH*0.1+VIDEO_WIDTH*0.27), int(VIDEO_HEIGHT*0.05)), FONT, 1.5, (0, 128, 255), 3)  # Draw label text
    # Draw horizontal boundaries
    cv2.line(frame, (LEFT_BOUNDARY, int(VIDEO_HEIGHT*0.07)), (LEFT_BOUNDARY, VIDEO_HEIGHT), (0, 255, 255), 4)
    #cv2.line(frame, (RIGHT_BOUNDARY, 0), (RIGHT_BOUNDARY, VIDEO_HEIGHT), (0, 255, 255), 4)
    #s_trackers_state = time.time()
    tracker.update_trackers_state()
    #e_trackers_state = time.time()
    #print("[INFO] Updating trackers state took {} seconds".format(e_trackers_state-s_trackers_state))
    # All the results have been drawn on the frame, so it's time to display it.
    cv2.imshow('Object detector', frame)
    # Center window
    if not IS_CENTERED:
        cv2.moveWindow('Object detector', 0, 0)
        IS_CENTERED = True
    if SAVE_VIDEO:
        writer.write(frame)
    print("\n\n")
    # Press 'q' to quit
    if cv2.waitKey(1) == ord('q'):
        break
Thanks in advance for any help!
An important thing when trying to optimize or improve code performance is to profile and measure your code's execution. Only after identifying what is actually causing the bottleneck or performance decrease can you work on improving those sections of code. For this approach I'm assuming that you're both reading and saving frames in the same thread. So if you're facing a performance reduction due to I/O latency, this method can help; otherwise, if your problem is due to CPU processing limitations, this method will not give you a performance boost.
That being said, the approach is to use threading. The idea is to create a separate thread for obtaining the frames, as cv2.VideoCapture.read() is blocking. This can be expensive and cause latency, since the main thread has to wait until it has obtained a frame. By putting this operation into a separate thread that just focuses on grabbing frames, and processing/saving the frames in the main thread, performance improves dramatically thanks to the reduction in I/O latency. Here's a simple example of how to use threading to read frames in one thread and show/save frames in the main thread. Be sure to change capture_src to your stream.
Code
from threading import Thread
import cv2

class VideoWritingThreading(object):
    def __init__(self, src=0):
        # Create a VideoCapture object
        self.capture = cv2.VideoCapture(src)

        # Default resolutions of the frame are obtained (system dependent)
        self.frame_width = int(self.capture.get(3))
        self.frame_height = int(self.capture.get(4))

        # Set up codec and output video settings
        self.codec = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
        self.output_video = cv2.VideoWriter('output.avi', self.codec, 30, (self.frame_width, self.frame_height))

        # Start the thread to read frames from the video stream
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
        self.thread.start()

    def update(self):
        # Read the next frame from the stream in a different thread
        while True:
            if self.capture.isOpened():
                (self.status, self.frame) = self.capture.read()

    def show_frame(self):
        # Display frames in main program
        if self.status:
            cv2.imshow('frame', self.frame)

        # Press Q on keyboard to stop recording
        key = cv2.waitKey(1)
        if key == ord('q'):
            self.capture.release()
            self.output_video.release()
            cv2.destroyAllWindows()
            exit(1)

    def save_frame(self):
        # Save obtained frame into video output file
        self.output_video.write(self.frame)

if __name__ == '__main__':
    capture_src = 'your stream link!'
    video_writing = VideoWritingThreading(capture_src)
    while True:
        try:
            video_writing.show_frame()
            video_writing.save_frame()
        except AttributeError:
            pass
Good morning, I am a beginner in Python and I would like to ask a question about some Python code. FYI, I am currently working on face recognition with voice. My problem is that when I call the get_frame() function, the speak.tts("your name"+name, lang) line executes repeatedly and non-stop. My question is how to execute this only once when I call this function in my app.py, so that it will not produce the voice repeatedly. Below I share my code; if you don't understand it, let me know and I will try my best to explain, and I can add more detailed code. I hope someone can help, thanks.
app.py
def gen(camera):
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
camera.py
class VideoCamera:
    def __init__(self, app):
        self.known_encoding_faces = aface.known_encoding_faces
        self.user_id = aface.face_user_keys
        self.faces = []
        self.test = []
        self.video_capture = cv2.VideoCapture(0)
        self.face_user_keys = {}
        self.name_face()

    def get_frame(self):
        face_locations = []
        face_encodings = []
        face_names = []
        process_this_frame = True
        success, frame = self.video_capture.read()
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        rgb_small_frame = small_frame[:, :, ::-1]
        flag = False
        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame, number_of_times_to_upsample=2)
            #print(face_locations)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
            #print(face_encodings)
            if len(face_encodings) > 0:
                face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)[0]
            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(self.known_encoding_faces, face_encodings, tolerance=0.6)
                #print(matches)
                name = "Unknown"
                # If a match was found in known_face_encodings, just use the first one.
                if True in matches:
                    first_match_index = matches.index(True)
                    name = self.faces[first_match_index]['name']
                face_names.append(name)
                #print(face_names)
            process_this_frame = not process_this_frame
        # Display the results
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            # description = ', '.join(name)
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
            # tts = gTTS(name, lang='en')
            # tts.save('tts.mp3')
            # tts = AudioSegment.from_mp3("tts.mp3")
            # subprocess.call(["ffplay", "-nodisp", "-autoexit", "tts.mp3"])
            if (val == 9):
                speak.tts("your name" + name, lang)
            break
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def __del__(self):
        self.video_capture.release()
The best way seems to be to call get_frame() outside the loop.
If you want get_frame() to run only once when gen(camera) is called, you should not put the call inside the loop, as the loop will execute its body repeatedly.
def gen(camera):
    frame = camera.get_frame()
    while True:
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
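Note that this freezes the stream on a single frame. If the stream should keep updating while each person is announced only once, an alternative sketch (assuming get_frame() is changed, hypothetically, to also return the recognized names, and reusing speak.tts and lang from the question's code):

announced = set()  # names already spoken

def gen(camera):
    while True:
        # hypothetical variant of get_frame() returning (jpeg_bytes, face_names)
        frame, names = camera.get_frame()
        for name in names:
            if name != "Unknown" and name not in announced:
                announced.add(name)  # ensures speak.tts runs once per name
                speak.tts("your name " + name, lang)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')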
I'm using Python 3.6 and I get this error:

Traceback (most recent call last):
  File "C:\Users\mchaf\Music\face\facerec_from_webcam_faster.py", line 49, in <module>
    small_frame = cv2.resize(frame, (128,128))
cv2.error: OpenCV(3.4.5) C:\projects\opencv-python\opencv\modules\imgproc\src\resize.cpp:3784: error: (-215:Assertion failed) !ssize.empty() in function 'cv::resize'

What should I do to fix this issue? Here is my code:
from distutils.core import setup
import face_recognition
from cv2 import *
import subprocess
import time

video_capture = cv2.VideoCapture(0)

obama_image = face_recognition.load_image_file("obama.jpg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]

biden_image = face_recognition.load_image_file("biden.jpg")
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]

known_face_encodings = [
    obama_face_encoding,
    biden_face_encoding
]
known_face_names = [
    "Barack Obama",
    "Joe Biden"
]

face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

while True:
    ret, frame = video_capture.read()
    small_frame = cv2.resize(frame, (128, 128))
    rgb_small_frame = small_frame[:, :, ::-1]
    if process_this_frame:
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"
            time.sleep(5)
            imshow("Operator", frame)
            video_capture.release()
            cv2.destroyAllWindows()
            subprocess.call([r'C:\Users\mchaf\Desktop\run.bat'])
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]
            face_names.append(name)
    process_this_frame = not process_this_frame
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
Try putting the frame processing inside this condition:

if ret:
    cv2.resize(...)

And delete the lines inside the loop with:

video_capture.release()
cv2.destroyAllWindows()

It looks like your frame is empty, and that is why you can't resize it.
The reason is that you release the capture (a kind of disconnection) from your webcam inside the loop. After releasing, you try to read a new frame, but it is empty.
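A minimal sketch of the guarded read, for illustration:

while True:
    ret, frame = video_capture.read()
    if not ret:
        # no frame returned (camera released or disconnected), skip this pass
        continue
    small_frame = cv2.resize(frame, (128, 128))
    # ... rest of the per-frame processing ...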
Your code is obviously copied from different places. Even if you correct these errors, your code won't work.
For example:

...
name = "Unknown"
time.sleep(5)  # Why sleep?
imshow("Operator", frame)  # the cv2. prefix is missing
video_capture.release()  # Why do you release the capture? - main error
cv2.destroyAllWindows()  # Why destroy the window if you want to see the result in the next loop iteration?
subprocess.call([r'C:\Users\mchaf\Desktop\run.bat'])
...
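For comparison, a sketch of the loop shape the facerec_from_webcam_faster example is based on, with cleanup performed only after the loop exits:

while True:
    ret, frame = video_capture.read()
    if not ret:
        continue
    # detect faces, build face_names, draw boxes on frame ...
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# release the camera and close windows exactly once, after the loop
video_capture.release()
cv2.destroyAllWindows()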
I want to build a real-time attendance system with face_recognition. I created a folder with 20 pictures of famous people to test the code. However, the result is bad: the program can't reliably identify the right face. What should I do to improve the code so that it identifies the right faces?
I hope to get some advice from you. Thanks a lot.
# -*- coding: utf-8 -*-
# I have installed these packages successfully
import face_recognition
import cv2
import datetime
import glob2 as gb
import numpy as np

video_capture = cv2.VideoCapture(0)

# There are 20 pictures in my local folder
img_path = gb.glob(r'F:\liuzhenya\photo\\*.jpg')
# Store the name of each picture
known_face_names = []
# Store the encoding matrix from these pictures
known_face_encodings = []
for i in img_path:
    picture_name = i.replace('F:\liuzhenya\photo\\', '')
    picture_newname = picture_name.replace('.jpg', '')
    obama_img = face_recognition.load_image_file(i)
    obama_face_encoding = face_recognition.face_encodings(obama_img)[0]
    known_face_names.append(picture_newname)
    known_face_encodings.append(obama_face_encoding)

face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

while True:
    ret, frame = video_capture.read()
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    rgb_small_frame = small_frame[:, :, ::-1]
    if process_this_frame:
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_names = []
        for face_encoding in face_encodings:
            match = face_recognition.compare_faces(known_face_encodings, face_encoding)
            if True in match:
                match_index = match.index(True)
                name = "match"
                # To print name and time
                cute_clock = datetime.datetime.now()
                print(known_face_names[match_index] + ':' + str(cute_clock))
            else:
                name = "unknown"
            face_names.append(name)
    process_this_frame = not process_this_frame
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), 2)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
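A common refinement, following the distance-based matching used in an earlier snippet in this thread (a sketch, not a guaranteed accuracy fix): instead of taking the first True from compare_faces, pick the closest known face with face_distance and accept it only under a tolerance:

# inside the `for face_encoding in face_encodings:` loop
distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_index = np.argmin(distances)  # index of the closest known face
if distances[best_index] < 0.6:    # 0.6 is the library's usual tolerance
    name = known_face_names[best_index]
else:
    name = "unknown"
face_names.append(name)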