import os

import cv2
import face_recognition

KNOWN_FACES_DIR = 'Known'
TOLERANCE = 0.6
FRAME_THICKNESS = 3
FONT_THICKNESS = 2
MODEL = 'cnn'  # 'cnn' is accurate but very slow without a GPU; 'hog' is much faster on CPU

video = cv2.VideoCapture(0)

print('Loading known faces...')
known_faces = []
known_names = []
for name in os.listdir(KNOWN_FACES_DIR):
    for filename in os.listdir(f'{KNOWN_FACES_DIR}/{name}'):
        image = face_recognition.load_image_file(f'{KNOWN_FACES_DIR}/{name}/{filename}')
        encoding = face_recognition.face_encodings(image)[0]
        known_faces.append(encoding)
        known_names.append(name)

while True:
    ret, image = video.read()
    if not ret:
        break
    # OpenCV delivers BGR frames; face_recognition expects RGB
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    locations = face_recognition.face_locations(rgb_image, model=MODEL)
    encodings = face_recognition.face_encodings(rgb_image, locations)
    print(f'Found {len(encodings)} face(s)')
    for face_encoding, face_location in zip(encodings, locations):
        results = face_recognition.compare_faces(known_faces, face_encoding, TOLERANCE)
        match = None
        if True in results:
            match = known_names[results.index(True)]
            print(f' - {match} from {results}')
            top_left = (face_location[3], face_location[0])
            bottom_right = (face_location[1], face_location[2])
            cv2.rectangle(image, top_left, bottom_right, (0, 255, 0), FRAME_THICKNESS)
            top_left = (face_location[3], face_location[2])
            bottom_right = (face_location[1], face_location[2] + 22)
            cv2.rectangle(image, top_left, bottom_right, (0, 255, 0), cv2.FILLED)
            cv2.putText(image, match, (face_location[3] + 10, face_location[2] + 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (200, 200, 200), FONT_THICKNESS)
    cv2.imshow('Video', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video.release()
cv2.destroyAllWindows()
I am using the face_recognition library for a face recognition project. This gives me the desired result, but the window keeps going into "not responding": it shows the result on a single frame only after hanging for 20-30 seconds. Is it because my laptop specs are not good, or does the code require some changes?
https://www.youtube.com/watch?v=PdkPI92KSIs&list=RDCMUCfzlCWGWYyIQ0aLC5w48gBQ&index=2
This YouTube video shows how the result should look.
Why don't you use deepface? Here, db_path is the location where you stored the facial images.
#!pip install deepface
from deepface import DeepFace
DeepFace.stream(db_path = "C:/my_db")
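If you'd rather keep the face_recognition approach from the question, the two usual speedups are switching MODEL from 'cnn' to 'hog' (the CNN detector is extremely slow without GPU-enabled dlib, which fits the 20-30 second freezes) and running detection on a downscaled copy of each frame. A rough sketch of just the detection loop (variable names are illustrative):

import cv2
import face_recognition

SCALE = 0.25  # detect on a quarter-size frame, then map boxes back up

video = cv2.VideoCapture(0)
while True:
    ret, frame = video.read()
    if not ret:
        break
    small = cv2.resize(frame, (0, 0), fx=SCALE, fy=SCALE)
    rgb_small = cv2.cvtColor(small, cv2.COLOR_BGR2RGB)  # face_recognition expects RGB
    locations = face_recognition.face_locations(rgb_small, model='hog')  # CPU-friendly detector
    for top, right, bottom, left in locations:
        # scale coordinates back to the full-size frame
        top, right, bottom, left = [int(v / SCALE) for v in (top, right, bottom, left)]
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video.release()
cv2.destroyAllWindows()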
The problem is that the program is really lengthy and computationally expensive, so is there any way to make it faster, or a different way to write this code?
I am a beginner in Python and would love to hear any suggestions or different approaches. I am also new to Stack Overflow, so if anything is wrong with this post or there is an issue in the program, please point it out in a comment.
The first section of the code is:
#TEST V2.1 multitracker
import cv2
import numpy as np
#path = (input("enter the video path: "))
cap = cv2.VideoCapture(" YOUR VIDEO PATH ")
# Dictionary of all the OpenCV tracker constructors that can be used for tracking later
OBJECT_TRACKING_MACHINE = {
    "csrt": cv2.legacy.TrackerCSRT_create,
    "kcf": cv2.legacy.TrackerKCF_create,
    "boosting": cv2.legacy.TrackerBoosting_create,
    "mil": cv2.legacy.TrackerMIL_create,
    "tld": cv2.legacy.TrackerTLD_create,
    "medianflow": cv2.legacy.TrackerMedianFlow_create,
    "mosse": cv2.legacy.TrackerMOSSE_create
}
# Creating the MultiTracker object that will hold all the individual trackers
trackers = cv2.legacy.MultiTracker_create()
Here I start the loop:
while True:
    frame = cap.read()[1]
    if frame is None:
        print("Error getting the video, please check the input")
        break
    frame = cv2.resize(frame, (1080, 720))
    Thresh = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #Thresh = cv2.adaptiveThreshold(gray, 185, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 11, 6)
    (success, boxes) = trackers.update(Thresh)
    # If any tracker failed, keep only the non-empty boxes and rebuild the MultiTracker
    if success == False:
        bound_boxes = trackers.getObjects()
        idx = np.where(bound_boxes.sum(axis=1) != 0)[0]
        bound_boxes = bound_boxes[idx]
        trackers = cv2.legacy.MultiTracker_create()
        for bound_box in bound_boxes:
            # each re-added box needs a fresh tracker instance
            trackers.add(OBJECT_TRACKING_MACHINE['mosse'](), Thresh, bound_box)
    x, y, w, h = cv2.boundingRect(Thresh)
    k = cv2.waitKey(50)
And I am guessing this is the section that is making the program slow. Is there any different way to write this part, or any idea different from this?
    # loop over the bounding boxes and draw them on the frame
    for i, box in enumerate(boxes):
        (x, y, w, h) = [int(v) for v in box]
        #cv2.rectangle(Thresh, (x, y), (x + w, y + h), (255, 255, 255), 2)
        #cv2.putText(Thresh, ('TRACKING BOX NO-' + str(i)), (x + 10, y - 3), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 0), 2)
        arr = boxes.astype(int)
        if i == 0:
            Roi = Thresh[arr[i, 1]:arr[i, 1] + arr[i, 3], arr[i, 0]:arr[i, 0] + arr[i, 2]]
            murg = cv2.resize(Roi, (300, 200))
            cv2.imshow("horizon", murg)
        if i == 1:
            Roi1 = Thresh[arr[i, 1]:arr[i, 1] + arr[i, 3], arr[i, 0]:arr[i, 0] + arr[i, 2]]
            Roi = Thresh[arr[i - 1, 1]:arr[i - 1, 1] + arr[i - 1, 3], arr[i - 1, 0]:arr[i - 1, 0] + arr[i - 1, 2]]
            murg = cv2.resize(Roi, (300, 200))
            murg1 = cv2.resize(Roi1, (300, 200))
            hori = np.concatenate((murg, murg1), axis=1)
            cv2.imshow("horizon", hori)
        elif i == 2:
            Roi2 = Thresh[arr[i, 1]:arr[i, 1] + arr[i, 3], arr[i, 0]:arr[i, 0] + arr[i, 2]]
            Roi1 = Thresh[arr[i - 1, 1]:arr[i - 1, 1] + arr[i - 1, 3], arr[i - 1, 0]:arr[i - 1, 0] + arr[i - 1, 2]]
            Roi = Thresh[arr[i - 2, 1]:arr[i - 2, 1] + arr[i - 2, 3], arr[i - 2, 0]:arr[i - 2, 0] + arr[i - 2, 2]]
            murg = cv2.resize(Roi, (300, 200))
            murg1 = cv2.resize(Roi1, (300, 200))
            murg2 = cv2.resize(Roi2, (300, 200))
            hori = np.concatenate((murg, murg1, murg2), axis=1)
            cv2.imshow("horizon", hori)
        elif i == 3:
            Roi3 = Thresh[arr[i, 1]:arr[i, 1] + arr[i, 3], arr[i, 0]:arr[i, 0] + arr[i, 2]]
            Roi2 = Thresh[arr[i - 1, 1]:arr[i - 1, 1] + arr[i - 1, 3], arr[i - 1, 0]:arr[i - 1, 0] + arr[i - 1, 2]]
            Roi1 = Thresh[arr[i - 2, 1]:arr[i - 2, 1] + arr[i - 2, 3], arr[i - 2, 0]:arr[i - 2, 0] + arr[i - 2, 2]]
            Roi = Thresh[arr[i - 3, 1]:arr[i - 3, 1] + arr[i - 3, 3], arr[i - 3, 0]:arr[i - 3, 0] + arr[i - 3, 2]]
            murg = cv2.resize(Roi, (300, 200))
            murg1 = cv2.resize(Roi1, (300, 200))
            murg2 = cv2.resize(Roi2, (300, 200))
            murg3 = cv2.resize(Roi3, (300, 200))
            hori = np.concatenate((murg, murg1, murg2, murg3), axis=1)
            cv2.imshow("horizon", hori)
This section is so that I can print the ROI matrix and select the ROI:
if k == ord("1"):
print(murg)
if k == ord("2"):
print(murg1)
if k == ord ("3"):
print(murg2)
if k == ord("4"):
print(murg3)
cv2.imshow('Frame', Thresh)
if k == ord("e"):
break
if k == ord("s"):
roi = cv2.selectROI("Frame", Thresh, fromCenter=False,showCrosshair=False)
tracker = OBJECT_TRACKING_MACHINE['mosse']()
trackers.add(tracker, Thresh, roi)
#print(boxes,success)
cap.release()
cv2.destroyAllWindows()
When you run this code you can extract up to four ROI frames, each tracking one of your ROIs (I haven't added a guard for an empty matrix, so it will give you an error if you select more than four ROIs).
My end goal is to extract those ROI videos for image processing (this code is not done yet, and more image processing is going to happen in a later part).
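As an aside on the repetitive if/elif display section: the same behaviour could be collapsed into one loop that gathers the resized ROIs into a list and concatenates whatever is present. This is only a sketch of the idea, reusing Thresh, boxes, and k from the code above and assuming at most four boxes:

rois = []
arr = boxes.astype(int)
for (x, y, w, h) in arr[:4]:  # cap at four boxes, like the original branches
    roi = Thresh[y:y + h, x:x + w]
    if roi.size:  # skip empty crops instead of crashing
        rois.append(cv2.resize(roi, (300, 200)))
if rois:
    cv2.imshow("horizon", np.concatenate(rois, axis=1))
# printing a given ROI matrix becomes an index lookup on keys '1'..'4'
if ord("1") <= k <= ord("4") and (k - ord("1")) < len(rois):
    print(rois[k - ord("1")])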
I am new to Python. Currently I am working on a project to detect faces using RetinaFace and OpenCV, with the retinaface wrapper from the retinaface Git repo. Below is my code. I tried to run it on an .mov video file and got this error: TypeError: detect_face() missing 1 required positional argument: 'img'. I need some help on this. Thank you.
from retinaface import RetinaFace
import cv2

def build_model():
    from retinaface import RetinaFace
    face_detector = RetinaFace.build_model()
    return face_detector

def detect_face(face_detector, img, align=True):
    from retinaface import RetinaFace
    from retinaface.commons import postprocess

    resp = []
    # The BGR2RGB conversion will be done in the preprocessing step of retinaface.
    # img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # retinaface expects RGB but OpenCV reads BGR
    """
    face = None
    img_region = [0, 0, img.shape[0], img.shape[1]]  # Really?
    faces = RetinaFace.extract_faces(img_rgb, model=face_detector, align=align)
    if len(faces) > 0:
        face = faces[0][:, :, ::-1]
    return face, img_region
    """
    obj = RetinaFace.detect_faces(img, model=face_detector, threshold=0.9)
    if type(obj) == dict:
        for key in obj:
            identity = obj[key]
            facial_area = identity["facial_area"]
            y = facial_area[1]
            h = facial_area[3] - y
            x = facial_area[0]
            w = facial_area[2] - x
            img_region = [x, y, w, h]
            #detected_face = img[int(y):int(y + h), int(x):int(x + w)]  # opencv
            detected_face = img[facial_area[1]:facial_area[3], facial_area[0]:facial_area[2]]
            if align:
                landmarks = identity["landmarks"]
                left_eye = landmarks["left_eye"]
                right_eye = landmarks["right_eye"]
                nose = landmarks["nose"]
                #mouth_right = landmarks["mouth_right"]
                #mouth_left = landmarks["mouth_left"]
                detected_face = postprocess.alignment_procedure(detected_face, right_eye, left_eye, nose)
            resp.append((detected_face, img_region))
    return resp

path = "database/images/org_3fc67bdc820dc3c1_1647514190000.mov"
cap = cv2.VideoCapture(path)

# Video
while True:
    ret, img = cap.read()
    img = cv2.resize(img, (640, 360))
    img = detect_face(img)
    cv2.imshow('Face detector', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
Download or clone this repo and run the pose_detection_retinaface.py file. This is the complete code for using RetinaFace for face detection.
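As for the TypeError itself: the loop in the question calls detect_face(img), but the function is defined as detect_face(face_detector, img, align=True), so the detector argument is missing. A minimal sketch of the fix, reusing the question's build_model and detect_face and guarding the read (the rectangle drawing is illustrative; detect_face returns (face, region) tuples, not an image):

face_detector = build_model()  # build the RetinaFace model once, outside the loop

cap = cv2.VideoCapture(path)
while True:
    ret, img = cap.read()
    if not ret:  # stop cleanly at the end of the file
        break
    img = cv2.resize(img, (640, 360))
    faces = detect_face(face_detector, img)
    for detected_face, (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('Face detector', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()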
I am trying to learn face detection, and I got this code from a GeeksforGeeks tutorial. However, when I run one of the two files, it shows the error AttributeError: module 'cv2' has no attribute 'LBPHFaceRecognizer_create'. I tried uninstalling OpenCV, running pip install opencv-contrib-python, and reinstalling OpenCV, then running it again. I am currently on OpenCV 4.5.5. The tutorial advised removing the '.face' from cv2.face.LBPHFaceRecognizer_create() for cv2; however, when I run it with .face, it displays module 'cv2' has no attribute 'face'. Please, someone, help me with this.
# It helps in identifying the faces
import cv2, sys, numpy, os
from cv2 import *

size = 4
haar_file = 'haarcascade_frontalface_default.xml'
datasets = 'datasets'

# Part 1: Create fisherRecognizer
print('Recognizing Face. Please be in sufficient light...')

# Create a list of images and a list of corresponding names
(images, labels, names, id) = ([], [], {}, 0)
for (subdirs, dirs, files) in os.walk(datasets):
    for subdir in dirs:
        names[id] = subdir
        subjectpath = os.path.join(datasets, subdir)
        for filename in os.listdir(subjectpath):
            path = subjectpath + '/' + filename
            label = id
            images.append(cv2.imread(path, 0))
            labels.append(int(label))
        id += 1
(width, height) = (130, 100)

# Create a Numpy array from the two lists above
(images, labels) = [numpy.array(lis) for lis in [images, labels]]

# OpenCV trains a model from the images
# NOTE FOR OpenCV2: remove '.face'
model = cv2.LBPHFaceRecognizer_create()
model.train(images, labels)

for i in range(0, 20):
    if i < 10:
        print(i)
        i += 1
    else:
        print('Done with it')

# Part 2: Use fisherRecognizer on camera stream
face_cascade = cv2.CascadeClassifier(haar_file)
webcam = cv2.VideoCapture(0)
while True:
    (_, im) = webcam.read()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (width, height))
        # Try to recognize the face
        prediction = model.predict(face_resize)
        cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 3)
        if prediction[1] < 500:
            cv2.putText(im, '%s - %.0f' % (names[prediction[0]], prediction[1]), (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
        else:
            cv2.putText(im, 'not recognized', (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
    cv2.imshow('OpenCV', im)
    key = cv2.waitKey(10)
    if key == 27:
        break
I think you may need to explicitly state cv2.face, not just face:
model = cv2.face.LBPHFaceRecognizer_create()
Before doing so, did you confirm that the version of OpenCV you have installed contains the face module? You can check like this:
import cv2

functions = dir(cv2)
for f in functions:
    print(f)
and if not, install it like this:
pip uninstall opencv-contrib-python
pip install opencv-contrib-python
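A quicker check than reading through the whole dir() listing, just as a small convenience:

import cv2

# True only when a contrib build with the face module is installed
print(hasattr(cv2, 'face'))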
I am trying to write and save the video of my object detection. I have referred to and tried the approaches posted here on Stack Overflow, but none of them works.
My specifications:
Python 3.7.9
OpenCV 4.5.1
The code is as below:
import cv2
from main import *
import imutils
import math

darknetmain = darknet_main()
darknetmain.setGPU(is_GPU=True)
saveVid = "./data/test_sampah_tasik.mp4"
video = cv2.VideoCapture(0)
width = 270
height = 480
size = (width, height)
result = cv2.VideoWriter('21_output.mpeg', cv2.VideoWriter_fourcc(*'MPJG'), 20, size)

# initialize the known distance from the camera to the object, which
# in this case is 24 inches
KNOWN_DISTANCE = 12.0
# initialize the known object width, which in this case, the piece of
# paper is 11.81 inches wide
KNOWN_WIDTH = 12.0

if video.isOpened():
    while True:
        res, cv_img = video.read()
        if res == True:
            k = cv_img[90:360, 90:570]
            imcaptions, boundingBoxs = darknetmain.performDetect(k)
            if len(imcaptions) > 0:
                for i in range(len(imcaptions)):
                    k = cv2.rectangle(k, boundingBoxs[i][0], boundingBoxs[i][2], (0, 255, 0), 2)
                    k = cv2.putText(k, imcaptions[i], boundingBoxs[i][0], cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255))
                    startPoint = (180, 570)
                    dividePoint = [x for x in boundingBoxs[i][2]]
                    dividePoint[0] = dividePoint[0] - 50
                    dividePoint[1] = dividePoint[1] - 50
                    if dividePoint[0] > 160:
                        print("Right")
                    elif dividePoint[0] <= 160:
                        print("Left")
                    centerPoint = (int(dividePoint[0]), int(dividePoint[1]))
                    k = cv2.line(k, startPoint, centerPoint, (0, 255, 0), 2)
                result.write(k)
                cv2.imshow("result", k)
            else:
                result.write(k)
                cv2.imshow("result", k)
            key = cv2.waitKey(1)
            if key == 27 or key & 0xFF == ord('q'):
                break
        else:
            break
else:
    print("Cannot read the video file")
The error that I received is:
OpenCV: FFMPEG: tag 0x4745504d/'MPEG' is not supported with codec id 2 and format 'mpeg / MPEG-1 Systems / MPEG program stream'
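For what it's worth, 'MPJG' in the cv2.VideoWriter_fourcc call looks like a typo for 'MJPG', and the fourcc also has to be compatible with the output container. Below is a sketch of a pairing that commonly works; note that the size passed to VideoWriter must match the frames actually written (the crop cv_img[90:360, 90:570] is 480 pixels wide by 270 high, so (480, 270), not (270, 480)):

import cv2

# MJPG frames inside an .avi container is a widely supported pairing
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
size = (480, 270)  # (width, height) of the cropped frames passed to write()
result = cv2.VideoWriter('21_output.avi', fourcc, 20, size)

# ... write frames with result.write(k) inside the loop ...

result.release()  # finalize the container, or the file may be unplayable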
Code:
import face_recognition as fr
import os
import cv2
import face_recognition
import numpy as np
from time import sleep

def get_encoded_faces():
    encoded = {}
    for dirpath, dnames, fname in os.walk("./faces"):
        for f in fname:
            if f.endswith(".jpg") or f.endswith(".png"):
                face = fr.load_image_file("faces/" + f)
                encoding = fr.face_encodings(face)[0]
                encoded[f.split(".")[0]] = encoding
    return encoded, fname

def unknown_image_encoded(img):
    face = fr.load_image_file("faces/" + img)
    encoding = fr.face_encodings(face)[0]
    return encoding

def classify_face(im):
    faces, fname = get_encoded_faces()
    faces_encoded = list(faces.values())
    known_face_names = list(faces.keys())
    img = cv2.imread(im, 1)
    face_locations = face_recognition.face_locations(img)
    unknown_face_encodings = face_recognition.face_encodings(img, face_locations)
    face_names = []
    for face_encoding in unknown_face_encodings:
        matches = face_recognition.compare_faces(faces_encoded, face_encoding)
        name = "Unknown"
        face_distances = face_recognition.face_distance(faces_encoded, face_encoding)
        best_match_index = np.argmin(face_distances)
        if matches[best_match_index]:
            name = known_face_names[best_match_index]
        face_names.append(name)
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        cv2.rectangle(img, (left - 20, top - 20), (right + 20, bottom + 20), (255, 0, 0), 2)
        cv2.rectangle(img, (left - 20, bottom - 15), (right + 20, bottom + 20), (255, 0, 0), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(img, name, (left - 20, bottom + 15), font, 1.0, (255, 255, 255), 2)
    return face_names, fname

cap = cv2.VideoCapture(0)
while True:
    ret, image = cap.read()
    recog, fname = classify_face(image)
    print(recog)
    cv2.imshow(fname, image)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
video.release()
cv2.destroyAllWindows()
Error:
Traceback (most recent call last):
File "face.py", line 70, in <module>
recog, fname = classify_face(image)
File "face.py", line 37, in classify_face
img = cv2.imread(im, 1)
SystemError: <built-in function imread> returned NULL without setting an error
[ WARN:0] global C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-wbmte9m7\opencv\modules\videoio\src\cap_msmf.cpp (435) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
The code works properly when using an image, but when I tried it with video/real time it throws this error. I guess it requires the path instead of the image that is passed to it; is there any other workaround? I am trying to recognize faces in real time, and the major issue was detecting unknown faces, so when I started coding for real time I got this error.
The code and the error message don't agree. Are you running an older version of the code?
Error message:
File "face.py", line 37, in classify_face
img = cv2.imread(im, 1)
Code:
img = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
For debugging it may be helpful to display the received frame from the camera with code like the following:
ret, image = cap.read()
grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow('frame', grey)
cv2.waitKey()
cv2.imread(im, 1) requires im to be the filename (datatype: string) of the image that you want to read.
Using cap = cv2.VideoCapture(0), you don't need to read images from files anymore, since the image that you want to classify is returned as an array from cap.read().
To fix your code for using cv2.VideoCapture, remove img = cv2.imread(im, 1) from your classify_face method and change the method definition to
def classify_face(img):
instead of
def classify_face(im):
Note that the 0 option of cv2.VideoCapture refers to reading the live video stream from the camera with index 0.
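Putting the suggestion together, the relevant parts would look roughly like this (a sketch that reuses the imports and the get_encoded_faces helper from the question):

def classify_face(img):
    # img is already a NumPy array from cap.read(); no cv2.imread needed
    faces, fname = get_encoded_faces()
    faces_encoded = list(faces.values())
    known_face_names = list(faces.keys())
    face_locations = face_recognition.face_locations(img)
    unknown_face_encodings = face_recognition.face_encodings(img, face_locations)
    face_names = []
    for face_encoding in unknown_face_encodings:
        matches = face_recognition.compare_faces(faces_encoded, face_encoding)
        name = "Unknown"
        face_distances = face_recognition.face_distance(faces_encoded, face_encoding)
        best_match_index = np.argmin(face_distances)
        if matches[best_match_index]:
            name = known_face_names[best_match_index]
        face_names.append(name)
    return face_names, fname

cap = cv2.VideoCapture(0)  # index 0 reads the live stream from the first camera
while True:
    ret, image = cap.read()
    if not ret:  # guard against a failed frame grab
        break
    recog, fname = classify_face(image)
    print(recog)
    cv2.imshow("face recognition", image)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()  # note: the capture object in the question is cap, not video
cv2.destroyAllWindows()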