Face detection using RetinaFace code error - Python

I am new to Python. I am currently working on a project to detect faces using RetinaFace and OpenCV, and I am using the RetinaFaceWrapper from the RetinaFace Git repo. Below is my code. When I run it on an .mov video file I get this error: TypeError: detect_face() missing 1 required positional argument: 'img'. I need some help with this. Thank you.
from retinaface import RetinaFace
import cv2

def build_model():
    from retinaface import RetinaFace
    face_detector = RetinaFace.build_model()
    return face_detector

def detect_face(face_detector, img, align=True):
    from retinaface import RetinaFace
    from retinaface.commons import postprocess

    #---------------------------------

    resp = []

    # The BGR2RGB conversion will be done in the preprocessing step of retinaface.
    # img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #retinaface expects RGB but OpenCV read BGR

    """
    face = None
    img_region = [0, 0, img.shape[0], img.shape[1]] #Really?

    faces = RetinaFace.extract_faces(img_rgb, model = face_detector, align = align)
    if len(faces) > 0:
        face = faces[0][:, :, ::-1]
    return face, img_region
    """

    #--------------------------

    obj = RetinaFace.detect_faces(img, model=face_detector, threshold=0.9)

    if type(obj) == dict:
        for key in obj:
            identity = obj[key]
            facial_area = identity["facial_area"]
            y = facial_area[1]
            h = facial_area[3] - y
            x = facial_area[0]
            w = facial_area[2] - x
            img_region = [x, y, w, h]

            #detected_face = img[int(y):int(y+h), int(x):int(x+w)] #opencv
            detected_face = img[facial_area[1]:facial_area[3], facial_area[0]:facial_area[2]]

            if align:
                landmarks = identity["landmarks"]
                left_eye = landmarks["left_eye"]
                right_eye = landmarks["right_eye"]
                nose = landmarks["nose"]
                #mouth_right = landmarks["mouth_right"]
                #mouth_left = landmarks["mouth_left"]
                detected_face = postprocess.alignment_procedure(detected_face, right_eye, left_eye, nose)

            resp.append((detected_face, img_region))

    return resp

path = "database/images/org_3fc67bdc820dc3c1_1647514190000.mov"
cap = cv2.VideoCapture(path)

# Video
while True:
    ret, img = cap.read()
    img = cv2.resize(img, (640, 360))
    img = detect_face(img)
    cv2.imshow('Face detector', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

Download/clone this repo and run the pose_detection_retinaface.py file. This is the complete code for using RetinaFace for face detection.
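For reference, the TypeError itself comes from calling detect_face(img) with a single argument while the function is defined as detect_face(face_detector, img, align=True), and from never building the detector. A minimal sketch of a corrected main loop, reusing the build_model and detect_face helpers and the cap capture from the question (the rectangle drawing is just one way to visualise the returned regions):

face_detector = build_model()                  # build the model once, outside the loop

while True:
    ret, img = cap.read()
    if not ret:                                # stop when the video ends
        break
    img = cv2.resize(img, (640, 360))
    faces = detect_face(face_detector, img)    # pass both required arguments
    for detected_face, (x, y, w, h) in faces:  # detect_face returns (face, [x, y, w, h]) pairs
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('Face detector', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()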


Python, cvzone - why do I get this ValueError?

I am trying to zoom into a picture using two hands (gesture-controlled image zoom), but when I try to use two hands I get this error and I don't know why. When making my program I followed this tutorial: https://www.youtube.com/watch?v=VPaFV3QBsEw&t=675s. It's strange because the program worked for him.
This is the error I get:
    hands, img = detector.findHands(img)
ValueError: too many values to unpack (expected 2)
This is my code:
import cv2
from cvzone.HandTrackingModule import HandDetector

cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 720)
detector = HandDetector(detectionCon=0.7)
startDist = None
scale = 0
cx, cy = 500, 500

while True:
    success, img = cap.read()
    hands, img = detector.findHands(img)
    img1 = cv2.imread("kung_fu_panda.png")

    if len(hands) == 2:
        if detector.fingersUp(hands[0]) == [1, 1, 0, 0, 0] and \
                detector.fingersUp(hands[1]) == [1, 1, 0, 0, 0]:
            lmList1 = hands[0]["lmList"]
            lmList2 = hands[1]["lmList"]
            # point 8 is the tip of the index finger
            if startDist is None:
                length, info, img = detector.findDistance(hands[0]["center"], hands[1]["center"], img)
                startDist = length

            length, info, img = detector.findDistance(hands[0]["center"], hands[1]["center"], img)
            scale = int((length - startDist) // 2)
            cx, cy = info[4:]
            print(scale)
    else:
        startDist = None

    try:
        h1, w1, _ = img1.shape
        newH, newW = ((h1 + scale) // 2) * 2, ((w1 + scale) // 2) * 2
        img1 = cv2.resize(img1, (newW, newH))
        img[cy - newH // 2:cy + newH // 2, cx - newW // 2:cx + newW // 2] = img1
    except:
        pass

    cv2.imshow("Image", img)
    cv2.waitKey(1)
The cvzone library is updated frequently. As you can see at the beginning of the video, when he imports the cvzone package he is using version 1.5.0.
I tried your code with other versions and got an error similar to yours, but with version 1.5.0 your code worked great.
You can use my answer here to change the version of the cvzone library in your project to 1.5.0.
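For example, one way to pin the library to that version (assuming a pip-based environment) is:

    pip install cvzone==1.5.0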

I am getting an OpenCV error in Python and my code is given below. Please resolve it fast. Thanks in advance

Please solve this, guys. When I run this code it gives me an unexpected error:
"OpenCV Error: Assertion failed (ssize.width > 0 && ssize.height > 0) in resize, file /build/opencv-L2vuMj/opencv-3.2.0+dfsg/modules/imgproc/src/imgwarp.cpp, line 3492"
Code:
import numpy as np
import dlib
import cv2
import face_recognition
import os

path = 'images'
image = []
classNames = []
myList = os.listdir(path)
for cl in myList:
    curImg = cv2.imread(f'{path}/{cl}')
    image.append(curImg)
    classNames.append(os.path.splitext(cl)[0])
# print(classNames)

def findEncodings(image):
    encodeList = []
    for img in image:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        encode = face_recognition.face_encodings(img)[0]
        encodeList.append(encode)
    return encodeList

encodeListKnown = findEncodings(image)
print(len("Encoding Complete"))

cap = cv2.VideoCapture()
# print(cap)

while True:
    success, img = cap.read()
    imgS = cv2.resize(img, (0,0), None, 0.25, 0.25)
    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
    facesCurFrames = face_recognition.face_locations(imgS)
    encodesCurFrames = face_recognition.face_encodings(imgS, facesCurFrames)
    for encodeFace, faceLoc in zip(encodesCurFrames, facesCurFrames):
        matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
        faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
        print(faceDis)
The assertion ssize.width > 0 && ssize.height > 0 means the source image passed to cv2.resize is empty. Here the capture is created as cap = cv2.VideoCapture() with no device index or filename, so cap.read() never returns a valid frame; open it with a source (for example cv2.VideoCapture(0) for the first camera) and check the success flag before resizing. For resizing by a scale factor you can also pass None instead of the size tuple:
imgS = cv2.resize(img, None, fx=0.25, fy=0.25)
will scale the image by 0.25 in both dimensions.
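A minimal sketch of the corrected capture loop, assuming the default camera at index 0 (adjust the index, or pass a filename, as needed):

cap = cv2.VideoCapture(0)  # the original code passed no source at all

while True:
    success, img = cap.read()
    if not success:  # guard against failed reads (no camera, end of stream)
        break
    imgS = cv2.resize(img, None, fx=0.25, fy=0.25)  # scale by 0.25 in both dimensions
    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)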

OpenCV imread error while trying it in real-time

Code:
import face_recognition as fr
import os
import cv2
import face_recognition
import numpy as np
from time import sleep

def get_encoded_faces():
    encoded = {}
    for dirpath, dnames, fname in os.walk("./faces"):
        for f in fname:
            if f.endswith(".jpg") or f.endswith(".png"):
                face = fr.load_image_file("faces/" + f)
                encoding = fr.face_encodings(face)[0]
                encoded[f.split(".")[0]] = encoding
    return encoded, fname

def unknown_image_encoded(img):
    face = fr.load_image_file("faces/" + img)
    encoding = fr.face_encodings(face)[0]
    return encoding

def classify_face(im):
    faces, fname = get_encoded_faces()
    faces_encoded = list(faces.values())
    known_face_names = list(faces.keys())
    img = cv2.imread(im, 1)
    face_locations = face_recognition.face_locations(img)
    unknown_face_encodings = face_recognition.face_encodings(img, face_locations)

    face_names = []
    for face_encoding in unknown_face_encodings:
        matches = face_recognition.compare_faces(faces_encoded, face_encoding)
        name = "Unknown"
        face_distances = face_recognition.face_distance(faces_encoded, face_encoding)
        best_match_index = np.argmin(face_distances)
        if matches[best_match_index]:
            name = known_face_names[best_match_index]
        face_names.append(name)

    for (top, right, bottom, left), name in zip(face_locations, face_names):
        cv2.rectangle(img, (left-20, top-20), (right+20, bottom+20), (255, 0, 0), 2)
        cv2.rectangle(img, (left-20, bottom-15), (right+20, bottom+20), (255, 0, 0), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(img, name, (left-20, bottom+15), font, 1.0, (255, 255, 255), 2)

    return face_names, fname

cap = cv2.VideoCapture(0)
while True:
    ret, image = cap.read()
    recog, fname = classify_face(image)
    print(recog)
    cv2.imshow(fname, image)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
video.release()
cv2.destroyAllWindows()
Error:
Traceback (most recent call last):
  File "face.py", line 70, in <module>
    recog, fname = classify_face(image)
  File "face.py", line 37, in classify_face
    img = cv2.imread(im, 1)
SystemError: <built-in function imread> returned NULL without setting an error
[ WARN:0] global C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-wbmte9m7\opencv\modules\videoio\src\cap_msmf.cpp (435) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
The code works properly when using an image, but when I tried using it with video/real-time it throws this error.
I guess it requires a path instead of the image that is passed to it; is there any other workaround?
I am trying to recognize faces in real time, and the major issue was detecting unknown faces, so when I started coding for real time I got this error.
The code and the error message don't agree. Are you running an older version of the code?
Error message:
  File "face.py", line 37, in classify_face
    img = cv2.imread(im, 1)
Code:
    img = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
For debugging it may be helpful to display the received frame from the camera with code like the following:
    ret, image = cap.read()
    grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame', grey)
    cv2.waitKey()
cv2.imread(im, 1) requires im to be the filename (datatype: string) of the image that you want to read.
Since you are using cap = cv2.VideoCapture(0), you don't need to read images from files anymore; the image that you want to classify is returned as an array from cap.read().
To fix your code for use with cv2.VideoCapture, remove img = cv2.imread(im, 1) from your classify_face method and change the method definition to
    def classify_face(img):
instead of
    def classify_face(im):
Note that the 0 argument of cv2.VideoCapture refers to reading the live video stream from the camera with index 0.
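Putting both changes together, a minimal sketch of the corrected main loop, assuming classify_face has been changed as described above (note that the original code also calls video.release() even though the capture object is named cap, and that fname returned by classify_face is a list, so a fixed window title is used here instead):

cap = cv2.VideoCapture(0)                  # index 0 = first camera
while True:
    ret, image = cap.read()
    if not ret:                            # guard against failed reads
        break
    recog, fname = classify_face(image)    # pass the frame array directly
    print(recog)
    cv2.imshow("Face recognition", image)  # fixed window title
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()                              # release the capture that was opened
cv2.destroyAllWindows()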

How to reduce noise when using a Scharr filter?

I am trying to implement a Scharr filter and remove noise. Here is my code:
import sys
import numpy as np
import cv2
import math

filename = sys.argv[1]
cap = cv2.VideoCapture(filename)
cap.set(cv2.cv.CV_CAP_PROP_POS_MSEC, 300000)
scale = 1
delta = 0
ddepth = cv2.CV_16S

while True:
    success, img = cap.read()
    img = cv2.GaussianBlur(img, (3,3), 0)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Scharr filter
    scharr_grad_x = cv2.Scharr(gray, ddepth, 1, 0)
    scharr_grad_y = cv2.Scharr(gray, ddepth, 0, 1)
    scharr_abs_grad_x = cv2.convertScaleAbs(scharr_grad_x)
    scharr_abs_grad_y = cv2.convertScaleAbs(scharr_grad_y)

    # remove noise after Scharr filter
    scharr = np.hypot(scharr_abs_grad_x, scharr_abs_grad_y)
    mean = np.mean(scharr)
    scharr[scharr <= mean] = 0
    scharr = cv2.add(scharr_abs_grad_x, scharr_abs_grad_y)

    cv2.imshow('scharr', scharr)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
I found this idea using NumPy, but I can't get the solution right. The result should be as in the screenshots below.
[Screenshot: Scharr output]
[Screenshot: Scharr output with the noise removed, which is what I need]
How can I implement it in my code?
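One detail worth noting in the loop above: the line scharr = cv2.add(scharr_abs_grad_x, scharr_abs_grad_y) overwrites the thresholded array, so the noise-removal step never reaches imshow. A minimal sketch of the mean-threshold idea from the question, assuming the absolute gradients computed above (np.hypot promotes to float, so convert back to uint8 before displaying):

    # combine the gradients first, then zero out responses at or below the mean
    scharr = np.hypot(scharr_abs_grad_x.astype(np.float32),
                      scharr_abs_grad_y.astype(np.float32))
    scharr[scharr <= np.mean(scharr)] = 0
    scharr = np.clip(scharr, 0, 255).astype(np.uint8)
    cv2.imshow('scharr', scharr)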

Disparity map won't show / not sure if calibrating properly

I wrote this code using other code that I found online. I don't get any errors, but I can't get the disparity map to show up with imshow. I saved the image and it is just black, so I'm not doing something properly. Could someone please look through my code and see what's wrong with it? I may not be doing the stereo calibration properly, but I am not sure.
Thank you!
import numpy as np
import cv2
import glob
import os
from matplotlib import pyplot as plt

print 'program starts'

# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 0.001)

#-------------------------------------------------------------------------
# INITIALIZATION VARIABLES

# prepare object points
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)

# Arrays to store object points and image points from all the images.
objpoints = []   # 3D points in real world space
imgpointsL = []  # 2D points in image plane
imgpointsR = []

#-------------------------------------------------------------------------
os.chdir('/home/pi/Desktop/LeftImg')
images = sorted(glob.glob('*.png'))
# print images
for fname in images:
    img = cv2.imread(fname)
    grayL = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # find chess board corners
    ret, cornersL = cv2.findChessboardCorners(grayL, (9,6))
    # if found, add object points, image points (after refining them)
    if ret == True:
        objpoints.append(objp)
        cv2.cornerSubPix(grayL, cornersL, (11,11), (-1,-1), criteria)
        imgpointsL.append(cornersL)

print len(objpoints)

os.chdir('/home/pi/Desktop/RightImg')
images = sorted(glob.glob('*.png'))
# print images
for fname in images:
    img = cv2.imread(fname)
    grayR = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # find chess board corners
    ret, cornersR = cv2.findChessboardCorners(grayR, (9,6))
    # if found, add object points, image points (after refining them)
    if ret == True:
        prevobjp = objp
        objpoints.append(objp)
        cv2.cornerSubPix(grayR, cornersR, (11,11), (-1,-1), criteria)
        imgpointsR.append(cornersR)

        c[prevobjp & objp]
        if c.all():
            print 'True'
        else:
            print 'False'

# print len(objpointsR)

# Draw and display corners
# img = cv2.drawChessboardCorners(img, (9,6), cornersR, ret)
# cv2.drawChessboardCorners(img, (9,6), cornersR, ret)
# plt.imshow(img)
# plt.show()
# print type(img)
# cv2.imshow('img', img)
# cv2.waitKey(500)

print 'start'
cameraMatrix1 = cv2.cv.CreateMat(3,3, cv2.CV_64FC1)
cameraMatrix2 = cv2.cv.CreateMat(3,3, cv2.CV_64FC1)
retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(objpoints, imgpointsL, imgpointsR, (384,288))
print cameraMatrix1, cameraMatrix2
print 'over'

R1 = np.zeros(shape=(3,3))
R2 = np.zeros(shape=(3,3))
P1 = np.zeros(shape=(3,4))
P2 = np.zeros(shape=(3,4))
Q = np.zeros(shape=(4,4))
map1x = []
map1y = []
map2x = []
map2y = []
# imgU1=[]
# imgU2=[]
# print cameraMatrix1

os.chdir('/home/pi/Desktop')
imgL = cv2.imread('calLeft1.png')
imgR = cv2.imread('calRight1.png')
imgL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
imgR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)

cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, (384,288), R, T, R1, R2, P1, P2, Q, flags=cv2.cv.CV_CALIB_ZERO_DISPARITY, alpha=-1, newImageSize=(0,0))
# cv2.reprojectImageTo3D(disp, points, Q)
print Q

map1x, map1y = cv2.initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, (384,288), cv2.CV_32FC1)
map2x, map2y = cv2.initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, (384,288), cv2.CV_32FC1)
# cv2.remap(imgL, map1x, map1y, cv2.INTER_LINEAR, imgU1, cv2.BORDER_CONSTANT, 0)
# cv2.remap(imgR, map2x, map2y, cv2.INTER_LINEAR, imgU2, cv2.BORDER_CONSTANT, 0)
imgU1 = cv2.remap(imgL, map1x, map1y, cv2.INTER_LINEAR)
imgU2 = cv2.remap(imgR, map2x, map2y, cv2.INTER_LINEAR)
# imgU1 = imgU1.astype(np.uint8)
# imgU2 = imgU2.astype(np.uint8)

stereo = cv2.StereoBM(1, 16, 15)
disp = stereo.compute(imgU1, imgU2, disptype=cv2.CV_32FC1)
norm_coeff = 255 / disp.max()
cv2.imshow('disp', disp * norm_coeff / 255)
cv2.imwrite('dispimage.png', disp * norm_coeff / 255)

# When everything done, release the capture
# cap.release()
# cv2.destroyAllWindows()
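Independent of the calibration, the display and save step above can produce an all-black image by itself: disp is computed as CV_32F, so disp * norm_coeff / 255 is a float image in roughly the 0..1 range, and cv2.imwrite casts floats to 8-bit without rescaling, turning everything into 0 or 1. A hedged sketch that rescales explicitly before showing and saving, assuming disp from the StereoBM call above (StereoBM marks invalid pixels with negative values):

    disp_valid = np.maximum(disp, 0)  # clamp invalid (negative) disparities to 0
    disp_vis = (255.0 * disp_valid / max(disp_valid.max(), 1e-6)).astype(np.uint8)
    cv2.imshow('disp', disp_vis)
    cv2.imwrite('dispimage.png', disp_vis)
    cv2.waitKey(0)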
