module 'cv2' has no attribute 'LBPHFaceRecognizer_create' - Python

I am trying to learn face detection and got this code from a GeeksforGeeks tutorial. However, when I run one of the two files, it shows the error AttributeError: module 'cv2' has no attribute 'LBPHFaceRecognizer_create'. I tried uninstalling OpenCV, installing it with pip install opencv-contrib-python, and reinstalling and rerunning it. I am currently running OpenCV 4.5.5. The tutorial advised removing '.face' from cv2.face.LBPHFaceRecognizer_create() for older OpenCV versions, but when I run it with .face, it displays module 'cv2' has no attribute 'face'. Please, someone, help me with this.
# It helps in identifying the faces
import cv2, sys, numpy, os

size = 4
haar_file = 'haarcascade_frontalface_default.xml'
datasets = 'datasets'

# Part 1: Create fisherRecognizer
print('Recognizing face. Please be in sufficient light...')

# Create a list of images and a list of corresponding names
(images, labels, names, id) = ([], [], {}, 0)
for (subdirs, dirs, files) in os.walk(datasets):
    for subdir in dirs:
        names[id] = subdir
        subjectpath = os.path.join(datasets, subdir)
        for filename in os.listdir(subjectpath):
            path = subjectpath + '/' + filename
            label = id
            images.append(cv2.imread(path, 0))
            labels.append(int(label))
        id += 1
(width, height) = (130, 100)

# Create a Numpy array from the two lists above
(images, labels) = [numpy.array(lis) for lis in [images, labels]]

# OpenCV trains a model from the images
# NOTE FOR OpenCV2: remove '.face'
model = cv2.LBPHFaceRecognizer_create()
model.train(images, labels)

for i in range(0, 20):
    if i < 10:
        print(i)
    else:
        print('Done with it')

# Part 2: Use fisherRecognizer on camera stream
face_cascade = cv2.CascadeClassifier(haar_file)
webcam = cv2.VideoCapture(0)
while True:
    (_, im) = webcam.read()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (width, height))
        # Try to recognize the face
        prediction = model.predict(face_resize)
        cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 3)
        if prediction[1] < 500:
            cv2.putText(im, '%s - %.0f' % (names[prediction[0]], prediction[1]), (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
        else:
            cv2.putText(im, 'not recognized', (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
    cv2.imshow('OpenCV', im)
    key = cv2.waitKey(10)
    if key == 27:
        break

I think you need to state the module explicitly as "cv2.face", not drop ".face":
model = cv2.face.LBPHFaceRecognizer_create()
Before doing so, did you confirm that the OpenCV build you have installed actually contains the face module? You can check like this:
import cv2
functions = dir(cv2)
for f in functions:
    print(f)
and if not, install it like this:
pip uninstall opencv-contrib-python
pip install opencv-contrib-python
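A quicker check than scanning the full dir() listing is to test for the contrib module directly; this is a small sketch of my own, not part of the original answer:
import cv2
print(cv2.__version__)        # e.g. 4.5.5
print(hasattr(cv2, 'face'))   # True only when the contrib modules are present
if hasattr(cv2, 'face'):
    model = cv2.face.LBPHFaceRecognizer_create()
Note that having both opencv-python and opencv-contrib-python installed at the same time can shadow the contrib modules, so if the check still fails, uninstall both packages and reinstall only opencv-contrib-python.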

Related

Python, cvzone - why do I get this ValueError?

I am trying to zoom into a picture using two hands (gesture-controlled image zoom), but when I use two hands I get this error and I don't know why. When making my program I followed this tutorial: https://www.youtube.com/watch?v=VPaFV3QBsEw&t=675s. It's strange because the program worked for him.
This is the error I get:
hands, img = detector.findHands(img)
ValueError: too many values to unpack (expected 2)
This is my code:
import cv2
from cvzone.HandTrackingModule import HandDetector

cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 720)
detector = HandDetector(detectionCon=0.7)
startDist = None
scale = 0
cx, cy = 500, 500
while True:
    success, img = cap.read()
    hands, img = detector.findHands(img)
    img1 = cv2.imread("kung_fu_panda.png")
    if len(hands) == 2:
        if detector.fingersUp(hands[0]) == [1, 1, 0, 0, 0] and \
           detector.fingersUp(hands[1]) == [1, 1, 0, 0, 0]:
            lmList1 = hands[0]["lmList"]
            lmList2 = hands[1]["lmList"]
            # point 8 is the tip of the index finger
            if startDist is None:
                length, info, img = detector.findDistance(hands[0]["center"], hands[1]["center"], img)
                startDist = length
            length, info, img = detector.findDistance(hands[0]["center"], hands[1]["center"], img)
            scale = int((length - startDist) // 2)
            cx, cy = info[4:]
            print(scale)
    else:
        startDist = None
    try:
        h1, w1, _ = img1.shape
        newH, newW = ((h1 + scale) // 2) * 2, ((w1 + scale) // 2) * 2
        img1 = cv2.resize(img1, (newW, newH))
        img[cy - newH // 2:cy + newH // 2, cx - newW // 2:cx + newW // 2] = img1
    except:
        pass
    cv2.imshow("Image", img)
    cv2.waitKey(1)
The cvzone library changes its API between releases. As you can see at the beginning of the video, when he imports the cvzone package he is using version 1.5.0.
I tried your code with other versions and got an error similar to yours, but with version 1.5.0 your code worked great.
You can use my answer here to change the version of the cvzone library in your project to 1.5.0.
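For reference, pinning the version looks like this (assuming a standard pip environment):
pip uninstall cvzone
pip install cvzone==1.5.0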

Why is the groupRectangles() function attempting to convert a value?

I'm using Python, cv2, numpy and pyautogui to bot a simple game. It detects apples and stars coming onto the screen. It worked fine but got confused when the game sped up, since it was only searching for one item at a time, so I changed it to the code below to detect multiple items at once. This works fine, but it needs some grouping because the same object is detected multiple times. That works sometimes, but after a random amount of time I get the following error:
Error line 76: cv.groupRectangles() param1 is attempting to convert a value.
while True:
    scr = np.array(screenshot.grab(dimensions))
    scr_no_alpha = scr[:, :, :3]
    result = cv.matchTemplate(scr_no_alpha, needle, cv.TM_CCOEFF_NORMED)
    result2 = cv.matchTemplate(scr_no_alpha, needle2, cv.TM_CCOEFF_NORMED)
    _, max_val, _, max_loc = cv.minMaxLoc(result)
    _, max_val2, _, max_loc2 = cv.minMaxLoc(result2)
    src = scr.copy()
    x_top_row, y_top_row = 145, 140
    x_middle_row, y_middle_row = 185, 185
    x_bottom_row, y_bottom_row = 185, 270
    cv.rectangle(src, (x_top_row, y_top_row), (x_top_row + w, y_top_row + h), (0, 255, 0), 2)
    cv.rectangle(src, (x_middle_row, y_middle_row), (x_middle_row + w, y_middle_row + h), (0, 255, 0), 2)
    cv.rectangle(src, (x_bottom_row, y_bottom_row), (x_bottom_row + w, y_bottom_row + h), (0, 255, 0), 2)
    x_star_row, y_star_row = 10, 120
    cv.rectangle(src, (x_star_row, y_star_row), (x_star_row + w2, y_star_row + h2), (0, 255, 0), 2)
    threshold = 0.6
    apple_locations = np.where(result >= threshold)
    apple_locations = list(zip(*apple_locations[::-1]))
    threshold = 0.4
    star_locations = np.where(result2 >= threshold)
    star_locations = list(zip(*star_locations[::-1]))
    apple_colour = (0, 0, 255)
    star_colour = (0, 255, 255)
    line_type = cv.LINE_4
    if len(apple_locations):
        apples_group = []
        for loc in apple_locations:
            rect = [int(loc[0]), int(loc[1]), w, h]
            print(rect)
            # appended twice so groupRectangles keeps single detections
            apples_group.append(rect)
            apples_group.append(rect)
        print(apples_group)
        apples_group, weights = cv.groupRectangles(apples_group, 2, 0.5)
        print(apples_group)
        print(f'{len(apples_group)} Needle(s) of type Apple found.')
        for (x, y, w, h) in apples_group:
            cv.rectangle(src, (x, y), (x + w, y + h), apple_colour, line_type)
        for apple in apples_group:
            if apple[0] in range(x_top_row, y_top_row) and apple[1] in range(x_top_row + w, y_top_row + h):
                pyautogui.press('w')
            if apple[0] in range(x_middle_row, y_middle_row) and apple[1] in range(x_middle_row + w, y_middle_row + h):
                pyautogui.press('s')
            if apple[0] in range(x_bottom_row, y_bottom_row) and apple[1] in range(x_bottom_row + w, y_bottom_row + h):
                pyautogui.press('d')
        cv.imshow('Matches', src)
    if len(star_locations):
        stars_group = []
        for loc in star_locations:
            rect = [int(loc[0]), int(loc[1]), w2, h2]
            stars_group.append(rect)
            stars_group.append(rect)
        stars_group, weights = cv.groupRectangles(stars_group, 2, 0.5)
        print(f'{len(stars_group)} Needle(s) of type Star found.')
        for (x, y, w, h) in stars_group:
            cv.rectangle(src, (x, y), (x + w, y + h), star_colour, line_type)
        for star in stars_group:
            if star[0] in range(x_star_row, y_star_row) and star[1] in range(x_star_row + w, y_star_row + h):
                pyautogui.press('a')
        cv.imshow('Matches', src)
    cv.waitKey(1)
    if keyboard.is_pressed('q'):
        cv.destroyAllWindows()
        break
The w and h values that end up in the rect list are not always plain Python ints (likely they arrive as numpy integer types), so cv.groupRectangles() sometimes fails while converting them; casting them explicitly with int(w) and int(h) fixes the problem.
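Applied to the loop above, the fix looks like this (my illustration of the casting just described, using the question's variable names):
# cast every element so cv.groupRectangles() receives plain Python ints
rect = [int(loc[0]), int(loc[1]), int(w), int(h)]
apples_group.append(rect)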

Python not responding when using face_recognition library

import os
import cv2
import face_recognition

KNOWN_FACES_DIR = 'Known'
TOLERANCE = 0.6
FRAME_THICKNESS = 3
FONT_THICKNESS = 2
MODEL = 'cnn'
video = cv2.VideoCapture(0)
print('Loading known faces...')
known_faces = []
known_names = []
for name in os.listdir(KNOWN_FACES_DIR):
    for filename in os.listdir(f'{KNOWN_FACES_DIR}/{name}'):
        image = face_recognition.load_image_file(f'{KNOWN_FACES_DIR}/{name}/{filename}')
        encoding = face_recognition.face_encodings(image)[0]
        known_faces.append(encoding)
        known_names.append(name)
while True:
    ret, image = video.read()
    locations = face_recognition.face_locations(image, model=MODEL)
    encodings = face_recognition.face_encodings(image, locations)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    print(f', found {len(encodings)} face(s)')
    for face_encoding, face_location in zip(encodings, locations):
        results = face_recognition.compare_faces(known_faces, face_encoding, TOLERANCE)
        match = None
        if True in results:
            match = known_names[results.index(True)]
            print(f' - {match} from {results}')
            top_left = (face_location[3], face_location[0])
            bottom_right = (face_location[1], face_location[2])
            cv2.rectangle(image, top_left, bottom_right, FRAME_THICKNESS)
            top_left = (face_location[3], face_location[2])
            bottom_right = (face_location[1], face_location[2] + 22)
            cv2.rectangle(image, top_left, bottom_right, cv2.FILLED)
            cv2.putText(image, match, (face_location[3] + 10, face_location[2] + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (200, 200, 200), FONT_THICKNESS)
    cv2.imshow(filename, image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video.release()
cv2.destroyAllWindows()
I am using the face_recognition library for a face recognition project. This gives me the desired result, but the window keeps not responding: it shows the result on a single frame only after hanging for 20-30 seconds. Is it because my laptop specs are not good, or does the code require some changes?
https://www.youtube.com/watch?v=PdkPI92KSIs&list=RDCMUCfzlCWGWYyIQ0aLC5w48gBQ&index=2
This is the YouTube video showing how the result should look.
Why don't you use deepface? Here, db_path is the location where you stored the facial images.
#!pip install deepface
from deepface import DeepFace
DeepFace.stream(db_path = "C:/my_db")
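Separately, a likely contributor to the hang in the original code is MODEL = 'cnn': the cnn detector in face_recognition is very slow without a CUDA-enabled dlib build, while the hog detector usually runs at interactive speed on a CPU. A one-line sketch of that change (my suggestion, not part of the answer above):
MODEL = 'hog'  # CPU-friendly detector; 'cnn' needs a GPU build of dlib for real-time use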

Passing and Receiving Images from Function

I'm creating a function (in Python) that receives a single image containing multiple human faces and returns multiple smaller images (one image per face). I am able to do a cv2.imshow inside the function and see the expected smaller images, but when I attempt a cv2.imshow from outside the function, it does not work (I am unable to see the smaller image and get a TypeError instead). Would appreciate some guidance.
def stills(user_image):
    # sub_frames = []
    fqp_image_src = (user_image)
    raw_pic = cv2.imread(fqp_image_src)
    mpic = cv2.resize(raw_pic, (0, 0), fx=0.30, fy=0.30)
    mpic_rgb = cv2.cvtColor(mpic, cv2.COLOR_BGR2RGB)
    face_boxes = haar_cascade_face.detectMultiScale(mpic_rgb, scaleFactor=1.2, minNeighbors=5)
    count = int(len(face_boxes))
    for i in range(count):
        face_box = face_boxes[i]
        final = cv2.rectangle(mpic, (face_box[0], face_box[1]), ((face_box[0] + face_box[2]), (face_box[1] + face_box[3])), (0, 255, 0), 2)
        sub_frame = final[face_box[1]:(face_box[1] + face_box[3]), face_box[0]:(face_box[0] + face_box[2])]
        # sub_frames.append(sub_frame)
        cv2.imshow('frame', sub_frame)  # this works
        cv2.waitKey()
    return (sub_frame, final)

# calling the function
something = stills("abc.jpg")
cv2.imshow('frame', something)  # this does not work
cv2.waitKey()
TypeError: Expected cv::UMat for argument 'mat'
This will do what you expected, just with some simplification and with full file paths. (The TypeError happens because stills returns a tuple, and cv2.imshow expects a single image array.)
One of the key errors was giving detectMultiScale a colored image; the input should have a single channel containing brightness (gray scale).
In order to display a colored image with the faces in boxes, a grayscale copy of the image is used for detection, giving coordinates to draw onto the colored image.
import cv2
import os

# Take as a global the dir in which this file lives
PATH = os.path.dirname(os.path.abspath(__file__))
haar_cascade_face = cv2.CascadeClassifier(os.path.join(PATH, 'haarcascade_frontalface_alt.xml'))

def stills(user_image):
    image = os.path.join(PATH, user_image)
    image = cv2.imread(image)
    image = cv2.resize(image, (0, 0), fx=0.30, fy=0.30)
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    face_boxes = haar_cascade_face.detectMultiScale(gray_image, scaleFactor=1.073, minNeighbors=8)
    final = image  # make the function always return an image
    sub_frames = []
    # Check if there are faces
    if len(face_boxes) > 0:
        for x, y, w, h in face_boxes:
            final = cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            sub_frame = image[y:y + h, x:x + w]
            sub_frames.append([x, y, x + w, y + h])
            cv2.imshow('sub_frame', sub_frame)
            # cv2.waitKey()  # No need to wait for the user
    else:
        print('No faces found')
    return (sub_frames, final)

if __name__ == '__main__':
    fragments, final = stills("abc.jpg")
    cv2.imshow('frame', final)
    cv2.waitKey()

stop face recognition in python as soon as face recognized

This is my code. It detects faces and recognizes them, but I want it to stop as soon as a face is recognized. More precisely: as soon as a face is detected, the recognizer should try to recognize it, and after recognizing it once, it should not do so again and again.
In short, for every face detected, FisherFace should run only once per face.
# facerec.py
import cv2, sys, numpy, os
import json
size = 4
fn_haar = 'haarcascade_frontalface_default.xml'
fn_dir = 'att_faces'
path2 = '/home/irum/Desktop/Face-Recognition/thakarrecog/UNKNOWNS'
path = '/home/irum/Desktop/Face-Recognition/thakarrecog/att_faces'

# Part 1: Create fisherRecognizer
print('Training...')
# Create a list of images and a list of corresponding names
(images, lables, names, id) = ([], [], {}, 0)
for (subdirs, dirs, files) in os.walk(fn_dir):
    for subdir in dirs:
        names[id] = subdir
        subjectpath = os.path.join(fn_dir, subdir)
        for filename in os.listdir(subjectpath):
            path = subjectpath + '/' + filename
            lable = id
            images.append(cv2.imread(path, 0))
            lables.append(int(lable))
        id += 1
(im_width, im_height) = (112, 92)

# Create a Numpy array from the two lists above
(images, lables) = [numpy.array(lis) for lis in [images, lables]]

# OpenCV trains a model from the images
model = cv2.createFisherFaceRecognizer()
model.train(images, lables)

# Part 2: Use fisherRecognizer on camera stream
haar_cascade = cv2.CascadeClassifier(fn_haar)
# Capturing camera feed
webcam = cv2.VideoCapture(0)
webcam.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 1920)
webcam.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 1080)
while True:
    # Reading frames from the live stream
    (rval, frame) = webcam.read()
    frame = cv2.flip(frame, 1, 0)
    # Convert frame into gray
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    # Resize the gray frame
    mini = cv2.resize(gray, (gray.shape[1] / size, gray.shape[0] / size))
    # Detecting the face
    faces = haar_cascade.detectMultiScale(mini, 1.3, 5)
    for i in range(len(faces)):
        face_i = faces[i]
        (x, y, w, h) = [v * size for v in face_i]
        # Cropping the face
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (im_width, im_height))
        # Equalize histogram
        eq = cv2.equalizeHist(face_resize)
        # Try to recognize the face
        prediction = model.predict(eq)
        print "Recognition Prediction", prediction
        # Draw rectangle around the face
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
        # Write the name of the recognized face
        result = {
            'face': {
                'distance': prediction,
                'coords': {
                    'x': str(faces[0][0]),
                    'y': str(faces[0][1]),
                    'width': str(faces[0][2]),
                    'height': str(faces[0][3])
                }
            }
        }
        print "1 Result of Over all Prediction", result
        if prediction[0] > 0 and prediction[1] < 600:
            result = {
                'face': {
                    'distance': prediction[1],
                    'coords': {
                        'x': str(faces[0][0]),
                        'y': str(faces[0][1]),
                        'width': str(faces[0][2]),
                        'height': str(faces[0][3])
                    }
                }
            }
            dist = result['face']['distance']
            print "Known Face DISTANCE", dist
            cv2.putText(frame,
                        '%s - %.0f' % (names[prediction[0]], prediction[1]),
                        (x - 10, y - 10), cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 0))
        else:
            print "for prediction more than 600"
            print "prediction", prediction
            result = {
                'face': {
                    'distance': prediction[1],
                    'coords': {
                        'x': str(faces[0][0]),
                        'y': str(faces[0][1]),
                        'width': str(faces[0][2]),
                        'height': str(faces[0][3])
                    }
                }
            }
            dist = result['face']['distance']
            print "UNKNOWN FACE", dist
            cv2.putText(frame,
                        'Unknown',
                        (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 0))
            pin = sorted([int(n[:n.find('.')]) for n in os.listdir(path2)
                          if n[0] != '.'] + [0])[-1] + 1
            cv2.imwrite('%s/%s.jpg' % (path2, pin), eq)
    cv2.imshow('OpenCV', frame)
    key = cv2.waitKey(10)
    if key == 27:
        break
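No answer was posted for this question. As a rough sketch of one way to handle "recognize each face only once", assuming the FisherFace labels are stable from frame to frame, you could keep a set of labels that have already been recognized and act on each label only once; the 600 threshold is taken from the code above, while the set bookkeeping is my own illustration (the model still runs each frame, but the recognition handling fires once per person):
recognized_labels = set()  # place before the while loop

# inside the per-face loop, guard the handling of the prediction:
label, distance = model.predict(eq)
if distance < 600 and label not in recognized_labels:
    recognized_labels.add(label)  # this person is now known; skip them on later frames
    print('Recognized %s (distance %.0f)' % (names[label], distance))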
