I have face recognition code (the entire code is given at the end) that works perfectly fine with the existing dataset.
But I wanted it to also add new faces into the dataset (enrollments), after asking for user input for the name of the new person in the frame [like this: new_name = input("Who is this?")]. Then I could create a new folder with the entered name and store the face inside the frame. This is what I did:
new_name = input("Who is this?")
path_2 = os.path.join('Images', new_name)
os.mkdir(path_2)
print("Directory '% s' created" % new_name)
for (x, y, w, h) in faces:
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255),
                  thickness=2)
    cv2.putText(frame, new_name, (x, y), cv2.FONT_HERSHEY_SIMPLEX,
                0.75, (0, 255, 0), 2)
    sub_face = frame[y:y+h, x:x+w]
    FaceFileName = new_name + str(y+x) + ".jpg"
    cv2.imwrite(os.path.join(path_2, FaceFileName), sub_face)
    cv2.imshow("Frame", frame)
    #if cv2.waitKey(1) & 0xFF == ord('q'):
    #    break
This worked fine with new people. But now I had to handle unrecognized faces of known people.
In this case we would already have a folder with the entered name, so we must add the image to that existing folder.
For this I tried the code below (it did not work):
else:  # To store the unknown new face with name
    new_name = input("Who is this?")
    # If the new_name entered already exists as a folder
    if os.path.isfile(new_name):
        print(new_name, "folder already exists")
        frame = imutils.resize(frame, width=400)
        rects = detector.detectMultiScale(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY),
                                          scaleFactor=1.1,
                                          minNeighbors=5,
                                          minSize=(30, 30))
        FaceFileName = new_name + str(y+x) + ".jpg"
        for (x, y, w, h) in rects:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF
            if key == ord("k"):
                p = os.path.join([new_name, FaceFileName.format(str(total).zfill(5))])
                # cv2.imwrite(os.path.join(path_2,FaceFileName),sub_face)
                cv2.imwrite(p, orig)
                total += 1
                print("Image saved")
            elif key == ord("q"):
                break
    # If the new_name does not exist as a folder, a new folder has to be created
    else:
        path_2 = os.path.join('Images', new_name)
        os.mkdir(path_2)
        print("Directory '% s' created" % new_name)
        frame = imutils.resize(frame, width=400)
        rects = detector.detectMultiScale(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY),
                                          scaleFactor=1.1,
                                          minNeighbors=5,
                                          minSize=(30, 30))
        FaceFileName = new_name + str(y+x) + ".jpg"
        for (x, y, w, h) in rects:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF
            if key == ord("k"):
                p = os.path.join([path_2, FaceFileName.format(str(total).zfill(5))])
                # cv2.imwrite(os.path.join(path_2,FaceFileName),sub_face)
                cv2.imwrite(p, orig)
                total += 1
                print("Image saved")
I am getting the error:
Who is this?Vishwesh
Traceback (most recent call last):
File "C:\Users\Vishw\databs.py", line 117, in <module>
os.mkdir(path_2)
FileExistsError: [WinError 183] Cannot create a file when that file already exists: 'Images\\Vishwesh'
[ WARN:0] global C:\Users\runneradmin\AppData\Local\Temp\pip-req-build-sgoydvi3\opencv\modules\videoio\src\cap_msmf.cpp (438) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
The complete code is given below. Let me know what is wrong with the code. Kindly help!
Note: I included the above code under the # Face recognition on LIVE WEBCAM FEED section, as the else branch of "if True in matches:".
Here is my entire code:
# Extracting features from face
from imutils import paths
import face_recognition
import pickle
import cv2
import os

# Get paths of each file in folder named Images
# Images here contains my data (folders of various persons)
imagePaths = list(paths.list_images('Images'))
knownEncodings = []
knownNames = []
# loop over the image paths
for (i, imagePath) in enumerate(imagePaths):
    # extract the person name from the image path
    name = imagePath.split(os.path.sep)[-2]
    # load the input image and convert it from BGR (OpenCV ordering)
    # to dlib ordering (RGB)
    image = cv2.imread(imagePath)
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # use face_recognition to locate faces
    boxes = face_recognition.face_locations(rgb, model='hog')
    # compute the facial embedding for the face
    encodings = face_recognition.face_encodings(rgb, boxes)
    # loop over the encodings
    for encoding in encodings:
        knownEncodings.append(encoding)
        knownNames.append(name)
# save encodings along with their names in dictionary data
data = {"encodings": knownEncodings, "names": knownNames}
# use pickle to save data into a file for later use
f = open("face_enc", "wb")
f.write(pickle.dumps(data))
f.close()

# Face recognition on LIVE WEBCAM FEED
import face_recognition
import pickle
import cv2
import os

# find path of xml file containing haarcascade file
cascPathface = os.path.dirname(
    cv2.__file__) + "/data/haarcascade_frontalface_default.xml"
# load the haarcascade in the cascade classifier
faceCascade = cv2.CascadeClassifier(cascPathface)
# load the known faces and embeddings saved in the last file
data = pickle.loads(open('face_enc', "rb").read())
print("Streaming started")
video_capture = cv2.VideoCapture(0)
# loop over frames from the video file stream
while True:
    # grab the frame from the threaded video stream
    ret, frame = video_capture.read()
    orig = frame.copy()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray,
                                         scaleFactor=1.05,
                                         minNeighbors=3,
                                         minSize=(60, 60),
                                         flags=cv2.CASCADE_SCALE_IMAGE)
    # convert the input frame from BGR to RGB
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # the facial embeddings for faces in the input
    encodings = face_recognition.face_encodings(rgb)
    names = []
    # loop over the facial embeddings in case
    # we have multiple embeddings for multiple faces
    for encoding in encodings:
        # compare encodings with encodings in data["encodings"]
        # matches contains an array of boolean values: True for the embeddings it matches closely
        # and False for the rest
        matches = face_recognition.compare_faces(data["encodings"], encoding)
        # set name = "Unknown" if no encoding matches
        name = "Unknown"
        # check to see if we have found a match
        if True in matches:
            # find positions at which we get True and store them
            matchedIdxs = [i for (i, b) in enumerate(matches) if b]
            counts = {}
            # loop over the matched indexes and maintain a count for
            # each recognized face
            for i in matchedIdxs:
                # check the names at respective indexes we stored in matchedIdxs
                name = data["names"][i]
                # increase count for the name we got
                counts[name] = counts.get(name, 0) + 1
            # set name which has the highest count
            name = max(counts, key=counts.get)
        else:  # To store the unknown new face with name
            new_name = input("Who is this?")
            # If the new_name entered already exists as a folder
            if os.path.isfile(new_name):
                print(new_name, "folder already exists")
                frame = imutils.resize(frame, width=400)
                rects = detector.detectMultiScale(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY),
                                                  scaleFactor=1.1,
                                                  minNeighbors=5,
                                                  minSize=(30, 30))
                FaceFileName = new_name + str(y+x) + ".jpg"
                for (x, y, w, h) in rects:
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    cv2.imshow("Frame", frame)
                    key = cv2.waitKey(1) & 0xFF
                    if key == ord("k"):
                        p = os.path.join([new_name, FaceFileName.format(str(total).zfill(5))])
                        # cv2.imwrite(os.path.join(path_2,FaceFileName),sub_face)
                        cv2.imwrite(p, orig)
                        total += 1
                        print("Image saved")
                    elif key == ord("q"):
                        break
            # If the new_name does not exist as a folder, a new folder has to be created
            else:
                path_2 = os.path.join('Images', new_name)
                os.mkdir(path_2)
                print("Directory '% s' created" % new_name)
                frame = imutils.resize(frame, width=400)
                rects = detector.detectMultiScale(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY),
                                                  scaleFactor=1.1,
                                                  minNeighbors=5,
                                                  minSize=(30, 30))
                FaceFileName = new_name + str(y+x) + ".jpg"
                for (x, y, w, h) in rects:
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    cv2.imshow("Frame", frame)
                    key = cv2.waitKey(1) & 0xFF
                    if key == ord("k"):
                        p = os.path.join([path_2, FaceFileName.format(str(total).zfill(5))])
                        # cv2.imwrite(os.path.join(path_2,FaceFileName),sub_face)
                        cv2.imwrite(p, orig)
                        total += 1
                        print("Image saved")
        # update the list of names
        names.append(name)
    # loop over the recognized faces
    for ((x, y, w, h), name) in zip(faces, names):
        # rescale the face coordinates
        # draw the predicted face name on the image
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame, name, (x, y), cv2.FONT_HERSHEY_SIMPLEX,
                    0.75, (0, 255, 0), 2)
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video_capture.release()
cv2.destroyAllWindows()
So, your issue is rather related to file operations than to face recognition...
Try to check whether the folder already exists before trying to create it:
path_2 = os.path.join('Images', new_name)
if not os.path.exists(path_2):
    os.mkdir(path_2)
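As a side note (not part of the original answer), on Python 3.2+ os.makedirs can do the existence check for you, so the check and the mkdir collapse into one call. A minimal sketch, reusing the same new_name and path_2:

import os

# Create the per-person folder; exist_ok=True suppresses FileExistsError
# when the directory is already there.
path_2 = os.path.join('Images', new_name)
os.makedirs(path_2, exist_ok=True)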
Using the idea given by @Bohdan:
else:  # To store the unknown new face with name
    new_name = input("Who is this?")
    path_2 = os.path.join('Images', new_name)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255),
                      thickness=2)
        cv2.putText(frame, new_name, (x, y), cv2.FONT_HERSHEY_SIMPLEX,
                    0.75, (0, 255, 0), 2)
        sub_face = frame[y:y+h, x:x+w]
        FaceFileName = new_name + str(y+x) + ".jpg"
        if not os.path.exists(path_2):
            os.mkdir(path_2)
            print("Directory '% s' created" % new_name)
            cv2.imwrite(os.path.join(path_2, FaceFileName), sub_face)
        # To store unrecognised faces of known people
        else:
            cv2.imwrite(os.path.join(path_2, FaceFileName), sub_face)
Code works perfectly fine!
I have a script that does single-threaded, sequential face detection on photos, and a script for cutting out the faces. How do I convert this to multithreading, so that the images are processed simultaneously, in parallel, rather than one after another?
import os
import cv2
import numpy as np

# Define paths
base_dir = os.path.dirname(__file__)
prototxt_path = os.path.join(base_dir + 'data/deploy.prototxt')
caffemodel_path = os.path.join(base_dir + 'data/weights.caffemodel')

# Read the model
model = cv2.dnn.readNetFromCaffe(prototxt_path, caffemodel_path)

# Create directory 'updated_images' if it does not exist
if not os.path.exists('updated_images'):
    print("New directory created")
    os.makedirs('updated_images')

# Loop through all images and save images with marked faces
for file in os.listdir(base_dir + 'images'):
    file_name, file_extension = os.path.splitext(file)
    if (file_extension in ['.png', '.jpg']):
        print("Image path: {}".format(base_dir + 'images/' + file))
        image = cv2.imread(base_dir + 'images/' + file)
        (h, w) = image.shape[:2]
        blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0))
        model.setInput(blob)
        detections = model.forward()
        # Create frame around face
        for i in range(0, detections.shape[2]):
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            confidence = detections[0, 0, i, 2]
            # If confidence > 0.5, show box around face
            if (confidence > 0.5):
                cv2.rectangle(image, (startX, startY), (endX, endY), (255, 255, 255), 2)
        cv2.imwrite(base_dir + 'updated_images/' + file, image)
        print("Image " + file + " converted successfully")
I tried to move the face detection and cropping into a function and then drive the parallel work through a pool and map, but I am very weak in this area and obviously did something wrong: the script just stopped working.
Here is how I would do it:
import os
import cv2
import numpy as np
import threading

base_dir = os.path.dirname(__file__)
prototxt_path = os.path.join(base_dir + 'data/deploy.prototxt')
caffemodel_path = os.path.join(base_dir + 'data/weights.caffemodel')
model = cv2.dnn.readNetFromCaffe(prototxt_path, caffemodel_path)

if not os.path.exists('updated_images'):
    print("New directory created")
    os.makedirs('updated_images')

def process(file, base_dir):
    print("Image path: {}".format(base_dir + 'images/' + file))
    image = cv2.imread(base_dir + 'images/' + file)
    blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0))
    model.setInput(blob)
    detections = model.forward()
    h, w = image.shape[:2]
    for i in range(detections.shape[2]):
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        startX, startY, endX, endY = box.astype("int")
        confidence = detections[0, 0, i, 2]
        if confidence > 0.5:
            cv2.rectangle(image, (startX, startY), (endX, endY), (255, 255, 255), 2)
    cv2.imwrite(base_dir + 'updated_images/' + file, image)
    print("Image " + file + " converted successfully")

for file in os.listdir(base_dir + 'images'):
    file_name, file_extension = os.path.splitext(file)
    if file_extension in ['.png', '.jpg']:
        thread = threading.Thread(target=process, args=(file, base_dir))
        thread.start()
Most of it is the same as your code, except that a large chunk is now in a function. I also took the liberty of removing some redundant code, such as the parentheses you don't need to unpack an iterable or to write an if statement.
As I don't have the files you open in your code, I'm unable to test it out, hence if there are any problems, there might be something I missed, so feel free to ping me if that happens.
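If you want to do something only after every image has been written (a summary print, releasing the model, and so on), a common pattern is to keep references to the threads and join them at the end; whether a single cv2.dnn network can safely be shared across threads is also worth double-checking. A minimal sketch of the join pattern (my addition, not part of the answer above):

import threading

# Collect the threads so the script can block until every image is done.
threads = []
for file in os.listdir(base_dir + 'images'):
    _, file_extension = os.path.splitext(file)
    if file_extension in ['.png', '.jpg']:
        t = threading.Thread(target=process, args=(file, base_dir))
        t.start()
        threads.append(t)

# Wait for all worker threads to finish before continuing.
for t in threads:
    t.join()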
The code below goes through the files on my HDD, which holds 620,000 frames, and extracts the faces from them using OpenCV's DNN face detector. It works fine, but it takes about 1 second per frame = 172 hours.
So I want to use multithreading to speed this up but am not sure how to do so.
NOTE: I have 4 CPU cores on my laptop and my HDD has read and write speeds of about 100 MB/s
Example of the file path : /Volumes/HDD/frames/Fold1_part1/01/0/04541.jpg
frames_path = "/Volumes/HDD/frames"
path_HDD = "/Volumes/HDD/Data"

def filePath(path):
    for root, directories, files in os.walk(path, topdown=False):
        for file in files:
            if (directories == []):
                pass
            elif (len(directories) > 3):
                pass
            elif (len(root) == 29):
                pass
            else:
                # Only want the roots with /Volumes/HDD/Data/Fold1_part1/01
                for dir in directories:
                    path_video = os.path.join(root, dir)
                    for r, d, f in os.walk(path_video, topdown=False):
                        for fe in f:
                            fullPath = r[:32]
                            label = r[-1:]
                            folds = path_video.replace("/Volumes/HDD/Data/", "")
                            finalPath = os.path.join(frames_path, folds)
                            finalImage = os.path.join(finalPath, fe)
                            fullImagePath = os.path.join(path_video, fe)
                            try:
                                if (os.path.exists(finalPath) == False):
                                    os.makedirs(finalPath)
                                extractFaces(fullImagePath, finalImage)
                            except OSError as error:
                                print(error)
                                sys.exit(0)

def extractFaces(imageTest, savePath):
    model = "/Users/yudhiesh/Downloads/deep-learning-face-detection/res10_300x300_ssd_iter_140000.caffemodel"
    prototxt = "/Users/yudhiesh/Downloads/deep-learning-face-detection/deploy.prototxt.txt"
    net = cv2.dnn.readNet(model, prototxt)
    # load the input image and construct an input blob for the image
    # by resizing to a fixed 300x300 pixels and then normalizing it
    image = cv2.imread(imageTest)
    (h, w) = image.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0))
    print(f'Current file path {imageTest}')
    # pass the blobs through the network and obtain the predictions
    print("Computing object detections....")
    net.setInput(blob)
    detections = net.forward()
    # Detect face with highest confidence
    for i in range(0, detections.shape[2]):
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")
        confidence = detections[0, 0, i, 2]
        # If confidence > 0.5, save it as a separate file
        if (confidence > 0.5):
            frame = image[startY:endY, startX:endX]
            rect = dlib.rectangle(startX, startY, endX, endY)
            image = image[startY:endY, startX:endX]
            print(f'Saving image to {savePath}')
            cv2.imwrite(savePath, image)

if __name__ == "__main__":
    filePath(path_HDD)
Managed to cut the time down to 0.09-0.1 seconds per image. Thanks for the suggestion to use ProcessPoolExecutor.
frames_path = "/Volumes/HDD/frames"
path_HDD = "/Volumes/HDD/Data"

def filePath(path):
    for root, directories, files in os.walk(path, topdown=False):
        for file in files:
            if (directories == []):
                pass
            elif (len(directories) > 3):
                pass
            elif (len(root) == 29):
                pass
            else:
                # Only want the roots with /Volumes/HDD/Data/Fold1_part1/01
                for dir in directories:
                    path_video = os.path.join(root, dir)
                    for r, d, f in os.walk(path_video, topdown=False):
                        for fe in f:
                            fullPath = r[:32]
                            label = r[-1:]
                            folds = path_video.replace("/Volumes/HDD/Data/", "")
                            finalPath = os.path.join(frames_path, folds)
                            finalImage = os.path.join(finalPath, fe)
                            fullImagePath = os.path.join(path_video, fe)
                            try:
                                if (os.path.exists(finalPath) == False):
                                    os.makedirs(finalPath)
                                with concurrent.futures.ProcessPoolExecutor() as executor:
                                    executor.map(extractFaces(fullImagePath, finalImage))
                            except OSError as error:
                                print(error)
                                sys.exit(0)

def extractFaces(imageTest, savePath):
    model = "/Users/yudhiesh/Downloads/deep-learning-face-detection/res10_300x300_ssd_iter_140000.caffemodel"
    prototxt = "/Users/yudhiesh/Downloads/deep-learning-face-detection/deploy.prototxt.txt"
    net = cv2.dnn.readNet(model, prototxt)
    # load the input image and construct an input blob for the image
    # by resizing to a fixed 300x300 pixels and then normalizing it
    image = cv2.imread(imageTest)
    (h, w) = image.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0))
    print(f'Current file path {imageTest}')
    # pass the blobs through the network and obtain the predictions
    print("Computing object detections....")
    net.setInput(blob)
    detections = net.forward()
    # Detect face with highest confidence
    for i in range(0, detections.shape[2]):
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")
        confidence = detections[0, 0, i, 2]
        # If confidence > 0.5, save it as a separate file
        if (confidence > 0.5):
            frame = image[startY:endY, startX:endX]
            rect = dlib.rectangle(startX, startY, endX, endY)
            image = image[startY:endY, startX:endX]
            print(f'Saving image to {savePath}')
            cv2.imwrite(savePath, image)

if __name__ == "__main__":
    filePath(path_HDD)
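One caveat worth flagging: as written, executor.map(extractFaces(fullImagePath, finalImage)) calls extractFaces immediately in the parent process and passes its return value to map, so the detection work is not actually dispatched to the worker processes, and a new pool is created for every file. A sketch of how the pool is usually driven is below: collect the work items first, then submit them all to a single executor. Here collect_pairs is a hypothetical helper that returns the (fullImagePath, finalImage) tuples gathered by the directory walk in filePath.

import concurrent.futures

def process_all(pairs):
    # One pool for the whole run; each (source, destination) pair becomes a task.
    with concurrent.futures.ProcessPoolExecutor() as executor:
        futures = [executor.submit(extractFaces, src, dst) for src, dst in pairs]
        for future in concurrent.futures.as_completed(futures):
            future.result()  # re-raise any exception from a worker process

if __name__ == "__main__":
    process_all(collect_pairs(path_HDD))  # collect_pairs is hypothetical, see note above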
I am trying to read multiple images in a folder and do some processing. I have code that extracts facial landmark coordinates, but I can apply this code to only one image. I want the script to work with all images in the folder. I have read some solutions but they didn't work for me. Can you tell me how I can apply a loop for this?
This is my code:
import numpy as np
import cv2
import dlib
import os
from glob import glob

mouth_matrice = open("C:/Users/faruk/Desktop/matrices/mouth.txt", "w")
lefteye_matrice = open("C:/Users/faruk/Desktop/matrices/lefteye.txt", "w")
righteye_matrice = open("C:/Users/faruk/Desktop/matrices/righteye.txt", "w")
cascPath = ("C:/opencv/sources/data/haarcascades_cuda/haarcascade_frontalface_default.xml")
all_matrice = open("C:/Users/faruk/Desktop/matrices/all.txt", "w")
#imagePath = ("C:/Users/faruk/Desktop/Dataset/Testing/342_spontaneous_smile_4 (2-17-2018 8-37-58 PM)/342_spontaneous_smile_4 357.jpg")
mypath = os.path.join("c:", os.sep, "Users", "faruk", "Desktop", "Dataset", "Testing2")
PREDICTOR_PATH = ("C:/Users/faruk/Desktop/Working projects/facial-landmarks/shape_predictor_68_face_landmarks.dat")

JAWLINE_POINTS = list(range(0, 17))
RIGHT_EYEBROW_POINTS = list(range(17, 22))
LEFT_EYEBROW_POINTS = list(range(22, 27))
NOSE_POINTS = list(range(27, 36))
#RIGHT_EYE_POINTS = list(range(36, 42))
RIGHT_EYE_POINTS = list([36, 39])
ALL_POINTS = list([36, 39, 42, 45, 48, 51, 54, 57])
##LEFT_EYE_POINTS = list(range(42, 48))
LEFT_EYE_POINTS = list([42, 45])
##MOUTH_OUTLINE_POINTS = list(range(48, 61))
MOUTH_OUTLINE_POINTS = list([48, 51, 54, 57])
MOUTH_INNER_POINTS = list(range(61, 68))

# Create the haar cascade
faceCascade = cv2.CascadeClassifier(cascPath)
predictor = dlib.shape_predictor(PREDICTOR_PATH)

# Read the image
cv2.namedWindow('Landmarks found', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Landmarks found', 800, 800)
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Detect faces in the image
faces = faceCascade.detectMultiScale(
    gray,
    scaleFactor=1.05,
    minNeighbors=5,
    minSize=(100, 100),
    flags=cv2.CASCADE_SCALE_IMAGE
)
print("Found {0} faces!".format(len(faces)))

for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # Converting the OpenCV rectangle coordinates to Dlib rectangle
    dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
    landmarks = np.matrix([[p.x, p.y]
                           for p in predictor(image, dlib_rect).parts()])
    #landmarks_display = landmarks[LEFT_EYE_POINTS]
    landmarks_display = np.matrix(landmarks[ALL_POINTS])
    for idx, point in enumerate(landmarks_display):
        pos = (point[0, 0], point[0, 1])
        cv2.circle(image, pos, 2, color=(0, 255, 255), thickness=-1)
        np.savetxt(all_matrice, landmarks_display, fmt='%.f', newline=',')
    all_matrice.close()

# Draw a rectangle around the faces
cv2.imshow("Landmarks found", image)
cv2.waitKey(0)
You can use something like this to get paths of all images in a directory:
import os

# Folder with images
directory = 'c:/users/username/path/'

for filename in os.listdir(directory):
    if filename.endswith(".jpg"):
        image_path = os.path.join(directory, filename)
        # Your code
        continue
    else:
        continue
You need to add your code and process each path.
Hope this helps.
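Since your script already imports glob, an equivalent way to gather all the paths up front is shown below (just an aside, not required for the fix):

from glob import glob
import os

# Collect every .jpg path in the folder as a list you can loop over.
directory = 'c:/users/username/path/'
image_paths = glob(os.path.join(directory, '*.jpg'))
for image_path in image_paths:
    # Your code
    pass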
Edit:
I have no way to test it and it certainly needs a cleanup, but it might just work. Not sure what image extensions you want to include, so I only included jpg.
import os
import numpy as np
import cv2
import dlib

# Change directory path to the path of your image folder
directory = 'c:/users/admin/desktop/'

mouth_matrice = open("C:/Users/faruk/Desktop/matrices/mouth.txt", "w")
lefteye_matrice = open("C:/Users/faruk/Desktop/matrices/lefteye.txt", "w")
righteye_matrice = open("C:/Users/faruk/Desktop/matrices/righteye.txt", "w")
cascPath = ("C:/opencv/sources/data/haarcascades_cuda/haarcascade_frontalface_default.xml")
all_matrice = open("C:/Users/faruk/Desktop/matrices/all.txt", "w")
mypath = os.path.join("c:", os.sep, "Users", "faruk", "Desktop", "Dataset", "Testing2")
PREDICTOR_PATH = ("C:/Users/faruk/Desktop/Working projects/facial-landmarks/shape_predictor_68_face_landmarks.dat")

JAWLINE_POINTS = list(range(0, 17))
RIGHT_EYEBROW_POINTS = list(range(17, 22))
LEFT_EYEBROW_POINTS = list(range(22, 27))
NOSE_POINTS = list(range(27, 36))
#RIGHT_EYE_POINTS = list(range(36, 42))
RIGHT_EYE_POINTS = list([36, 39])
ALL_POINTS = list([36, 39, 42, 45, 48, 51, 54, 57])
##LEFT_EYE_POINTS = list(range(42, 48))
LEFT_EYE_POINTS = list([42, 45])
##MOUTH_OUTLINE_POINTS = list(range(48, 61))
MOUTH_OUTLINE_POINTS = list([48, 51, 54, 57])
MOUTH_INNER_POINTS = list(range(61, 68))

# Create the haar cascade
faceCascade = cv2.CascadeClassifier(cascPath)
predictor = dlib.shape_predictor(PREDICTOR_PATH)

for filename in os.listdir(directory):
    if filename.endswith(".jpg"):
        imagePath = os.path.join(directory, filename)
        cv2.namedWindow('Landmarks found', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('Landmarks found', 800, 800)
        image = cv2.imread(imagePath)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Detect faces in the image
        faces = faceCascade.detectMultiScale(gray,
                                             scaleFactor=1.05,
                                             minNeighbors=5,
                                             minSize=(100, 100),
                                             flags=cv2.CASCADE_SCALE_IMAGE
                                             )
        print("Found {0} faces!".format(len(faces)))
        for (x, y, w, h) in faces:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            # Converting the OpenCV rectangle coordinates to Dlib rectangle
            dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
            landmarks = np.matrix([[p.x, p.y] for p in predictor(image, dlib_rect).parts()])
            #landmarks_display = landmarks[LEFT_EYE_POINTS]
            landmarks_display = np.matrix(landmarks[ALL_POINTS])
            for idx, point in enumerate(landmarks_display):
                pos = (point[0, 0], point[0, 1])
                cv2.circle(image, pos, 2, color=(0, 255, 255), thickness=-1)
                np.savetxt(all_matrice, landmarks_display, fmt='%.f', newline=',')
            all_matrice.close()
        # Draw a rectangle around the faces
        cv2.imshow("Landmarks found", image)
        cv2.waitKey(0)
        continue
    else:
        continue
P.S. You should try to learn basic programming concepts before you tackle something like face recognition or image processing.
I am working with face recognition using OpenCV in Python. I want to close this window and then open another window when the cam recognizes a user. (Never mind the opening of the window, I already did that.) If I just open the other window, it loops and shows plenty of windows. I searched the internet but had no luck. Can someone help me? Here is my code:
import cv2, sys, numpy, os

size = 1
fn_haar = 'data/haarcascade_frontalface_alt.xml'
fn_dir = 'userface'

print('Loading..')
(images, lables, names, img_id) = ([], [], {}, 0)
for (subdirs, dirs, files) in os.walk(fn_dir):
    for subdir in dirs:
        names[img_id] = subdir
        subjectpath = os.path.join(fn_dir, subdir)
        for filename in os.listdir(subjectpath):
            f_name, f_extension = os.path.splitext(filename)
            if (f_extension.lower() not in ['.png', '.jpg', '.jpeg', '.gif', '.pgm']):
                print("Skipping " + filename + ", wrong file type")
                continue
            path = subjectpath + "/" + filename
            lable = img_id
            images.append(cv2.imread(path, 0))
            lables.append(int(lable))
        img_id += 1

(im_width, im_height) = (112, 92)
(images, lables) = [numpy.array(lis) for lis in [images, lables]]

model = cv2.face.FisherFaceRecognizer_create()
model.train(images, lables)

webcam = cv2.VideoCapture(0)
classifier = cv2.CascadeClassifier(fn_haar)

while (True):
    rval = False
    while (not rval):
        (rval, frame) = webcam.read()
        if (not rval):
            print("Failed to open webcam, Trying again...")
    frame = cv2.flip(frame, 1, 0)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    mini = cv2.resize(frame, (int(frame.shape[1] / size), int(frame.shape[0] / size)))
    faces = classifier.detectMultiScale(mini)
    for i in range(len(faces)):
        face_i = faces[i]
        (x, y, w, h) = [v * size for v in face_i]
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (im_width, im_height))
        prediction = model.predict(face_resize)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
        if prediction[1] < 2300:
            cv2.putText(frame, '%s - %.0f' % (names[prediction[0]], prediction[1]),
                        (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
        else:
            cv2.putText(frame, 'Unknown', (x-10, y-10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
    cv2.imshow("Login using Face Recognition", frame)
    key = cv2.waitKey(10)
    if (key == 27):
        break
I'm pretty sure you need to create a named window so that you can specifically close that window itself.
You can then destroy the windows when you don't need them anymore:
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
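If you only want to close that one window rather than all of them, cv2.destroyWindow takes the same window name (a small addition to the snippet above):

# Closes only the window created with cv2.namedWindow('image', ...)
cv2.destroyWindow('image')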
It depends on how you use cv2.namedWindow and cv2.imshow.
It could slow down your application if you use a different winname every time, as you would be creating new windows.
I would suggest just using cv2.imshow and modifying your code so that a variable creates a unique title for the current window.
win_name = 'Login using Face Recognition: '
success_count = 0

while (True):
    # ... your recognition logic that would set `recognized`
    recognized = True
    if recognized:
        cv2.destroyWindow(win_name + str(success_count))
        success_count += 1
    cv2.imshow(win_name + str(success_count), frame)
    key = cv2.waitKey(10)
    if (key == 27):
        break