Python + OpenCV: cv2.imwrite

I'm trying to detect a face and write the area containing the face to a separate file.
How can I do that? I think I need to use the faces variable (you can see it in the code below), but I'm not sure how.
from ffnet import mlgraph, ffnet, tmlgraph, imlgraph
import pylab
import sys
import cv, cv2
import numpy

cascade = cv.Load('C:\opencv\data\haarcascades\haarcascade_frontalface_alt.xml')

def detect(image):
    bitmap = cv.fromarray(image)
    faces = cv.HaarDetectObjects(bitmap, cascade, cv.CreateMemStorage(0))
    if faces:
        for (x,y,w,h),n in faces:
            cv2.rectangle(image, (x,y), (x+w,y+h), (255,255,255), 3)
    return image

if __name__ == "__main__":
    cam = cv2.VideoCapture(0)
    while 1:
        _, frame = cam.read()
        frame = numpy.asarray(detect(frame))
        cv2.imshow("features", frame)
        if cv2.waitKey(1) == 0x1b: # ESC
            print 'ESC pressed. Exiting ...'
            break

The following code should extract the faces from the image and save them to disk:
def detect(image):
    image_faces = []
    bitmap = cv.fromarray(image)
    faces = cv.HaarDetectObjects(bitmap, cascade, cv.CreateMemStorage(0))
    if faces:
        for (x,y,w,h),n in faces:
            image_faces.append(image[y:(y+h), x:(x+w)])
            #cv2.rectangle(image,(x,y),(x+w,y+h),(255,255,255),3)
    return image_faces

if __name__ == "__main__":
    cam = cv2.VideoCapture(0)
    while 1:
        _, frame = cam.read()
        image_faces = detect(frame)
        for i, face in enumerate(image_faces):
            cv2.imwrite("face-" + str(i) + ".jpg", face)
        #cv2.imshow("features", frame)
        if cv2.waitKey(1) == 0x1b: # ESC
            print 'ESC pressed. Exiting ...'
            break
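For reference, the same crop-and-save idea can also be written with the cv2 API alone, so the legacy cv module is not needed. This is just a minimal sketch, assuming a recent opencv-python build where cv2.data.haarcascades points at the bundled cascade files:

import cv2

# Assumption: cv2.data.haarcascades exists (recent opencv-python builds);
# otherwise substitute the full path to the cascade XML on your machine.
cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_alt.xml")

def detect_faces(image):
    """Return a list of cropped face regions found in a BGR image."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    boxes = cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    return [image[y:y + h, x:x + w] for (x, y, w, h) in boxes]

if __name__ == "__main__":
    cam = cv2.VideoCapture(0)
    ret, frame = cam.read()
    if ret:
        for i, face in enumerate(detect_faces(frame)):
            cv2.imwrite("face-" + str(i) + ".jpg", face)
    cam.release()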

Alternatively, with MTCNN and OpenCV (other dependencies, including TensorFlow, are also required), you can:
1. Perform face detection (input an image, output all boxes of detected faces):
from mtcnn.mtcnn import MTCNN
import cv2
face_detector = MTCNN()
img = cv2.imread("Anthony_Hopkins_0001.jpg")
detect_boxes = face_detector.detect_faces(img)
print(detect_boxes)
[{'box': [73, 69, 98, 123], 'confidence': 0.9996458292007446, 'keypoints': {'left_eye': (102, 116), 'right_eye': (150, 114), 'nose': (129, 142), 'mouth_left': (112, 168), 'mouth_right': (146, 167)}}]
2. Save all detected faces to separate files:
for i in range(len(detect_boxes)):
    box = detect_boxes[i]["box"]
    face_img = img[box[1]:(box[1] + box[3]), box[0]:(box[0] + box[2])]
    cv2.imwrite("face-{:03d}.jpg".format(i+1), face_img)
3. Or draw rectangles around all detected faces:
for box in detect_boxes:
    box = box["box"]
    pt1 = (box[0], box[1])                    # top left
    pt2 = (box[0] + box[2], box[1] + box[3])  # bottom right
    cv2.rectangle(img, pt1, pt2, (0,255,0), 2)
cv2.imwrite("detected-boxes.jpg", img)

wtluo, great!
May I propose a slight modification of your code in step 2? Here it is:
for i, detected_box in enumerate(detect_boxes):
    box = detected_box["box"]
    face_img = img[box[1]:box[1] + box[3], box[0]:box[0] + box[2]]
    cv2.imwrite("face-{:03d}.jpg".format(i+1), face_img)

Related

My code detects only a single face, even after applying a for loop

I am trying to detect emotions using my model, which takes images as an array. The code detects faces using MTCNN, but it only ever detects a single face. I have also applied a for loop, but it still detects just one face per frame.
import keras
from keras.models import load_model
from time import sleep
from keras.preprocessing.image import img_to_array
from keras.preprocessing import image
import cv2
import numpy as np
from configs import Configs
from PIL import Image
from keras.applications.vgg16 import preprocess_input
from mtcnn.mtcnn import MTCNN

cfg = Configs()
detector = MTCNN()
model = load_model(cfg.model_path)
emotions = ['Angry', 'Disgust', 'Fear', 'Happy',
            'Neutral', 'Pain', 'Sad', 'Surprise']

# Sample video on which we have to test
cap = cv2.VideoCapture(cfg.video_path)
while cap.isOpened():
    ret, frame = cap.read()
    faces = detector.detect_faces(frame)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    if ret == False:
        break
    #for face in faces:
    #    x = faces[0]['box'][0]
    #    y = faces[0]['box'][1]
    #    w = faces[0]['box'][2]
    #    h = faces[0]['box'][3]
    #face = x,y,w,h
    for face in faces:
        x = faces[0]['box'][0]
        y = faces[0]['box'][1]
        w = faces[0]['box'][2]
        h = faces[0]['box'][3]
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 255), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
        if np.sum([roi_gray]) != 0:
            roi = roi_gray.astype('float')/255.0
            roi = img_to_array(roi)
            roi = np.expand_dims(roi, axis=0)
            prediction = model.predict(roi)[0]
            label = emotions[prediction.argmax()]
            label_position = (x, y)
            cv2.putText(frame, label, label_position,
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        else:
            cv2.putText(frame, 'No Faces', (30, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    size = cv2.resize(frame[:, :, ::1], (560, 640))
    cv2.imshow('Emotion Detector', size)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
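For what it's worth, the loop above reads faces[0]['box'] on every iteration, so every pass draws the same first face, which is why only one face ever shows up. A minimal sketch of the fix (untested against your model, names taken from your code) is to take the box from the loop variable instead:

for face in faces:
    x, y, w, h = face['box']  # use this face's box, not faces[0]
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
    roi_gray = gray[y:y + h, x:x + w]
    roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
    # ... rest of the per-face prediction code unchanged ...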

Getting an error on predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat"): RuntimeError

I keep running into an error on line 8:
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat") RuntimeError: Unable to open shape_predictor_68_face_landmarks.da
I downloaded the file and tried to add it to the working directory, but PyCharm doesn't let me drag and drop anything.
import cv2
import numpy as np
import dlib

cap = cv2.VideoCapture(0)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

def midpoint(p1, p2):
    return int((p1.x + p2.x)/2), int((p1.y + p2.y)/2)

while True:
    _, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray)
    for face in faces:
        #x, y = face.left(), face.top()
        #x1, y1 = face.right(), face.bottom()
        #cv2.rectangle(frame, (x, y), (x1, y1), (0, 255, 0), 2)
        landmarks = predictor(gray, face)
        left_point = (landmarks.part(36).x, landmarks.part(36).y)
        right_point = (landmarks.part(39).x, landmarks.part(39).y)
        center_top = midpoint(landmarks.part(37), landmarks.part(38))
        center_bottom = midpoint(landmarks.part(41), landmarks.part(40))
        hor_line = cv2.line(frame, left_point, right_point, (0, 255, 0), 2)
        ver_line = cv2.line(frame, center_top, center_bottom, (0, 255, 0), 2)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1)
    if key == 27:
        break

cap.release()
cv2.destroyAllWindows()
Solved the problem. On my Mac I opened PyCharm and installed dlib, and at first I had the same problem. Then I searched Google for shape_predictor_68_face_landmarks.dat and tried different sources; the best one is https://github.com/davisking/dlib-models/blob/master/shape_predictor_68_face_landmarks.dat.bz2. I downloaded the file, extracted it, and dropped it into the same directory as the script.
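A small, hedged sketch of how to make the path issue obvious (the directory layout here is an assumption; adjust it to wherever you extracted the .bz2 archive):

import os
import dlib

# Assumption: the .dat file sits next to this script; change the path if not.
predictor_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              "shape_predictor_68_face_landmarks.dat")
if not os.path.isfile(predictor_path):
    raise FileNotFoundError("Expected the landmark model at " + predictor_path +
                            "; download and extract shape_predictor_68_face_landmarks.dat.bz2 there.")
predictor = dlib.shape_predictor(predictor_path)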

Real-time face recognition runs slowly on Raspberry Pi 3

I am using a Raspberry Pi 3 for face recognition. This is my code to detect the faces, but the real-time recognition runs slowly:
cam = cv2.VideoCapture(0)
rec = cv2.face.LBPHFaceRecognizer_create()
rec.read(...'/data/recognizer/trainingData.yml')
getId = 0
font = cv2.FONT_HERSHEY_SIMPLEX
userId = 0
i = 0

while cam.isOpened() and i < 91:
    i = i + 1
    ret, img = cam.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceDetect.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        getId, conf = rec.predict(gray[y:y + h, x:x + w])  # This will predict the id of the face
        # print conf
        if conf < 50:
            userId = getId
            cv2.putText(img, "Detected", (x, y + h), font, 2, (0, 255, 0), 2)
            record = Records.objects.get(id=userId)
            record.is_present = True
            record.save()
        else:
            cv2.putText(img, "Unknown", (x, y + h), font, 2, (0, 0, 255), 2)
    # Printing that number below the face
    # Params: cam image, id, location, font style, color, stroke
    cv2.imshow("Face", img)
    cv2.waitKey(50)
How can I fix it, please? Thanks for your help.
You should use threads to maximize performance. imutils is a library that lets you use threading for both picamera and webcam capture. The issue here is that too many input/output operations are being performed between frames.
Here is the article that helped increase my fps:
https://www.pyimagesearch.com/2015/12/28/increasing-raspberry-pi-fps-with-python-and-opencv/
And this is the code you can add:
import imutils
from imutils.video.pivideostream import PiVideoStream
Then, instead of cam = cv2.VideoCapture(0),
use cam = PiVideoStream().start();
instead of ret, img = cam.read(),
use img = cam.read();
and to release the camera, use:
cam.stop()
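Put together, a minimal sketch of the threaded capture loop might look like this (the resolution and framerate values are assumptions to tune for your Pi camera, and the detection/recognition part is the same as in the question):

import cv2
from imutils.video.pivideostream import PiVideoStream

cam = PiVideoStream(resolution=(320, 240), framerate=32).start()
try:
    while True:
        img = cam.read()  # frames come from a background thread; no (ret, img) tuple
        if img is None:
            continue
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # ... run faceDetect.detectMultiScale and rec.predict on gray as before ...
        cv2.imshow("Face", img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    cam.stop()
    cv2.destroyAllWindows()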

Read multiple images in a folder with Python

I am trying to read multiple images in a folder and do some processing. I have code that extracts facial landmark coordinates, but I can only apply it to a single image. I want the script to work with all images in the folder. I have read some solutions, but they didn't work for me. Can you tell me how to apply a loop for this?
This is my code:
import numpy as np
import cv2
import dlib
import os
from glob import glob

mouth_matrice = open("C:/Users/faruk/Desktop/matrices/mouth.txt", "w")
lefteye_matrice = open("C:/Users/faruk/Desktop/matrices/lefteye.txt", "w")
righteye_matrice = open("C:/Users/faruk/Desktop/matrices/righteye.txt", "w")
cascPath = ("C:/opencv/sources/data/haarcascades_cuda/haarcascade_frontalface_default.xml")
all_matrice = open("C:/Users/faruk/Desktop/matrices/all.txt", "w")
#imagePath = ("C:/Users/faruk/Desktop/Dataset/Testing/342_spontaneous_smile_4 (2-17-2018 8-37-58 PM)/342_spontaneous_smile_4 357.jpg")
mypath = os.path.join("c:", os.sep, "Users", "faruk", "Desktop", "Dataset", "Testing2")
PREDICTOR_PATH = ("C:/Users/faruk/Desktop/Working projects/facial-landmarks/shape_predictor_68_face_landmarks.dat")

JAWLINE_POINTS = list(range(0, 17))
RIGHT_EYEBROW_POINTS = list(range(17, 22))
LEFT_EYEBROW_POINTS = list(range(22, 27))
NOSE_POINTS = list(range(27, 36))
#RIGHT_EYE_POINTS = list(range(36, 42))
RIGHT_EYE_POINTS = list([36, 39])
ALL_POINTS = list([36, 39, 42, 45, 48, 51, 54, 57])
##LEFT_EYE_POINTS = list(range(42, 48))
LEFT_EYE_POINTS = list([42, 45])
##MOUTH_OUTLINE_POINTS = list(range(48, 61))
MOUTH_OUTLINE_POINTS = list([48, 51, 54, 57])
MOUTH_INNER_POINTS = list(range(61, 68))

# Create the haar cascade
faceCascade = cv2.CascadeClassifier(cascPath)
predictor = dlib.shape_predictor(PREDICTOR_PATH)

# Read the image
cv2.namedWindow('Landmarks found', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Landmarks found', 800, 800)
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Detect faces in the image
faces = faceCascade.detectMultiScale(
    gray,
    scaleFactor=1.05,
    minNeighbors=5,
    minSize=(100, 100),
    flags=cv2.CASCADE_SCALE_IMAGE
)
print("Found {0} faces!".format(len(faces)))

for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # Converting the OpenCV rectangle coordinates to Dlib rectangle
    dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
    landmarks = np.matrix([[p.x, p.y]
                           for p in predictor(image, dlib_rect).parts()])
    #landmarks_display = landmarks[LEFT_EYE_POINTS]
    landmarks_display = np.matrix(landmarks[ALL_POINTS])
    for idx, point in enumerate(landmarks_display):
        pos = (point[0, 0], point[0, 1])
        cv2.circle(image, pos, 2, color=(0, 255, 255), thickness=-1)
    np.savetxt(all_matrice, landmarks_display, fmt='%.f', newline=',')

all_matrice.close()

# Draw a rectangle around the faces
cv2.imshow("Landmarks found", image)
cv2.waitKey(0)
You can use something like this to get paths of all images in a directory:
import os

# Folder with images
directory = 'c:/users/username/path/'

for filename in os.listdir(directory):
    if filename.endswith(".jpg"):
        image_path = os.path.join(directory, filename)
        # Your code
        continue
    else:
        continue
You need to add your code and process each path.
Hope this helps.
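Since your script already imports glob, an equivalent way to collect the paths (same idea as the loop above, just a sketch with a placeholder folder) is:

import os
from glob import glob

directory = 'c:/users/username/path/'  # placeholder folder
for image_path in glob(os.path.join(directory, '*.jpg')):
    # process image_path here
    pass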
Edit:
I have no way to test it and it certainly needs a cleanup, but it might just work. I'm not sure which image extensions you want to include, so I only included .jpg.
import os
import numpy as np
import cv2
import dlib

# Change directory path to the path of your image folder
directory = 'c:/users/admin/desktop/'

mouth_matrice = open("C:/Users/faruk/Desktop/matrices/mouth.txt", "w")
lefteye_matrice = open("C:/Users/faruk/Desktop/matrices/lefteye.txt", "w")
righteye_matrice = open("C:/Users/faruk/Desktop/matrices/righteye.txt", "w")
cascPath = ("C:/opencv/sources/data/haarcascades_cuda/haarcascade_frontalface_default.xml")
all_matrice = open("C:/Users/faruk/Desktop/matrices/all.txt", "w")
mypath = os.path.join("c:", os.sep, "Users", "faruk", "Desktop", "Dataset", "Testing2")
PREDICTOR_PATH = ("C:/Users/faruk/Desktop/Working projects/facial-landmarks/shape_predictor_68_face_landmarks.dat")

JAWLINE_POINTS = list(range(0, 17))
RIGHT_EYEBROW_POINTS = list(range(17, 22))
LEFT_EYEBROW_POINTS = list(range(22, 27))
NOSE_POINTS = list(range(27, 36))
#RIGHT_EYE_POINTS = list(range(36, 42))
RIGHT_EYE_POINTS = list([36, 39])
ALL_POINTS = list([36, 39, 42, 45, 48, 51, 54, 57])
##LEFT_EYE_POINTS = list(range(42, 48))
LEFT_EYE_POINTS = list([42, 45])
##MOUTH_OUTLINE_POINTS = list(range(48, 61))
MOUTH_OUTLINE_POINTS = list([48, 51, 54, 57])
MOUTH_INNER_POINTS = list(range(61, 68))

# Create the haar cascade
faceCascade = cv2.CascadeClassifier(cascPath)
predictor = dlib.shape_predictor(PREDICTOR_PATH)

for filename in os.listdir(directory):
    if filename.endswith(".jpg"):
        imagePath = os.path.join(directory, filename)
        cv2.namedWindow('Landmarks found', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('Landmarks found', 800, 800)
        image = cv2.imread(imagePath)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Detect faces in the image
        faces = faceCascade.detectMultiScale(gray,
                                             scaleFactor=1.05,
                                             minNeighbors=5,
                                             minSize=(100, 100),
                                             flags=cv2.CASCADE_SCALE_IMAGE
                                             )
        print("Found {0} faces!".format(len(faces)))
        for (x, y, w, h) in faces:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            # Converting the OpenCV rectangle coordinates to Dlib rectangle
            dlib_rect = dlib.rectangle(int(x), int(y), int(x + w), int(y + h))
            landmarks = np.matrix([[p.x, p.y] for p in predictor(image, dlib_rect).parts()])
            #landmarks_display = landmarks[LEFT_EYE_POINTS]
            landmarks_display = np.matrix(landmarks[ALL_POINTS])
            for idx, point in enumerate(landmarks_display):
                pos = (point[0, 0], point[0, 1])
                cv2.circle(image, pos, 2, color=(0, 255, 255), thickness=-1)
            np.savetxt(all_matrice, landmarks_display, fmt='%.f', newline=',')
        # Draw a rectangle around the faces
        cv2.imshow("Landmarks found", image)
        cv2.waitKey(0)
        continue
    else:
        continue

# Close the output file only after all images have been processed;
# closing it inside the loop would make the next np.savetxt call fail.
all_matrice.close()
P.S. You should try to learn basic programming concepts before tackling something like face recognition or image processing.

Close a window when face is recognized

I am working on face recognition using OpenCV in Python. I want to close this window and then open another window when the camera recognizes a user. (Never mind the opening of the other window; I already did that.) If I just open the other window, it loops and shows plenty of windows. I searched the internet but had no luck. Can someone help me? Here is my code:
import cv2, sys, numpy, os

size = 1
fn_haar = 'data/haarcascade_frontalface_alt.xml'
fn_dir = 'userface'

print('Loading..')
(images, lables, names, img_id) = ([], [], {}, 0)
for (subdirs, dirs, files) in os.walk(fn_dir):
    for subdir in dirs:
        names[img_id] = subdir
        subjectpath = os.path.join(fn_dir, subdir)
        for filename in os.listdir(subjectpath):
            f_name, f_extension = os.path.splitext(filename)
            if (f_extension.lower() not in ['.png', '.jpg', '.jpeg', '.gif', '.pgm']):
                print("Skipping " + filename + ", wrong file type")
                continue
            path = subjectpath + "/" + filename
            lable = img_id
            images.append(cv2.imread(path, 0))
            lables.append(int(lable))
        img_id += 1

(im_width, im_height) = (112, 92)
(images, lables) = [numpy.array(lis) for lis in [images, lables]]

model = cv2.face.FisherFaceRecognizer_create()
model.train(images, lables)

webcam = cv2.VideoCapture(0)
classifier = cv2.CascadeClassifier(fn_haar)

while True:
    rval = False
    while not rval:
        (rval, frame) = webcam.read()
        if not rval:
            print("Failed to open webcam, Trying again...")
    frame = cv2.flip(frame, 1, 0)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    mini = cv2.resize(frame, (int(frame.shape[1] / size), int(frame.shape[0] / size)))
    faces = classifier.detectMultiScale(mini)
    for i in range(len(faces)):
        face_i = faces[i]
        (x, y, w, h) = [v * size for v in face_i]
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (im_width, im_height))
        prediction = model.predict(face_resize)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
        if prediction[1] < 2300:
            cv2.putText(frame, '%s - %.0f' % (names[prediction[0]], prediction[1]),
                        (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
        else:
            cv2.putText(frame, 'Unknown', (x - 10, y - 10),
                        cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
    cv2.imshow("Login using Face Recognition", frame)
    key = cv2.waitKey(10)
    if key == 27:
        break
I'm pretty sure you need to create a named window so that you can specifically close that window itself.
You can then destroy the windows when you don't need them anymore:
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
It depends on how you use cv2.namedWindow and cv2.imshow.
It could slow down your application if you use a different window name every time, as you would be creating new windows.
I would suggest just using cv2.imshow and modifying your code to have a variable that creates a unique title for the current window.
win_name = 'Login using Face Recognition: '
success_count = 0

while True:
    # ... your recognition logic that would set `recognized`
    recognized = True
    if recognized:
        cv2.destroyWindow(win_name + str(success_count))
        success_count += 1
    cv2.imshow(win_name + str(success_count), frame)
    key = cv2.waitKey(10)
    if key == 27:
        break
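If the goal is simply to close the recognition window once a known user is found, a hedged variant of the question's loop (threshold, names, and window title taken from the question; untested) would break out and destroy just that window:

window_name = "Login using Face Recognition"
recognized_user = None

while recognized_user is None:
    rval, frame = webcam.read()
    if not rval:
        continue
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    for (x, y, w, h) in classifier.detectMultiScale(gray):
        face_resize = cv2.resize(gray[y:y + h, x:x + w], (im_width, im_height))
        label, confidence = model.predict(face_resize)
        if confidence < 2300:
            recognized_user = names[label]
            break
    cv2.imshow(window_name, frame)
    if cv2.waitKey(10) == 27:  # ESC also exits
        break

cv2.destroyWindow(window_name)  # close only the recognition window
webcam.release()
# ... open the next window for recognized_user here ...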
