Leveraging GPU with OpenCV 4.2 - Python

I am trying to use the GPU of my virtual machine with the OpenCV library (4.2) and Python 3.7.
I have installed OpenCV with CUDA, and the following snippet returns 1:
import cv2
count = cv2.cuda.getCudaEnabledDeviceCount()
print(count)
I tried to run my code with and without leveraging GPU:
start = time.time()
network = cv.dnn.readNetFromDarknet(config_path, weights_path)
if IS_CUDA:
    network.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
    network.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA)
my_function(network, frame)
end = time.time()
print(end - start)
But the running time is the same.
My question: is using setPreferableBackend() and setPreferableTarget() enough to leverage GPU power?

First of all, before running into the code, you must check your working environment.
What virtual machine are you using? Virtual machines usually do not support GPU virtualization; you must add PCI passthrough to get this feature.
If that is in place, first check that your device is successfully detected by running nvidia-smi (within your favorite bash/cmd environment) in order to verify your NVIDIA drivers.
Then check that your opencv-python build supports your GPU by running cv2.cuda.getCudaEnabledDeviceCount() in a Python shell.
If the result is 0 and nvidia-smi does detect your GPU, you must compile your Python OpenCV wheel with support for your specific GPU model and install it into your Python environment.
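A quick way to check both things from Python (these are standard OpenCV calls; the build information reports whether CUDA/cuDNN were compiled in):
import cv2

# number of CUDA devices visible to OpenCV (0 means no CUDA support in this build, or no GPU)
print(cv2.cuda.getCudaEnabledDeviceCount())

# the build information lists whether CUDA and cuDNN were enabled at compile time
print([line for line in cv2.getBuildInformation().splitlines()
       if "CUDA" in line or "cuDNN" in line])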

It should be enough to make it use CUDA, and you should see a noticeable speedup. Check your Task Manager's Performance tab and see if your Nvidia GPU is being used. I don't know what's happening behind the scenes in my_function(), so I can't tell if there's something different in how you're using it.
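One thing to keep in mind (an assumption on my part, since I can't see my_function()): the very first forward() on the CUDA backend also pays one-time initialization costs, so timing a single call can hide the speedup. A minimal sketch, assuming network and frame are the objects from your code, that warms up once and then averages several forwards:
import time
import cv2 as cv

blob = cv.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
network.setInput(blob)
network.forward()  # warm-up: the first CUDA forward includes one-time initialization

start = time.time()
runs = 50
for _ in range(runs):
    network.setInput(blob)
    network.forward()
print("average forward time:", (time.time() - start) / runs)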
This is the class that I usually copy around when I want a quick setup for the YOLO network:
import cv2
import numpy as np

class Yolo:
    def __init__(self, cfg, weights, names, conf_thresh, nms_thresh, use_cuda = False):
        # save thresholds
        self.ct = conf_thresh;
        self.nmst = nms_thresh;
        # create net
        self.net = cv2.dnn.readNet(weights, cfg);
        print("Finished: " + str(weights));
        self.classes = [];
        file = open(names, 'r');
        for line in file:
            self.classes.append(line.strip());
        # use gpu + CUDA to speed up detections
        if use_cuda:
            self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA);
            self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA);
        # get output names
        layer_names = self.net.getLayerNames();
        self.output_layers = [layer_names[i[0]-1] for i in self.net.getUnconnectedOutLayers()];

    # runs detection on the image and draws on it
    def detect(self, img, target_id = None):
        # get detection stuff
        b, c, ids, idxs = self.get_detection_data(img, target_id);
        # draw result
        img = self.draw(img, b, c, ids, idxs);
        return img, len(idxs);

    # returns boxes, confidences, class_ids, and indexes (indices?)
    def get_detection_data(self, img, target_id = None):
        # get output
        layer_outputs = self.get_inf(img);
        # get dims
        height, width = img.shape[:2];
        # filter thresholds and target
        b, c, ids, idxs = self.thresh(layer_outputs, width, height, target_id);
        return b, c, ids, idxs;

    # runs the network on an image
    def get_inf(self, img):
        # construct a blob
        blob = cv2.dnn.blobFromImage(img, 1 / 255.0, (416,416), swapRB=True, crop=False);
        # get response
        self.net.setInput(blob);
        layer_outputs = self.net.forward(self.output_layers);
        return layer_outputs;

    # filters the layer output by conf, nms and id
    def thresh(self, layer_outputs, width, height, target_id = None):
        # some lists
        boxes = [];
        confidences = [];
        class_ids = [];
        # each layer outputs
        for output in layer_outputs:
            for detection in output:
                # get id and confidence
                scores = detection[5:];
                class_id = np.argmax(scores);
                confidence = scores[class_id];
                # filter out low confidence
                if confidence > self.ct:
                    # filter by target_id if set
                    if target_id is None or class_id == target_id:
                        # scale bounding box back to the image size
                        box = detection[0:4] * np.array([width, height, width, height]);
                        (cx, cy, w, h) = box.astype('int');
                        # grab the top-left corner of the box
                        tx = int(cx - (w / 2));
                        ty = int(cy - (h / 2));
                        # update lists
                        boxes.append([tx,ty,int(w),int(h)]);
                        confidences.append(float(confidence));
                        class_ids.append(class_id);
        # apply NMS
        idxs = cv2.dnn.NMSBoxes(boxes, confidences, self.ct, self.nmst);
        return boxes, confidences, class_ids, idxs;

    # draw detections on image
    def draw(self, img, boxes, confidences, class_ids, idxs):
        # check for zero
        if len(idxs) > 0:
            # loop over indices
            for i in idxs.flatten():
                # extract the bounding box coords
                (x,y) = (boxes[i][0], boxes[i][1]);
                (w,h) = (boxes[i][2], boxes[i][3]);
                # draw a box
                cv2.rectangle(img, (x,y), (x+w,y+h), (0,0,255), 2);
                # draw text
                text = "{}: {:.4}".format(self.classes[class_ids[i]], confidences[i]);
                cv2.putText(img, text, (x, y-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 2);
        return img;
You can use it like this
import cv2
import numpy as np

# this is the "yolo.py" file, I assume it's in the same folder as this program
from yolo import Yolo

# these are the filepaths of the yolo files
weights = "yolov3-tiny.weights";
config = "yolov3-tiny.cfg";
labels = "yolov3.txt";

# init yolo network
target_class_id = 79; # toothbrush
conf_thresh = 0.4; # less == more boxes (but more false positives)
nms_thresh = 0.4; # less == more boxes (but more overlap)
net = Yolo(config, weights, labels, conf_thresh, nms_thresh, use_cuda = True);

# open video capture
cap = cv2.VideoCapture(0); # probably laptop webcam

# loop
done = False;
while not done:
    # get frame
    ret, frame = cap.read();
    if not ret:
        done = cv2.waitKey(1) == ord('q');
        continue;

    # draw detection
    # frame, _ = net.detect(frame, target_id=target_class_id); # use this to filter by a single class_id
    frame, _ = net.detect(frame); # use this to not filter by class_id

    # show
    cv2.imshow("Marked", frame);
    done = cv2.waitKey(1) == ord('q');
Edit: Oh, ok, lol. I found my install notes and the very first instruction is this:
Follow PyImageSearch's Blog to install opencv with cuda support
The url is:
https://www.pyimagesearch.com/2020/02/03/how-to-use-opencvs-dnn-module-with-nvidia-gpus-cuda-and-cudnn/
Not sure how helpful this will be, but I apparently thought the tutorial was clear enough that I didn't need any additional notes or corrections for this step.

Related

Mediapipe Display Body Landmarks Only

I have installed Mediapipe (0.9.0.1) using Python (3.7.0) on Windows 11.
I have been able to successfully get Mediapipe to generate landmarks (for face and body) for an image, a video, and a webcam stream.
I would like to now get Mediapipe to only draw body-specific landmarks (i.e. exclude facial landmarks).
I understand that I may use OpenCV (or cvzone) to accomplish this goal; however, I am looking to achieve my objective using Mediapipe (i.e. using the draw_landmarks function in the MediaPipe library).
The specific bit of code I am trying (but with errors) is the following:
#Initialize a list to store the detected landmarks.
landmarks = []

# Iterate over the Mediapipe detected landmarks.
for landmark in results.pose_landmarks.landmark:
    # Append the Mediapipe landmark into the list.
    landmarks.append((int(landmark.x * width), int(landmark.y * height),
                      (landmark.z * width)))

#create index list for specific landmarks
body_landmark_indices = [11,12,13,14,15,16,23,24,25,26,27,28,29,30,31,32]

landmark_list_body = []

#Create a list which only has the required landmarks
for index in body_landmark_indices:
    landmark_list_body.append(landmarks[index - 1])

mp_drawing.draw_landmarks(
    image=output_image,
    landmark_list=landmark_list_body.pose_landmarks,
    connections=mp_pose.POSE_CONNECTIONS,
    landmark_drawing_spec=landmark_drawing_spec,
    connection_drawing_spec=connection_drawing_spec)
Executing the above, I get the error 'list' object has no attribute 'pose_landmarks'.
I have replaced landmark_list=landmark_list_body.pose_landmarks with landmark_list=landmark_list_body, but still get errors.
I am now very tired and out of ideas. Is there a capeless hero out there?
Thanks.
You can try the following approach:
import cv2
import mediapipe as mp
import numpy as np
from mediapipe.python.solutions.pose import PoseLandmark
from mediapipe.python.solutions.drawing_utils import DrawingSpec

mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_pose = mp.solutions.pose

custom_style = mp_drawing_styles.get_default_pose_landmarks_style()
custom_connections = list(mp_pose.POSE_CONNECTIONS)

# list of landmarks to exclude from the drawing
excluded_landmarks = [
    PoseLandmark.LEFT_EYE,
    PoseLandmark.RIGHT_EYE,
    PoseLandmark.LEFT_EYE_INNER,
    PoseLandmark.RIGHT_EYE_INNER,
    PoseLandmark.LEFT_EAR,
    PoseLandmark.RIGHT_EAR,
    PoseLandmark.LEFT_EYE_OUTER,
    PoseLandmark.RIGHT_EYE_OUTER,
    PoseLandmark.NOSE,
    PoseLandmark.MOUTH_LEFT,
    PoseLandmark.MOUTH_RIGHT ]

for landmark in excluded_landmarks:
    # we change the way the excluded landmarks are drawn
    custom_style[landmark] = DrawingSpec(color=(255,255,0), thickness=None)
    # we remove all connections which contain these landmarks
    custom_connections = [connection_tuple for connection_tuple in custom_connections
                          if landmark.value not in connection_tuple]

IMAGE_FILES = ["test.jpg"]
BG_COLOR = (192, 192, 192)
with mp_pose.Pose(
        static_image_mode=True,
        model_complexity=2,
        enable_segmentation=True,
        min_detection_confidence=0.5) as pose:
    for idx, file in enumerate(IMAGE_FILES):
        image = cv2.imread(file)
        image_height, image_width, _ = image.shape
        results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

        annotated_image = image.copy()
        mp_drawing.draw_landmarks(
            annotated_image,
            results.pose_landmarks,
            connections = custom_connections, # passing the modified connections list
            landmark_drawing_spec=custom_style) # and drawing style

        cv2.imshow('landmarks', annotated_image)
        cv2.waitKey(0)
It modifies the DrawingSpec and POSE_CONNECTIONS to "hide" a subset of landmarks.
However, due to the way the draw_landmarks() function is implemented in Mediapipe, it is also required to add a condition in drawing_utils.py (located in site-packages/mediapipe/python/solutions):
if drawing_spec.thickness == None: continue
Add it just before line 190 (the # White circle border comment). The result should look like this:
...
drawing_spec = landmark_drawing_spec[idx] if isinstance(
    landmark_drawing_spec, Mapping) else landmark_drawing_spec
if drawing_spec.thickness == None: continue
# White circle border
circle_border_radius = max(drawing_spec.circle_radius + 1,
                           int(drawing_spec.circle_radius * 1.2))
...
This change is required in order to completely eliminate the white border that is drawn around landmarks regardless of their drawing specification.
Hope it helps.
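If you prefer not to modify drawing_utils.py, another approach I have seen (a sketch, not tested against every Mediapipe version) is to copy only the wanted landmarks into a new NormalizedLandmarkList and draw that without connections; landmark_pb2 is the protobuf module that backs the landmark messages, and results/annotated_image/mp_drawing are the objects from the code above:
from mediapipe.framework.formats import landmark_pb2

# indices of the body landmarks to keep (same subset the question uses)
body_indices = [11,12,13,14,15,16,23,24,25,26,27,28,29,30,31,32]

landmark_subset = landmark_pb2.NormalizedLandmarkList(
    landmark=[results.pose_landmarks.landmark[i] for i in body_indices])

# connections are omitted because their indices refer to the full 33-landmark list
mp_drawing.draw_landmarks(annotated_image, landmark_subset)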

Object detection model for detecting rectangular shape (text cursor) in a video?

I'm currently doing some research to detect and locate a text-cursor (you know, the blinking rectangle shape that indicates the character position when you type on your computer) from a screen-record video. To do that, I've trained YOLOv4 model with custom object dataset (I took a reference from here) and planning to also implement DeepSORT to track the moving cursor.
Here's the example of training data I used to train YOLOv4:
Here's what I want to achieve:
Do you think using YOLOv4 + DeepSORT is considered overkill for this task? I'm asking because as of now, only 70%-80% of the video frame that contains the text-cursor can be successfully detected by the model. If it is overkill after all, do you know any other method that can be implemented for this task?
Anyway, I'm planning to detect the text-cursor not only from Visual Studio Code window, but also from Browser (e.g., Google Chrome) and Text Processor (e.g., Microsoft Word) as well. Something like this:
I'm considering the Sliding Window method as an alternative, but from what I've read, the method might consume a lot of resources and perform more slowly. I'm also considering Template Matching from OpenCV (like this), but I don't think it will perform better or faster than YOLOv4.
The constraints are performance speed (i.e., how many frames can be processed in a given amount of time) and detection accuracy (i.e., I want to avoid the letter 'l' or the digit '1' being detected as the text cursor, since those characters look similar in some fonts). But higher accuracy with lower FPS is acceptable, I think.
I'm currently using Python, Tensorflow, and OpenCV for this.
Thank you very much!
This would work if the cursor is the only moving object on the screen. Here is the before and after:
Before:
After:
The code:
import cv2
import numpy as np

BOX_WIDTH = 10
BOX_HEIGHT = 20

def process_img(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    kernel = np.ones((5, 5))
    img_canny = cv2.Canny(img_gray, 50, 50)
    return img_canny

def get_contour(img):
    contours, hierarchies = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if contours:
        return max(contours, key=cv2.contourArea)

def get_line_tip(cnt1, cnt2):
    x1, y1, w1, h1 = cv2.boundingRect(cnt1)
    if h1 > BOX_HEIGHT / 2:
        if np.any(cnt2):
            x2, y2, w2, h2 = cv2.boundingRect(cnt2)
            if x1 < x2:
                return x1, y1
        return x1 + w1, y1

def get_rect(x, y):
    half_width = BOX_WIDTH // 2
    lift_height = BOX_HEIGHT // 6
    return (x - half_width, y - lift_height), (x + half_width, y + BOX_HEIGHT - lift_height)

cap = cv2.VideoCapture("screen_record.mkv")
success, img_past = cap.read()

cnt_past = np.array([])
line_tip_past = 0, 0

while True:
    success, img_live = cap.read()
    if not success:
        break
    img_live_processed = process_img(img_live)
    img_past_processed = process_img(img_past)
    img_diff = cv2.bitwise_xor(img_live_processed, img_past_processed)
    cnt = get_contour(img_diff)
    line_tip = get_line_tip(cnt, cnt_past)
    if line_tip:
        cnt_past = cnt
        line_tip_past = line_tip
    else:
        line_tip = line_tip_past
    rect = get_rect(*line_tip)
    img_past = img_live.copy()
    cv2.rectangle(img_live, *rect, (0, 0, 255), 2)
    cv2.imshow("Cursor", img_live)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cv2.destroyAllWindows()
Breaking it down:
Import the necessary libraries:
import cv2
import numpy as np
Define the size of the tracking box depending on the size of the cursor:
BOX_WIDTH = 10
BOX_HEIGHT = 20
Define a function to process the frames into edges:
def process_img(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    kernel = np.ones((5, 5))
    img_canny = cv2.Canny(img_gray, 50, 50)
    return img_canny
Define a function that would retrieve the contour with the greatest area in an image (the cursor doesn't need to be large for this to work, it can be tiny if needed):
def get_contour(img):
    contours, hierarchies = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if contours:
        return max(contours, key=cv2.contourArea)
Define a function that will take in 2 contours: one being the contour of the cursor + some text for the current frame, the other being the contour of the cursor + some stray text from the frame before. With the two contours, we can identify whether the cursor is moving left or right:
def get_line_tip(cnt1, cnt2):
    x1, y1, w1, h1 = cv2.boundingRect(cnt1)
    if h1 > BOX_HEIGHT / 2:
        if np.any(cnt2):
            x2, y2, w2, h2 = cv2.boundingRect(cnt2)
            if x1 < x2:
                return x1, y1
        return x1 + w1, y1
Define a function that will take in the tip points of the cursor, and return a box based on the BOX_WIDTH and BOX_HEIGHT constants defined earlier:
def get_rect(x, y):
    half_width = BOX_WIDTH // 2
    lift_height = BOX_HEIGHT // 6
    return (x - half_width, y - lift_height), (x + half_width, y + BOX_HEIGHT - lift_height)
Define a capture device for the video, read one frame from the start of the video, and store it in a variable that will be used as the previous frame for every frame. Also define temporary values for the past contour and past line tip:
cap = cv2.VideoCapture("screen_record.mkv")
success, img_past = cap.read()
cnt_past = np.array([])
line_tip_past = 0, 0
Use a while loop, and read from the video. Process the frame and the frame before that frame in the video:
while True:
    success, img_live = cap.read()
    if not success:
        break
    img_live_processed = process_img(img_live)
    img_past_processed = process_img(img_past)
With the processed frames, we can find the difference between them using the cv2.bitwise_xor method to get where the movement is on the screen. Then, we can find the contour of the movement between the 2 frames using the get_contour function we defined:
img_diff = cv2.bitwise_xor(img_live_processed, img_past_processed)
cnt = get_contour(img_diff)
With the contour, we can utilize the get_line_tip function we defined to find the tip of the cursor. If a tip was found, save it into the line_tip_past variable to use for the next iteration; if a tip was not found, we can use the past tip we saved as the current tip:
line_tip = get_line_tip(cnt, cnt_past)
if line_tip:
    cnt_past = cnt
    line_tip_past = line_tip
else:
    line_tip = line_tip_past
Now we define a rectangle using the cursor tip and the get_rect function we defined earlier, and draw it onto the current frame. But before drawing it on, we save the frame to be the frame before the current frame of the next iteration:
rect = get_rect(*line_tip)
img_past = img_live.copy()
cv2.rectangle(img_live, *rect, (0, 0, 255), 2)
Finally, we display the frame:
cv2.imshow("Cursor", img_live)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
cv2.destroyAllWindows()

Set an ROI in OpenCV

I am very new to OpenCV and Python. I have followed a tutorial to use YOLO with yolov3-tiny. It can detect vehicles fine. But what I need to complete my project is to count the number of vehicles that pass a particular lane. If I count a vehicle every time it is detected (every time the bounding box appears), the count becomes very inaccurate, since the bounding box keeps blinking (it keeps re-locating the same vehicle, sometimes up to 5 times), so this is not a good way to count.
So I figured, how about counting a vehicle only when it reaches a certain point? I have seen a lot of code that seems to do this, but since I am a beginner, it is hard for me to understand, let alone run, in my system. Their samples require installing so many things that I can't get working because they throw errors.
See my sample code below:
import cv2
import numpy as np

cap = cv2.VideoCapture('rtsp://username:password@xxx.xxx.xxx.xxx:xxx/cam/realmonitor?channel=1')
whT = 320
confThreshold = 0.75
nmsThreshold = 0.3
list_of_vehicles = ["bicycle","car","motorbike","bus","truck"]

classesFile = 'coco.names'
classNames = []
with open(classesFile, 'r') as f:
    classNames = f.read().rstrip('\n').split('\n')

modelConfiguration = 'yolov3-tiny.cfg'
modelWeights = 'yolov3-tiny.weights'

net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

total_vehicle_count = 0

def getVehicleCount(boxes, class_name):
    global total_vehicle_count
    dict_vehicle_count = {}
    if(class_name in list_of_vehicles):
        total_vehicle_count += 1
        # print(total_vehicle_count)
    return total_vehicle_count, dict_vehicle_count

def findObjects(outputs, img):
    hT, wT, cT = img.shape
    bbox = []
    classIds = []
    confs = []
    for output in outputs:
        for det in output:
            scores = det[5:]
            classId = np.argmax(scores)
            confidence = scores[classId]
            if confidence > confThreshold:
                w, h = int(det[2] * wT), int(det[3] * hT)
                x, y = int((det[0] * wT) - w/2), int((det[1] * hT) - h/2)
                bbox.append([x, y, w, h])
                classIds.append(classId)
                confs.append(float(confidence))

    indices = cv2.dnn.NMSBoxes(bbox, confs, confThreshold, nmsThreshold)
    for i in indices:
        i = i[0]
        box = bbox[i]
        getVehicleCount(bbox, classNames[classIds[i]])
        x, y, w, h = box[0], box[1], box[2], box[3]
        cv2.rectangle(img, (x,y), (x+w, y+h), (255,0,255), 1)
        cv2.putText(img, f'{classNames[classIds[i]].upper()} {int(confs[i]*100)}%', (x,y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255,0,255), 2)

while True:
    success, img = cap.read()
    blob = cv2.dnn.blobFromImage(img, 1/255, (whT,whT), [0,0,0], 1, crop=False)
    net.setInput(blob)
    layerNames = net.getLayerNames()
    outputnames = [layerNames[i[0]-1] for i in net.getUnconnectedOutLayers()]
    # print(outputnames)
    outputs = net.forward(outputnames)
    findObjects(outputs, img)

    cv2.imshow('Image', img)
    if cv2.waitKey(1) & 0XFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
With this code, one vehicle is sometimes counted up to 50 times, depending on its size, which is highly inaccurate. How can I create an ROI so that a detected vehicle is only counted when it passes that point?
First, I would recommend that you consider using a visual tracker to track each detected rectangle. This is important even if you have an ROI to crop the image close to your counting zone/line. That is because even if the ROI is localized, the detection might still blink a couple of times causing a miscount. That is especially valid if another vehicle can enter the ROI while the first one is still passing it.
I recommend using the easy-to-use tracker provided by the widely used dlib library. Please refer to this example on how to use it.
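A minimal sketch of the dlib tracker, assuming img and next_img are consecutive frames (NumPy arrays) and x, y, w, h is one of your NMS-filtered boxes:
import dlib

tracker = dlib.correlation_tracker()
# start tracking a detected vehicle box (left, top, right, bottom)
tracker.start_track(img, dlib.rectangle(x, y, x + w, y + h))

# on each following frame, update the tracker and read the box back
tracker.update(next_img)
pos = tracker.get_position()
center = (int((pos.left() + pos.right()) / 2), int((pos.top() + pos.bottom()) / 2))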
Instead of counting detections within an ROI, you need to define an ROI line (within your ROI). Then, track the centers of the detection rectangles in each frame. Finally, increase your counter once a rectangle center passes the ROI line.
Regarding how to count a rectangle passing the ROI line (a minimal sketch follows these steps):
Select two points to define your ROI line.
Use your points to find the parameters for the general line formula ax + by + c = 0.
For each frame, plug the rectangle center coordinates into the formula and keep track of the sign of the result.
If the sign of the result changes, that means the rectangle center has passed the line.
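A sketch of that check with hypothetical helper names; the line here is defined by two example points, and track_id/cx/cy stand in for a tracked rectangle center:
def line_params(p1, p2):
    # parameters of ax + by + c = 0 for the line through p1 and p2
    (x1, y1), (x2, y2) = p1, p2
    a = y2 - y1
    b = x1 - x2
    c = -(a * x1 + b * y1)
    return a, b, c

def side_of_line(a, b, c, cx, cy):
    # sign of the half-plane that the center (cx, cy) lies in
    return 1 if a * cx + b * cy + c > 0 else -1

a, b, c = line_params((100, 400), (600, 400))  # example ROI line
prev_side = {}   # track_id -> sign seen in the previous frame
count = 0

track_id, cx, cy = 0, 350, 395  # hypothetical tracked rectangle center for one frame
side = side_of_line(a, b, c, cx, cy)
if track_id in prev_side and side != prev_side[track_id]:
    count += 1   # the center crossed the ROI line
prev_side[track_id] = side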

Incorrect facenet recognition

I've been working on a face recognition attendance management system. I've built the pipeline from scratch, but in the end, the script recognizes the wrong face among a group of 10 classes.
I've implemented the following pipeline using Tensorflow and Python.
Capture images, resize, align them using dlib's shape predictor and store them in named folders for later comparison while performing recognition.
Pickle the images into a data.pickle file for later deserialization.
Using OpenCV with the MTCNN algorithm to detect faces in frames captured by the webcam.
Passing these frames into a FaceNet network to create 128-D embeddings, which are then compared with the embeddings present in the pickle database.
Given Below is the main file which runs step 3 and 4:
from keras import backend as K
import time
from multiprocessing.dummy import Pool
K.set_image_data_format('channels_first')
import cv2
import os
import glob
import numpy as np
from numpy import genfromtxt
import tensorflow as tf
from keras.models import load_model
from fr_utils import *
from inception_blocks_v2 import *
from mtcnn.mtcnn import MTCNN
import dlib
from imutils import face_utils
import imutils
import pickle
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split

FRmodel = load_model('face-rec_Google.h5')
# detector = dlib.get_frontal_face_detector()
detector = MTCNN()

# FRmodel = faceRecoModel(input_shape=(3, 96, 96))
#
# # detector = dlib.get_frontal_face_detector()
# # predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# def triplet_loss(y_true, y_pred, alpha = 0.3):
#     """
#     Implementation of the triplet loss as defined by formula (3)
#
#     Arguments:
#     y_pred -- python list containing three objects:
#         anchor -- the encodings for the anchor images, of shape (None, 128)
#         positive -- the encodings for the positive images, of shape (None, 128)
#         negative -- the encodings for the negative images, of shape (None, 128)
#
#     Returns:
#     loss -- real number, value of the loss
#     """
#
#     anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]
#
#     pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), axis=-1)
#     neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), axis=-1)
#     basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
#     loss = tf.reduce_sum(tf.maximum(basic_loss, 0.0))
#
#     return loss
#
# FRmodel.compile(optimizer = 'adam', loss = triplet_loss, metrics = ['accuracy'])
# load_weights_from_FaceNet(FRmodel)

def ret_model():
    return FRmodel

def prepare_database():
    pickle_in = open("data.pickle","rb")
    database = pickle.load(pickle_in)
    return database

def unpickle_something(pickle_file):
    pickle_in = open(pickle_file,"rb")
    unpickled_file = pickle.load(pickle_in)
    return unpickled_file

def webcam_face_recognizer(database):
    cv2.namedWindow("preview")
    vc = cv2.VideoCapture(0)

    while vc.isOpened():
        ret, frame = vc.read()
        img_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img = frame
        # We do not want to detect a new identity while the program is in the process of identifying another person
        img = process_frame(img, img)

        cv2.imshow("Preview", img)
        cv2.waitKey(1)

    vc.release()

def process_frame(img, frame):
    """
    Determine whether the current frame contains the faces of people from our database
    """
    # rects = detector(img)
    rects = detector.detect_faces(img)
    # Loop through all the faces detected and determine whether or not they are in the database
    identities = []
    for (i, rect) in enumerate(rects):
        (x, y, w, h) = rect['box'][0], rect['box'][1], rect['box'][2], rect['box'][3]
        img = cv2.rectangle(frame, (x, y), (x+w, y+h), (255,0,0), 2)

        identity = find_identity(frame, x-50, y-50, x+w+50, y+h+50)
        cv2.putText(img, identity, (10,500), cv2.FONT_HERSHEY_SIMPLEX, 4, (255,255,255), 2, cv2.LINE_AA)

        if identity is not None:
            identities.append(identity)

    if identities != []:
        cv2.imwrite('example.png', img)

    return img

def find_identity(frame, x, y, w, h):
    """
    Determine whether the face contained within the bounding box exists in our database

    x1,y1_____________
    |                 |
    |                 |
    |_________________x2,y2
    """
    height, width, channels = frame.shape
    # The padding is necessary since the OpenCV face detector creates the bounding box around the face and not the head
    part_image = frame[y:y+h, x:x+w]

    return who_is_it(part_image, database, FRmodel)

def who_is_it(image, database, model):
    encoding = img_to_encoding(image, model)
    min_dist = 100

    # Loop over the database dictionary's names and encodings.
    for (name, db_enc) in database.items():
        # Compute L2 distance between the target "encoding" and the current "emb" from the database.
        dist = np.linalg.norm(db_enc.flatten() - encoding.flatten())
        print('distance for %s is %s' % (name, dist))

        # If this distance is less than the min_dist, then set min_dist to dist, and identity to name
        if dist < min_dist:
            min_dist = dist
            identity = name

    if min_dist > 0.1:
        print('Unknown person')
    else:
        print(identity)
    return identity

if __name__ == "__main__":
    database = prepare_database()
    webcam_face_recognizer(database)
What am I doing wrong here?
Here, FRmodel is the trained FaceNet model.
A few points:
I don't see resizing, aligning, and whitening of the input face image that is fed into the network (a sketch follows these points).
You cannot add a fixed margin of 50 to a variable-sized face. There has to be a scaling such that the face region fills almost the same region in every input image.
I am not sure about the model you are using, but if you are using FaceNet, your accepted matching threshold, 0.1, seems to be very low. It will not accept any matches unless it is the same exact image (with a distance of 0.0), or has a very minimal variation from the gallery image.
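A rough sketch of the kind of preprocessing I mean; the 96x96 size and the 0.2 margin are assumptions, so adjust them to whatever your img_to_encoding/network expects:
import cv2
import numpy as np

def preprocess_face(frame, x, y, w, h, margin=0.2, size=96):
    # pad the detected box proportionally to its size, not by a fixed pixel amount
    dx, dy = int(w * margin), int(h * margin)
    x1, y1 = max(x - dx, 0), max(y - dy, 0)
    x2, y2 = min(x + w + dx, frame.shape[1]), min(y + h + dy, frame.shape[0])
    face = cv2.resize(frame[y1:y2, x1:x2], (size, size))

    # prewhiten: zero mean, unit variance, as is usually done for FaceNet-style models
    face = face.astype(np.float32)
    mean, std = face.mean(), face.std()
    std_adj = max(std, 1.0 / np.sqrt(face.size))
    return (face - mean) / std_adj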

Face Recognition in Python & OpenCV

I am able to find faces in a video and save them to my local directory using Python and OpenCV, as per the code below.
import cv2
import numpy as np
import os

vc = cv2.VideoCapture('new1.avi')
c = 1

if vc.isOpened():
    rval, frame = vc.read()
else:
    rval = False

while rval:
    rval, frame = vc.read()
    cv2.imwrite(str(c) + '.jpg', frame)
    image_name = str(c) + '.jpg'
    cascPath = "haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(cascPath)
    image = cv2.imread(image_name)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    print("Found {0} faces!".format(len(faces)))
    if len(faces) >= 1:
        for (x, y, w, h) in faces:
            cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.imshow("Faces found", image)
        cv2.waitKey(0)
    else:
        a = "rm " + image_name
        os.popen(a)
    c = c + 1
    cv2.waitKey(1)

vc.release()
But now I want to identify the person whose face appears in that video.
How can I determine the person's identity?
That is, scan the face, match it against my local face database, and if a match is found, return the name, etc.
Differentiating between people in photos is not a trivial task, but there are some examples out there. As mentioned by Derman in an earlier comment, the best way is to use machine learning to teach the program what different persons' faces look like. One way is to manually find and extract features from people's faces, such as the distance between the eyes relative to the distance between the eyes and the mouth, and so on. This would, though, need attention to the effects of lens distortion and perspective. There are multiple research papers discussing the best techniques, like this paper using eigenvectors from a set of faces to find the most probable match:
Face Recognition Using Eigen Faces
There is a machine learning toolbox for Python called scikit-learn, which implements support for classification, regression, clustering, and so on. You can use it to train neural networks and support vector machines, among others. Here is a complete example of how to implement the Eigenface method using an SVM with scikit-learn and Python:
Complete implementation using Python
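A condensed sketch of that approach; the data here is a random placeholder, so swap in your flattened, equally sized grayscale face images and person labels:
import numpy as np
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

# placeholder data: replace with flattened grayscale face crops (rows) and person labels
X = np.random.rand(200, 64 * 64)
y = np.random.randint(0, 10, 200)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)

# eigenfaces: project the faces onto a small number of principal components, then classify with an SVM
pca = PCA(n_components=50, whiten=True).fit(X_train)
clf = SVC(kernel='rbf', class_weight='balanced').fit(pca.transform(X_train), y_train)
print("test accuracy:", clf.score(pca.transform(X_test), y_test))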
You can use either EigenFaceRecognizer, FisherFaceRecognizer, or LBPH.
All three algorithms are built into OpenCV's face module (opencv-contrib-python).
import os
import sys
import cv2
import numpy as np
from PIL import Image

# Create a recognizer object
recognizer = cv2.face.createEigenFaceRecognizer()
# But remember: for EigenFaces all the images, whether training or testing, have to be of the same shape

# the face detector used inside get_images_and_labels below
faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

#==========================================================================
# get_images_and_labels will give us a list of images and a list of labels to train the recognizer created above
# the function requires the path of the directory where all the images are stored
#===========================================================================
def get_images_and_labels(path):
    # Append all the absolute image paths in a list image_paths
    image_paths = [os.path.join(path, f) for f in os.listdir(path) if not f.endswith('.sad')]
    # images will contain face images
    images = []
    # labels will contain the label that is assigned to each image
    labels = []
    final_images = []
    largest_image_size = 0
    largest_width = 0
    largest_height = 0

    for image_path in image_paths:
        # Read the image and convert to grayscale
        image_pil = Image.open(image_path).convert('L')
        # Convert the image format into a numpy array
        image = np.array(image_pil, 'uint8')
        # Get the label of the image
        nbr = int(os.path.split(image_path)[1].split(".")[0].replace("subject", ""))
        # Detect the face in the image
        faces = faceCascade.detectMultiScale(image)
        # If a face is detected, append the face to images and the label to labels
        for (x, y, w, h) in faces:
            images.append(image[y: y + h, x: x + w])
            labels.append(nbr)
            cv2.imshow("Adding faces to training set...", image[y: y + h, x: x + w])
            cv2.waitKey(50)

    # find the largest face crop so every image can be resized to the same shape
    for image in images:
        if image.size > largest_image_size:
            largest_image_size = image.size
            largest_width, largest_height = image.shape

    for image in images:
        image = cv2.resize(image, (largest_width, largest_height), interpolation=cv2.INTER_CUBIC)
        final_images.append(image)

    # return the images list and labels list
    return final_images, labels, largest_width, largest_height

#===================================================================
# Perform the training
# the trainer takes two parameters as input
# the first parameter is the list of images
# the second parameter is a numpy array of their corresponding labels
#===================================================================
path = 'training_images'  # placeholder: directory holding the "subject" images
images, labels, largest_width, largest_height = get_images_and_labels(path)
recognizer.train(images, np.array(labels)) # training takes the image list as input

image_paths = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.sad')]
for image_path in image_paths:
    predict_image_pil = Image.open(image_path).convert('L')
    predict_image = np.array(predict_image_pil, 'uint8')
    faces = faceCascade.detectMultiScale(predict_image)
    for (x, y, w, h) in faces:
        result = cv2.face.MinDistancePredictCollector()
        predict_image = predict_image[y: y + h, x: x + w]
        predict_image = cv2.resize(predict_image, (largest_width, largest_height), interpolation=cv2.INTER_CUBIC)

        # =========================================================
        # the predict method will give us the prediction
        # we will get the label in the next statement
        # predict_image is the image that you want to recognize
        # =========================================================
        recognizer.predict(predict_image, result, 0) # this statement will give the prediction

        # ==========================================
        # This statement below will give us the label
        # ==========================================
        nbr_predicted = result.getLabel()

        # ==========================================
        # conf will tell us how confident our recognizer is in its prediction
        # ==========================================
        conf = result.getDist()
        nbr_actual = int(os.path.split(image_path)[1].split(".")[0].replace("subject", ""))

        if nbr_actual == nbr_predicted:
            print("{} is Correctly Recognized with confidence {}".format(nbr_actual, conf))
        else:
            print("{} is Incorrectly Recognized as {}".format(nbr_actual, nbr_predicted))
sys.exit()
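For what it's worth, a minimal sketch with the current opencv-contrib-python naming for LBPH, which replaces the collector-based prediction above with the plain predict() call:
import cv2
import numpy as np

# LBPH, unlike EigenFaces, does not require all training crops to have the same shape
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.train(images, np.array(labels))   # images/labels from get_images_and_labels above

# reusing the first training crop here purely as a stand-in test image
label, confidence = recognizer.predict(images[0])
print("predicted subject {} with confidence {}".format(label, confidence))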
