Cropping face using dlib facial landmarks - python

I am trying to crop a face using the facial landmarks identified by dlib. The right eyebrow is causing problems - the crop goes flat across rather than following the eyebrow arc.
What am I doing wrong here?
from imutils import face_utils
import imutils
import numpy as np
import collections
import dlib
import cv2
def face_remap(shape):
    remapped_image = shape.copy()
    # left eyebrow
    remapped_image[17] = shape[26]
    remapped_image[18] = shape[25]
    remapped_image[19] = shape[24]
    remapped_image[20] = shape[23]
    remapped_image[21] = shape[22]
    # right eyebrow
    remapped_image[22] = shape[21]
    remapped_image[23] = shape[20]
    remapped_image[24] = shape[19]
    remapped_image[25] = shape[18]
    remapped_image[26] = shape[17]
    # neatening
    remapped_image[27] = shape[0]
    return remapped_image
"""
MAIN CODE STARTS HERE
"""
# load the input image, resize it, and convert it to grayscale
image = cv2.imread("images/faceCM1.jpg")
image = imutils.resize(image, width=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
out_face = np.zeros_like(image)
# initialize dlib's face detector (HOG-based) and then create the facial landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
# detect faces in the grayscale image
rects = detector(gray, 1)
# loop over the face detections
for (i, rect) in enumerate(rects):
    """
    Determine the facial landmarks for the face region, then
    convert the facial landmark (x, y)-coordinates to a NumPy array
    """
    shape = predictor(gray, rect)
    shape = face_utils.shape_to_np(shape)

    # initialize mask array
    remapped_shape = np.zeros_like(shape)
    feature_mask = np.zeros((image.shape[0], image.shape[1]))

    # we extract the face
    remapped_shape = face_remap(shape)
    cv2.fillConvexPoly(feature_mask, remapped_shape[0:27], 1)
    feature_mask = feature_mask.astype(bool)  # np.bool is deprecated; use the builtin
    out_face[feature_mask] = image[feature_mask]
    cv2.imshow("mask_inv", out_face)
    cv2.imwrite("out_face.png", out_face)
[sample image of the cropped face showing the issue]

Using the convex hull formed by the 68 landmarks didn't exactly achieve the desired output, so I took the following approach to this problem using scikit-image instead of OpenCV.
1. Load image and predict 68 landmarks
import dlib
import numpy as np
import skimage.draw

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

img = dlib.load_rgb_image('mean.jpg')
rect = detector(img)[0]
sp = predictor(img, rect)
landmarks = np.array([[p.x, p.y] for p in sp.parts()])
2. Select the landmarks that represents the shape of the face
(I had to reverse the order of the eyebrow landmarks because the 68 landmarks aren't ordered to describe the face outline.)
outline = landmarks[[*range(17), *range(26,16,-1)]]
3. Draw a polygon using these landmarks using scikit-image
Y, X = skimage.draw.polygon(outline[:,1], outline[:,0])
4. Create a canvas with zeros and use the polygon as mask to original image
cropped_img = np.zeros(img.shape, dtype=np.uint8)
cropped_img[Y, X] = img[Y, X]
For the sake of completeness, I provide below a solution using scipy.spatial.ConvexHull, if this option is still preferred:
from scipy.spatial import ConvexHull

vertices = ConvexHull(landmarks).vertices
Y, X = skimage.draw.polygon(landmarks[vertices, 1], landmarks[vertices, 0])
cropped_img = np.zeros(img.shape, dtype=np.uint8)
cropped_img[Y, X] = img[Y, X]

It's because the face shape you are providing is not convex.
fillConvexPoly works correctly on convex shapes only; in this case there is a concave corner (at point #27), and hence the results are messed up.
To fix this, modify the function as
def face_remap(shape):
    remapped_image = cv2.convexHull(shape)
    return remapped_image
This would give you a result that looks like this: [resulting cropped-face image]
Now you may write some more code to remove the triangular section on the forehead (if you want it that way); one way to do that is sketched below.
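Since cv2.fillPoly, unlike fillConvexPoly, handles concave polygons, you could instead rasterize the remapped outline directly and keep the eyebrow arc without any forehead triangle. A minimal sketch, assuming the question's image and shape arrays and the original (eyebrow-reversing) face_remap:
outline = face_remap(shape)[0:27].astype(np.int32)  # concave face outline from the question
feature_mask = np.zeros(image.shape[:2], dtype=np.uint8)
cv2.fillPoly(feature_mask, [outline], 255)  # fillPoly rasterizes concave polygons correctly
out_face = cv2.bitwise_and(image, image, mask=feature_mask)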

Related

Mediapipe Display Body Landmarks Only

I have installed Mediapipe (0.9.0.1) using Python (3.7.0) on Windows 11.
I have been able to successfully get Mediapipe to generate landmarks (for face and body); for an image, video, and webcam stream.
I would like to now get Mediapipe to only draw body specific landmarks (i.e. exclude facial landmarks).
I understand that I may use OpenCV (or cvzone) to accomplish this goal; however, I am looking to achieve my objective using Mediapipe (i.e. using the draw_landmarks function in the MediaPipe library).
The specific bit of code I am trying (but with errors) is the following:
# Initialize a list to store the detected landmarks.
landmarks = []

# Iterate over the Mediapipe detected landmarks.
for landmark in results.pose_landmarks.landmark:
    # Append the Mediapipe landmark into the list.
    landmarks.append((int(landmark.x * width), int(landmark.y * height),
                      (landmark.z * width)))

# Create an index list for the specific landmarks.
body_landmark_indices = [11,12,13,14,15,16,23,24,25,26,27,28,29,30,31,32]
landmark_list_body = []

# Create a list which only has the required landmarks.
for index in body_landmark_indices:
    landmark_list_body.append(landmarks[index - 1])

mp_drawing.draw_landmarks(
    image=output_image,
    landmark_list=landmark_list_body.pose_landmarks,
    connections=mp_pose.POSE_CONNECTIONS,
    landmark_drawing_spec=landmark_drawing_spec,
    connection_drawing_spec=connection_drawing_spec)
Executing the above, I get the error 'list' object has no attribute 'pose_landmarks'.
I have replaced landmark_list=landmark_list_body.pose_landmarks with landmark_list=landmark_list_body, but that errors as well.
I am now very tired and out of ideas. Is there a capeless hero out there?
Thanks.
You can try the following approach:
import cv2
import mediapipe as mp
import numpy as np
from mediapipe.python.solutions.pose import PoseLandmark
from mediapipe.python.solutions.drawing_utils import DrawingSpec
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_pose = mp.solutions.pose
custom_style = mp_drawing_styles.get_default_pose_landmarks_style()
custom_connections = list(mp_pose.POSE_CONNECTIONS)
# list of landmarks to exclude from the drawing
excluded_landmarks = [
    PoseLandmark.LEFT_EYE,
    PoseLandmark.RIGHT_EYE,
    PoseLandmark.LEFT_EYE_INNER,
    PoseLandmark.RIGHT_EYE_INNER,
    PoseLandmark.LEFT_EAR,
    PoseLandmark.RIGHT_EAR,
    PoseLandmark.LEFT_EYE_OUTER,
    PoseLandmark.RIGHT_EYE_OUTER,
    PoseLandmark.NOSE,
    PoseLandmark.MOUTH_LEFT,
    PoseLandmark.MOUTH_RIGHT]

for landmark in excluded_landmarks:
    # we change the way the excluded landmarks are drawn
    custom_style[landmark] = DrawingSpec(color=(255,255,0), thickness=None)
    # we remove all connections which contain these landmarks
    custom_connections = [connection_tuple for connection_tuple in custom_connections
                          if landmark.value not in connection_tuple]
IMAGE_FILES = ["test.jpg"]
BG_COLOR = (192, 192, 192)
with mp_pose.Pose(
        static_image_mode=True,
        model_complexity=2,
        enable_segmentation=True,
        min_detection_confidence=0.5) as pose:
    for idx, file in enumerate(IMAGE_FILES):
        image = cv2.imread(file)
        image_height, image_width, _ = image.shape
        results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        annotated_image = image.copy()
        mp_drawing.draw_landmarks(
            annotated_image,
            results.pose_landmarks,
            connections=custom_connections,  # passing the modified connections list
            landmark_drawing_spec=custom_style)  # and drawing style
        cv2.imshow('landmarks', annotated_image)
        cv2.waitKey(0)
It modifies the DrawingSpec and POSE_CONNECTIONS to "hide" a subset of landmarks.
However, due to the way the draw_landmarks() function is implemented in Mediapipe, it is also required to add a condition in drawing_utils.py (located in site-packages/mediapipe/python/solutions):
if drawing_spec.thickness == None: continue
Add it before line 190 (# White circle border). The result should look like this:
...
drawing_spec = landmark_drawing_spec[idx] if isinstance(
    landmark_drawing_spec, Mapping) else landmark_drawing_spec
if drawing_spec.thickness == None: continue
# White circle border
circle_border_radius = max(drawing_spec.circle_radius + 1,
                           int(drawing_spec.circle_radius * 1.2))
...
This change is required in order to completely eliminate the white border that is drawn around landmarks regardless of their drawing specification.
Hope it helps.

Yolov8: finding the corners of a polygon segmentation

I’m trying to find the corners of a polygon segmentation that was made with Yolov8, as in this image:
[chessboard segmentation image]
This is my code:
model_trained = YOLO("runs/segment/yolov8n-seg_chessboard/weights/best.pt")
results = model_trained.predict(source="1.jpgresized.jpg", line_thickness=2, save_txt=True, save=True)
masks = results[0].masks # Masks object
masks.segments[0] # a numpy array of
I'm not able to figure out how to get the four corners of the segmentation out of this array.
Thanks!
Note that this could involve quite a lot of fine-tuning for your particular case. The idea here is to pass the segmentation mask to goodFeaturesToTrack, which finds strong corners in it. Then you pick the 4 best candidates. Finally, they are plotted on the original image.
from ultralytics.yolo.engine.model import YOLO
import cv2

def on_predict_batch_end(predictor):
    # results -> List[batch_size]
    path, im, im0s, vid_cap, s = predictor.batch
    predictor.results = zip(predictor.results, im0s)

model = YOLO("yolov8n-seg.pt")
model.add_callback("on_predict_batch_end", on_predict_batch_end)
results = model.predict(source="0", show=True, stream=True, classes=67)

for i, (result, im0) in enumerate(results):
    masks = result.masks
    if masks is not None:
        for mask in masks.masks:
            mask_np = mask.cpu().numpy()
            corners = cv2.goodFeaturesToTrack(mask_np, 4, 0.5, 50)  # get 4 corners
            for corner in corners:  # plot the corners on the original image
                x, y = corner.ravel()
                cv2.circle(im0, (int(x), int(y)), 5, (0, 0, 255), -1)
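As an alternative to goodFeaturesToTrack, contour approximation is another common way to recover a quadrilateral's corners. The following is a sketch (not part of the original answer) that loosens cv2.approxPolyDP's tolerance until only four vertices remain:
import cv2
import numpy as np

def mask_to_quad(mask):
    # Binarize the mask and trace its outer contour.
    mask_u8 = (mask > 0.5).astype(np.uint8) * 255
    contours, _ = cv2.findContours(mask_u8, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return None
    contour = max(contours, key=cv2.contourArea)  # keep the largest blob
    # Increase the approximation tolerance until at most 4 vertices survive.
    epsilon = 0.01 * cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, epsilon, True)
    while len(approx) > 4:
        epsilon *= 1.5
        approx = cv2.approxPolyDP(contour, epsilon, True)
    return approx.reshape(-1, 2)  # (4, 2) corner coordinates, or fewer if over-simplified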

How to remove multiple polygons using Opencv python

Hi StackOverflow team,
I have an image and I want to remove many portions/parts from it. I tried to use the code below, taken from Cropping Concave polygon from Image using Opencv python.
Assume I have this image: [input image]. Also, I have multiple polygons (such as rectangular shapes, or any form of a polygon) from the image, obtained via the Labelme annotation tool. I want to remove those shapes from the image, or simply change their pixels to white.
In other words, the Labelme tool gives you a dictionary file, where the dictionary has a key consisting of the points of each portion/polygon/shape.
The polygon points can then be easily extracted from the dictionary file. After the points are extracted, we can define our points by giving them names (e.g. a, b, c, ..., h), and each one is in this multidimensional format: "[[1526, 319], [1526, 376], [1593, 379], [1591, 324]]".
Here I thought of whitening each region, but whitening a multidimensional array seems to be unreliable.
import numpy as np
import cv2
import json

with open('ann1.json') as f:
    data = json.load(f)

a = data['shapes'][0]['points']
b = data['shapes'][1]['points']
c = data['shapes'][2]['points']

img = cv2.imread("lena.jpg")
pts = np.array(a)  # Points

## (1) Crop the bounding rect
rect = cv2.boundingRect(pts)
x, y, w, h = rect
croped = img[y:y+h, x:x+w].copy()

## (2) make mask
pts = pts - pts.min(axis=0)
mask = np.zeros(croped.shape[:2], np.uint8)
cv2.drawContours(mask, [pts], -1, (255, 255, 255), -1, cv2.LINE_AA)

## (3) do bit-op
dst = cv2.bitwise_and(croped, croped, mask=mask)

## (4) add the white background
bg = np.ones_like(croped, np.uint8) * 255
cv2.bitwise_not(bg, bg, mask=mask)
dst2 = bg + dst

#cv2.imwrite("croped.png", croped)
#cv2.imwrite("mask.png", mask)
#cv2.imwrite("dst.png", dst)
cv2.imwrite("dst2.png", dst2)
Using Lena I have this output: [output image with one region removed]
But I need to go further and whiten other points/polygons, for example, the eyes.
As you can see, my code can use only one polygon's points. I tried appending two other polygons' points, in my case the two eyes, and got: [output image with incorrect regions]
By appending, I mean I added the multidimensional points together (e.g. pts = np.array(a+b+c)).
In short, given an image, is there a short way to remove these multiple polygons from it (while keeping the dimensions of the image) using OpenCV and Python?
Json File:
https://drive.google.com/file/d/1UyOYUVMHpu2vBBEdR99bwrRX5xIfdOCa/view?usp=sharing
You'll need to use a loop to go through all the shapes in the JSON file. I've edited your code to reflect this.
import cv2
import json
import matplotlib.pyplot as plt
import numpy as np

img_path = r"/path/to/lena.png"
json_path = r"/path/to/lena.json"

with open(json_path) as f:
    data = json.load(f)

img = cv2.imread(img_path)

for idx in np.arange(len(data['shapes'])):
    if idx == 0:  # can remove this
        continue  # can remove this
    a = data['shapes'][idx]['points']
    pts = np.array(a, dtype=np.int32)  # boundingRect needs an integer point array

    ## (1) Crop the bounding rect
    rect = cv2.boundingRect(pts)
    print(rect)
    x, y, w, h = rect
    img[y:y+h, x:x+w] = (255, 255, 255)

plt.imshow(img)
plt.show()
Output:
I skipped the first shape, since it didn't visualize the results nicely. I took your lead and used rectangles instead of polygons. If you need polygons, you'll need to use something like cv2.drawContours(), cv2.polylines(), or cv2.fillPoly(), as is recommended in the SO answer you have linked here, to achieve it.
I would like to share with you my expected solution, which is a slightly modified version of @Shawn Mathew's answer.
Input image:
Code:
with open('lena.json') as f:
    json_file = json.load(f)

img = cv2.imread("folder/lena.jpg")

for polygon in np.arange(len(json_file['shapes'])):
    pts = np.array(json_file['shapes'][polygon]['points'], dtype=np.int32)  # fillPoly needs int32 points
    # If your polygons are rectangular, you can fill the areas you want removed
    # with white by uncommenting the two lines below:
    # x, y, w, h = cv2.boundingRect(pts)
    # cv2.rectangle(img, (x, y), (x+w, y+h), (255, 255, 255), -1)
    # If your polygons are shapes other than rectangles, you can just use:
    cv2.fillPoly(img, pts=[pts], color=(255, 255, 255))

plt.imshow(img)
plt.show()
The colors of the displayed image are changed because of Matplotlib; if you want to preserve the colors, save the image with cv2.imwrite.
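For example (a minimal sketch): OpenCV stores images as BGR while Matplotlib assumes RGB, so either save with OpenCV or convert before displaying:
# cv2.imwrite expects BGR, so this preserves the original colors.
cv2.imwrite("lena_removed.png", img)
# For a correctly colored Matplotlib preview, convert to RGB first:
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()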

OpenCV SURF for live streaming from webcam in Python

I am working on a SURF implementation in OpenCV using Python which detects a template in a given image. I have modified the code so that it takes video capture from the connected webcam, converts it into images, and then applies SURF to them. Following is the modified code:
import cv2
import numpy as np
cap = cv2.VideoCapture(0)
while(True):
    ret, img = cap.read()
    # Convert them to grayscale
    imgg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # SURF extraction
    surf = cv2.SURF()
    kp, descritors = surf.detect(imgg, None, useProvidedKeypoints=False)
    # Setting up samples and responses for kNN
    samples = np.array(descritors)
    responses = np.arange(len(kp), dtype=np.float32)
    # kNN training
    knn = cv2.KNearest()
    knn.train(samples, responses)
    # Now loading a template image and searching for similar keypoints
    template = cv2.imread('template.png')
    templateg = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
    keys, desc = surf.detect(templateg, None, useProvidedKeypoints=False)
    for h, des in enumerate(desc):
        des = np.array(des, np.float32).reshape((1, 128))
        retval, results, neigh_resp, dists = knn.find_nearest(des, 1)
        res, dist = int(results[0][0]), dists[0][0]
        if dist < 0.1:  # draw matched keypoints in red color
            color = (0, 0, 255)
        else:  # draw unmatched in blue color
            print dist
            color = (255, 0, 0)
        # Draw matched key points on original image
        x, y = kp[res].pt
        center = (int(x), int(y))
        cv2.circle(img, center, 2, color, -1)
        # Draw matched key points on template image
        x, y = keys[h].pt
        center = (int(x), int(y))
        cv2.circle(template, center, 2, color, -1)
    cv2.imshow('img', img)
    cv2.imshow('tm', template)
    cv2.waitKey(0)
cap.release()
But the error that comes up is:
knn.train(samples,responses)
TypeError: data type = 17 is not supported
Does anybody have any idea on this?
OpenCV probably expects regular arrays, but you are passing NumPy arrays instead. Try this:
knn.train(samples.tolist(),responses.tolist())
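If that doesn't resolve it, note that NumPy dtype number 17 is the object dtype, which suggests descritors did not form a regular numeric array. A hedged alternative sketch (assuming all descriptors have the same length): force a contiguous float32 matrix with one row per keypoint, since OpenCV's kNN requires CV_32F training data.
# kNN training data must be a 2-D float32 matrix; an object-dtype array
# (NumPy type number 17) is rejected, so build an explicit float32 matrix.
samples = np.asarray(descritors, dtype=np.float32).reshape(len(kp), -1)
responses = np.arange(len(kp), dtype=np.float32)
knn.train(samples, responses)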

Input samples must be floating-point matrix in function CvKNearest::find_nearest

I am trying to execute the code from this URL
However, I started getting this error:
error: ..\..\..\..\opencv\modules\ml\src\knearest.cpp:370: error: (-5) Input samples must be floating-point matrix (<num_samples>x<var_count>) in function CvKNearest::find_nearest
I have not made any major changes, but I will paste what I did:
import scipy as sp
import numpy as np
import cv2

# Load the images
img = cv2.imread("image1.png")

# Convert them to grayscale
imgg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# SURF extraction
surf = cv2.FeatureDetector_create("SURF")
surfDescriptorExtractor = cv2.DescriptorExtractor_create("SURF")
kp = surf.detect(imgg)
kp, descritors = surfDescriptorExtractor.compute(imgg, kp)

# Setting up samples and responses for kNN
samples = np.array(descritors)
responses = np.arange(len(kp), dtype=np.float32)

# kNN training
knn = cv2.KNearest()
knn.train(samples, responses)

modelImages = ["image2.png"]

for modelImage in modelImages:
    # Now loading a template image and searching for similar keypoints
    template = cv2.imread(modelImage)
    templateg = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
    keys = surf.detect(templateg)
    keys, desc = surfDescriptorExtractor.compute(templateg, keys)
    for h, des in enumerate(desc):
        # debug
        print(des.shape)
        des = np.array(des, np.float32).reshape(64L)
        retval, results, neigh_resp, dists = knn.find_nearest(des, 1)
        res, dist = int(results[0][0]), dists[0][0]
        if dist < 0.1:  # draw matched keypoints in red color
            color = (0, 0, 255)
        else:  # draw unmatched in blue color
            #print dist
            color = (255, 0, 0)
        # Draw matched key points on original image
        x, y = kp[res].pt
        center = (int(x), int(y))
        cv2.circle(img, center, 2, color, -1)
        # Draw matched key points on template image
        x, y = keys[h].pt
        center = (int(x), int(y))
        cv2.circle(template, center, 2, color, -1)

cv2.imshow('img', img)
cv2.imshow('tm', template)
cv2.waitKey(0)
cv2.destroyAllWindows()
Any help is greatly appreciated.
Thanks in advance!
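One likely cause, judging from the error text: the reshape(64L) call produces a 1-D vector, while find_nearest expects a 2-D float32 matrix of shape (num_samples, var_count). A minimal sketch of the fix, assuming 64-dimensional SURF descriptors:
# find_nearest wants (num_samples x var_count) float32 input,
# so give each 64-dimensional descriptor an explicit one-row shape.
des = np.array(des, np.float32).reshape((1, 64))
retval, results, neigh_resp, dists = knn.find_nearest(des, 1)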
