face_landmarks_detection.py
import cv2
import dlib
import imutils
import numpy as np
from imutils import face_utils
from file_handler import write_file
from landmarks import FACIAL_LANDMARKS_IDXS


# initialize dlib's face detector (HOG-based) and create the facial landmark predictor
def initialize_face_detection(shape_predictor):
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(shape_predictor)
    return [detector, predictor]


# process the input image
def process_input_image(image_path):
    # load the input image, resize it, and convert it to grayscale
    image = cv2.imread(image_path)
    resized_image = imutils.resize(image, width=500)
    gray_image = cv2.cvtColor(resized_image, cv2.COLOR_BGR2GRAY)
    return [resized_image, gray_image]


# determine the facial landmarks, then extract and write the face regions
def face_detector(detector, predictor, image, gray_image, image_segments_dir):
    # detect faces in the grayscale image
    rects = detector(gray_image, 1)
    # loop over the face detections
    for rect in rects:
        # determine the facial landmarks for the face region, then
        # convert the landmark (x, y)-coordinates to a NumPy array
        shape = predictor(gray_image, rect)
        shape = face_utils.shape_to_np(shape)
        # loop over the face parts individually
        for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():
            # clone the original image so we can draw on it, then
            # display the name of the face part on the image
            clone = image.copy()
            cv2.putText(clone, name, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                        0.7, (0, 0, 255), 2)
            # loop over the subset of facial landmarks, drawing the
            # specific face part
            for (x, y) in shape[i:j]:
                cv2.circle(clone, (x, y), 1, (0, 0, 255), -1)
            # extract the ROI of the face region as a separate image
            (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
            roi = image[y:y + h, x:x + w]
            roi = imutils.resize(roi, width=250, inter=cv2.INTER_CUBIC)
            # show the particular face part and write it to disk
            cv2.imshow("ROI", roi)
            write_file(image_segments_dir + name, roi)
            cv2.imshow("Image", clone)
            # cv2.waitKey(0)
        # visualize all facial landmarks with a transparent overlay
        output = face_utils.visualize_facial_landmarks(image, shape)
        cv2.imshow("Image", output)
        # cv2.waitKey(0)


# show landmarks on the input image
def visualize_facial_landmarks(image, shape, colors=None, alpha=0.75):
    # create two copies of the input image -- one for the
    # overlay and one for the final output image
    overlay = image.copy()
    output = image.copy()
    # if the colors list is None, initialize it with a unique
    # color for each facial landmark region
    if colors is None:
        colors = [(19, 199, 109), (79, 76, 240), (230, 159, 23),
                  (168, 100, 168), (158, 163, 32),
                  (163, 38, 32), (180, 42, 220)]
    # loop over the facial landmark regions individually
    for (i, name) in enumerate(FACIAL_LANDMARKS_IDXS.keys()):
        # grab the (x, y)-coordinates associated with the
        # face landmark
        (j, k) = FACIAL_LANDMARKS_IDXS[name]
        pts = shape[j:k]
        # check if we are supposed to draw the jawline
        if name == "jaw":
            # since the jawline is a non-enclosed facial region,
            # just draw lines between the (x, y)-coordinates
            for l in range(1, len(pts)):
                ptA = tuple(pts[l - 1])
                ptB = tuple(pts[l])
                cv2.line(overlay, ptA, ptB, colors[i], 2)
        # otherwise, compute the convex hull of the facial
        # landmark coordinates and display it
        else:
            hull = cv2.convexHull(pts)
            cv2.drawContours(overlay, [hull], -1, colors[i], -1)
    # apply the transparent overlay
    cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)
    # return the output image
    return output
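

# ---------------------------------------------------------------------------
# A minimal usage sketch (not part of the original module): it wires the three
# helpers together, assuming dlib's standard 68-point predictor file has been
# downloaded separately. All paths below are hypothetical placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    shape_predictor_path = "shape_predictor_68_face_landmarks.dat"  # hypothetical path
    input_image_path = "example_face.jpg"                           # hypothetical path
    image_segments_dir = "segments/"                                # hypothetical output dir

    detector, predictor = initialize_face_detection(shape_predictor_path)
    resized_image, gray_image = process_input_image(input_image_path)
    face_detector(detector, predictor, resized_image, gray_image, image_segments_dir)
    # keep the OpenCV windows open until a key is pressed, then clean up
    cv2.waitKey(0)
    cv2.destroyAllWindows()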