@@ -299,66 +299,84 @@ def forward(self, img, save_path=None, return_prob=False):
         else:
             return faces
 
-    def detect(self, img):
-        """Detect all faces in PIL image and return bounding boxes.
+    def detect(self, img, landmarks=False):
+        """Detect all faces in PIL image and return bounding boxes and optional facial landmarks.
 
         This method is used by the forward method and is also useful for face detection tasks
-        that require lower-level handling of bounding boxes (e.g., face tracking). The
-        functionality of the forward function can be emulated by using this method followed by
-        the extract_face() function.
+        that require lower-level handling of bounding boxes and facial landmarks (e.g., face
+        tracking). The functionality of the forward function can be emulated by using this method
+        followed by the extract_face() function.
 
         Arguments:
             img {PIL.Image or list} -- A PIL image or a list of PIL images.
+
+        Keyword Arguments:
+            landmarks {bool} -- Whether to return facial landmarks in addition to bounding boxes.
+                (default: {False})
 
         Returns:
             tuple(numpy.ndarray, list) -- For N detected faces, a tuple containing an
                 Nx4 array of bounding boxes and a length N list of detection probabilities.
                 Returned boxes will be sorted in descending order by detection probability if
                 self.select_largest=False, otherwise the largest face will be returned first.
                 If `img` is a list of images, the items returned have an extra dimension
-                (batch) as the first dimension.
+                (batch) as the first dimension. Optionally, a third item, the facial landmarks,
+                is returned if `landmarks=True`.
 
         Example:
         >>> from PIL import Image, ImageDraw
         >>> from facenet_pytorch import MTCNN, extract_face
         >>> mtcnn = MTCNN(keep_all=True)
-        >>> boxes, probs = mtcnn.detect(img)
+        >>> boxes, probs, points = mtcnn.detect(img, landmarks=True)
        >>> # Draw boxes and save faces
        >>> img_draw = img.copy()
        >>> draw = ImageDraw.Draw(img_draw)
-        >>> for i, box in enumerate(boxes):
-        ...     draw.rectangle(box.tolist())
+        >>> for i, (box, point) in enumerate(zip(boxes, points)):
+        ...     draw.rectangle(box.tolist(), width=5)
+        ...     for p in point:
+        ...         draw.rectangle((p - 10).tolist() + (p + 10).tolist(), width=10)
        ...     extract_face(img, box, save_path='detected_face_{}.png'.format(i))
        >>> img_draw.save('annotated_faces.png')
        """
 
         with torch.no_grad():
-            batch_boxes = detect_face(
+            batch_boxes, batch_points = detect_face(
                 img, self.min_face_size,
                 self.pnet, self.rnet, self.onet,
                 self.thresholds, self.factor,
                 self.device
             )
 
-        boxes, probs = [], []
-        for box in batch_boxes:
+        boxes, probs, points = [], [], []
+        for box, point in zip(batch_boxes, batch_points):
             box = np.array(box)
+            point = np.array(point)
             if len(box) == 0:
                 boxes.append(None)
                 probs.append([None])
+                points.append(None)
             elif self.select_largest:
-                box = box[np.argsort((box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1]))[::-1]]
+                box_order = np.argsort((box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1]))[::-1]
+                box = box[box_order]
+                point = point[box_order]
                 boxes.append(box[:, :4])
                 probs.append(box[:, 4])
+                points.append(point)
             else:
                 boxes.append(box[:, :4])
                 probs.append(box[:, 4])
+                points.append(point)
         boxes = np.array(boxes)
         probs = np.array(probs)
+        points = np.array(points)
 
         if not isinstance(img, Iterable):
             boxes = boxes[0]
             probs = probs[0]
+            points = points[0]
+
+        if landmarks:
+            return boxes, probs, points
 
         return boxes, probs
 
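For readers skimming the `select_largest` branch: the new `box_order` index is simply an area sort applied to both the boxes and their landmarks so the two stay paired. A minimal standalone sketch of that ordering, using a made-up `box` array in the `[x1, y1, x2, y2, prob]` layout produced by `detect_face` and dummy landmark data:

```python
import numpy as np

# Hypothetical detections in [x1, y1, x2, y2, prob] layout (three faces).
box = np.array([
    [ 10.0,  10.0,  60.0,  60.0, 0.99],   # 50 x 50
    [100.0, 100.0, 300.0, 260.0, 0.95],   # 200 x 160 (largest)
    [200.0,  20.0, 250.0, 100.0, 0.97],   # 50 x 80
])
point = np.arange(3 * 5 * 2, dtype=float).reshape(3, 5, 2)  # dummy 5-point landmarks per face

# Sort by box area, largest first, and reorder the landmarks with the same
# index so boxes and points stay aligned.
box_order = np.argsort((box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1]))[::-1]
box = box[box_order]
point = point[box_order]

print(box[:, :4])  # bounding boxes, largest area first
print(box[:, 4])   # detection probabilities in the matching order
```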
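And a usage sketch of the new `landmarks` keyword, covering the list-of-images case the docstring mentions. The image paths are placeholders, and the per-face landmark shape of 5 x 2 (five (x, y) points) is an assumption about the `detect_face` output rather than something stated in this diff:

```python
from PIL import Image
from facenet_pytorch import MTCNN

mtcnn = MTCNN(keep_all=True)

# Single image: boxes is Nx4, probs has length N, and points is assumed Nx5x2.
img = Image.open('frame_0001.png')  # hypothetical file
boxes, probs, points = mtcnn.detect(img, landmarks=True)

# List of images: each return value gains a leading batch dimension, so
# batch_boxes[i], batch_probs[i], batch_points[i] correspond to frames[i].
frames = [Image.open('frame_0001.png'), Image.open('frame_0002.png')]  # hypothetical files
batch_boxes, batch_probs, batch_points = mtcnn.detect(frames, landmarks=True)

# Omitting the keyword keeps the old two-value return.
boxes, probs = mtcnn.detect(img)
```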