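"""Face verification demo: detect faces, extract landmarks, pose and features,
then compare two test images by feature similarity."""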
import argparse
import cv2
import torch
import numpy as np
import ctypes
import os
import time
from face_detect.detect_imgs import get_face_boundingbox
from face_landmark.GetLandmark import get_face_landmark
from face_feature.GetFeature import get_face_feature
from face_pose.GetPose import get_face_pose
import face_manage.manage as db_manage
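# Note: argparse, ctypes, torch and db_manage are imported but not referenced
# directly in this standalone demo.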
def GetImageInfo(image, faceMaxCount):
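    """Run face detection, landmark, pose and feature extraction on an image.

    Returns (count, bboxes, bscores, landmarks, alignimgs, features) for up to
    faceMaxCount detected faces.
    """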
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    ### Detection
    start_time = time.time() * 1000
    boxes, scores = get_face_boundingbox(image)
    boxes = boxes[:faceMaxCount]
    scores = scores[:faceMaxCount]

    count = len(boxes)
    bboxes = []
    bscores = []
    for idx in range(count):
        bboxes.append(boxes[idx].data.numpy())
        bscores.append(scores[idx].data.numpy())
    # print("Detection time = %s ms" % (time.time() * 1000 - start_time))

    ### Landmark
    start_time = time.time() * 1000
    landmarks = []  ### np.zeros((count, 136), dtype=np.float32)
    for idx in range(count):
        landmarks.append(get_face_landmark(gray_image, boxes[idx]).data.numpy())
    # print("Landmark time = %s ms" % (time.time() * 1000 - start_time))

    ### Pose
    poses = []
    for idx in range(count):
        poses.append(get_face_pose(boxes[idx], landmarks[idx]))
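    # Note: poses are computed here but not returned to the caller.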

    ### Feature
    start_time = time.time() * 1000
    features = []
    alignimgs = []
    for idx in range(count):
        alignimg, feature = get_face_feature(image, landmarks[idx])
        features.append(feature)
        alignimgs.append(alignimg)
    print("Feature extraction time = %s ms" % (time.time() * 1000 - start_time))

    ### Optional debug visualization (disabled by default)
    if False:
        for idx in range(count):
            print_image = image.copy()
            box = boxes[idx].numpy()
            print("Detected box: ", box)
            landmark = landmarks[idx]
            cv2.rectangle(print_image, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 0, 255), 2)
            for p in range(68):
                cv2.circle(print_image, (int(landmark[p * 2]), int(landmark[p * 2 + 1])), 1, (255, 255, 255))
            cv2.imshow("face recognition", print_image)
            cv2.waitKey()

    return count, bboxes, bscores, landmarks, alignimgs, features

def get_similarity(feat1, feat2):
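    # The two feature vectors are assumed to be L2-normalized, so their dot
    # product is a cosine similarity in [-1, 1]; this maps it linearly to a
    # 0-100 score.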
    return (np.sum(feat1 * feat2) + 1) * 50

if __name__ == '__main__':
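    # Minimal verification demo: extract features from test/1.png and
    # test/3.png, then compare the first face found in each image.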
    threshold = 75
    test_directory = 'test'

    efn = os.getcwd() + "/test/1.png"
    img = cv2.imread(efn, cv2.IMREAD_COLOR)
    count, boxes, scores, landmarks, alignimgs, features1 = GetImageInfo(img, 5)

    vfn = os.getcwd() + "/test/3.png"
    img = cv2.imread(vfn, cv2.IMREAD_COLOR)
    count, boxes, scores, landmarks, alignimgs, features2 = GetImageInfo(img, 5)

    score = get_similarity(features1[0], features2[0])
    print('score = ', score)

    if score > threshold:
        print('same person')
    else:
        print('different person')