---
license: bigscience-openrail-m
---
Face verification
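The snippet below pairs images from a webcam capture folder, embeds each face with InsightFace's `buffalo_l` model, fits a normal distribution to the distances between reference pairs, and then decides whether a new capture falls inside that distribution.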
```
import os

import cv2
import torch
from insightface.app import FaceAnalysis


# Compare face embeddings to verify a person against reference webcam captures.
class FaceRec:
    def __init__(self):
        self.foldername = '/home/emmanuel/Pictures/Webcam'
        self.files = []
        self.embeds = []
        self.diff = []
        self.ground_matches = []
        self.sampling = None

    def folder(self, attempt=True, folder='/home/emmanuel/Pictures/Webcam'):
        # List the image files and pair the first half with the second half.
        if attempt:
            for file in os.listdir(folder):
                self.files.append(file)
            self.image_pair = list(zip(self.files[0:len(self.files) // 2],
                                       self.files[len(self.files) // 2:]))
            print(self.image_pair)
        else:
            # Switch to the folder holding the new capture and rebuild the pairs.
            self.foldername = '/home/emmanuel/Pictures/webcam'
            self.files = []
            self.folder(attempt=True, folder=self.foldername)

    def embeddings(self, image):
        # Detect the first face in the image and return its normalized embedding
        # as a (1, 512) tensor. The detector is re-created on every call.
        app = FaceAnalysis(name="buffalo_l",
                           providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
        app.prepare(ctx_id=0, det_size=(640, 640))
        image1 = cv2.imread(image)
        faces = app.get(image1)
        faceid_embeds = torch.from_numpy(faces[0].normed_embedding).unsqueeze(0)
        return faceid_embeds

    def face_embed(self, face, face1):
        # Load the two images and return the cosine similarity of their embeddings.
        face_encodings = self.embeddings(face)
        face_encodings1 = self.embeddings(face1)
        return torch.nn.functional.cosine_similarity(face_encodings, face_encodings1)

    def closeness(self):
        # Cosine similarity for every image pair in the current folder.
        self.embeds = []
        for faces in self.image_pair:
            self.embeds.append(self.face_embed(self.foldername + '/' + faces[0],
                                               self.foldername + '/' + faces[1]))
        return 0

    def compare(self, attempt=True):
        # Pairwise distance between the first and second half of the similarities.
        self.diff = []
        for diffs in zip(self.embeds[0:len(self.embeds) // 2],
                         self.embeds[len(self.embeds) // 2:]):
            self.diff.append(torch.nn.functional.pairwise_distance(diffs[0], diffs[1]))

    def expectation(self):
        # Fit a normal distribution to the distances and draw reference samples.
        mean, std = torch.mean(torch.Tensor(self.diff)), torch.std(torch.Tensor(self.diff))
        distribute = torch.distributions.Normal(mean, std)
        self.sampling = distribute.sample(sample_shape=(10,))

    def model(self):
        self.closeness()
        return self.compare()

    def verify(self):
        # Build the reference distribution from known captures, then score the
        # new attempt against samples drawn from it.
        self.folder()
        self.model()
        self.expectation()
        self.folder(attempt=False)
        self.model()
        fails = 0
        success = 0
        max_itter = 10
        while max_itter >= 0:
            for samples in self.sampling:
                if self.diff[0] <= samples:
                    success = success + 1
                else:
                    fails = fails + 1
            max_itter = max_itter - 1
        if fails > success:
            return False
        return True


Recognition = FaceRec()
print(Recognition.verify())
```
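
As a quick sanity check, a single pair of images can be compared directly with `face_embed` before running the full `verify()` pipeline. This is a minimal sketch; the two file paths are placeholders for illustration, not files that ship with this repository:

```
# Minimal sketch: compare two face images directly.
# 'reference.jpg' and 'probe.jpg' are placeholder paths, not repo files.
rec = FaceRec()
similarity = rec.face_embed('reference.jpg', 'probe.jpg')
print(float(similarity))  # values near 1.0 indicate the same person
```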