---
license: bigscience-openrail-m
---
Face verification

The script below detects faces with InsightFace's `buffalo_l` model, extracts normalised face embeddings, scores image pairs with cosine similarity, fits a Normal distribution to the scores, and accepts an attempt when the mean gap between the reference and camera similarity samples falls below 0.5. It expects `opencv-python`, `torch`, and `insightface` to be installed, and reads images from the local folders listed in `__init__`.
```
import os
import cv2
import torch
import torch.nn.functional as F
from insightface.app import FaceAnalysis

# prompt: compare face embeddings


class FaceRec:
    def __init__(self):
        # Folders holding the attempt, ground-truth, and live-camera images.
        self.folder_attempt = '/home/emmanuel-nsanga/Pictures/Webcam/'
        self.folder_ground = '/home/emmanuel-nsanga/Pictures/webcam/'
        self.folder_camera = '/home/emmanuel-nsanga/Pictures/camera/'
        self.files_ground = [self.folder_ground + name for name in os.listdir(self.folder_ground)]
        self.files_attempt = [self.folder_attempt + name for name in os.listdir(self.folder_attempt)]
        self.files_camera = [self.folder_camera + name for name in os.listdir(self.folder_camera)]
        # Pair ground-truth images with attempts, and attempts with camera captures.
        self.zip_ground = list(zip(self.files_ground, self.files_attempt))
        self.zip_attempt = list(zip(self.files_attempt, self.files_camera))
        # Build the InsightFace detector/embedder once and reuse it for every image.
        self.app = FaceAnalysis(name="buffalo_l", providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
        self.app.prepare(ctx_id=0, det_size=(640, 640))

    def embeddings(self, image):
        # Detect the first face in the image and return its normalised embedding as a (1, D) tensor.
        image1 = cv2.imread(image)
        faces = self.app.get(image1)
        faceid_embeds = torch.from_numpy(faces[0].normed_embedding).unsqueeze(0)
        return faceid_embeds

    def face_embed(self, face, face1):
        # Load the two images and compare their face embeddings by cosine similarity.
        face_encodings = self.embeddings(face)
        face_encodings1 = self.embeddings(face1)
        return F.cosine_similarity(face_encodings, face_encodings1)

    def expectation(self, sample_data):
        # Fit a Normal distribution to the similarity scores and draw 10 samples from it.
        mean, std = torch.mean(sample_data), torch.std(sample_data)
        distribute = torch.distributions.Normal(mean, std)
        return distribute.sample(sample_shape=(10,))

    def sim_distribution(self):
        # Split the ground-truth pairs in half: the first half is the reference set,
        # the second half the genuine-attempt set; zip_attempt holds the camera attempts.
        w_r_t_g = self.zip_ground[:len(self.zip_ground) // 2]
        w_r_t_tr = self.zip_ground[len(self.zip_ground) // 2:]
        w_r_t_c = self.zip_attempt
        ground_embeddings_g = torch.Tensor([self.face_embed(first, second).item() for first, second in w_r_t_g])
        attempt_ground = torch.Tensor([self.face_embed(first, second).item() for first, second in w_r_t_tr])
        attempt_ground_c = torch.Tensor([self.face_embed(first, second).item() for first, second in w_r_t_c])
        # Sample from a Normal fit to each set of similarity scores.
        self.sampling_ground = self.expectation(ground_embeddings_g)
        self.sampling_attempt_g = self.expectation(attempt_ground)
        self.sampling_attempt_c = self.expectation(attempt_ground_c)
        return (self.sampling_ground, self.sampling_attempt_g, self.sampling_ground, self.sampling_attempt_c)

    def model(self):
        # Accept the attempt when the mean gap between the reference and camera
        # similarity samples is below 0.5.
        sim_distribution = self.sim_distribution()
        xy = torch.mean(sim_distribution[2] - sim_distribution[3])
        print(xy.item())
        return xy.item() < 0.5


Recognition = FaceRec()
print(Recognition.model())
```
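
For a single enrolled/attempt pair, the same embedding step reduces to a direct cosine-similarity check. The sketch below is an illustration under stated assumptions, not part of the script above: the file names `enrolled.jpg` and `attempt.jpg` and the 0.4 acceptance threshold are hypothetical placeholders to tune on your own data.

```
# Minimal one-to-one verification sketch. The image paths and the 0.4
# threshold are hypothetical; tune the threshold on your own image pairs.
import cv2
import torch
import torch.nn.functional as F
from insightface.app import FaceAnalysis

app = FaceAnalysis(name="buffalo_l", providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
app.prepare(ctx_id=0, det_size=(640, 640))

def embed(path):
    # Normalised embedding of the first detected face, shaped (1, D).
    faces = app.get(cv2.imread(path))
    return torch.from_numpy(faces[0].normed_embedding).unsqueeze(0)

def verify(enrolled_path, attempt_path, threshold=0.4):
    # Accept when the cosine similarity of the two embeddings exceeds the threshold.
    score = F.cosine_similarity(embed(enrolled_path), embed(attempt_path))
    return score.item() >= threshold

print(verify('enrolled.jpg', 'attempt.jpg'))  # hypothetical file names
```

The class above instead aggregates many pairs: it samples from Normal fits to the per-pair similarity scores and accepts when the mean difference between the reference and camera samples stays below 0.5.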