Spaces:
Runtime error
Runtime error
fix input image size
Browse files
- actors_matching/api.py +23 -10
- app.py +1 -1
actors_matching/api.py
CHANGED
@@ -3,36 +3,49 @@ import json
|
|
3 |
import annoy
|
4 |
from typing import Tuple
|
5 |
|
6 |
-
EMBEDDING_DIMENSION=128
|
7 |
ANNOY_INDEX_FILE = "models/actors_annoy_index.ann"
|
8 |
ANNOY_METADATA_FILE = "models/actors_annoy_metadata.json"
|
9 |
ANNOY_MAPPING_FILE = "models/actors_mapping.json"
|
10 |
|
|
|
11 |
def load_annoy_index(
|
12 |
-
index_file
|
13 |
-
metadata_file
|
14 |
-
mapping_file
|
15 |
) -> Tuple[annoy.AnnoyIndex, dict]:
|
16 |
"""Load annoy index and associated mapping file"""
|
17 |
-
with open(metadata_file) as f:
|
18 |
annoy_index_metadata = json.load(f)
|
19 |
|
20 |
annoy_index = annoy.AnnoyIndex(f=EMBEDDING_DIMENSION, **annoy_index_metadata)
|
21 |
annoy_index.load(index_file)
|
22 |
|
23 |
-
with open(mapping_file) as f:
|
24 |
mapping = json.load(f)
|
25 |
mapping = {int(k): v for k, v in mapping.items()}
|
26 |
return annoy_index, mapping
|
27 |
|
28 |
-
|
|
|
|
|
|
|
29 |
"""Extract face location, embeddings, and top n_matches matches"""
|
30 |
-
face_locations = face_recognition.face_locations(
|
31 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
32 |
matches = []
|
33 |
distances = []
|
34 |
for emb in embeddings:
|
35 |
m, d = annoy_index.get_nns_by_vector(emb, n_matches, include_distances=True)
|
36 |
matches.append(m)
|
37 |
distances.append(d)
|
38 |
-
return [
|
|
|
|
|
|
|
|
3 |
import annoy
|
4 |
from typing import Tuple
|
5 |
|
6 |
+
# Dimensionality of the face-embedding vectors stored in the Annoy index
# (presumably the 128-d encodings produced by face_recognition — the same
# value is passed as ``f`` when the index is constructed below).
EMBEDDING_DIMENSION = 128

# Default on-disk locations of the serialized index and its companion files.
ANNOY_INDEX_FILE = "models/actors_annoy_index.ann"
ANNOY_METADATA_FILE = "models/actors_annoy_metadata.json"
ANNOY_MAPPING_FILE = "models/actors_mapping.json"
|
10 |
|
11 |
+
|
12 |
def load_annoy_index(
    index_file=ANNOY_INDEX_FILE,
    metadata_file=ANNOY_METADATA_FILE,
    mapping_file=ANNOY_MAPPING_FILE,
) -> Tuple[annoy.AnnoyIndex, dict]:
    """Load annoy index and associated mapping file.

    The metadata JSON holds the keyword arguments the index was originally
    built with (e.g. the distance metric); they are forwarded verbatim to
    the ``AnnoyIndex`` constructor before the ``.ann`` file is loaded.

    Returns:
        (annoy.AnnoyIndex, dict): the loaded index, and a mapping whose
        keys are the integer item ids stored in the index.
    """
    with open(metadata_file) as meta_fp:
        index_kwargs = json.load(meta_fp)

    # The index must be re-created with the same dimension/metric it was
    # built with before .load() can attach the serialized file.
    index = annoy.AnnoyIndex(f=EMBEDDING_DIMENSION, **index_kwargs)
    index.load(index_file)

    with open(mapping_file) as map_fp:
        raw_mapping = json.load(map_fp)
    # JSON object keys are always strings; Annoy item ids are ints,
    # so coerce the keys back before returning.
    id_mapping = {int(item_id): record for item_id, record in raw_mapping.items()}
    return index, id_mapping
|
28 |
|
29 |
+
|
30 |
+
def analyze_image(
    image, annoy_index, n_matches: int = 1, num_jitters: int = 1, model: str = "large"
):
    """Locate faces in ``image`` and look up their closest index matches.

    Each detected face is encoded and its embedding queried against
    ``annoy_index`` for the ``n_matches`` nearest item ids (with distances).

    Returns:
        list[dict]: one dict per detected face, with keys ``embeddings``,
        ``matches``, ``distances`` and ``face_locations``.
    """
    # Try the library's default (fast) detector first; fall back to the
    # slower "cnn" detector only when nothing was found.
    locations = face_recognition.face_locations(
        image, number_of_times_to_upsample=1
    )
    if not locations:
        locations = face_recognition.face_locations(image, model="cnn")

    encodings = face_recognition.face_encodings(
        image, num_jitters=num_jitters, model=model, known_face_locations=locations
    )

    # Pair each encoding with its detection box and nearest-neighbour hits.
    results = []
    for encoding, location in zip(encodings, locations):
        ids, dists = annoy_index.get_nns_by_vector(
            encoding, n_matches, include_distances=True
        )
        results.append(
            dict(
                embeddings=encoding,
                matches=ids,
                distances=dists,
                face_locations=location,
            )
        )
    return results
|
app.py
CHANGED
@@ -59,7 +59,7 @@ iface = gr.Interface(
|
|
59 |
and limitations of the tool!""",
|
60 |
article=Path("README.md").read_text(),
|
61 |
inputs=[
|
62 |
-
gr.inputs.Image(shape=
|
63 |
gr.inputs.Textbox(
|
64 |
label="Who's that?", placeholder="Optional, you can leave this blank"
|
65 |
),
|
|
|
59 |
and limitations of the tool!""",
|
60 |
article=Path("README.md").read_text(),
|
61 |
inputs=[
|
62 |
+
gr.inputs.Image(shape=None, label="Your image"),
|
63 |
gr.inputs.Textbox(
|
64 |
label="Who's that?", placeholder="Optional, you can leave this blank"
|
65 |
),
|