Spaces:
Runtime error
Runtime error
Mohammed Innat
committed on
Commit
•
ba39187
1
Parent(s):
3510c12
Update app.py
Browse files
app.py
CHANGED
@@ -2,21 +2,27 @@ import gradio as gr
|
|
2 |
from facemesh import mp_face_mesh_fn
|
3 |
from facedetect import mp_face_detect_fn
|
4 |
from handposedetect import mp_hand_pose_detect_fn
|
|
|
5 |
from posestimate import mp_pose_estimation_fn
|
6 |
from holistic import mp_holistic_fn
|
|
|
7 |
|
8 |
|
9 |
-
def run_mediapipe(image, soln_type):
    """Dispatch *image* to the selected MediaPipe solution and return the result.

    Parameters
    ----------
    image :
        Input image as supplied by the Gradio image input component.
    soln_type : str
        Name of the solution to run; one of 'facemesh', 'facedetect',
        'handpose', 'pose estimate', 'holistic'.

    Returns
    -------
    The annotated image produced by the chosen solution function.

    Raises
    ------
    ValueError
        If *soln_type* is not a recognised solution name. (Previously an
        unknown name fell through every branch and the final ``return``
        raised ``UnboundLocalError`` instead.)
    """
    if soln_type == 'facemesh':
        annotated_image = mp_face_mesh_fn(image)
    elif soln_type == 'facedetect':
        annotated_image = mp_face_detect_fn(image)
    elif soln_type == 'handpose':
        annotated_image = mp_hand_pose_detect_fn(image)
    elif soln_type == 'pose estimate':
        annotated_image = mp_pose_estimation_fn(image)
    elif soln_type == 'holistic':
        annotated_image = mp_holistic_fn(image)
    else:
        # Fail loudly with a clear message rather than UnboundLocalError.
        raise ValueError(f"Unknown solution type: {soln_type!r}")
    return annotated_image
|
21 |
|
22 |
|
@@ -26,15 +32,19 @@ def main():
|
|
26 |
'facemesh',
|
27 |
'handpose',
|
28 |
'pose estimate',
|
29 |
-
'holistic'
|
|
|
|
|
30 |
]
|
31 |
|
32 |
sample_images = [
|
33 |
-
["examples/0.jpg", solutions[0]],
|
34 |
-
["examples/1.jpg", solutions[1]],
|
35 |
-
["examples/2.png", solutions[2]],
|
36 |
-
["examples/3.jpg", solutions[3]],
|
37 |
-
["examples/4.jpg", solutions[4]],
|
|
|
|
|
38 |
]
|
39 |
|
40 |
iface = gr.Interface(
|
@@ -47,6 +57,13 @@ def main():
|
|
47 |
default=solutions[0],
|
48 |
label='Solutions'
|
49 |
),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
50 |
],
|
51 |
outputs=gr.outputs.Image(label="MediaPipe"),
|
52 |
title="Google MediaPipe Demo",
|
|
|
2 |
from facemesh import mp_face_mesh_fn
|
3 |
from facedetect import mp_face_detect_fn
|
4 |
from handposedetect import mp_hand_pose_detect_fn
|
5 |
+
from objectron3d import mp_objectron_fn
|
6 |
from posestimate import mp_pose_estimation_fn
|
7 |
from holistic import mp_holistic_fn
|
8 |
+
from selfiseg import mp_selfi_segment_fn
|
9 |
|
10 |
|
11 |
+
def run_mediapipe(image, soln_type, min_detection_confidence):
    """Dispatch *image* to the selected MediaPipe solution and return the result.

    Parameters
    ----------
    image :
        Input image as supplied by the Gradio image input component.
    soln_type : str
        Name of the solution to run; one of 'facemesh', 'facedetect',
        'handpose', 'pose estimate', 'holistic', 'objectron',
        'selfie segment'.
    min_detection_confidence : float
        Minimum detection confidence (0-1) forwarded to the solution as
        ``min_detect_conf``. Ignored by 'selfie segment', which takes no
        confidence argument.

    Returns
    -------
    The annotated image produced by the chosen solution function.

    Raises
    ------
    ValueError
        If *soln_type* is not a recognised solution name. (Previously an
        unknown name fell through every branch and the final ``return``
        raised ``UnboundLocalError`` instead.)
    """
    if soln_type == 'facemesh':
        annotated_image = mp_face_mesh_fn(image, min_detect_conf=min_detection_confidence)
    elif soln_type == 'facedetect':
        annotated_image = mp_face_detect_fn(image, min_detect_conf=min_detection_confidence)
    elif soln_type == 'handpose':
        annotated_image = mp_hand_pose_detect_fn(image, min_detect_conf=min_detection_confidence)
    elif soln_type == 'pose estimate':
        annotated_image = mp_pose_estimation_fn(image, min_detect_conf=min_detection_confidence)
    elif soln_type == 'holistic':
        annotated_image = mp_holistic_fn(image, min_detect_conf=min_detection_confidence)
    elif soln_type == 'objectron':
        annotated_image = mp_objectron_fn(image, min_detect_conf=min_detection_confidence)
    elif soln_type == 'selfie segment':
        # Segmentation backend exposes no detection-confidence knob.
        annotated_image = mp_selfi_segment_fn(image)
    else:
        # Fail loudly with a clear message rather than UnboundLocalError.
        raise ValueError(f"Unknown solution type: {soln_type!r}")
    return annotated_image
|
27 |
|
28 |
|
|
|
32 |
'facemesh',
|
33 |
'handpose',
|
34 |
'pose estimate',
|
35 |
+
'holistic',
|
36 |
+
'objectron',
|
37 |
+
'selfie segment'
|
38 |
]
|
39 |
|
40 |
sample_images = [
|
41 |
+
["examples/0.jpg", solutions[0], 0.5],
|
42 |
+
["examples/1.jpg", solutions[1], 0.5],
|
43 |
+
["examples/2.png", solutions[2], 0.5],
|
44 |
+
["examples/3.jpg", solutions[3], 0.5],
|
45 |
+
["examples/4.jpg", solutions[4], 0.5],
|
46 |
+
["examples/5.jpg", solutions[5], 0.3],
|
47 |
+
["examples/6.jpg", solutions[6], None],
|
48 |
]
|
49 |
|
50 |
iface = gr.Interface(
|
|
|
57 |
default=solutions[0],
|
58 |
label='Solutions'
|
59 |
),
|
60 |
+
gr.inputs.Slider(
|
61 |
+
0,
|
62 |
+
1,
|
63 |
+
step=0.05,
|
64 |
+
default=0.5,
|
65 |
+
label='Minimum Detection Confidence'
|
66 |
+
)
|
67 |
],
|
68 |
outputs=gr.outputs.Image(label="MediaPipe"),
|
69 |
title="Google MediaPipe Demo",
|