import gradio as gr

# Local wrapper modules, one per MediaPipe solution
from facemesh import mp_face_mesh_fn
from facedetect import mp_face_detect_fn
from handposedetect import mp_hand_pose_detect_fn
from objectron3d import mp_objectron_fn
from posestimate import mp_pose_estimation_fn
from holistic import mp_holistic_fn
from selfiseg import mp_selfi_segment_fn
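
# Each mp_*_fn wrapper above is assumed to take an RGB numpy image (plus, for
# every solution except selfie segmentation, a min_detect_conf threshold) and
# to return the annotated image as a numpy array.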

def run_mediapipe(image, soln_type, min_detection_confidence):
    """Dispatch the input image to the selected MediaPipe solution."""
    if soln_type == 'facemesh':
        annotated_image = mp_face_mesh_fn(image, min_detect_conf=min_detection_confidence)
    elif soln_type == 'facedetect':
        annotated_image = mp_face_detect_fn(image, min_detect_conf=min_detection_confidence)
    elif soln_type == 'handpose':
        annotated_image = mp_hand_pose_detect_fn(image, min_detect_conf=min_detection_confidence)
    elif soln_type == 'pose estimate':
        annotated_image = mp_pose_estimation_fn(image, min_detect_conf=min_detection_confidence)
    elif soln_type == 'holistic':
        annotated_image = mp_holistic_fn(image, min_detect_conf=min_detection_confidence)
    elif soln_type == 'objectron':
        annotated_image = mp_objectron_fn(image, min_detect_conf=min_detection_confidence)
    elif soln_type == 'selfie segment':
        # Selfie segmentation does not take a detection-confidence threshold
        annotated_image = mp_selfi_segment_fn(image)
    else:
        # Guard against an unmatched choice, which would otherwise raise
        # UnboundLocalError on the return below
        raise ValueError(f"Unknown solution type: {soln_type}")
    return annotated_image
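
# For reference, a minimal sketch (kept as a comment) of what one of the
# imported wrappers, e.g. facedetect.mp_face_detect_fn, might look like
# internally. This is an assumption for illustration; the real modules are
# not shown in this file.
#
#   import mediapipe as mp
#
#   def mp_face_detect_fn(image, min_detect_conf=0.5):
#       with mp.solutions.face_detection.FaceDetection(
#               min_detection_confidence=min_detect_conf) as face_detection:
#           results = face_detection.process(image)  # expects an RGB numpy array
#           annotated = image.copy()
#           if results.detections:
#               for detection in results.detections:
#                   mp.solutions.drawing_utils.draw_detection(annotated, detection)
#           return annotated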

def main():
    solutions = [
        'facedetect',
        'facemesh',
        'handpose',
        'pose estimate',
        'holistic',
        'objectron',
        'selfie segment'
    ]
    # One example per solution: [image path, solution name, detection confidence]
    sample_images = [
        ["examples/0.jpg", solutions[0], 0.5],
        ["examples/1.jpg", solutions[1], 0.5],
        ["examples/2.png", solutions[2], 0.5],
        ["examples/3.jpg", solutions[3], 0.5],
        ["examples/4.jpg", solutions[4], 0.5],
        ["examples/5.jpg", solutions[5], 0.3],
        ["examples/6.jpg", solutions[6], None],  # selfie segment ignores the slider
    ]
    iface = gr.Interface(
        fn=run_mediapipe,
        inputs=[
            gr.Image(label="Input Image"),
            gr.Radio(
                solutions,
                type='value',
                value=solutions[0],
                label='Solutions'
            ),
            gr.Slider(
                0,
                1,
                step=0.05,
                value=0.5,
                label='Minimum Detection Confidence'
            )
        ],
        outputs=gr.Image(label="MediaPipe", type='numpy'),
        title="Google MediaPipe Demo",
        description="A few solutions from <a href='https://mediapipe.dev/'>Google MediaPipe</a> are demonstrated here. Note: to test the `Objectron` model, make sure to set the <a href='https://github.com/google/mediapipe/blob/master/mediapipe/python/solutions/objectron.py#L128-L133'>Model Type</a>; it currently supports only `Shoe`, `Chair`, `Cup`, and `Camera` objects.",
        examples=sample_images
    )
    iface.launch()

if __name__ == '__main__':
    main()