import gradio as gr

# helper modules (local to this Space) wrapping the individual MediaPipe solutions
from facemesh import mp_face_mesh_fn
from facedetect import mp_face_detect_fn
from handposedetect import mp_hand_pose_detect_fn
from objectron3d import mp_objectron_fn
from posestimate import mp_pose_estimation_fn
from holistic import mp_holistic_fn
from selfiseg import mp_selfi_segment_fn
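
# Each helper module above exposes a single function that takes an RGB numpy
# image, runs the corresponding MediaPipe solution, and returns the annotated
# image, e.g. mp_face_mesh_fn(image, min_detect_conf=0.5), as used in
# run_mediapipe below. The selfie-segmentation helper takes no confidence
# argument.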
def run_mediapipe(image, soln_type, min_detection_confidence):
    """Run the selected MediaPipe solution on `image` and return the annotated image."""
    if soln_type == 'facemesh':
        annotated_image = mp_face_mesh_fn(image, min_detect_conf=min_detection_confidence)
    elif soln_type == 'facedetect':
        annotated_image = mp_face_detect_fn(image, min_detect_conf=min_detection_confidence)
    elif soln_type == 'handpose':
        annotated_image = mp_hand_pose_detect_fn(image, min_detect_conf=min_detection_confidence)
    elif soln_type == 'pose estimate':
        annotated_image = mp_pose_estimation_fn(image, min_detect_conf=min_detection_confidence)
    elif soln_type == 'holistic':
        annotated_image = mp_holistic_fn(image, min_detect_conf=min_detection_confidence)
    elif soln_type == 'objectron':
        annotated_image = mp_objectron_fn(image, min_detect_conf=min_detection_confidence)
    elif soln_type == 'selfie segment':
        # the selfie-segmentation helper does not take a confidence threshold
        annotated_image = mp_selfi_segment_fn(image)
    else:
        raise ValueError(f"Unknown solution type: {soln_type}")
    return annotated_image
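
# Example (hypothetical standalone call, assuming `img` is an RGB numpy array
# loaded elsewhere):
#     annotated = run_mediapipe(img, 'facedetect', 0.5)
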
def main():
    solutions = [
        'facedetect',
        'facemesh',
        'handpose',
        'pose estimate',
        'holistic',
        'objectron',
        'selfie segment'
    ]
    sample_images = [
        ["examples/0.jpg", solutions[0], 0.5],
        ["examples/1.jpg", solutions[1], 0.5],
        ["examples/2.png", solutions[2], 0.5],
        ["examples/3.jpg", solutions[3], 0.5],
        ["examples/4.jpg", solutions[4], 0.5],
        ["examples/5.jpg", solutions[5], 0.3],
        ["examples/6.jpg", solutions[6], None],
    ]
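    # Each example row maps onto the three Interface inputs below:
    # [image path, selected solution, minimum detection confidence].
    # The selfie-segmentation row passes None because that helper ignores
    # the confidence value (see run_mediapipe above).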
    iface = gr.Interface(
        fn=run_mediapipe,
        inputs=[
            gr.Image(label="Input Image"),
            gr.Radio(
                solutions,
                type='value',
                value=solutions[0],
                label='Solutions'
            ),
            gr.Slider(
                0,
                1,
                step=0.05,
                value=0.5,
                label='Minimum Detection Confidence'
            )
        ],
        outputs=gr.Image(label="MediaPipe", type='numpy'),
        title="Google MediaPipe Demo",
        description="A few solutions from <a href='https://mediapipe.dev/'>Google MediaPipe</a> are presented here. Note: to test the `Objectron` model, check the supported <a href='https://github.com/google/mediapipe/blob/master/mediapipe/python/solutions/objectron.py#L128-L133'>model types</a>; it currently supports only the `Shoe`, `Chair`, `Cup`, and `Camera` objects.",
        examples=sample_images
    )
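    # Note (assumption about the objectron3d helper): MediaPipe's Objectron is
    # typically pinned to one object category via model_name, e.g.
    #     import mediapipe as mp
    #     with mp.solutions.objectron.Objectron(static_image_mode=True,
    #                                           model_name='Shoe') as objectron:
    #         results = objectron.process(image)
    # Only 'Shoe', 'Chair', 'Cup', and 'Camera' are supported, as noted in the
    # description above.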
    iface.launch()


if __name__ == '__main__':
    main()
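
# To run this demo locally (assuming this file is saved as app.py and the
# helper modules plus the examples/ directory are present): `python app.py`.
# Gradio serves the app on http://127.0.0.1:7860 by default.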