import gradio as gr

from facemesh import mp_face_mesh_fn
from facedetect import mp_face_detect_fn
from handposedetect import mp_hand_pose_detect_fn
from posestimate import mp_pose_estimation_fn
from holistic import mp_holistic_fn


def run_mediapipe(image, soln_type):
    # Dispatch the input image to the selected MediaPipe solution.
    if soln_type == 'facemesh':
        annotated_image = mp_face_mesh_fn(image)
    elif soln_type == 'facedetect':
        annotated_image = mp_face_detect_fn(image)
    elif soln_type == 'handpose':
        annotated_image = mp_hand_pose_detect_fn(image)
    elif soln_type == 'pose estimate':
        annotated_image = mp_pose_estimation_fn(image)
    elif soln_type == 'holistic':
        annotated_image = mp_holistic_fn(image)
    else:
        # Fall back to the unmodified input so the function never
        # returns an undefined value.
        annotated_image = image
    return annotated_image


def main():
    solutions = [
        'facedetect',
        'facemesh',
        'handpose',
        'pose estimate',
        'holistic',
    ]

    # Example (image, solution) pairs shown beneath the interface.
    sample_images = [
        ["examples/0.jpg", solutions[0]],
        ["examples/1.jpg", solutions[1]],
        ["examples/2.png", solutions[2]],
        ["examples/3.jpg", solutions[3]],
        ["examples/4.jpg", solutions[4]],
    ]

    iface = gr.Interface(
        fn=run_mediapipe,
        inputs=[
            gr.inputs.Image(label="Input Image"),
            gr.inputs.Radio(
                solutions,
                type='value',
                default=solutions[0],
                label='Solutions'
            ),
        ],
        outputs=gr.outputs.Image(label="MediaPipe"),
        title="Google MediaPipe Demo",
        description="A few of Google MediaPipe's solutions are presented here.",
        examples=sample_images,
    )
    iface.launch()


if __name__ == '__main__':
    main()
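

# ----------------------------------------------------------------------
# For reference: a minimal sketch of what the mp_face_mesh_fn helper
# imported from facemesh.py might look like. This is an assumption about
# that module's contents, not its actual implementation; the other
# helpers presumably wrap their MediaPipe solutions in the same pattern.
# Gradio passes run_mediapipe an RGB numpy array, which is the format
# FaceMesh.process() expects.
# ----------------------------------------------------------------------
import mediapipe as mp

mp_face_mesh = mp.solutions.face_mesh
mp_drawing = mp.solutions.drawing_utils


def mp_face_mesh_fn(image):
    """Run MediaPipe FaceMesh on an RGB image and draw the landmarks."""
    annotated = image.copy()
    with mp_face_mesh.FaceMesh(static_image_mode=True,
                               max_num_faces=2,
                               min_detection_confidence=0.5) as face_mesh:
        results = face_mesh.process(image)
    if results.multi_face_landmarks:
        for face_landmarks in results.multi_face_landmarks:
            # Draw the full tessellation of detected face landmarks
            # onto a copy of the input image.
            mp_drawing.draw_landmarks(
                image=annotated,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_TESSELATION,
                landmark_drawing_spec=mp_drawing.DrawingSpec(
                    color=(0, 255, 0), thickness=1, circle_radius=1),
                connection_drawing_spec=mp_drawing.DrawingSpec(
                    color=(255, 255, 255), thickness=1))
    return annotated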