import gradio as gr
import cv2
import mediapipe as mp
from mediapipe.tasks import python
from mediapipe.tasks.python import vision
from mediapipe.framework.formats import landmark_pb2

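# The FaceLandmarker model asset is not bundled with this script. A minimal,
# hedged sketch for downloading it (URL taken from the MediaPipe model card;
# verify it is still current before relying on it):
#
#   import urllib.request
#   urllib.request.urlretrieve(
#       "https://storage.googleapis.com/mediapipe-models/face_landmarker/"
#       "face_landmarker/float16/1/face_landmarker.task",
#       "face_landmarker_v2_with_blendshapes.task")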

# Configure a FaceLandmarker that reports landmarks, blendshapes, and facial
# transformation matrixes for at most one face per image.
base_options = python.BaseOptions(model_asset_path='face_landmarker_v2_with_blendshapes.task')
options = vision.FaceLandmarkerOptions(base_options=base_options,
                                       output_face_blendshapes=True,
                                       output_facial_transformation_matrixes=True,
                                       num_faces=1)
detector = vision.FaceLandmarker.create_from_options(options)


def handle_image(input_image):
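    """Detect a face and return the eye-region crop bounded by landmarks 70
    and 346; the frame is returned unchanged if no face is found."""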
    # Gradio delivers RGB frames while OpenCV writes BGR, so convert before
    # saving the temporary file that MediaPipe reloads as an mp.Image.
    cv2.imwrite("image.jpg", cv2.cvtColor(input_image, cv2.COLOR_RGB2BGR))
    image = mp.Image.create_from_file("image.jpg")

    detection_result = detector.detect(image)
    cropped_image = image.numpy_view().copy()
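
    # detection_result also exposes blendshape scores because the detector was
    # created with output_face_blendshapes=True; a minimal sketch for reading
    # them (hedged: check for an empty result first):
    #
    #   if detection_result.face_blendshapes:
    #       for bs in detection_result.face_blendshapes[0]:
    #           print(bs.category_name, bs.score)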

    face_landmarks_list = detection_result.face_landmarks
    if not face_landmarks_list:
        # No face was detected; return the unmodified frame.
        return cropped_image

    face_landmarks = face_landmarks_list[0]
    face_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
    face_landmarks_proto.landmark.extend([
        landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z)
        for landmark in face_landmarks
    ])

    # Landmark coordinates are normalized to [0, 1]; scale them to pixels.
    # Landmarks 70 and 346 act as the top-left and bottom-right corners of
    # the eye-region crop.
    height, width, _ = cropped_image.shape
    p1 = (int(face_landmarks_proto.landmark[70].x * width),
          int(face_landmarks_proto.landmark[70].y * height))
    p2 = (int(face_landmarks_proto.landmark[346].x * width),
          int(face_landmarks_proto.landmark[346].y * height))
    cropped_image = cropped_image[p1[1]:p2[1], p1[0]:p2[0]]

    return cropped_image
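
# A quick check without launching the UI (hypothetical file name; assumes a
# face photo saved as test.jpg next to this script):
#
#   frame = cv2.cvtColor(cv2.imread("test.jpg"), cv2.COLOR_BGR2RGB)
#   print(handle_image(frame).shape)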

with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Iris detection
        """)

    image1 = gr.Image()
    b = gr.Button("Analyze")

    gr.Markdown(
        """
        # Cropped image
        """)

    cropped_output = gr.Image()

    b.click(fn=handle_image, inputs=image1, outputs=[cropped_output])
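
# demo.launch() serves the app locally by default; share=True is a standard
# Gradio option that would additionally create a temporary public link.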
demo.launch()