# ECX_V001 / app.py
# First version (Ziv Pollak)
import gradio as gr
import cv2
import mediapipe as mp
from mediapipe.tasks import python
from mediapipe.tasks.python import vision
from mediapipe.framework.formats import landmark_pb2
# Module-level buffers holding the most recent crop and annotated frame.
cropped_image = []
analyzed_image = []

# Pipeline:
# 1. Take a photo.
# 2. Run the face landmarker on it to locate and crop the eye region.
# 3. Run our model on the crop.
# 4. Display the results.

# Create a FaceLandmarker object.
base_options = python.BaseOptions(model_asset_path='face_landmarker_v2_with_blendshapes.task')
options = vision.FaceLandmarkerOptions(base_options=base_options,
                                       output_face_blendshapes=True,
                                       output_facial_transformation_matrixes=True,
                                       num_faces=1)
detector = vision.FaceLandmarker.create_from_options(options)
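# The .task model bundle is not produced by this script; it must already sit
# next to app.py. A minimal sketch for fetching it (the URL is the published
# MediaPipe face-landmarker bundle and is an assumption, not pinned by this
# repo):
#
#   import urllib.request
#   urllib.request.urlretrieve(
#       "https://storage.googleapis.com/mediapipe-models/face_landmarker/"
#       "face_landmarker/float16/1/face_landmarker.task",
#       "face_landmarker_v2_with_blendshapes.task",
#   )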
# Identity passthrough left over from an earlier webcam-video prototype
# (see the commented-out gr.Interface below); unused by the Blocks UI.
def video_identity(video):
    return video
#demo = gr.Interface(video_identity,
# gr.Video(shape = (1000,1000), source="webcam"),
# "playable_video")
def handle_image(input_image):
    global cropped_image, analyzed_image
    # Gradio supplies the image as an RGB numpy array; convert to BGR so the
    # file OpenCV writes has correct colours.
    cv2.imwrite("image.jpg", cv2.cvtColor(input_image, cv2.COLOR_RGB2BGR))
    image = mp.Image.create_from_file("image.jpg")
    detection_result = detector.detect(image)
    cropped_image = image.numpy_view().copy()
    analyzed_image = image.numpy_view().copy()
    face_landmarks_list = detection_result.face_landmarks
    if not face_landmarks_list:
        # No face detected; return the input unchanged rather than crashing.
        return input_image
    # Draw the face landmarks.
    face_landmarks = face_landmarks_list[0]
    face_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
    face_landmarks_proto.landmark.extend([
        landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z)
        for landmark in face_landmarks
    ])
    # Landmark coordinates are normalized to [0, 1]; scale them to pixels.
    height, width, _ = cropped_image.shape
    # Landmarks 70 and 346 serve as the upper-left and lower-right corners
    # of the eye-region crop.
    p1 = [int(face_landmarks_proto.landmark[70].x * width), int(face_landmarks_proto.landmark[70].y * height)]
    cv2.circle(input_image, (p1[0], p1[1]), 10, (0, 0, 255), -1)
    p2 = [int(face_landmarks_proto.landmark[346].x * width), int(face_landmarks_proto.landmark[346].y * height)]
    cv2.circle(input_image, (p2[0], p2[1]), 10, (0, 0, 255), -1)
    print(p1[0], p1[1], p2[0], p2[1], height, width)  # debug output for the crop box
    # Rows run from the top of the image, so p1 (upper left) comes first.
    cropped_image = cropped_image[p1[1]:p2[1], p1[0]:p2[0]]
    #return ([input_image, cropped_image])
    return cropped_image
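# A minimal local smoke test (hypothetical, not part of the Space): feed
# handle_image an RGB array the way Gradio would. "face.jpg" is an assumed
# sample photo.
#
#   if __name__ == "__main__":
#       bgr = cv2.imread("face.jpg")
#       crop = handle_image(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))
#       cv2.imwrite("crop.jpg", cv2.cvtColor(crop, cv2.COLOR_RGB2BGR))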
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Iris detection
        """)
    #video1 = gr.Video(height=200, width=200)#source="webcam")
    image1 = gr.Image()
    b = gr.Button("Analyze")
    gr.Markdown(
        """
        # Cropped image
        """)
    #cropped_image = gr.Gallery(
    #    label="cropped", show_label=False, elem_id="cropped"
    #)
    # Named cropped_image_output so the component does not shadow the
    # module-level cropped_image buffer that handle_image writes to.
    cropped_image_output = gr.Image()
    out = [cropped_image_output]
    b.click(fn=handle_image, inputs=image1, outputs=out)

demo.launch()
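# Assumed dependency set for running the Space locally (the repo does not pin
# versions here):
#
#   pip install gradio mediapipe opencv-python
#   python app.py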