import gradio as gr
import cv2
import mediapipe as mp
from mediapipe.tasks import python
from mediapipe.tasks.python import vision
from mediapipe.framework.formats import landmark_pb2
from mediapipe import solutions

# Pipeline:
#   1. Take a photo.
#   2. Run the face landmarker on it to crop the eye region.
#   3. Run our model on the crop.
#   4. Display the results.
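
# A hedged sketch for fetching the model asset when it is missing (assumption:
# the public MediaPipe hosting URL below is still current; substitute wherever
# you actually host the .task file):
#
#   import os, urllib.request
#   MODEL = "face_landmarker_v2_with_blendshapes.task"
#   if not os.path.exists(MODEL):
#       urllib.request.urlretrieve(
#           "https://storage.googleapis.com/mediapipe-models/face_landmarker/"
#           "face_landmarker/float16/1/face_landmarker.task",
#           MODEL)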

# Create a FaceLandmarker object.
base_options = python.BaseOptions(model_asset_path='face_landmarker_v2_with_blendshapes.task')
options = vision.FaceLandmarkerOptions(base_options=base_options,
                                       output_face_blendshapes=True,
                                       output_facial_transformation_matrixes=True,
                                       num_faces=1)
detector = vision.FaceLandmarker.create_from_options(options)
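
# The options above also request blendshapes and transformation matrixes,
# which this app never reads. A minimal sketch of inspecting the blendshape
# scores on a FaceLandmarkerResult (`some_mp_image` is a placeholder mp.Image):
#
#   result = detector.detect(some_mp_image)
#   for category in result.face_blendshapes[0]:
#       print(category.category_name, category.score)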


def handle_image(input_image):
    # Gradio hands the image over as an RGB numpy array; cv2.imwrite expects
    # BGR, so convert before writing, then reload as a MediaPipe image.
    cv2.imwrite("image.jpg", cv2.cvtColor(input_image, cv2.COLOR_RGB2BGR))
    image = mp.Image.create_from_file("image.jpg")

    detection_result = detector.detect(image)
    if not detection_result.face_landmarks:
        raise gr.Error("No face detected in the image.")

    cropped_image = image.numpy_view().copy()

    # Convert the first face's landmarks into a NormalizedLandmarkList proto
    # so they can be indexed here and reused with MediaPipe's drawing utilities.
    face_landmarks = detection_result.face_landmarks[0]
    face_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
    face_landmarks_proto.landmark.extend([
        landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z)
        for landmark in face_landmarks
    ])

    # Landmark coordinates are normalized to [0, 1]; scale them to pixels.
    # Mesh points 70 and 346 bracket the eye region and serve as the top-left
    # and bottom-right corners of the crop.
    height, width, _ = cropped_image.shape
    p1 = [int(face_landmarks_proto.landmark[70].x * width), int(face_landmarks_proto.landmark[70].y * height)]
    p2 = [int(face_landmarks_proto.landmark[346].x * width), int(face_landmarks_proto.landmark[346].y * height)]
    # Mark the two corners on the input frame (debug only; not displayed).
    cv2.circle(input_image, (p1[0], p1[1]), 10, (0, 0, 255), -1)
    cv2.circle(input_image, (p2[0], p2[1]), 10, (0, 0, 255), -1)
    print(p1[0], p1[1], p2[0], p2[1], height, width)

    # numpy slicing is [rows (y), cols (x)], with row 0 at the top.
    cropped_image = cropped_image[p1[1]:p2[1], p1[0]:p2[0]]
    return cropped_image
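

# Optional debug helper (not wired into the UI): a sketch that overlays the
# full face mesh on a copy of an RGB frame, following the drawing pattern from
# MediaPipe's stock FaceLandmarker examples.
def draw_mesh(rgb_image, face_landmarks_proto):
    annotated = rgb_image.copy()
    solutions.drawing_utils.draw_landmarks(
        image=annotated,
        landmark_list=face_landmarks_proto,
        connections=solutions.face_mesh.FACEMESH_TESSELATION,
        landmark_drawing_spec=None,
        connection_drawing_spec=solutions.drawing_styles.get_default_face_mesh_tesselation_style())
    return annotated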



with gr.Blocks() as demo:
    gr.Markdown("# Iris detection")

    image1 = gr.Image()
    b = gr.Button("Analyze")

    gr.Markdown("# Cropped image")
    # Named to avoid shadowing handle_image's cropped_image.
    cropped_output = gr.Image()

    b.click(fn=handle_image, inputs=image1, outputs=[cropped_output])

demo.launch()
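
# A minimal headless smoke test (assumption: a local photo named
# "sample_face.jpg" exists); run it in place of demo.launch() above to
# exercise the pipeline without the UI:
#
#   frame = cv2.cvtColor(cv2.imread("sample_face.jpg"), cv2.COLOR_BGR2RGB)
#   crop = handle_image(frame)
#   cv2.imwrite("crop.jpg", cv2.cvtColor(crop, cv2.COLOR_RGB2BGR))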