Ziv Pollak committed on
Commit df0c2fd
1 Parent(s): df9124e

First version

Files changed (1)
  1. clinical1.py +102 -0
clinical1.py ADDED
@@ -0,0 +1,102 @@
+ import gradio as gr
+ import pandas as pd
+ import cv2
+ import mediapipe as mp
+ import os
+ from statistics import mean
+ import numpy as np
+ from mediapipe.tasks import python
+ from mediapipe.tasks.python import vision
+ from mediapipe.framework.formats import landmark_pb2
+ from mediapipe import solutions
+
+
+ import matplotlib
+ matplotlib.use("Agg")  # non-interactive backend; must be set before importing pyplot
+ import matplotlib.pyplot as plt
+
+ cropped_image = []
+ analyzed_image = []
+ # Pipeline: take a photo,
+ # run the face landmarker on it to crop the eye region,
+ # run our model on the crop,
+ # and display the results.
+
+ # Create a FaceLandmarker object.
+ base_options = python.BaseOptions(model_asset_path='face_landmarker_v2_with_blendshapes.task')
+ options = vision.FaceLandmarkerOptions(base_options=base_options,
+                                        output_face_blendshapes=True,
+                                        output_facial_transformation_matrixes=True,
+                                        num_faces=1)
+ detector = vision.FaceLandmarker.create_from_options(options)
+
+
+ # Unused for now: identity handler for the commented-out webcam video demo below.
+ def video_identity(video):
+     return video
+
+
+ #demo = gr.Interface(video_identity,
+ #                    gr.Video(shape=(1000, 1000), source="webcam"),
+ #                    "playable_video")
+
+
+ def handle_image(input_image):
+     global cropped_image, analyzed_image
+     # Gradio delivers an RGB array while OpenCV expects BGR, so convert before
+     # writing; otherwise the red and blue channels come back swapped.
+     cv2.imwrite("image.jpg", cv2.cvtColor(input_image, cv2.COLOR_RGB2BGR))
+     image = mp.Image.create_from_file("image.jpg")
+
+     detection_result = detector.detect(image)
+     cropped_image = image.numpy_view().copy()
+     analyzed_image = image.numpy_view().copy()
+
+     face_landmarks_list = detection_result.face_landmarks
+     if not face_landmarks_list:
+         # No face detected; return the photo unchanged rather than crashing.
+         return input_image
+
+     # Wrap the landmarks of the first face in a protobuf list so they can be
+     # indexed and drawn.
+     face_landmarks = face_landmarks_list[0]
+     face_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
+     face_landmarks_proto.landmark.extend([
+         landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z) for landmark in face_landmarks
+     ])
+
+     # Landmark 70 (near the left eyebrow) gives the top-left corner and
+     # landmark 346 (on the right cheek, below the eye) the bottom-right corner
+     # of the eye region. Landmark coordinates are normalized to [0, 1], so
+     # scale them by the image size to get pixels. The circles are debug
+     # markers drawn on the (currently unreturned) input image.
+     height, width, _ = cropped_image.shape
+     p1 = [int(face_landmarks_proto.landmark[70].x * width), int(face_landmarks_proto.landmark[70].y * height)]
+     cv2.circle(input_image, (p1[0], p1[1]), 10, (0, 0, 255), -1)
+     p2 = [int(face_landmarks_proto.landmark[346].x * width), int(face_landmarks_proto.landmark[346].y * height)]
+     cv2.circle(input_image, (p2[0], p2[1]), 10, (0, 0, 255), -1)
+     print(p1[0], p1[1], p2[0], p2[1], height, width)
+     cropped_image = cropped_image[p1[1]:p2[1], p1[0]:p2[0]]  # rows are indexed from the top
+     #return ([input_image, cropped_image])
+     return cropped_image
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown(
+         """
+         # Iris detection
+         """)
+     #video1 = gr.Video(height=200, width=200)  # source="webcam"
+
+     image1 = gr.Image()
+     b = gr.Button("Analyze")
+
+     gr.Markdown(
+         """
+         # Cropped image
+         """)
+     #cropped_image = gr.Gallery(
+     #    label="cropped", show_label=False, elem_id="cropped"
+     #)
+     cropped_image = gr.Image()
+
+     out = [cropped_image]
+     b.click(fn=handle_image, inputs=image1, outputs=out)
+
+ demo.launch()
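The script loads face_landmarker_v2_with_blendshapes.task from the working directory, but this commit does not add that asset. A minimal setup sketch, assuming the download URL published in the MediaPipe Face Landmarker documentation is still current (verify before relying on it):

# fetch_model.py -- one-time setup sketch, not part of this commit
import urllib.request

# URL as published in the MediaPipe Face Landmarker docs; an assumption here.
MODEL_URL = ("https://storage.googleapis.com/mediapipe-models/"
             "face_landmarker/face_landmarker/float16/1/face_landmarker.task")
urllib.request.urlretrieve(MODEL_URL, "face_landmarker_v2_with_blendshapes.task")
print("model saved; now run: python clinical1.py")

Once the file is in place, running python clinical1.py starts the Gradio app locally.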