Spaces:
Sleeping
Sleeping
Ziv Pollak
committed on
Commit
•
1342daf
1
Parent(s):
ad0e45f
Add application file
Browse files
app.py
ADDED
@@ -0,0 +1,232 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr
import mediapipe as mp
import cv2
import pandas as pd
from statistics import mean

# MediaPipe Face Mesh setup. `refine_landmarks=True` (used in findIris)
# is what exposes the iris landmarks 468-477 referenced below.
mp_face_mesh = mp.solutions.face_mesh
mp_drawing = mp.solutions.drawing_utils
drawing_spec = mp_drawing.DrawingSpec(thickness=2, circle_radius=3)

# Module-level state shared with the Gradio callback (findIris rebinds
# pupilLocation via a `global` declaration inside the function).
# NOTE: the original module-scope `global pupilLocation, movementLeft,
# movementRight` statement was removed — at module level it is a no-op,
# since module-level names are already global.
pupilLocation = pd.DataFrame()  # one row per captured image: name + 8 iris offsets
movementLeft = pd.DataFrame(index=['Up', 'Center', 'Down'], columns=['Left', 'Center', 'Right'])
movementRight = pd.DataFrame(index=['Up', 'Center', 'Down'], columns=['Left', 'Center', 'Right'])

# TO DO:
# 1. Calibration screen
+
def findIris(input_img1, input_img2, input_img3, input_img4, input_img5):
|
22 |
+
global pupilLocation
|
23 |
+
pupilLocation = pd.DataFrame() # Make sure it is empty
|
24 |
+
images = [input_img1, input_img2, input_img3, input_img4, input_img5]
|
25 |
+
output_images = []
|
26 |
+
pupil_sizes = []
|
27 |
+
with mp_face_mesh.FaceMesh(max_num_faces=1, refine_landmarks=True,
|
28 |
+
static_image_mode=True,
|
29 |
+
min_detection_confidence=0.45) as face_mesh:
|
30 |
+
for image in images:
|
31 |
+
if image is None:
|
32 |
+
continue
|
33 |
+
|
34 |
+
results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
|
35 |
+
if not results.multi_face_landmarks:
|
36 |
+
continue
|
37 |
+
|
38 |
+
annotated_image = image.copy()
|
39 |
+
for face_landmarks in results.multi_face_landmarks:
|
40 |
+
height, width, _ = annotated_image.shape
|
41 |
+
nose = [int(face_landmarks.landmark[168].x * width), int(face_landmarks.landmark[168].y * height)]
|
42 |
+
cv2.circle(annotated_image, (nose[0], nose[1]), 3, (0, 0, 255), -1)
|
43 |
+
|
44 |
+
leftIrisPoints = [474, 475, 476, 477]
|
45 |
+
rightIrisPoints = [469, 470, 471, 472]
|
46 |
+
# right, top, left, bottom
|
47 |
+
|
48 |
+
left_iris = []
|
49 |
+
for p in leftIrisPoints:
|
50 |
+
point = [int(face_landmarks.landmark[p].x * width), int(face_landmarks.landmark[p].y * height)]
|
51 |
+
left_iris.append(point)
|
52 |
+
cv2.circle(annotated_image, point, 1, (255, 0, 255), 2)
|
53 |
+
|
54 |
+
right_iris = []
|
55 |
+
for p in rightIrisPoints:
|
56 |
+
point = [int(face_landmarks.landmark[p].x * width), int(face_landmarks.landmark[p].y * height)]
|
57 |
+
right_iris.append(point)
|
58 |
+
cv2.circle(annotated_image, point, 1, (255, 0, 255), 2)
|
59 |
+
|
60 |
+
leftIris_leftside = (int(left_iris[2][0]), int(left_iris[2][1]))
|
61 |
+
leftIris_rightside = (int(left_iris[0][0]), int(left_iris[0][1]))
|
62 |
+
leftIris_top = (int(left_iris[1][0]), int(left_iris[1][1]))
|
63 |
+
leftIris_bottom = (int(left_iris[3][0]), int(left_iris[3][1]))
|
64 |
+
rightIris_leftside = (int(right_iris[2][0]), int(right_iris[2][1]))
|
65 |
+
rightIris_rightside = (int(right_iris[0][0]), int(right_iris[0][1]))
|
66 |
+
rightIris_top = (int(right_iris[1][0]), int(right_iris[1][1]))
|
67 |
+
rightIris_bottom = (int(right_iris[3][0]), int(right_iris[3][1]))
|
68 |
+
|
69 |
+
'''
|
70 |
+
cv2.circle(annotated_image,
|
71 |
+
(int((leftIris_leftside[0] + leftIris_rightside[0]) / 2),
|
72 |
+
int((leftIris_top[1] + leftIris_bottom[1]) / 2)),
|
73 |
+
# int(abs(leftIris_leftside[0] - leftIris_rightside[0])/2
|
74 |
+
1,
|
75 |
+
(0, 255, 255), 2)
|
76 |
+
|
77 |
+
cv2.circle(annotated_image,
|
78 |
+
(int((rightIris_leftside[0] + rightIris_rightside[0]) / 2),
|
79 |
+
int((rightIris_top[1] + rightIris_bottom[1]) / 2)),
|
80 |
+
# int(abs(rightIris_leftside[0] - rightIris_rightside[0]) / 2
|
81 |
+
1,
|
82 |
+
(0, 255, 255), 2)
|
83 |
+
'''
|
84 |
+
|
85 |
+
left = leftIris_leftside[0] - 150
|
86 |
+
right = rightIris_rightside[0] + 150
|
87 |
+
up = leftIris_top[1] - 50
|
88 |
+
down = leftIris_bottom[1] + 50
|
89 |
+
annotated_image = annotated_image[up:down, left:right]
|
90 |
+
|
91 |
+
name = 'TBD'
|
92 |
+
newRow = pd.Series([name,
|
93 |
+
leftIris_leftside[0] - nose[0],
|
94 |
+
leftIris_top[1] - nose[1],
|
95 |
+
leftIris_rightside[0] - nose[0],
|
96 |
+
leftIris_bottom[1] - nose[1],
|
97 |
+
rightIris_leftside[0] - nose[0],
|
98 |
+
rightIris_top[1] - nose[1],
|
99 |
+
rightIris_rightside[0] - nose[0],
|
100 |
+
rightIris_bottom[1] - nose[1]
|
101 |
+
])
|
102 |
+
newRow = newRow.to_frame().T
|
103 |
+
pupilLocation = pd.concat([pupilLocation, newRow], axis=0, ignore_index=True)
|
104 |
+
#print("Inside pupil Location = ", pupilLocation)
|
105 |
+
#filename = directoy_name + 'Analysis/' + name[0:-4] + '-analysis.jpg'
|
106 |
+
#cv2.imwrite(filename, annotated_image)
|
107 |
+
|
108 |
+
x1 = (leftIris_leftside[0] - nose[0] + leftIris_rightside[0] - nose[0]) / 2
|
109 |
+
y1 = (leftIris_top[1] - nose[1] + leftIris_bottom[1] - nose[1]) / 2
|
110 |
+
x2 = (rightIris_leftside[0] - nose[0] + rightIris_rightside[0] - nose[0]) / 2
|
111 |
+
y2 = (rightIris_top[1] - nose[1] + rightIris_bottom[1] - nose[1]) / 2
|
112 |
+
print("Slope=", (y2 - y1) / (x2 - x1))
|
113 |
+
text = "Slope=" + str(round((y2 - y1) / (x2 - x1), 2))
|
114 |
+
cv2.putText(annotated_image, text,
|
115 |
+
(5, 110), cv2.FONT_HERSHEY_SIMPLEX,
|
116 |
+
1, (255, 255, 0), 1, cv2.LINE_AA)
|
117 |
+
|
118 |
+
print("left iris size in pixels = ", abs(leftIris_leftside[0] - leftIris_rightside[0]))
|
119 |
+
print("Right iris size in pixels = ", abs(rightIris_leftside[0] - rightIris_rightside[0]))
|
120 |
+
pupil_sizes.append(abs(leftIris_leftside[0] - leftIris_rightside[0]))
|
121 |
+
pupil_sizes.append(abs(rightIris_leftside[0] - rightIris_rightside[0]))
|
122 |
+
|
123 |
+
output_images.append(annotated_image)
|
124 |
+
|
125 |
+
# calculate final results from pupilLocations
|
126 |
+
pupilDiff = pupilLocation.copy()
|
127 |
+
pupilDiff = pupilDiff.drop(pupilDiff.columns[0], axis=1) # Remove file name
|
128 |
+
for i in range(pupilDiff.shape[0] - 1): # Calculate deltas
|
129 |
+
pupilDiff.loc[i + 1] = abs(pupilDiff.loc[i + 1] - pupilDiff.loc[0])
|
130 |
+
print("pupilDiff=", pupilDiff)
|
131 |
+
pupilDiff = pupilDiff.drop(0, axis=0) # Remove first row was was used as reference row
|
132 |
+
#print("pupilDiff (in pixels)=", pupilDiff)
|
133 |
+
|
134 |
+
# Find average pupil size
|
135 |
+
pupil_sizes.remove(max(pupil_sizes))
|
136 |
+
pupil_sizes.remove(min(pupil_sizes))
|
137 |
+
pupil_average = mean(pupil_sizes) # this should be 11.7 mm
|
138 |
+
pixels = 11.7 / pupil_average
|
139 |
+
print("pixels (In MM) = ", pixels)
|
140 |
+
|
141 |
+
# Left Eye movement
|
142 |
+
movementLeft.iloc[0, 0] = ' '
|
143 |
+
movementLeft.iloc[0, 2] = ' '
|
144 |
+
movementLeft.iloc[1, 1] = 0 # reference point
|
145 |
+
movementLeft.iloc[2, 0] = ' '
|
146 |
+
movementLeft.iloc[2, 2] = ' '
|
147 |
+
|
148 |
+
# Y movement only
|
149 |
+
movementLeft.iloc[0, 1] = round(abs(pupilLocation.iloc[0, 4] - pupilLocation.iloc[1, 4]) * pixels, 0) # Up
|
150 |
+
movementLeft.iloc[2, 1] = round(abs(pupilLocation.iloc[0, 2] - pupilLocation.iloc[3, 2]) * pixels, 0) # Down
|
151 |
+
|
152 |
+
# X movement only
|
153 |
+
movementLeft.iloc[1, 0] = round(abs(pupilLocation.iloc[0, 3] - pupilLocation.iloc[1, 3]) * pixels, 1) # Left
|
154 |
+
movementLeft.iloc[1, 2] = round(abs(pupilLocation.iloc[0, 1] - pupilLocation.iloc[2, 1]) * pixels, 1) # Right
|
155 |
+
|
156 |
+
|
157 |
+
# Right Eye Movement
|
158 |
+
movementRight.iloc[0, 0] = ' '
|
159 |
+
movementRight.iloc[0, 2] = ' '
|
160 |
+
movementRight.iloc[1, 1] = 0 # reference point
|
161 |
+
movementRight.iloc[2, 0] = ' '
|
162 |
+
movementRight.iloc[2, 2] = ' '
|
163 |
+
|
164 |
+
# Y movement only
|
165 |
+
movementRight.iloc[0, 1] = round(abs(pupilLocation.iloc[0, 8] - pupilLocation.iloc[1, 8]) * pixels, 0) # Up
|
166 |
+
movementRight.iloc[2, 1] = round(abs(pupilLocation.iloc[0, 6] - pupilLocation.iloc[3, 6]) * pixels, 0) # Down
|
167 |
+
|
168 |
+
# X movement only
|
169 |
+
movementRight.iloc[1, 0] = round(abs(pupilLocation.iloc[0, 7] - pupilLocation.iloc[1, 7]) * pixels, 0) # Left
|
170 |
+
movementRight.iloc[1, 2] = round(abs(pupilLocation.iloc[0, 5] - pupilLocation.iloc[2, 5]) * pixels, 0) # Right
|
171 |
+
|
172 |
+
return output_images[0], output_images[1], output_images[2], output_images[3], output_images[4], pupilLocation, movementLeft, movementRight
|
173 |
+
|
174 |
+
|
175 |
+
with gr.Blocks() as demo:
|
176 |
+
gr.Markdown(
|
177 |
+
"""
|
178 |
+
# Range of Motion Image Analysis
|
179 |
+
Take 5 pictures below looking stright, left, right, up & down
|
180 |
+
""")
|
181 |
+
with gr.Row():
|
182 |
+
with gr.Column(scale=1):
|
183 |
+
img1 = gr.Image(shape=(1000, 1000), source='webcam', label='Front')
|
184 |
+
with gr.Column(scale=1):
|
185 |
+
out1 = gr.Image(label='Out-Front')
|
186 |
+
with gr.Row():
|
187 |
+
with gr.Column(scale=1):
|
188 |
+
img2 = gr.Image(shape=(1000, 1000), source='webcam', label='Left')
|
189 |
+
with gr.Column(scale=1):
|
190 |
+
out2 = gr.Image(label='Out-Left')
|
191 |
+
with gr.Row():
|
192 |
+
with gr.Column(scale=1):
|
193 |
+
img3 = gr.Image(shape=(1000, 1000), source='webcam', label='Right')
|
194 |
+
with gr.Column(scale=1):
|
195 |
+
out3 = gr.Image(label='Out-Right')
|
196 |
+
with gr.Row():
|
197 |
+
with gr.Column(scale=1):
|
198 |
+
img4 = gr.Image(shape=(1000, 1000), source='webcam', label='Up')
|
199 |
+
with gr.Column(scale=1):
|
200 |
+
out4 = gr.Image(label='Out-Up')
|
201 |
+
with gr.Row():
|
202 |
+
with gr.Column(scale=1):
|
203 |
+
img5 = gr.Image(shape=(1000, 1000), source='webcam', label='Down')
|
204 |
+
with gr.Column(scale=1):
|
205 |
+
out5 = gr.Image(label='Down-Right')
|
206 |
+
|
207 |
+
b = gr.Button("Go!")
|
208 |
+
|
209 |
+
gr.Markdown(
|
210 |
+
"""
|
211 |
+
Pupil Locations:
|
212 |
+
""")
|
213 |
+
pupilData = gr.Dataframe(pupilLocation)
|
214 |
+
|
215 |
+
gr.Markdown(
|
216 |
+
"""
|
217 |
+
# Left eye results (in mm):
|
218 |
+
""")
|
219 |
+
movementDataLeft = gr.Dataframe(movementLeft)
|
220 |
+
|
221 |
+
gr.Markdown(
|
222 |
+
"""
|
223 |
+
# Right eye results (in mm):
|
224 |
+
""")
|
225 |
+
movementDataRight = gr.Dataframe(movementRight)
|
226 |
+
|
227 |
+
inp = [img1, img2, img3, img4, img5]
|
228 |
+
out = [out1, out2, out3, out4, out5, pupilData, movementDataLeft, movementDataRight]
|
229 |
+
b.click(fn=findIris, inputs=inp, outputs=out)
|
230 |
+
|
231 |
+
|
232 |
+
demo.launch(auth=("Andrew", "Andrew"), share=True)
|