Ziv Pollak committed on
Commit
ec720d7
1 Parent(s): daf377d

first version

Browse files
Files changed (2) hide show
  1. app.py +230 -0
  2. requirements.txt +5 -0
app.py ADDED
@@ -0,0 +1,230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import pandas as pd
3
+ import cv2
4
+ import mediapipe as mp
5
+ import os
6
+ from statistics import mean
7
+ # Record video
8
+ # Save video?
9
+ # Break video into images
10
+ # Run facemesh on all images and save locations
11
 + # Run extreme locations
12
+ # Run analysis on those compare to the first frame
13
+
14
+ # Run simple face mesh
15
# MediaPipe Face Mesh handles used for landmark detection and annotation.
# NOTE(review): drawing_spec is defined but not used anywhere in the code
# visible in this file — possibly left over from earlier experiments.
mp_face_mesh = mp.solutions.face_mesh
mp_drawing = mp.solutions.drawing_utils
drawing_spec = mp_drawing.DrawingSpec(thickness=2, circle_radius=3)
18
+
19
# Module-level shared state, reset at the start of each handleVideo() run.
# (fix: the original `global pupilLocation` statement at module scope is a
# no-op — `global` only has meaning inside a function body — so it is removed.)
pupilLocation = pd.DataFrame()        # one row per analyzed frame: file name + iris coords relative to the nose
pupil_sizes = []                      # measured iris widths in pixels, used to estimate the mm-per-pixel scale
ExteremeDistanceLeftEye = pd.DataFrame()   # per-direction travel (mm) for the left eye, filled by handleVideo()
ExteremeDistanceRightEye = pd.DataFrame()  # per-direction travel (mm) for the right eye, filled by handleVideo()
24
+
25
def video_identity(video):
    """Pass-through handler: return *video* unchanged.

    Kept around for wiring up a simple Gradio interface while testing.
    """
    return video
27
+
28
+
29
+ #demo = gr.Interface(video_identity,
30
+ # gr.Video(shape = (1000,1000), source="webcam"),
31
+ # "playable_video")
32
+
33
def findIrisInFrame(image, counter):
    """Locate both irises in one BGR frame and record their positions.

    Runs MediaPipe Face Mesh (with iris refinement) on *image*.  On success,
    appends one row to the global ``pupilLocation`` DataFrame holding the
    frame's file name plus the four iris edge coordinates of each eye,
    expressed relative to a nose-bridge landmark so head translation cancels
    out.  Both iris widths (pixels) are appended to ``pupil_sizes``.

    Parameters:
        image:   BGR frame (numpy array) as read by cv2.
        counter: frame index; only used to build the "frame%d.jpg" name.

    Returns:
        The newly appended single-row DataFrame, or None when no face was
        detected (in that case nothing is recorded for this frame).
    """
    global pupilLocation, pupil_sizes
    #pupilLocation = pd.DataFrame() # Make sure it is empty
    # A fresh FaceMesh is created per frame; static_image_mode=True treats
    # each frame independently, refine_landmarks=True adds the iris points.
    with mp_face_mesh.FaceMesh(max_num_faces=1, refine_landmarks=True,
                               static_image_mode=True,
                               min_detection_confidence=0.45) as face_mesh:

        # MediaPipe expects RGB; cv2 frames are BGR.
        results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        if not results.multi_face_landmarks:
            return None

        annotated_image = image.copy()
        for face_landmarks in results.multi_face_landmarks:
            height, width, _ = annotated_image.shape
            # Landmark 168 sits on the nose bridge between the eyes; it is
            # the stable reference point all iris coordinates are measured from.
            nose = [int(face_landmarks.landmark[168].x * width), int(face_landmarks.landmark[168].y * height)]
            cv2.circle(annotated_image, (nose[0], nose[1]), 3, (0, 0, 255), -1)

            # Refined iris landmark indices (require refine_landmarks=True).
            leftIrisPoints = [474, 475, 476, 477]
            rightIrisPoints = [469, 470, 471, 472]
            # right, top, left, bottom

            # Collect the four left-iris edge points in pixel coordinates.
            left_iris = []
            for p in leftIrisPoints:
                point = [int(face_landmarks.landmark[p].x * width), int(face_landmarks.landmark[p].y * height)]
                left_iris.append(point)
                cv2.circle(annotated_image, point, 1, (255, 0, 255), 2)

            # Same for the right iris.
            right_iris = []
            for p in rightIrisPoints:
                point = [int(face_landmarks.landmark[p].x * width), int(face_landmarks.landmark[p].y * height)]
                right_iris.append(point)
                cv2.circle(annotated_image, point, 1, (255, 0, 255), 2)

            # Name the four edges per the right/top/left/bottom ordering above.
            leftIris_leftside = (int(left_iris[2][0]), int(left_iris[2][1]))
            leftIris_rightside = (int(left_iris[0][0]), int(left_iris[0][1]))
            leftIris_top = (int(left_iris[1][0]), int(left_iris[1][1]))
            leftIris_bottom = (int(left_iris[3][0]), int(left_iris[3][1]))
            rightIris_leftside = (int(right_iris[2][0]), int(right_iris[2][1]))
            rightIris_rightside = (int(right_iris[0][0]), int(right_iris[0][1]))
            rightIris_top = (int(right_iris[1][0]), int(right_iris[1][1]))
            rightIris_bottom = (int(right_iris[3][0]), int(right_iris[3][1]))

            # Mark the left pupil center (midpoint of the iris extremes).
            cv2.circle(annotated_image,
                       (int((leftIris_leftside[0] + leftIris_rightside[0]) / 2),
                        int((leftIris_top[1] + leftIris_bottom[1]) / 2)),
                       # int(abs(leftIris_leftside[0] - leftIris_rightside[0])/2
                       1,
                       (0, 255, 255), 2)

            # Mark the right pupil center.
            cv2.circle(annotated_image,
                       (int((rightIris_leftside[0] + rightIris_rightside[0]) / 2),
                        int((rightIris_top[1] + rightIris_bottom[1]) / 2)),
                       # int(abs(rightIris_leftside[0] - rightIris_rightside[0]) / 2
                       1,
                       (0, 255, 255), 2)

            # Horizontal iris diameters in pixels; later averaged against the
            # ~11.7 mm anatomical iris width to calibrate pixel -> mm.
            pupil_sizes.append(abs(leftIris_leftside[0] - leftIris_rightside[0]))
            pupil_sizes.append(abs(rightIris_leftside[0] - rightIris_rightside[0]))

            # Row layout (positional, consumed by handleVideo): file name,
            # then left eye left-x/top-y/right-x/bottom-y, then the same four
            # for the right eye — all relative to the nose point.
            name = "frame%d.jpg" % counter
            newRow = pd.Series([name,
                                leftIris_leftside[0] - nose[0],
                                leftIris_top[1] - nose[1],
                                leftIris_rightside[0] - nose[0],
                                leftIris_bottom[1] - nose[1],
                                rightIris_leftside[0] - nose[0],
                                rightIris_top[1] - nose[1],
                                rightIris_rightside[0] - nose[0],
                                rightIris_bottom[1] - nose[1]
                                ])
            newRow = newRow.to_frame().T
            pupilLocation = pd.concat([pupilLocation, newRow], axis=0, ignore_index=True)

        return newRow
107
+
108
+
109
+
110
def handleVideo(input_video):
    """Analyze an eye range-of-motion video.

    Splits *input_video* into frames, locates both irises in each frame
    (via findIrisInFrame), then measures how far each eye travels in the
    four cardinal directions relative to the first frame.

    Parameters
    ----------
    input_video : str
        Path of the video file to analyze.

    Returns
    -------
    tuple
        (left-eye mm DataFrame, right-eye mm DataFrame,
         four extreme left-eye RGB images, four extreme right-eye RGB
         images); image lists are ordered Right/Down/Left/Up.
    """
    global ExteremeDistanceLeftEye, ExteremeDistanceRightEye, pupilLocation, pupil_sizes
    pupilLocation = pd.DataFrame()  # reset shared state from any previous run
    pupil_sizes = []

    vidcap = cv2.VideoCapture(input_video)
    count = 0
    try:
        success, image = vidcap.read()
        if not os.path.exists('Images'):
            os.makedirs('Images')

        # Slice the video into frames and find the irises in each one.
        while success:
            cv2.imwrite("Images/frame%d.jpg" % count, image)  # save frame as JPEG file
            findIrisInFrame(image, count)
            success, image = vidcap.read()
            count += 1
    finally:
        vidcap.release()  # fix: the capture handle was never released

    print("file counter=", count)

    # Convert absolute pupil locations into deltas from the first
    # (assumed straight-ahead) frame.
    # fix: keep the file-name column around — the DataFrame row index is NOT
    # the frame number whenever a frame had no detectable face, so extreme
    # frames must be looked up by their stored file name.
    frame_names = pupilLocation[pupilLocation.columns[0]]
    pupilDiff = pupilLocation.drop(pupilLocation.columns[0], axis=1)  # numeric columns only
    for i in range(pupilDiff.shape[0] - 1):  # Calculate deltas vs. row 0
        pupilDiff.loc[i + 1] = (pupilDiff.loc[i + 1] - pupilDiff.loc[0])
    pupilDiff = pupilDiff.drop(0, axis=0)  # row 0 is the reference frame
    print(pupilDiff)

    # Rows holding the extreme gaze positions.  Columns 1-4 are the left eye
    # (left-x, top-y, right-x, bottom-y relative to the nose) and 5-8 the
    # right eye; x grows rightward and y downward in image coordinates.
    LeftEyeLookingRight = pd.to_numeric(pupilDiff[1]).idxmin()
    LeftEyeLookingDown = pd.to_numeric(pupilDiff[2]).idxmax()
    LeftEyeLookingLeft = pd.to_numeric(pupilDiff[3]).idxmax()
    LeftEyeLookingUp = pd.to_numeric(pupilDiff[4]).idxmin()

    RightEyeLookingRight = pd.to_numeric(pupilDiff[5]).idxmin()
    RightEyeLookingDown = pd.to_numeric(pupilDiff[6]).idxmax()
    RightEyeLookingLeft = pd.to_numeric(pupilDiff[7]).idxmax()
    RightEyeLookingUp = pd.to_numeric(pupilDiff[8]).idxmin()

    print("Left eye images = ", LeftEyeLookingRight, LeftEyeLookingDown, LeftEyeLookingLeft, LeftEyeLookingUp)
    print("Right eye images = ", RightEyeLookingRight, RightEyeLookingDown, RightEyeLookingLeft, RightEyeLookingUp)

    def _rgb_frame(row_idx):
        # Load the saved frame for a pupilLocation row and convert BGR -> RGB
        # for display in the Gradio gallery.
        return cv2.cvtColor(cv2.imread("Images/" + frame_names.loc[row_idx]), cv2.COLOR_BGR2RGB)

    ExtermeImageLeftEye = [_rgb_frame(LeftEyeLookingRight), _rgb_frame(LeftEyeLookingDown),
                           _rgb_frame(LeftEyeLookingLeft), _rgb_frame(LeftEyeLookingUp)]
    ExtermeImageRightEye = [_rgb_frame(RightEyeLookingRight), _rgb_frame(RightEyeLookingDown),
                            _rgb_frame(RightEyeLookingLeft), _rgb_frame(RightEyeLookingUp)]

    # Estimate the pixel -> mm scale from the average iris width, discarding
    # up to the 10 largest and 10 smallest measurements as outliers.
    # fix: the original removed 20 samples unconditionally, which raised on
    # videos that produced fewer than 21 measurements.
    trim = min(10, (len(pupil_sizes) - 1) // 2)
    for _ in range(trim):
        pupil_sizes.remove(max(pupil_sizes))
        pupil_sizes.remove(min(pupil_sizes))
    pupil_average = mean(pupil_sizes)  # average iris width in px; anatomically ~11.7 mm
    pixels = 11.7 / pupil_average  # mm per pixel
    print("pixels (In MM) = ", pixels)

    # Leftward/upward motion is negative in image coordinates, hence
    # min() for Right/Up and max() for Down/Left.
    d = {'direction': ['Right', 'Down', 'Left', 'Up'],
         'mm': [round(pd.to_numeric(pupilDiff[1]).min() * pixels, 1),
                round(pd.to_numeric(pupilDiff[2]).max() * pixels, 1),
                round(pd.to_numeric(pupilDiff[3]).max() * pixels, 1),
                round(pd.to_numeric(pupilDiff[4]).min() * pixels, 1)
                ]}
    ExteremeDistanceLeftEye = pd.DataFrame(data=d)

    d = {'direction': ['Right', 'Down', 'Left', 'Up'],
         'mm': [round(pd.to_numeric(pupilDiff[5]).min() * pixels, 1),
                round(pd.to_numeric(pupilDiff[6]).max() * pixels, 1),
                round(pd.to_numeric(pupilDiff[7]).max() * pixels, 1),
                round(pd.to_numeric(pupilDiff[8]).min() * pixels, 1)
                ]}
    ExteremeDistanceRightEye = pd.DataFrame(data=d)

    return ExteremeDistanceLeftEye, ExteremeDistanceRightEye, ExtermeImageLeftEye, ExtermeImageRightEye
195
+
196
# Gradio front end: record a webcam video, run handleVideo() on it, and show
# the extreme-gaze frames plus the per-direction travel (in mm) of each eye.
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Range of Motion Video Analysis
        Capture a video of the following looks: straight, left, right, up & down
        """)  # fix: user-facing typo "stright" -> "straight"
    video1 = gr.Video(shape=(1000, 1000), source="webcam")

    b = gr.Button("Analyze Video")

    gr.Markdown(
        """
        # Left eye results (in mm):
        """)
    LeftEyeGallery = gr.Gallery(
        label="Left eye", show_label=False, elem_id="left_eye_gallery"
    ).style(grid=[4], height="auto")
    movementDataLeft = gr.Dataframe(ExteremeDistanceLeftEye)

    gr.Markdown(
        """
        # Right eye results (in mm):
        """)
    RightEyeGallery = gr.Gallery(
        label="Right eye", show_label=False, elem_id="right_eye_gallery"
    ).style(grid=[4], height="auto")
    movementDataRight = gr.Dataframe(ExteremeDistanceRightEye)

    # handleVideo returns (left mm, right mm, left images, right images),
    # matching this output order.
    out = [movementDataLeft, movementDataRight, LeftEyeGallery, RightEyeGallery]
    b.click(fn=handleVideo, inputs=video1, outputs=out)

demo.launch()
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
 + gradio
2
 + numpy
3
 + pandas
4
 + Pillow
5
 + opencv-python-headless
6
 + mediapipe