zivpollak commited on
Commit
f0d897c
1 Parent(s): cdeeede

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -424
app.py DELETED
@@ -1,424 +0,0 @@
1
- import gradio as gr
2
- import pandas as pd
3
- import cv2
4
- import mediapipe as mp
5
- import os
6
- from statistics import mean
7
- import numpy as np
8
- from mediapipe.tasks import python
9
- from mediapipe.tasks.python import vision
10
- from mediapipe.framework.formats import landmark_pb2
11
- from mediapipe import solutions
12
-
13
-
14
- import matplotlib
15
- matplotlib.use("Agg")
16
- import matplotlib.pyplot as plt
17
-
18
- # Record video
19
- # Save video?
20
- # Break video into images
21
- # Run facemesh on all images and save locations
22
- # Run extreme locations
23
- # Run analysis on those compare to the first frame
24
-
25
# Create a FaceLandmarker object.
# The .task bundle must sit next to this script; it provides the face mesh,
# blendshape and transformation-matrix models used by `detector` below.
base_options = python.BaseOptions(model_asset_path='face_landmarker_v2_with_blendshapes.task')
options = vision.FaceLandmarkerOptions(base_options=base_options,
                                       output_face_blendshapes=True,
                                       output_facial_transformation_matrixes=True,
                                       num_faces=1)  # track exactly one face per frame
detector = vision.FaceLandmarker.create_from_options(options)


# Shared mutable state, reset at the start of each handleVideo() run.
# NOTE(review): `global` at module level is a no-op; kept for byte-identity.
global pupilLocation
# One row per processed frame: file name + 8 iris extents relative to the nose.
pupilLocation = pd.DataFrame()
# Horizontal iris diameters (pixels), two entries per frame (left, right).
pupil_sizes = []
# Per-direction movement summaries (mm), filled by handleVideo().
ExteremeDistanceLeftEye = pd.DataFrame()
ExteremeDistanceRightEye = pd.DataFrame()
39
-
40
def video_identity(video):
    """Identity pass-through: hand back the received video unchanged."""
    return video
42
-
43
- # To do
44
- # 1. Filter out closed eye frames
45
- # 2. Smooth pursuit from video POC
46
-
47
def isEyeOpen(image):
    """Heuristically decide whether an eye crop is open.

    Args:
        image: BGR image crop of a single eye region (numpy array).

    Returns:
        bool: True when the grayscale histogram suggests an open eye.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
    # Indices of histogram bins that hold more than 10 pixels.
    colors = np.where(hist > 10)
    # NOTE(review): np.mean over the np.where tuple averages the bin indices
    # together with the (all-zero) column indices; the threshold 15 looks
    # empirically tuned — confirm against real open/closed-eye crops.
    # Fix: return the comparison directly instead of an if/else that
    # returns True/False.
    return bool(np.mean(colors) > 15)
57
-
58
-
59
-
60
- #demo = gr.Interface(video_identity,
61
- # gr.Video(shape = (1000,1000), source="webcam"),
62
- # "playable_video")
63
-
64
def findIrisInFrame(image, counter):
    """Detect face landmarks in one frame and record both iris positions.

    Args:
        image: mediapipe ``mp.Image`` of the current frame (the caller
            decodes it from "image.jpg" before calling).
        counter: zero-based frame index; used in output file names.

    Returns:
        A one-row DataFrame: [frame name, left iris left-x, top-y, right-x,
        bottom-y, right iris left-x, top-y, right-x, bottom-y], with every
        coordinate taken relative to the nose-bridge landmark.

    Side effects:
        Appends the row to the global ``pupilLocation``, appends both iris
        widths (pixels) to ``pupil_sizes``, and writes an annotated debug
        image to ``Images/post-image-<counter>.jpg``.
    """
    global pupilLocation, pupil_sizes

    # Detect face landmarks from the input image.
    # Fix: the argument used to be ignored and the frame re-read from
    # "image.jpg"; the caller creates `image` from that same file, so using
    # the parameter directly is equivalent and avoids a redundant disk read.
    detection_result = detector.detect(image)
    annotated_image = image.numpy_view().copy()

    # num_faces=1 in the detector options, so only the first face is used.
    face_landmarks_list = detection_result.face_landmarks
    face_landmarks = face_landmarks_list[0]
    face_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
    face_landmarks_proto.landmark.extend([
        landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z) for landmark in face_landmarks
    ])

    height, width, _ = annotated_image.shape

    def _to_pixel(idx):
        # Convert normalized landmark `idx` to integer pixel [x, y].
        lm = face_landmarks_proto.landmark[idx]
        return [int(lm.x * width), int(lm.y * height)]

    # Landmark 168 (nose bridge, between the eyes) anchors all coordinates
    # so head translation cancels out of the per-frame differences.
    nose = _to_pixel(168)
    cv2.circle(annotated_image, (nose[0], nose[1]), 3, (0, 0, 255), -1)

    # Mediapipe iris landmark indices, in order: right, top, left, bottom.
    leftIrisPoints = [474, 475, 476, 477]
    rightIrisPoints = [469, 470, 471, 472]

    left_iris = []
    for p in leftIrisPoints:
        point = _to_pixel(p)
        left_iris.append(point)
        cv2.circle(annotated_image, point, 1, (255, 0, 255), 1)

    right_iris = []
    for p in rightIrisPoints:
        point = _to_pixel(p)
        right_iris.append(point)
        cv2.circle(annotated_image, point, 1, (255, 0, 255), 1)

    # Extreme points of each iris (see the index-order comment above).
    leftIris_leftside = (int(left_iris[2][0]), int(left_iris[2][1]))
    leftIris_rightside = (int(left_iris[0][0]), int(left_iris[0][1]))
    leftIris_top = (int(left_iris[1][0]), int(left_iris[1][1]))
    leftIris_bottom = (int(left_iris[3][0]), int(left_iris[3][1]))
    rightIris_leftside = (int(right_iris[2][0]), int(right_iris[2][1]))
    rightIris_rightside = (int(right_iris[0][0]), int(right_iris[0][1]))
    rightIris_top = (int(right_iris[1][0]), int(right_iris[1][1]))
    rightIris_bottom = (int(right_iris[3][0]), int(right_iris[3][1]))

    cv2.imwrite("Images/post-image-%d.jpg" % counter, annotated_image)  # save annotated frame as JPEG

    # Horizontal iris diameters in pixels; used later to estimate mm/pixel.
    pupil_sizes.append(abs(leftIris_leftside[0] - leftIris_rightside[0]))
    pupil_sizes.append(abs(rightIris_leftside[0] - rightIris_rightside[0]))

    name = "frame%d.jpg" % counter
    newRow = pd.Series([name,
                        leftIris_leftside[0] - nose[0],
                        leftIris_top[1] - nose[1],
                        leftIris_rightside[0] - nose[0],
                        leftIris_bottom[1] - nose[1],
                        rightIris_leftside[0] - nose[0],
                        rightIris_top[1] - nose[1],
                        rightIris_rightside[0] - nose[0],
                        rightIris_bottom[1] - nose[1]
                        ])
    newRow = newRow.to_frame().T
    pupilLocation = pd.concat([pupilLocation, newRow], axis=0, ignore_index=True)

    return newRow
193
-
194
-
195
def handleVideo(input_video):
    """Analyze an eye-movement video and summarize the range of motion.

    Splits the video into frames, locates both irises per frame via
    findIrisInFrame(), converts pixel deltas to millimetres using the
    average iris diameter (assumed 11.7 mm), and extracts the extreme
    gaze frames in each direction.

    Args:
        input_video: path to the video file (as provided by gradio).

    Returns:
        (ExteremeDistanceLeftEye, ExteremeDistanceRightEye,
         ExtermeImageLeftEye, ExtermeImageRightEye, fig1, fig2):
        per-direction mm tables for each eye, extreme-gaze image lists,
        and X/Y movement plots.
    """
    global ExteremeDistanceLeftEye, ExteremeDistanceRightEye, pupilLocation, pupil_sizes

    pupilLocation = pd.DataFrame()  # Make sure it is empty to begin with
    pupil_sizes = []
    vidcap = cv2.VideoCapture(input_video)
    success, image = vidcap.read()
    fps = vidcap.get(cv2.CAP_PROP_FPS)
    frame_count = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    width = vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)  # float `width`
    height = vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float `height`
    print('FPS =', fps)
    print('Frame count =', frame_count)
    print('Resolution =', width, ' X ', height)

    count = 0
    # Debug/annotated frames are written into Images/.
    if not os.path.exists('Images'):
        os.makedirs('Images')

    # Split the video into frames and find the irises in each frame.
    while success:
        cv2.imwrite("image.jpg", image)  # save frame as JPEG file
        print("Image#", count)
        # findIrisInFrame appends to pupilLocation / pupil_sizes.
        image = mp.Image.create_from_file("image.jpg")
        findIrisInFrame(image, count)

        count += 1
        success, image = vidcap.read()

    # Find the average iris size in pixels; with enough samples, trim the
    # 10 largest and 10 smallest outliers first.
    pupil_average = 11.7
    if (len(pupil_sizes) > 100):
        for i in range(10):
            pupil_sizes.remove(max(pupil_sizes))
            pupil_sizes.remove(min(pupil_sizes))
        pupil_average = mean(pupil_sizes)  # this should be 11.7 mm
        print("pupil_average=", pupil_average)

    # Pupil size needs to be kept constant in all pictures: when one eye's
    # measured size differs wildly from the other's, widen it symmetrically.
    # NOTE(review): `row` from iterrows() is a copy — these assignments do
    # not write back into pupilLocation, so this loop has no effect on the
    # data used below. The >200 pixel thresholds also look unreachable for
    # typical iris sizes. Confirm intent before relying on this correction.
    for index, row in pupilLocation.iterrows():
        currentLeftSize = abs(row[1] - row[3])
        diffFromLeftAverage = pupil_average - currentLeftSize
        currentRightSize = abs(row[5] - row[7])
        diffFromAverage = pupil_average - currentRightSize

        if (currentRightSize - currentLeftSize > 200):
            print("Fixed Left pupil")
            row[1] = int(row[1] - diffFromAverage / 2)
            row[2] = int(row[2] - diffFromAverage / 2)
            row[3] = int(row[3] + diffFromAverage / 2)
            row[4] = int(row[4] + diffFromAverage / 2)

        if (currentLeftSize - currentRightSize > 200):
            print("Fixed Right pupil")
            row[5] = int(row[5] - diffFromAverage / 2)
            row[6] = int(row[6] - diffFromAverage / 2)
            row[7] = int(row[7] + diffFromAverage / 2)
            row[8] = int(row[8] + diffFromAverage / 2)

    print("file counter=", count)
    # Convert pupilLocation (absolute extents) to pupilDiff (per-frame
    # deltas against the first frame, which is the neutral "straight" look).
    pupilDiff = pupilLocation.copy()
    pupilDiff = pupilDiff.drop(pupilDiff.columns[0], axis=1)  # Remove file name
    for i in range(pupilDiff.shape[0] - 1):  # Calculate deltas
        pupilDiff.loc[i + 1] = (pupilDiff.loc[i + 1] - pupilDiff.loc[0])
    pupilDiff = pupilDiff.drop(0, axis=0)  # Remove the first (reference) row

    # Columns 9-12: pick the relevant limbus edge per axis — when the delta
    # is positive use the leading edge, otherwise the trailing edge; the
    # left eye's sign (cols 1/2) also selects which right-eye edge to take.
    pupilDiff[9] = 0
    pupilDiff[10] = 0

    pupilDiff[9] = np.where(pupilDiff[1] >= 0, pupilDiff[1], pupilDiff[3])
    pupilDiff[10] = np.where(pupilDiff[1] >= 0, pupilDiff[5], pupilDiff[7])
    pupilDiff[11] = np.where(pupilDiff[2] >= 0, pupilDiff[2], pupilDiff[4])
    pupilDiff[12] = np.where(pupilDiff[2] >= 0, pupilDiff[6], pupilDiff[8])
    print(pupilDiff[[1,3,5,7,9,10]])

    # Column 13: slope of the line joining the two iris centers per frame
    # (0.001 in the denominator avoids division by zero).
    x1 = (pupilLocation[1] + pupilLocation[3]) / 2
    y1 = (pupilLocation[2] + pupilLocation[4]) / 2
    x2 = (pupilLocation[5] + pupilLocation[7]) / 2
    y2 = (pupilLocation[6] + pupilLocation[8]) / 2
    pupilDiff[13] = ((y2 - y1) / (0.001 + x2 - x1))

    pupilDiff.to_csv('pupil_diff.csv')

    # mm-per-pixel scale, assuming the average iris spans 11.7 mm.
    pixels = 11.7 / pupil_average
    print("pixels (In MM) = ", pixels)

    pupilDiff = round(pupilDiff * pixels,3)

    # X-axis movement plot (left/right gaze), both eyes.
    fig1 = plt.figure()
    plt.plot(pupilDiff[[9,10]])
    plt.title("Pupil movement X axis")
    plt.ylabel("MM of movement")
    plt.xlabel("Frame")
    plt.ylim(-10, 10)
    plt.legend(['Left', 'Right'])

    # Y-axis movement plot (up/down gaze), both eyes.
    fig2 = plt.figure()
    plt.plot(pupilDiff[[11,12]])
    plt.ylim(-10, 10)
    plt.title("Pupil movement Y axis")
    plt.ylabel("MM of movement")
    plt.xlabel("Frame")
    plt.legend(['Left', 'Right'])

    # Frame indices of the extreme gaze in each direction.
    # Image y grows downward, hence idxmax for "down" and idxmin for "up".
    # Left eye
    LeftEyeLookingRight = pd.to_numeric(pupilDiff[1]).idxmax()
    LeftEyeLookingDown = pd.to_numeric(pupilDiff[2]).idxmax()
    LeftEyeLookingLeft = pd.to_numeric(pupilDiff[3]).idxmin()
    LeftEyeLookingUp = pd.to_numeric(pupilDiff[4]).idxmin()

    # Right eye
    RightEyeLookingRight = pd.to_numeric(pupilDiff[5]).idxmax()
    RightEyeLookingDown = pd.to_numeric(pupilDiff[6]).idxmax()
    RightEyeLookingLeft = pd.to_numeric(pupilDiff[7]).idxmin()
    RightEyeLookingUp = pd.to_numeric(pupilDiff[8]).idxmin()

    print("Left eye images = ", LeftEyeLookingRight, LeftEyeLookingDown, LeftEyeLookingLeft, LeftEyeLookingUp)
    print("Right eye images = ", RightEyeLookingRight, RightEyeLookingDown, RightEyeLookingLeft, RightEyeLookingUp)

    # Load the extreme-gaze frames (BGR -> RGB for gradio galleries).
    # NOTE(review): this reads Images/frame<N>.jpg but the frame loop above
    # never writes files under that name — confirm these images exist.
    ExtermeImageLeftEye = list([cv2.cvtColor(cv2.imread("Images/frame%d.jpg" % LeftEyeLookingRight), cv2.COLOR_BGR2RGB),
                                cv2.cvtColor(cv2.imread("Images/frame%d.jpg" % LeftEyeLookingLeft), cv2.COLOR_BGR2RGB),
                                cv2.cvtColor(cv2.imread("Images/frame%d.jpg" % LeftEyeLookingUp), cv2.COLOR_BGR2RGB),
                                cv2.cvtColor(cv2.imread("Images/frame%d.jpg" % LeftEyeLookingDown), cv2.COLOR_BGR2RGB)])

    ExtermeImageRightEye = list([cv2.cvtColor(cv2.imread("Images/frame%d.jpg" % RightEyeLookingRight), cv2.COLOR_BGR2RGB),
                                 cv2.cvtColor(cv2.imread("Images/frame%d.jpg" % RightEyeLookingLeft), cv2.COLOR_BGR2RGB),
                                 cv2.cvtColor(cv2.imread("Images/frame%d.jpg" % RightEyeLookingUp), cv2.COLOR_BGR2RGB),
                                 cv2.cvtColor(cv2.imread("Images/frame%d.jpg" % RightEyeLookingDown), cv2.COLOR_BGR2RGB)])

    # Per-direction travel distances (mm), left eye.
    d = { 'direction': ['Right', 'Left', 'Up', 'Down'] ,
          'mm' : [abs(round(pd.to_numeric(pupilDiff[1]).max(),1)),
                  abs(round(pd.to_numeric(pupilDiff[3]).min(),1)),
                  abs(round(pd.to_numeric(pupilDiff[4]).min(),1)),
                  abs(round(pd.to_numeric(pupilDiff[2]).max(),1))
                  ]}
    ExteremeDistanceLeftEye = pd.DataFrame(data=d)

    # Per-direction travel distances (mm), right eye.
    d = {'direction': ['Right', 'Left', 'Up', 'Down'],
         'mm': [abs(round(pd.to_numeric(pupilDiff[5]).max(), 1)),
                abs(round(pd.to_numeric(pupilDiff[7]).min(), 1)),
                abs(round(pd.to_numeric(pupilDiff[8]).min(), 1)),
                abs(round(pd.to_numeric(pupilDiff[6]).max(), 1))
                ]}
    ExteremeDistanceRightEye = pd.DataFrame(data=d)

    print()
    return ExteremeDistanceLeftEye, ExteremeDistanceRightEye, ExtermeImageLeftEye, ExtermeImageRightEye, fig1, fig2
380
-
381
-
382
# Gradio UI: one video input, one button, and the six handleVideo() outputs
# (two mm tables, two image galleries, two matplotlib plots).
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Range of Motion Video Analysis
        Capture a video of the following looks: stright, left, right, up & down
        """)
    video1 = gr.Video()  # source="webcam")

    b = gr.Button("Analyze Video")

    gr.Markdown(
        """
        # Left eye results (in mm):
        """)
    LeftEyeGallery = gr.Gallery(
        label="Left eye", show_label=False, elem_id="left_eye_gallery"
    ).style(grid=[4], height="auto")
    # Seeded with the (empty) module-level DataFrame; filled after analysis.
    movementDataLeft = gr.Dataframe(ExteremeDistanceLeftEye)

    gr.Markdown(
        """
        # Right eye results (in mm):
        """)
    RightEyeGallery = gr.Gallery(
        label="Right eye", show_label=False, elem_id="right_eye_gallery"
    ).style(grid=[4], height="auto")
    movementDataRight = gr.Dataframe(ExteremeDistanceRightEye)

    plot1 = gr.Plot(label="Plot1")

    plot2 = gr.Plot(label="Plot2")

    # Output order must match handleVideo()'s return tuple.
    out = [movementDataLeft, movementDataRight, LeftEyeGallery, RightEyeGallery, plot1, plot2]
    b.click(fn=handleVideo, inputs=video1, outputs=out)

demo.launch()
419
-
420
-
421
-
422
-
423
-
424
-