# EyeCareXV001 / app.py
# Author: Ziv Pollak
# Open-eyes detector / range-of-motion image analysis.
import gradio as gr
import mediapipe as mp
import cv2
import pandas as pd
from statistics import mean, stdev
import numpy as np
# Run simple face mesh
mp_face_mesh = mp.solutions.face_mesh
mp_drawing = mp.solutions.drawing_utils
drawing_spec = mp_drawing.DrawingSpec(thickness=2, circle_radius=3)
# Module-level results shared with the Gradio dataframe outputs; findIris()
# repopulates these on every run.
pupilLocation = pd.DataFrame()
movementLeft = pd.DataFrame(index=['Up', 'Center', 'Down'], columns=['Left', 'Center', 'Right'])
movementRight = pd.DataFrame(index=['Up', 'Center', 'Down'], columns=['Left', 'Center', 'Right'])
# TO DO:
# 1. Calibration screen
def isEyeOpen(image):
    # Score eye openness from the grayscale histogram of the eye crop: the mean
    # over the indices returned by np.where for bins holding more than 10 pixels.
    # Lower scores mean a darker crop (iris visible); callers treat < 15 as open.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    hist = cv2.calcHist([image], [0], None, [256], [0, 256])
    colors = np.where(hist > 10)
    return np.mean(colors)
def findIris(input_img1, input_img2, input_img3, input_img4, input_img5):
global pupilLocation
pupilLocation = pd.DataFrame() # Make sure it is empty
images = [input_img1, input_img2, input_img3, input_img4, input_img5]
output_images = []
pupil_sizes = []
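    # Single-face FaceMesh in static-image mode; refine_landmarks=True adds the
    # ten iris landmarks (indices 468-477) that the iris measurements below rely on.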
with mp_face_mesh.FaceMesh(max_num_faces=1, refine_landmarks=True,
static_image_mode=True,
min_detection_confidence=0.45) as face_mesh:
#for image in images:
for id, image in enumerate(images):
if image is None:
continue
results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
if not results.multi_face_landmarks:
continue
annotated_image = image.copy()
for face_landmarks in results.multi_face_landmarks:
height, width, _ = annotated_image.shape
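                # Landmark 168 sits roughly at the bridge of the nose, between the
                # eyes; it serves as a fixed reference point for the iris coordinates.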
nose = [int(face_landmarks.landmark[168].x * width), int(face_landmarks.landmark[168].y * height)]
cv2.circle(annotated_image, (nose[0], nose[1]), 3, (0, 0, 255), -1)
leftIrisPoints = [474, 475, 476, 477]
rightIrisPoints = [469, 470, 471, 472]
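                # Of the refined iris landmarks, 474-477 ring one iris and 469-472 the
                # other; only the four boundary points of each are used here, in the
                # order noted below (the iris centres, 473 and 468, are skipped).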
# right, top, left, bottom
left_iris = []
for p in leftIrisPoints:
point = [int(face_landmarks.landmark[p].x * width), int(face_landmarks.landmark[p].y * height)]
left_iris.append(point)
#cv2.circle(annotated_image, point, 1, (255, 0, 255), 2)
right_iris = []
for p in rightIrisPoints:
point = [int(face_landmarks.landmark[p].x * width), int(face_landmarks.landmark[p].y * height)]
right_iris.append(point)
#cv2.circle(annotated_image, point, 1, (255, 0, 255), 2)
leftIris_leftside = (int(left_iris[2][0]), int(left_iris[2][1]))
leftIris_rightside = (int(left_iris[0][0]), int(left_iris[0][1]))
leftIris_top = (int(left_iris[1][0]), int(left_iris[1][1]))
leftIris_bottom = (int(left_iris[3][0]), int(left_iris[3][1]))
rightIris_leftside = (int(right_iris[2][0]), int(right_iris[2][1]))
rightIris_rightside = (int(right_iris[0][0]), int(right_iris[0][1]))
rightIris_top = (int(right_iris[1][0]), int(right_iris[1][1]))
rightIris_bottom = (int(right_iris[3][0]), int(right_iris[3][1]))
sizeIncrease = 0
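                # Crop a tight box around each iris and score it with isEyeOpen();
                # sizeIncrease could pad the crop but is currently 0.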
leftEye = annotated_image[leftIris_top[1] - sizeIncrease : leftIris_bottom[1] + sizeIncrease,
leftIris_leftside[0] - sizeIncrease : leftIris_rightside[0] + sizeIncrease]
leftEyeOpen = isEyeOpen (leftEye)
rightEye = annotated_image[rightIris_top[1] - sizeIncrease: rightIris_bottom[1] + sizeIncrease,
rightIris_leftside[0] - sizeIncrease: rightIris_rightside[0] + sizeIncrease]
rightEyeOpen = isEyeOpen(rightEye)
'''
cv2.putText(annotated_image, "Left " + str(int(leftEyeOpen)),
(rightIris_leftside[0] - 20, leftIris_top[1] - 10), cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 255, 0), 1, cv2.LINE_AA)
'''
if (leftEyeOpen < 15):
cv2.putText(annotated_image, "Left Open " + str(leftEyeOpen),
(rightIris_leftside[0] - 20, leftIris_top[1] - 10), cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 255, 0), 1, cv2.LINE_AA)
else:
cv2.putText(annotated_image, "Left Closed " + str(leftEyeOpen),
(rightIris_leftside[0] - 20, leftIris_top[1] - 10), cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 255, 0), 1, cv2.LINE_AA)
'''
cv2.putText(annotated_image, "Right " + str(int(rightEyeOpen)),
(rightIris_leftside[0] - 20, rightIris_top[1] + 50), cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 255, 0), 1, cv2.LINE_AA)
'''
if (rightEyeOpen < 15):
cv2.putText(annotated_image, "Right Open " + str(rightEyeOpen),
(rightIris_leftside[0] - 20, rightIris_top[1] + 50), cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 255, 0), 1, cv2.LINE_AA)
else:
cv2.putText(annotated_image, "Right Closed " + str(rightEyeOpen),
(rightIris_leftside[0] - 20, rightIris_top[1] + 50), cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 255, 0), 1, cv2.LINE_AA)
#leftEye = cv2.cvtColor(leftEye, cv2.COLOR_BGR2GRAY)
cv2.circle(annotated_image,
(int((leftIris_leftside[0] + leftIris_rightside[0]) / 2),
int((leftIris_top[1] + leftIris_bottom[1]) / 2)),
# int(abs(leftIris_leftside[0] - leftIris_rightside[0])/2
1,
(0, 255, 255), 2)
cv2.circle(annotated_image,
(int((rightIris_leftside[0] + rightIris_rightside[0]) / 2),
int((rightIris_top[1] + rightIris_bottom[1]) / 2)),
# int(abs(rightIris_leftside[0] - rightIris_rightside[0]) / 2
1,
(0, 255, 255), 2)
name = 'TBD'
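                # One row per captured image, all coordinates relative to the nose point:
                # [name, L_left_x, L_top_y, L_right_x, L_bottom_y, R_left_x, R_top_y, R_right_x, R_bottom_y]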
newRow = pd.Series([name,
leftIris_leftside[0] - nose[0],
leftIris_top[1] - nose[1],
leftIris_rightside[0] - nose[0],
leftIris_bottom[1] - nose[1],
rightIris_leftside[0] - nose[0],
rightIris_top[1] - nose[1],
rightIris_rightside[0] - nose[0],
rightIris_bottom[1] - nose[1]
])
newRow = newRow.to_frame().T
pupilLocation = pd.concat([pupilLocation, newRow], axis=0, ignore_index=True)
#print("Inside pupil Location = ", pupilLocation)
#filename = directoy_name + 'Analysis/' + name[0:-4] + '-analysis.jpg'
#cv2.imwrite(filename, annotated_image)
                    # Crop a band around both eyes; clamp to the frame so the
                    # slice indices cannot go negative and wrap around.
                    left = max(leftIris_leftside[0] - 150, 0)
                    right = min(rightIris_rightside[0] + 150 + 150, width)
                    up = max(leftIris_top[1] - 50, 0)
                    down = min(leftIris_bottom[1] + 50, height)
                    boundryIncrease = 10
print("leftside = ", leftIris_leftside[0])
print("rightside =", leftIris_rightside[0])
#print("left = ", left)
#print("right = ", right)
'''
if (id == 1): # Looking Left so we want the right limbus
print("Id == 1")
annotated_image = annotated_image[
leftIris_top[1] : leftIris_bottom[1],
leftIris_rightside[0] - boundryIncrease : leftIris_rightside[0] + boundryIncrease
]
annotated_image = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2GRAY)
#annotated_image = cv2.GaussianBlur(annotated_image, (3, 3), sigmaX=0, sigmaY=0)
annotated_image = cv2.Canny(annotated_image, 50, 125)
#annotated_image = cv2.Sobel(src=annotated_image, ddepth=cv2.CV_64F, dx=0, dy=1,
# ksize=5) # Sobel Edge Detection on the Y axis
contours = cv2.findContours(image, cv2.RETR_LIST, cv2.RETR_EXTERNAL)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
for i in range(len(contours)):
contour = contours[i]
random.seed(i)
color = (255 * random.random(), 255 * random.random(), 255 * random.random())
cv2.drawContours(img, [contour], -1, color, 3)
elif (id == 2): # Looking Right so we want the left limbus
annotated_image = annotated_image[
leftIris_top[1]: leftIris_bottom[1],
leftIris_leftside[0] - boundryIncrease: leftIris_leftside[0] + boundryIncrease
]
annotated_image = cv2.cvtColor(annotated_image, cv2.COLOR_BGR2GRAY)
# annotated_image = cv2.GaussianBlur(annotated_image, (3, 3), sigmaX=0, sigmaY=0)
annotated_image = cv2.Canny(annotated_image, 50, 125)
else:
'''
annotated_image = annotated_image[up:down, left:right]
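                    # Slope of the line joining the two iris centres (nose-relative
                    # coordinates): a quick printout of how level the eyes are.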
x1 = (leftIris_leftside[0] - nose[0] + leftIris_rightside[0] - nose[0]) / 2
y1 = (leftIris_top[1] - nose[1] + leftIris_bottom[1] - nose[1]) / 2
x2 = (rightIris_leftside[0] - nose[0] + rightIris_rightside[0] - nose[0]) / 2
y2 = (rightIris_top[1] - nose[1] + rightIris_bottom[1] - nose[1]) / 2
print("Slope=", (y2 - y1) / (x2 - x1))
text = "Slope=" + str(round((y2 - y1) / (x2 - x1), 2))
#cv2.putText(annotated_image, text,
# (5, 110), cv2.FONT_HERSHEY_SIMPLEX,
# 1, (255, 255, 0), 1, cv2.LINE_AA)
print("left iris size in pixels = ", abs(leftIris_leftside[0] - leftIris_rightside[0]))
print("Right iris size in pixels = ", abs(rightIris_leftside[0] - rightIris_rightside[0]))
pupil_sizes.append(abs(leftIris_leftside[0] - leftIris_rightside[0]))
pupil_sizes.append(abs(rightIris_leftside[0] - rightIris_rightside[0]))
output_images.append(annotated_image)
# calculate final results from pupilLocations
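    # pupilDiff: per-image pixel offsets of every stored coordinate from the
    # reference (first) image; printed for inspection only.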
pupilDiff = pupilLocation.copy()
pupilDiff = pupilDiff.drop(pupilDiff.columns[0], axis=1) # Remove file name
for i in range(pupilDiff.shape[0] - 1): # Calculate deltas
pupilDiff.loc[i + 1] = abs(pupilDiff.loc[i + 1] - pupilDiff.loc[0])
print("pupilDiff=", pupilDiff)
    pupilDiff = pupilDiff.drop(0, axis=0)  # Remove the first row, which was used as the reference row
#print("pupilDiff (in pixels)=", pupilDiff)
    # Average iris width in pixels, after dropping the single largest and
    # smallest measurements as outliers.
    pupil_sizes.remove(max(pupil_sizes))
    pupil_sizes.remove(min(pupil_sizes))
    pupil_average = mean(pupil_sizes)  # the adult iris is roughly 11.7 mm across
    pixels = 11.7 / pupil_average  # mm-per-pixel scale factor
    print("pixels (In MM) = ", pixels)
# Left Eye movement
movementLeft.iloc[0, 0] = ' '
movementLeft.iloc[0, 2] = ' '
movementLeft.iloc[1, 1] = 0 # reference point
movementLeft.iloc[2, 0] = ' '
movementLeft.iloc[2, 2] = ' '
# Y movement only
movementLeft.iloc[0, 1] = round(abs(pupilLocation.iloc[0, 4] - pupilLocation.iloc[1, 4]) * pixels, 0) # Up
movementLeft.iloc[2, 1] = round(abs(pupilLocation.iloc[0, 2] - pupilLocation.iloc[3, 2]) * pixels, 0) # Down
# X movement only
movementLeft.iloc[1, 0] = round(abs(pupilLocation.iloc[0, 3] - pupilLocation.iloc[1, 3]) * pixels, 1) # Left
movementLeft.iloc[1, 2] = round(abs(pupilLocation.iloc[0, 1] - pupilLocation.iloc[2, 1]) * pixels, 1) # Right
# Right Eye Movement
movementRight.iloc[0, 0] = ' '
movementRight.iloc[0, 2] = ' '
movementRight.iloc[1, 1] = 0 # reference point
movementRight.iloc[2, 0] = ' '
movementRight.iloc[2, 2] = ' '
# Y movement only
movementRight.iloc[0, 1] = round(abs(pupilLocation.iloc[0, 8] - pupilLocation.iloc[1, 8]) * pixels, 0) # Up
movementRight.iloc[2, 1] = round(abs(pupilLocation.iloc[0, 6] - pupilLocation.iloc[3, 6]) * pixels, 0) # Down
# X movement only
movementRight.iloc[1, 0] = round(abs(pupilLocation.iloc[0, 7] - pupilLocation.iloc[1, 7]) * pixels, 0) # Left
movementRight.iloc[1, 2] = round(abs(pupilLocation.iloc[0, 5] - pupilLocation.iloc[2, 5]) * pixels, 0) # Right
return output_images[0], output_images[1], output_images[2], output_images[3], output_images[4], pupilLocation, movementLeft, movementRight
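# Gradio UI: five webcam capture rows (Front, Left, Right, Up, Down), each paired
# with its annotated output, followed by the raw pupil locations and the per-eye
# movement tables (in mm).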
with gr.Blocks() as demo:
gr.Markdown(
"""
# Range of Motion Image Analysis
    Take 5 pictures below, looking straight ahead, left, right, up & down.
""")
with gr.Row():
with gr.Column(scale=1):
img1 = gr.Image(shape=(1000, 1000), source='webcam', label='Front')
with gr.Column(scale=1):
out1 = gr.Image(label='Out-Front')
with gr.Row():
with gr.Column(scale=1):
img2 = gr.Image(shape=(1000, 1000), source='webcam', label='Left')
with gr.Column(scale=1):
out2 = gr.Image(label='Out-Left')
with gr.Row():
with gr.Column(scale=1):
img3 = gr.Image(shape=(1000, 1000), source='webcam', label='Right')
with gr.Column(scale=1):
out3 = gr.Image(label='Out-Right')
with gr.Row():
with gr.Column(scale=1):
img4 = gr.Image(shape=(1000, 1000), source='webcam', label='Up')
with gr.Column(scale=1):
out4 = gr.Image(label='Out-Up')
with gr.Row():
with gr.Column(scale=1):
img5 = gr.Image(shape=(1000, 1000), source='webcam', label='Down')
with gr.Column(scale=1):
            out5 = gr.Image(label='Out-Down')
b = gr.Button("Go!")
gr.Markdown(
"""
Pupil Locations:
""")
pupilData = gr.Dataframe(pupilLocation)
gr.Markdown(
"""
# Left eye results (in mm):
""")
movementDataLeft = gr.Dataframe(movementLeft)
gr.Markdown(
"""
# Right eye results (in mm):
""")
movementDataRight = gr.Dataframe(movementRight)
inp = [img1, img2, img3, img4, img5]
out = [out1, out2, out3, out4, out5, pupilData, movementDataLeft, movementDataRight]
b.click(fn=findIris, inputs=inp, outputs=out)
demo.launch(auth=("Andrew", "Andrew")) #, share=True