import gradio as gr
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import mtcnn
import torch
import shutil
import ssl
from facenet_pytorch import InceptionResnetV1
from sklearn.metrics.pairwise import cosine_similarity
from PIL import Image

# Allow the pretrained weights to download on machines with incomplete
# certificate stores.
ssl._create_default_https_context = ssl._create_unverified_context
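
# Pipeline: MTCNN finds faces, InceptionResnetV1 (VGGFace2 weights) turns each
# crop into an embedding, and cosine similarity against the user's reference
# photos decides which face in the group photo gets blurred.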

# Run relative to this script's directory so the relative folder paths below
# resolve no matter where the app is launched from.
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)


def save_uploaded_files(uploaded_file_paths, folder):
    """Move the uploaded files into `folder`, recreating the folder first."""
    shutil.rmtree(folder, ignore_errors=True)
    os.makedirs(folder)

    for uploaded_file_path in uploaded_file_paths:
        shutil.move(uploaded_file_path, os.path.join(folder, os.path.basename(uploaded_file_path)))


def face_detection(img, threshold=0.9, return_coords=True):
    """Detect faces in an RGB image with MTCNN.

    Returns the cropped faces and, when `return_coords` is True, their
    bounding boxes. Detections below `threshold` confidence are discarded.
    """
    detector = mtcnn.MTCNN()
    faces = detector.detect_faces(img)

    face_list = []
    face_coords = []

    for face in faces:
        if face['confidence'] >= threshold:
            x, y, width, height = face['box']
            face_coords.append((x, y, width, height))

            # Outline the detection on the current matplotlib axes.
            rect = patches.Rectangle((x, y), width, height, linewidth=2, edgecolor='orange', facecolor='none')
            plt.gca().add_patch(rect)

            face_list.append(img[y:y + height, x:x + width])

    if return_coords:
        return face_list, face_coords
    return face_list
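

# Stitching every reference photo into one canvas lets a single detection
# pass recover all of the reference faces at once.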
def generate_combined_reference():
    image_paths = ["reference/" + image for image in os.listdir("reference")]

    # Find the largest width and height across all references first, then
    # resize every image to that size so the strips tile cleanly.
    images = [Image.open(path) for path in image_paths]
    max_width = max(image.size[0] for image in images)
    max_height = max(image.size[1] for image in images)
    images_resized = [image.resize((max_width, max_height)) for image in images]

    combined_image = Image.new("RGB", (max_width * len(images_resized), max_height))
    for i, image in enumerate(images_resized):
        combined_image.paste(image, (i * max_width, 0))

    combined_image.save("combined_reference.jpg")
    return "Created combined reference image."
# Load the FaceNet encoder once; re-creating it for every crop would reload
# the pretrained weights each time.
facenet = InceptionResnetV1(pretrained='vggface2').eval()


def img_to_encoding(img):
    """Embed a face crop as a 512-d vector using FaceNet."""
    img = cv2.resize(img, (160, 160))
    img = np.expand_dims(img, axis=0)
    # facenet_pytorch models expect fixed image standardization,
    # (x - 127.5) / 128, rather than plain scaling to [0, 1].
    img = (img - 127.5) / 128.0
    img = torch.tensor(img.transpose(0, 3, 1, 2), dtype=torch.float32)
    with torch.no_grad():
        encoding = facenet(img)
    return encoding.flatten().numpy()
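

# Matching: every reference embedding is compared with every face detected in
# the group photo; the face with the highest average cosine similarity is
# blurred, provided its score clears the 0.3 threshold.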
def process_image():
    group_photo_path = "testing/"
    group_photo = None

    for image in os.listdir(group_photo_path):
        group_photo = cv2.imread(group_photo_path + image)
        break

    if group_photo is None:
        return "No image found in testing folder."
    elif len(os.listdir(group_photo_path)) > 1:
        return "Can only process one image at a time."

    # cv2.imread returns BGR; convert to RGB so the group-photo crops are
    # encoded in the same channel order as the reference crops loaded with
    # plt.imread.
    group_photo = cv2.cvtColor(group_photo, cv2.COLOR_BGR2RGB)
    group_photo_faces, group_photo_face_coords = face_detection(group_photo)

    generate_combined_reference()
    reference_photo = plt.imread("combined_reference.jpg")
    reference_faces = face_detection(reference_photo, threshold=0.9, return_coords=False)

    ref_encodings = [img_to_encoding(face) for face in reference_faces]
    face_encodings = [img_to_encoding(face) for face in group_photo_faces]

    # similarities[i, j] = cosine similarity between reference i and face j;
    # averaging over the references scores each candidate face.
    similarities = cosine_similarity(ref_encodings, face_encodings)
    average_similarities = np.mean(similarities, axis=0)
    max_avg_similarity_idx = np.argmax(average_similarities)

    (x, y, w, h) = group_photo_face_coords[max_avg_similarity_idx]

    # Re-read the original photo and blur the matched region, but only when
    # the best average similarity is high enough.
    img_obstruct = cv2.imread(group_photo_path + image)
    if average_similarities[max_avg_similarity_idx] >= 0.3:
        img_obstruct[y:y + h, x:x + w] = cv2.blur(img_obstruct[y:y + h, x:x + w], (50, 50))
    img_obstruct = cv2.cvtColor(img_obstruct, cv2.COLOR_BGR2RGB)

    # Clean up the per-request upload folders.
    shutil.rmtree('reference', ignore_errors=True)
    shutil.rmtree('testing', ignore_errors=True)

    return img_obstruct


def interface_fn(reference_images, group_photo):
    save_uploaded_files(reference_images, 'reference')
    save_uploaded_files(group_photo, 'testing')
    return process_image()


iface = gr.Interface(
    fn=interface_fn,
    inputs=[
        gr.File(file_types=["image"], file_count="multiple", label="Reference photos of your face"),
        gr.File(file_types=["image"], file_count="multiple", label="Group photo (upload exactly one)")
    ],
    outputs=gr.Image(),
    title="HideMe: MTCNN and VGGFace2-Powered Facial Anonymization in Shared Imagery",
    description="Secure your anonymity with ease! Built for TSDN 2023, this tool offers smart, automated facial blurring for immediate privacy in shared images. Kindly read the **instructions** below the input boxes to understand how the tool works.",
    article="""
    **Step 1**: Upload Self Photos for Reference <br>
    Provide clear, front-facing selfies as reference images; the more angles you cover, the better the match.
    <br>
    **Step 2**: Add a Group Photo <br>
    Upload the single group photo in which you wish to anonymize your face.
    <br>
    **Step 3**: Receive Your Anonymized Image <br>
    The system processes the images and returns the group photo with your face blurred, keeping your privacy intact.
    """
)
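
# Tip: pass share=True to iface.launch() to expose a temporary public URL.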

if __name__ == "__main__":
    iface.launch()