"""HideMe: blur one person's face in a group photo.

Reference selfies are matched against every face detected in the group
photo (MTCNN detection + InceptionResnetV1/VGGFace2 embeddings, cosine
similarity); the best-matching face, if similar enough, is blurred.
"""
import gradio as gr
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf  # required at runtime by the `mtcnn` package
from sklearn.metrics.pairwise import cosine_similarity
import matplotlib.patches as patches
from facenet_pytorch import InceptionResnetV1, MTCNN
import mtcnn
import torch
import shutil
from PIL import Image
import ssl

# Model weights are fetched over HTTPS on first use; certificate
# verification is disabled so the download works on hosts without a CA
# bundle. NOTE(review): this weakens transport security — demo only.
ssl._create_default_https_context = ssl._create_unverified_context

# Run relative to this file so the "reference"/"testing" folders resolve.
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)

# Lazily-created singletons: loading the embedder / detector once instead
# of once per face crop is a large speedup (the original re-downloaded /
# re-built the models on every call).
_resnet = None
_detector = None


def _get_resnet():
    """Return the shared InceptionResnetV1 embedder, created on first use."""
    global _resnet
    if _resnet is None:
        _resnet = InceptionResnetV1(pretrained='vggface2').eval()
    return _resnet


def _get_detector():
    """Return the shared MTCNN face detector, created on first use."""
    global _detector
    if _detector is None:
        _detector = mtcnn.MTCNN()
    return _detector


def save_uploaded_files(uploaded_file_paths, folder):
    """Move uploaded files into `folder`, replacing any previous contents.

    Args:
        uploaded_file_paths: iterable of temp-file paths from Gradio.
        folder: destination directory (recreated empty each call).
    """
    shutil.rmtree(folder, ignore_errors=True)
    os.makedirs(folder)
    for uploaded_file_path in uploaded_file_paths:
        shutil.move(uploaded_file_path,
                    os.path.join(folder, os.path.basename(uploaded_file_path)))


def face_detection(img, threshold=0.9, return_coords=True):
    """Detect faces in an image array with MTCNN.

    Args:
        img: HxWx3 uint8 image. MTCNN is trained on RGB input, so callers
            should pass RGB (convert cv2 BGR reads first).
        threshold: minimum detector confidence to keep a detection.
        return_coords: when True, also return the (x, y, w, h) boxes.

    Returns:
        list of face crops, or (crops, coords) when return_coords is True.
    """
    faces = _get_detector().detect_faces(img)
    face_list = []
    face_coords = []
    for face in faces:
        if face['confidence'] < threshold:
            continue
        x, y, width, height = face['box']
        face_coords.append((x, y, width, height))
        # Draw the detection box on the current matplotlib axes (debug aid;
        # nothing is shown unless the caller renders the figure).
        rect = patches.Rectangle((x, y), width, height, linewidth=2,
                                 edgecolor='orange', facecolor='none')
        plt.gca().add_patch(rect)
        face_list.append(img[y:y + height, x:x + width])
    if return_coords:
        return face_list, face_coords
    return face_list


def generate_combined_reference():
    """Tile every image in ./reference into one row: combined_reference.jpg.

    Two passes: the target cell size must be known before any resizing.
    (The original resized inside the same loop that computed the running
    maximum, so early images ended up smaller than the final cell and the
    tiled image had gaps.)

    Returns:
        A short status string.
    """
    image_paths = [os.path.join("reference", name)
                   for name in os.listdir("reference")]

    # Pass 1: find the largest width/height across all references.
    max_width, max_height = 0, 0
    images = []
    for path in image_paths:
        image = Image.open(path)
        images.append(image)
        width, height = image.size
        max_width = max(max_width, width)
        max_height = max(max_height, height)

    # Pass 2: resize everything to that final cell size and paste in a row.
    combined_image = Image.new("RGB", (max_width * len(images), max_height))
    for i, image in enumerate(images):
        combined_image.paste(image.resize((max_width, max_height)),
                             (i * max_width, 0))
    combined_image.save("combined_reference.jpg")
    return "Created combined reference image."


def img_to_encoding(img):
    """Embed a face crop into a flat embedding vector.

    Args:
        img: HxWx3 uint8 RGB face crop.

    Returns:
        1-D numpy float array (InceptionResnetV1 embedding).
    """
    img = cv2.resize(img, (160, 160))
    img = np.expand_dims(img, axis=0) / 255.0  # normalize pixels to [0, 1]
    # NHWC -> NCHW float tensor, the layout PyTorch convolutions expect.
    tensor = torch.tensor(img.transpose(0, 3, 1, 2), dtype=torch.float32)
    encoding = _get_resnet()(tensor)
    return encoding.flatten().detach().numpy()


def process_image():
    """Blur the reference person's face in the single photo in ./testing.

    Returns:
        An RGB numpy image with the matched face blurred (or untouched when
        no face is similar enough), or an error string when the testing
        folder does not contain exactly one readable image.
    """
    group_photo_path = "testing/"
    test_images = os.listdir(group_photo_path)
    if len(test_images) > 1:
        return "Can only process one image at a time."
    group_photo_bgr = None
    if test_images:
        group_photo_bgr = cv2.imread(group_photo_path + test_images[0])
    if group_photo_bgr is None:
        return "No image found in testing folder."

    # cv2 loads BGR; MTCNN and the embedder expect RGB, and the reference
    # faces are read via plt.imread (RGB) — convert so both sides match.
    group_photo = cv2.cvtColor(group_photo_bgr, cv2.COLOR_BGR2RGB)

    # Face detection on the group photo and the tiled reference image.
    group_photo_faces, group_photo_face_coords = face_detection(group_photo)
    generate_combined_reference()
    reference_photo = plt.imread("combined_reference.jpg")
    reference_faces = face_detection(reference_photo, threshold=0.9,
                                     return_coords=False)

    # Embed every face and score each group face against all references.
    ref_encodings = [img_to_encoding(face) for face in reference_faces]
    face_encodings = [img_to_encoding(face) for face in group_photo_faces]
    similarities = cosine_similarity(ref_encodings, face_encodings)
    average_similarities = np.mean(similarities, axis=0)

    # Blur the best match — but only when it is actually similar enough.
    # (Bug fix: the original compared the argmax *index* against 0.3, so
    # face 0 was never blurred and any other face always was.)
    best_idx = int(np.argmax(average_similarities))
    img_obstruct = group_photo_bgr
    if average_similarities[best_idx] >= 0.3:
        x, y, w, h = group_photo_face_coords[best_idx]
        img_obstruct[y:y + h, x:x + w] = cv2.blur(
            img_obstruct[y:y + h, x:x + w], (50, 50))
    img_obstruct = cv2.cvtColor(img_obstruct, cv2.COLOR_BGR2RGB)

    # Delete the uploads once processing is done.
    shutil.rmtree('reference', ignore_errors=True)
    shutil.rmtree('testing', ignore_errors=True)

    return img_obstruct


def interface_fn(reference_images, group_photo):
    """Gradio entry point: stage the uploads, then run the pipeline."""
    save_uploaded_files(reference_images, 'reference')
    save_uploaded_files(group_photo, 'testing')
    return process_image()


iface = gr.Interface(
    fn=interface_fn,
    inputs=[
        gr.File(file_types=["image"], file_count="multiple"),
        gr.File(file_types=["image"], file_count="multiple")
    ],
    outputs=gr.Image(),
    title="HideMe: MTCNN and VGGFace2-Powered Facial Anonymization in Shared Imagery",
    description="Secure Your Anonymity with Ease! Built for TSDN 2023, this tool offers smart, automated facial blurring for immediate privacy in shared images. Kindly read the **instructions** below the input boxes to understand how the machine works.",
    article="""  **Step 1**: Upload Self Photos for Reference
Provide clear, front-facing selfies as reference images — the more, the better. Including photos from different angles improves accuracy.
**Step 2**: Add a Group Photo
Upload the group photo where you wish to anonymize your face.
**Step 3**: Receive Your Anonymized Image
Our system will process the images and return the group photo with your face skillfully blurred, ensuring your privacy is maintained.  """
)

if __name__ == "__main__":
    iface.launch()