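# Image Anonymizer: a Gradio app that blurs people in webcam shots or uploaded images.
# Persons are located with OneFormer semantic segmentation (Cityscapes), turned into a
# binary mask, and hidden via a masked Gaussian-blur composite.
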
import gradio as gr
import numpy as np
from PIL import Image, ImageDraw, ImageFilter, ImageOps
from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation, pipeline
import torchvision.transforms
import torch

# pipeline = pipeline(task="image-segmentation", model="shi-labs/oneformer_cityscapes_swin_large", label_ids_to_fuse=[11])

# OneFormer trained on Cityscapes; semantic class id 11 corresponds to "person".
person_processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_cityscapes_swin_large")
person_model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_cityscapes_swin_large")

# Converts the (H, W) uint8 class-id tensor into a single-channel PIL image.
transform = torchvision.transforms.ToPILImage()

def detect_person(image: Image.Image):
    """Return a greyscale mask that is black (0) where a person is detected and white (255) elsewhere."""
    semantic_inputs = person_processor(images=image, task_inputs=["semantic"], return_tensors="pt")
    semantic_outputs = person_model(**semantic_inputs)
    # target_sizes expects (height, width); PIL's image.size is (width, height), hence the reversal.
    predicted_semantic_map = person_processor.post_process_semantic_segmentation(semantic_outputs, target_sizes=[image.size[::-1]])[0]
    mask = transform(predicted_semantic_map.to(torch.uint8))
    # Cityscapes class 11 is "person": paint those pixels black, everything else white.
    mask = Image.eval(mask, lambda x: 0 if x == 11 else 255)
    return mask

# def detect_person_pipeline(image: Image.Image):
#     results = pipeline(image, label_ids_to_fuse=[11])
#     for detection in results:
#         if detection["label"] == "person":
#             return ImageOps.invert(detection["mask"])
#     return Image.new(mode="L", size=image.size, color=255)

def detect_dummy(image: Image.Image):
    # Fallback detector: an all-white mask, i.e. nothing gets blurred.
    return Image.new(mode="L", size=image.size, color=255)

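# Hypothetical box-based detector for the commented-out "License Plate" entry in `detectors`
# below: a sketch under the assumption of a Hugging Face object-detection pipeline whose
# results carry a "box" dict with xmin/ymin/xmax/ymax. No concrete checkpoint is named in
# this app, so the pipeline is passed in rather than hard-coded and nothing is wired up yet.
def detect_boxes(image: Image.Image, box_detector):
    mask = Image.new(mode="L", size=image.size, color=255)
    draw = ImageDraw.Draw(mask)
    for detection in box_detector(image):
        box = detection["box"]
        # Paint the detected box black so it gets blurred by the composite step.
        draw.rectangle((box["xmin"], box["ymin"], box["xmax"], box["ymax"]), fill=0)
    return mask
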
detectors = {
    "Person": detect_person,
    # "License Plate": detect_license_plate
}

def anonymize(image: Image.Image):
    # Run every registered detector and combine the masks: a pixel stays black if any detector flags it.
    masks = [detect(image) for detect in detectors.values()]
    combined = np.minimum.reduce([np.array(m) for m in masks])
    mask = Image.fromarray(combined)
    # Blur the whole image, then keep the original wherever the mask is white (255) and the
    # blurred version wherever it is black (the detected regions).
    blurred = image.filter(ImageFilter.GaussianBlur(15))
    anonymized = Image.composite(image, blurred, mask)
    return anonymized

demo_live = gr.Interface(
    fn=anonymize,
    inputs=gr.Image(source="webcam", type="pil", shape=(640, 480)),
    outputs=gr.Image(type="pil")
)

demo_upload = gr.Interface(
    fn=anonymize,
    inputs=gr.Image(type="pil", shape=(640, 480)),
    outputs=gr.Image(type="pil")
)

demo = gr.TabbedInterface(
    interface_list=[demo_live, demo_upload],
    tab_names=["Webcam", "Upload Image"],
    title="Image Anonymizer"
)

if __name__ == "__main__":
    demo.launch(server_name="localhost", server_port=8080)