import gradio as gr
from ultralytics import YOLO
import numpy as np
from PIL import Image, ImageDraw, ImageFilter, ImageOps
import torchvision.transforms
import torch
transform = torchvision.transforms.ToPILImage()
seg_model = YOLO("yolov8m-seg.pt")
lp_model = YOLO("yolov8m_lp.pt")
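# "yolov8m-seg.pt" is the standard Ultralytics COCO segmentation checkpoint
# (class 0 = person); "yolov8m_lp.pt" is presumably a custom-trained license
# plate detector bundled with this Space.
# Mask convention for all detectors below: mode "L", 255 = keep, 0 = anonymize.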
def detect_person(image: Image.Image):
    # Segment the image with the COCO model and keep only class 0 ("person").
    result = seg_model(image, device="cpu")[0]
    if result.masks is None:
        # Nothing segmented: return an all-white mask so nothing gets blurred.
        return Image.new(mode="L", size=image.size, color=255)
    masks = result.masks.data
    clss = result.boxes.cls
    person_indices = torch.where(clss == 0)
    person_masks = masks[person_indices]
    # Merge all person masks into one uint8 mask (255 = person), then invert it:
    # 0 marks regions to anonymize, 255 marks regions to keep.
    people_mask = torch.any(person_masks, dim=0).to(torch.uint8) * 255
    mask = transform(~people_mask)
    # The model predicts on a resized input, so scale the mask back up.
    mask = mask.resize((image.width, image.height), resample=Image.Resampling.BILINEAR)
    return mask
def detect_license_plate(image: Image.Image):
    # Detect license plates at the image's native resolution.
    result = lp_model(image, imgsz=(image.height, image.width), device="cpu")[0]
    boxes = result.boxes.data[:, :4]
    # Start from an all-white mask (255 = keep) and black out every detected box.
    mask = Image.new(mode="L", size=image.size, color=255)
    draw = ImageDraw.Draw(mask)
    for box in boxes:
        draw.rectangle(box.tolist(), fill=0)
    return mask
def detect_dummy(image: Image.Image):
    # No-op detector: an all-white mask leaves the image untouched.
    return Image.new(mode="L", size=image.size, color=255)
detectors = {
    "Person": detect_person,
    "License Plate": detect_license_plate
}
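# Maps UI labels to detector functions. Note that test_gradio below currently
# calls both detectors directly instead of looking them up here.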
def test_gradio(image):
    # Run both detectors and combine their masks: a pixel is blurred
    # as soon as any detector marked it (minimum over the "L" masks).
    masks = [detect_person(image), detect_license_plate(image)]
    combined = np.minimum.reduce([np.array(m) for m in masks])
    mask = Image.fromarray(combined)
    # Apply blur through mask
    blurred = image.filter(ImageFilter.GaussianBlur(30))
    anonymized = Image.composite(image, blurred, mask)
    ## TODO: use a temporary file instead of a generic file name
    anonymized.save("anon.JPG")
    return "anon.JPG"
# demo_live = gr.Interface(
#     fn=test_gradio,
#     inputs=gr.Image(source="webcam", type="pil", shape=(640, 480)),
#     outputs=gr.Image(type="pil")
# )
demo_upload = gr.Interface(
    fn=test_gradio,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image()
)
# demo = gr.TabbedInterface(
#     interface_list=[demo_live, demo_upload],
#     tab_names=["Webcam", "Upload image"],
#     title="Image Anonymizer"
# )
# print(__name__)
# demo_upload.launch(server_name="localhost", server_port=8080)
# demo.launch(server_name="localhost", server_port=8080)
demo_upload.launch()
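# launch() serves the app locally (Gradio's default is http://127.0.0.1:7860).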