Spaces:
Running
Running
File size: 3,997 Bytes
83b4101 cdcfead 0c5cb38 7c6362c 8529105 83b4101 8529105 cdcfead 83b4101 e76c1b2 cdcfead e76c1b2 3c61106 e76c1b2 3c61106 e76c1b2 3c61106 e76c1b2 9af0a64 e76c1b2 3c61106 e76c1b2 3c61106 8652b05 3c61106 d09e0d9 8652b05 e76c1b2 3c61106 e76c1b2 d09e0d9 e76c1b2 d09e0d9 cdcfead 0c5cb38 cdcfead d09e0d9 0c5cb38 e76c1b2 7c6362c 0a514d8 e76c1b2 3c61106 d09e0d9 f6517b9 e76c1b2 0a514d8 07f158a f6517b9 0a514d8 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 |
import tempfile

import gradio as gr
import numpy as np
import torch
import torchvision.transforms
from PIL import Image, ImageDraw, ImageFilter, ImageOps
from ultralytics import YOLO
# Converts a torch tensor (C, H, W) back into a PIL image (used on the mask tensor).
transform = torchvision.transforms.ToPILImage()
# YOLOv8-medium segmentation weights (COCO classes; class 0 = person, 2/3/5/7 = vehicles).
seg_model = YOLO("yolov8m-seg.pt")
# Custom YOLOv8-medium weights, presumably trained for license-plate detection
# ("lp") -- TODO confirm against the weight file's provenance.
lp_model = YOLO("yolov8m_lp.pt")
def detect(image):
    """Build anonymization masks for people and license plates in *image*.

    Pipeline:
      1. Run the segmentation model (persons + vehicles) on the full image.
      2. Crop each detected vehicle and run the license-plate detector on it.
      3. Rasterize the detections into "L"-mode PIL masks where 255 = keep
         and 0 = region to anonymize.

    Args:
        image: input PIL.Image.

    Returns:
        Tuple of (combined_mask, people_mask, lp_mask, vehicle_mask,
        person_box_mask), all PIL "L" images the size of *image*.
        combined_mask is the pixel-wise minimum of people_mask and lp_mask.
    """
    # "cpu" lowercase for consistency with the lp_model call below
    # (the original passed "CPU" here).
    seg_result = seg_model(image, device="cpu")[0]
    if seg_result.masks is None:
        # No detections at all: .masks is None, so fall back to empty
        # detections instead of crashing with AttributeError.
        seg_boxes = seg_result.boxes.data
        person_indices = (torch.zeros(0, dtype=torch.long),)
        people_mask = Image.new(mode="L", size=image.size, color=255)
    else:
        seg_masks = seg_result.masks.data
        seg_clss = seg_result.boxes.cls
        seg_boxes = seg_result.boxes.data
        person_indices = torch.where(seg_clss == 0)  # COCO class 0 == person
        person_masks = seg_masks[person_indices]
        # Union of all person masks; invert so person pixels become 0 (anonymize).
        people_tensor = torch.any(person_masks, dim=0).to(torch.uint8) * 255
        people_mask = transform(~people_tensor)
        # Model masks come back at inference resolution; scale to the image size.
        people_mask = people_mask.resize((image.width, image.height), resample=Image.Resampling.BILINEAR)
    vehicle_classes = [2, 3, 5, 7]  # COCO: car (2), motorcycle (3), bus (5), truck (7)
    license_plates = []
    vehicle_boxes = []
    for seg_box in seg_boxes:
        if seg_box[5] in vehicle_classes:
            vehicle_box = seg_box[:4].to(torch.int32)
            vehicle_boxes.append(vehicle_box)
            vehicle_crop = image.crop(vehicle_box.tolist())
            # Run the plate detector at the crop's native size when it fits,
            # otherwise at the default 640x640.
            imgsz = (vehicle_crop.height, vehicle_crop.width) if vehicle_crop.width < 640 and vehicle_crop.height < 640 else (640, 640)
            lp_result = lp_model(vehicle_crop, imgsz=imgsz, device="cpu")[0]
            lp_boxes = lp_result.boxes.data[:, :4]
            # Plate boxes are relative to the crop; shift into full-image coords.
            vehicle_offset = torch.cat((vehicle_box[:2], vehicle_box[:2]))
            for lp_box in lp_boxes:
                license_plates.append(torch.add(lp_box, vehicle_offset))
    lp_mask = Image.new(mode="L", size=image.size, color=255)
    lp_draw = ImageDraw.Draw(lp_mask)
    for license_plate in license_plates:
        lp_draw.rectangle(license_plate.tolist(), fill=0)
    vehicle_mask = Image.new(mode="L", size=image.size, color=255)
    vehicle_draw = ImageDraw.Draw(vehicle_mask)
    for vehicle_box in vehicle_boxes:
        vehicle_draw.rectangle(vehicle_box.tolist(), outline=0, width=5)
    person_box_mask = Image.new(mode="L", size=image.size, color=255)
    person_box_draw = ImageDraw.Draw(person_box_mask)
    for person_box in seg_boxes[person_indices][:, :4]:
        person_box_draw.rectangle(person_box.tolist(), outline=0, width=5)
    # TODO: move combination to caller function
    combined_mask = Image.fromarray(np.minimum.reduce([np.array(m) for m in [people_mask, lp_mask]]))
    return combined_mask, people_mask, lp_mask, vehicle_mask, person_box_mask
def test_comb(image):
    """Anonymize *image* and prepare the three Gradio outputs.

    Blurs the whole image, then composites the original back everywhere the
    combined mask is 255 — so only person and license-plate regions remain
    blurred.

    Args:
        image: input PIL.Image.

    Returns:
        (path_to_anonymized_jpeg, (image, box_annotations),
         (image, outline_annotations)) matching the Interface outputs.
    """
    mask, people_mask, lp_mask, vm, pbm = detect(image)
    blurred = image.filter(ImageFilter.GaussianBlur(30))
    # mask == 255 keeps the original pixel, mask == 0 shows the blurred one.
    anonymized = Image.composite(image, blurred, mask)
    # Unique temp file instead of a shared "anon.JPG" so concurrent requests
    # do not overwrite each other's results (resolves the old TODO).
    out = tempfile.NamedTemporaryFile(suffix=".jpg", delete=False)
    anonymized.save(out.name)
    out.close()
    # AnnotatedImage expects per-label masks scaled to [0, 1], 1 = highlighted.
    box_list = [(1 - np.asarray(pbm) / 255, "Person (Rahmen)"), (1 - np.asarray(vm) / 255, "Fahrzeug")]
    anon_list = [(1 - np.asarray(people_mask) / 255, "Person (Umriss)"), (1 - np.asarray(lp_mask) / 255, "Kennzeichen")]
    return out.name, (image, box_list), (image, anon_list)
# Center-align paragraphs and level-3 headings on the Gradio page.
css = """
P { text-align: center }
H3 { text-align: center }
"""
# Markdown shown above the interface (German UI text — do not translate).
description = """
### ML-Prototyp zur Anonymisierung von Bildern
Es werden Personen sowie Kennzeichen zensiert.
Große Bilder können einige Zeit benötigen.
"""
# Markdown shown below the interface (German UI text — do not translate).
article = """
Nutzt YOLOv8-Modelle zur Erkennung / Segmentierung der Bilder.
Code: https://huggingface.co/spaces/it-at-m/image-anonymizer/tree/main
Ein Prototyp des it@M InnovationLab (itm.innolab@muenchen.de)
"""
# Single-function Gradio UI: one PIL image in, three outputs out
# (anonymized file + two annotated overlays), wired to test_comb above.
demo = gr.Interface(
    fn=test_comb,
    inputs=gr.Image(type="pil", label="Zu anonymisierendes Bild"),
    outputs=[gr.Image(label="Anonymisiertes Bild"), gr.AnnotatedImage(label="Detektierte Umrisse"), gr.AnnotatedImage(label="Erkannte Objekte")],
    title="Bild auswählen / hochladen",
    allow_flagging="never",
    examples="examples",  # example images loaded from the "examples" directory
    description=description,
    article=article,
    css=css,
    theme=gr.themes.Soft()
)
demo.launch()
|