File size: 2,802 Bytes
83b4101
cdcfead
0c5cb38
7c6362c
8529105
 
83b4101
8529105
cdcfead
 
83b4101
e76c1b2
 
 
 
 
 
 
 
 
cdcfead
e76c1b2
 
 
 
 
 
 
 
 
 
9af0a64
 
e76c1b2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cdcfead
0c5cb38
cdcfead
 
 
0c5cb38
e76c1b2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7c6362c
 
e76c1b2
 
cdcfead
e76c1b2
 
 
 
 
 
7c6362c
 
85e1065
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
import gradio as gr
from ultralytics import YOLO
import numpy as np
from PIL import Image, ImageDraw, ImageFilter, ImageOps
import torchvision.transforms
import torch

# Converts a torch tensor (e.g. a uint8 mask) into a PIL image.
transform = torchvision.transforms.ToPILImage()
# COCO-pretrained segmentation model: finds persons and vehicles.
seg_model = YOLO("yolov8m-seg.pt")
# Custom detection model: finds license plates inside vehicle crops.
lp_model = YOLO("yolov8m_lp.pt")


def detect(image):
    """Detect persons and license plates in *image*.

    Args:
        image: PIL image to analyze.

    Returns:
        A PIL "L" (grayscale) mask of ``image.size`` where regions to be
        anonymized (persons, license plates) are 0 and everything else is 255.
    """
    seg_result = seg_model(image, device="cpu")[0]  # "cpu" to match the lp_model call below

    # --- person mask: 0 where a person was segmented, 255 elsewhere ---
    # Default to an all-255 ("keep everything") mask; the original crashed
    # with AttributeError when the model detected nothing (masks is None).
    people_mask = Image.new(mode="L", size=image.size, color=255)
    if seg_result.masks is not None:
        seg_masks = seg_result.masks.data
        seg_clss = seg_result.boxes.cls
        person_indices = torch.where(seg_clss == 0)  # COCO class 0 == "person"
        person_masks = seg_masks[person_indices]
        if person_masks.shape[0] > 0:
            merged = torch.any(person_masks, dim=0).to(torch.uint8) * 255
            # Bitwise NOT flips 0 <-> 255 so persons become the dark (censored) region.
            people_mask = transform(~merged)
            # Masks come out at the model's inference resolution; scale back to the image.
            people_mask = people_mask.resize((image.width, image.height), resample=Image.Resampling.BILINEAR)

    # --- license-plate mask: run the plate detector on each vehicle crop ---
    vehicle_classes = [2, 3, 5, 7]  # COCO: car, motorcycle, bus, truck
    license_plates = []

    seg_boxes = seg_result.boxes.data if seg_result.boxes is not None else []
    for seg_box in seg_boxes:
        if seg_box[5] in vehicle_classes:  # column 5 is the class id
            vehicle_box = seg_box[:4].to(torch.int32)
            vehicle_crop = image.crop(vehicle_box.tolist())
            # Small crops are processed at native resolution, larger ones at 640x640.
            imgsz = (
                (vehicle_crop.height, vehicle_crop.width)
                if vehicle_crop.width < 640 and vehicle_crop.height < 640
                else (640, 640)
            )
            lp_result = lp_model(vehicle_crop, imgsz=imgsz, device="cpu")[0]
            lp_boxes = lp_result.boxes.data[:, :4]
            # Plate boxes are relative to the crop; shift back into full-image coordinates.
            vehicle_offset = torch.cat((vehicle_box[:2], vehicle_box[:2]))
            for lp_box in lp_boxes:
                license_plates.append(torch.add(lp_box, vehicle_offset))

    lp_mask = Image.new(mode="L", size=image.size, color=255)
    draw = ImageDraw.Draw(lp_mask)

    for license_plate in license_plates:
        draw.rectangle(license_plate.tolist(), fill=0)

    # Pixel-wise minimum: 0 (censor) wins wherever either mask marks a region.
    combined_mask = Image.fromarray(np.minimum(np.array(people_mask), np.array(lp_mask)))
    return combined_mask


def test_comb(image):
    """Anonymize *image* and return the path of the saved result.

    Persons and license plates (as found by :func:`detect`) are replaced by a
    heavily blurred version of the image; everything else stays sharp.

    Args:
        image: PIL image to anonymize.

    Returns:
        Filesystem path of the anonymized JPEG.
    """
    import tempfile

    mask = detect(image)
    blurred = image.filter(ImageFilter.GaussianBlur(30))
    # Composite: mask value 255 keeps the original pixel, 0 takes the blurred one.
    anonymized = Image.composite(image, blurred, mask)
    # Unique temp file per request — the previous fixed "anon.JPG" name was
    # clobbered when several users uploaded images concurrently.
    with tempfile.NamedTemporaryFile(suffix=".JPG", delete=False) as tmp:
        anonymized.save(tmp, format="JPEG")
        return tmp.name


# Center the description/article text in the Gradio page.
css = """
P { text-align: center }
H3 { text-align: center }
"""

# User-facing description (German), shown above the interface.
description = """
### ML-Prototyp zur Anonymisierung von Bildern
Es werden Personen sowie Kennzeichen zensiert.
Große Bilder können einige Zeit benötigen.
"""

# User-facing footer (German), shown below the interface.
article = """
Nutzt YOLOv8-Modelle zur Erkennung / Segmentierung der Bilder.

Code: https://huggingface.co/spaces/it-at-m/image-anonymizer/tree/main

Ein Prototyp des it@M InnovationLab (itm.innolab@muenchen.de)
"""

# Simple upload UI: one PIL image in, the anonymized image (as a file path) out.
demo_upload = gr.Interface(
    title="Image Anonymizer",
    fn=test_comb, 
    inputs=gr.Image(type="pil"), 
    outputs=gr.Image(),
    allow_flagging="never",
    examples="examples",  # example images served from the local "examples" directory
    description=description,
    article=article,
    css=css
)

demo_upload.launch()