import gradio as gr
from ultralytics import YOLO
import numpy as np
from PIL import Image, ImageDraw, ImageFilter, ImageOps
import torchvision.transforms
import torch
transform = torchvision.transforms.ToPILImage()
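# YOLOv8 models: a COCO-pretrained segmentation model for people/vehicles
# and a separate detection model for license plates.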
seg_model = YOLO("yolov8m-seg.pt")
lp_model = YOLO("yolov8m_lp.pt")
def detect(image):
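    # Run the segmentation model on the full image (CPU inference).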
    seg_result = seg_model(image, device="cpu")[0]
    seg_masks = seg_result.masks.data
    seg_clss = seg_result.boxes.cls
    seg_boxes = seg_result.boxes.data
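    # Combine all person masks (class 0) into one inverted mask:
    # 0 where a person was detected, 255 everywhere else, resized to the input size.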
    person_indices = torch.where(seg_clss == 0)
    person_masks = seg_masks[person_indices]
    people_mask = torch.any(person_masks, dim=0).to(torch.uint8) * 255
    people_mask = transform(~people_mask)
    people_mask = people_mask.resize((image.width, image.height), resample=Image.Resampling.BILINEAR)
    vehicle_classes = [2, 3, 5, 7]  # Classes: car (2), motorcycle (3), bus (5) and truck (7)
    license_plates = list()
    vehicle_boxes = []
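    # For every detected vehicle, crop it from the image and run the
    # license plate detector on the crop.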
    for seg_box in seg_boxes:
        if seg_box[5] in vehicle_classes:
            vehicle_box = seg_box[:4].to(torch.int32)
            vehicle_boxes.append(vehicle_box)
            vehicle_crop = image.crop(vehicle_box.tolist())
            imgsz = (vehicle_crop.height, vehicle_crop.width) if vehicle_crop.width < 640 and vehicle_crop.height < 640 else (640, 640)
            lp_result = lp_model(vehicle_crop, imgsz=imgsz, device="cpu")[0]
            lp_boxes = lp_result.boxes.data[:, :4]
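            # Translate license plate boxes from crop coordinates back to
            # full-image coordinates by adding the crop's top-left corner.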
            vehicle_offset = torch.cat((vehicle_box[:2], vehicle_box[:2]))
            for lp_box in lp_boxes:
                license_plates.append(torch.add(lp_box, vehicle_offset))
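    # Draw the detected license plates as filled black rectangles on a white mask.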
    lp_mask = Image.new(mode="L", size=image.size, color=255)
    lp_draw = ImageDraw.Draw(lp_mask)
    for license_plate in license_plates:
        lp_draw.rectangle(license_plate.tolist(), fill=0)
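    # Draw the vehicle bounding boxes as outlines; this mask is only used
    # for the annotation overlay, not for blurring.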
    vehicle_mask = Image.new(mode="L", size=image.size, color=255)
    vehicle_draw = ImageDraw.Draw(vehicle_mask)
    for vehicle_box in vehicle_boxes:
        vehicle_draw.rectangle(vehicle_box.tolist(), outline=0, width=5)
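    # Combined mask: pixel-wise minimum of the person and license plate masks,
    # i.e. black wherever something should be anonymized.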
    # TODO: move combination to caller function
    combined_mask = Image.fromarray(np.minimum.reduce([np.array(m) for m in [people_mask, lp_mask]]))
    return combined_mask, people_mask, lp_mask, vehicle_mask
def test_comb(image):
    mask, people_mask, lp_mask, vm = detect(image)
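    # Blur the whole image, then composite: keep the original where the mask
    # is white (255) and use the blurred version where it is black (0).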
    blurred = image.filter(ImageFilter.GaussianBlur(30))
    anonymized = Image.composite(image, blurred, mask)
    # TODO: use a tempfile instead of a generic file
    anonymized.save("anon.JPG")
    annotation_list = [
        (1 - np.asarray(people_mask) / 255, "Person"),
        (1 - np.asarray(vm) / 255, "Vehicle"),
        (1 - np.asarray(lp_mask) / 255, "License plate"),
    ]
    return "anon.JPG", (image, annotation_list)
css = """
P { text-align: center }
H3 { text-align: center }
"""
description = """
### ML prototype for anonymizing images
People and license plates are redacted.
Large images may take some time to process.
"""
article = """
Uses YOLOv8 models for detection / segmentation of the images.
Code: https://huggingface.co/spaces/it-at-m/image-anonymizer/tree/main
A prototype by the it@M InnovationLab (itm.innolab@muenchen.de)
"""
demo = gr.Interface(
    fn=test_comb,
    inputs=gr.Image(type="pil", label="Image to be anonymized"),
    outputs=[gr.Image(label="Anonymized image"), gr.AnnotatedImage(label="Detected regions")],
    title="Select / upload an image",
    allow_flagging="never",
    examples="examples",
    description=description,
    article=article,
    css=css,
    theme=gr.themes.Soft()
)
demo.launch()