Spaces:
Running
Running
New YOLOv8 models.
Browse files- app.py +40 -41
- requirements.txt +2 -2
- yolov8m-seg.pt +3 -0
- yolov8m_lp.pt +3 -0
app.py
CHANGED
@@ -1,32 +1,34 @@
|
|
1 |
import gradio as gr
|
|
|
2 |
import numpy as np
|
3 |
from PIL import Image, ImageDraw, ImageFilter, ImageOps
|
4 |
from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation, pipeline
|
5 |
import torchvision.transforms
|
6 |
import torch
|
7 |
|
8 |
-
|
9 |
-
# pipeline = pipeline(task="image-segmentation", model="shi-labs/oneformer_cityscapes_swin_large", label_ids_to_fuse=[11])
|
10 |
-
|
11 |
-
person_processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_cityscapes_swin_large")
|
12 |
-
person_model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_cityscapes_swin_large")
|
13 |
transform = torchvision.transforms.ToPILImage()
|
|
|
|
|
14 |
|
15 |
def detect_person(image: Image):
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
|
|
|
|
|
|
|
|
30 |
|
31 |
|
32 |
def detect_dummy(image: Image):
|
@@ -35,40 +37,37 @@ def detect_dummy(image: Image):
|
|
35 |
|
36 |
detectors = {
|
37 |
"Person": detect_person,
|
38 |
-
|
39 |
}
|
40 |
|
41 |
def test_gradio(image):
|
42 |
-
masks = [detect_person(image)]
|
43 |
combined = np.minimum.reduce([np.array(m) for m in masks])
|
44 |
mask = Image.fromarray(combined)
|
45 |
# Apply blur through mask
|
46 |
-
blurred = image.filter(ImageFilter.GaussianBlur(
|
47 |
anonymized = Image.composite(image, blurred, mask)
|
48 |
-
|
|
|
|
|
49 |
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
)
|
56 |
|
57 |
demo_upload = gr.Interface(
|
58 |
fn=test_gradio,
|
59 |
-
inputs=gr.Image(type="pil"
|
60 |
-
outputs=gr.Image(
|
61 |
-
)
|
62 |
-
|
63 |
-
demo = gr.TabbedInterface(
|
64 |
-
interface_list=[demo_live, demo_upload],
|
65 |
-
tab_names=["Webcam", "Bild hochladen"],
|
66 |
-
title="Image Anonymizer"
|
67 |
)
|
68 |
|
69 |
-
#
|
70 |
-
|
71 |
-
|
|
|
|
|
72 |
|
73 |
print(__name__)
|
74 |
-
|
|
|
import tempfile

import gradio as gr
import numpy as np
import torch
import torchvision.transforms
from PIL import Image, ImageDraw, ImageFilter, ImageOps
from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation, pipeline
from ultralytics import YOLO
8 |
|
|
|
|
|
|
|
|
|
|
|
9 |
transform = torchvision.transforms.ToPILImage()
|
10 |
+
seg_model = YOLO("yolov8m-seg.pt")
|
11 |
+
lp_model = YOLO("yolov8m_lp.pt")
|
12 |
|
13 |
def detect_person(image: Image.Image):
    """Segment people in *image* and return a greyscale keep-mask.

    The returned "L"-mode PIL image has the same size as *image*; pixels
    are 0 where a person was detected and 255 elsewhere, i.e. it is ready
    to be used as the mask of ``Image.composite(original, blurred, mask)``.
    """
    # Lowercase device string, consistent with detect_license_plate below.
    result = seg_model(image, device="cpu")[0]
    if result.masks is None:
        # Nothing segmented at all: keep the whole image (all-white mask).
        return Image.new(mode="L", size=image.size, color=255)
    masks = result.masks.data  # (n, H', W') masks at the model's working resolution
    clss = result.boxes.cls    # one class id per detection; 0 == "person" in COCO
    person_indices = torch.where(clss == 0)
    person_masks = masks[person_indices]
    # Union of all person masks scaled to 0/255; on a uint8 tensor, ~x == 255 - x,
    # so people become black (blurred by the composite) and background white.
    people_mask = torch.any(person_masks.bool(), dim=0).to(torch.uint8) * 255
    mask = transform(~people_mask)
    # The model may have run at a different resolution; scale back to image size.
    mask = mask.resize((image.width, image.height), resample=Image.Resampling.BILINEAR)
    return mask
|
23 |
+
|
24 |
+
def detect_license_plate(image: Image.Image):
    """Detect license plates in *image* and return a greyscale keep-mask.

    Pixels inside any detected plate's bounding box are 0 (to be blurred);
    everything else is 255 — the same mask convention as detect_person().
    """
    result = lp_model(image, imgsz=(image.height, image.width), device="cpu")[0]
    boxes = result.boxes.data[:, :4]  # (n, 4) rows of x1, y1, x2, y2 in image coords
    mask = Image.new(mode="L", size=image.size, color=255)
    draw = ImageDraw.Draw(mask)
    for box in boxes:
        # .tolist() yields plain floats; list(box) would hand PIL 0-dim tensors.
        draw.rectangle(box.tolist(), fill=0)
    return mask
|
32 |
|
33 |
|
34 |
def detect_dummy(image: Image):
|
|
|
37 |
|
38 |
# Registry of anonymization detectors: display name -> callable taking a PIL
# image and returning a keep-mask (255 = keep sharp, 0 = blur).
detectors = {
    "Person": detect_person,
    "License Plate": detect_license_plate
}
|
42 |
|
43 |
def test_gradio(image):
    """Anonymize *image*: blur every detected person and license plate.

    Runs both detectors, combines their keep-masks, blurs the hidden
    regions, and returns the path of a JPEG file with the result (gradio's
    Image output component loads it from disk).
    """
    masks = [detect_person(image), detect_license_plate(image)]
    # Pixel-wise minimum: a pixel stays sharp (255) only if EVERY detector kept it.
    combined = np.minimum.reduce([np.array(m) for m in masks])
    mask = Image.fromarray(combined)
    # Blur the whole frame, then paste the original back where mask == 255.
    blurred = image.filter(ImageFilter.GaussianBlur(30))
    anonymized = Image.composite(image, blurred, mask)
    # Per-call temporary file (resolves the old TODO): a fixed shared filename
    # would let concurrent requests overwrite each other's output.
    with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as out:
        anonymized.save(out, format="JPEG")
    return out.name
|
53 |
|
54 |
+
# demo_live = gr.Interface(
|
55 |
+
# fn=test_gradio,
|
56 |
+
# inputs=gr.Image(source="webcam", type="pil", shape=(640, 480)),
|
57 |
+
# outputs=gr.Image(type="pil")
|
58 |
+
# )
|
|
|
59 |
|
60 |
# Upload tab: the user supplies an image file and gets the anonymized JPEG back.
demo_upload = gr.Interface(
    fn=test_gradio,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image()
)

# NOTE(review): the webcam interface (demo_live) and this TabbedInterface
# wrapper are currently disabled; only the upload interface is served.
# demo = gr.TabbedInterface(
#     interface_list=[demo_live, demo_upload],
#     tab_names=["Webcam", "Bild hochladen"],
#     title="Image Anonymizer"
# )

print(__name__)  # debug output: shows whether the script runs as __main__ or an import
demo_upload.launch(server_name="localhost", server_port=8080)
|
requirements.txt
CHANGED
@@ -1,5 +1,5 @@
|
|
1 |
-
|
2 |
-
|
3 |
Pillow
|
4 |
numpy
|
5 |
torch
|
|
|
gradio
ultralytics
Pillow
numpy
torch
torchvision
transformers
|
yolov8m-seg.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:f9fc740ca0824e14b44681d491dc601efa664ec6ecea9a870acf876053826448
|
3 |
+
size 54899779
|
yolov8m_lp.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:e57cd2006a46470b779990048e82cf1b29b2130d989a1ecdc5c610d48f19c8ca
|
3 |
+
size 52033983
|