freinold committed on
Commit 7c6362c
1 Parent(s): d6054e5

Added tab for upload and resized image to ensure no runtime errors for Out of Memory.
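
The OOM fix relies on the shape parameter of gr.Image (Gradio 3.x), which resizes inputs to 640x480 before they reach the OneFormer model. The same guard can be applied manually with Pillow; below is a minimal sketch of the idea, where the downscale helper and the MAX_SIZE cap are illustrative assumptions, not part of this commit:

from PIL import Image

MAX_SIZE = (640, 480)  # illustrative cap, mirroring the shape used in the diff

def downscale(image: Image.Image) -> Image.Image:
    # thumbnail() shrinks in place while preserving aspect ratio and never
    # upscales, so the segmentation model never sees a full-resolution frame
    image = image.copy()
    image.thumbnail(MAX_SIZE)
    return image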

Files changed (1)
1. app.py +37 -20
app.py CHANGED
@@ -1,10 +1,13 @@
 import gradio as gr
 import numpy as np
-from PIL import Image, ImageDraw, ImageFilter
-from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation
+from PIL import Image, ImageDraw, ImageFilter, ImageOps
+from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation, pipeline
 import torchvision.transforms
 import torch
 
+
+# pipeline = pipeline(task="image-segmentation", model="shi-labs/oneformer_cityscapes_swin_large", label_ids_to_fuse=[11])
+
 person_processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_cityscapes_swin_large")
 person_model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_cityscapes_swin_large")
 transform = torchvision.transforms.ToPILImage()
@@ -18,6 +21,14 @@ def detect_person(image: Image):
     return mask
 
 
+# def detect_person_pipeline(image: Image):
+#     results = pipeline(image, label_ids_to_fuse=[11])
+#     for detection in results:
+#         if detection["label"] == "person":
+#             return ImageOps.invert(detection["mask"])
+#     return Image.new(mode="L", size=image.size, color=255)
+
+
 def detect_dummy(image: Image):
     return Image.new(mode="L", size=image.size, color=255)
 
@@ -27,21 +38,6 @@ detectors = {
     # "License Plate": detect_license_plate
 }
 
-
-def anonymize(path: str, detectors: list):
-    # Read image
-    image = Image.open(path)
-    # Run requested detectors
-    masks = [implemented_detectors.get(det, detect_dummy)(image) for det in detectors]
-    # Combine masks
-    combined = np.minimum.reduce([np.array(m) for m in masks])
-    mask = Image.fromarray(combined)
-    # Apply blur through mask
-    blurred = image.filter(ImageFilter.GaussianBlur(15))
-    anonymized = Image.composite(image, blurred, mask)
-    return anonymized
-
-
 def test_gradio(image):
     masks = [detect_person(image)]
     combined = np.minimum.reduce([np.array(m) for m in masks])
@@ -52,6 +48,27 @@ def test_gradio(image):
     return anonymized
 
 
-demo = gr.Interface(fn=test_gradio, inputs=gr.Image(source="webcam", type="pil"), outputs=gr.Image(type="pil"))
-demo.launch()
-# demo.launch(server_name="localhost", server_port=8080)
+demo_live = gr.Interface(
+    fn=test_gradio,
+    inputs=gr.Image(source="webcam", type="pil", shape=(640, 480)),
+    outputs=gr.Image(type="pil")
+)
+
+demo_upload = gr.Interface(
+    fn=test_gradio,
+    inputs=gr.Image(type="pil", shape=(640, 480)),
+    outputs=gr.Image(type="pil")
+)
+
+demo = gr.TabbedInterface(
+    interface_list=[demo_live, demo_upload],
+    tab_names=["Webcam", "Bild hochladen"],
+    title="Image Anonymizer"
+)
+
+# if __name__ == "__main__":
+#     demo.launch()
+#
+
+print(__name__)
+demo.launch(server_name="localhost", server_port=8080)
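
For reference, the blur-through-mask logic that test_gradio applies (and that the removed anonymize function wrapped) condenses to the standalone sketch below; the function name and the GaussianBlur radius of 15 come straight from the diff:

import numpy as np
from PIL import Image, ImageFilter

def anonymize(image: Image.Image, masks: list) -> Image.Image:
    # Each mask is mode "L": 0 where a detector fired, 255 elsewhere.
    # np.minimum.reduce keeps the darkest value per pixel, so a region
    # flagged by ANY detector stays flagged in the combined mask.
    combined = Image.fromarray(np.minimum.reduce([np.array(m) for m in masks]))
    blurred = image.filter(ImageFilter.GaussianBlur(15))
    # Image.composite takes pixels from image where the mask is 255 and
    # from blurred where it is 0, so only the detected regions get blurred.
    return Image.composite(image, blurred, combined)

With the final launch line, the app serves on http://localhost:8080 when started with python app.py.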