it@M InnovationLab commited on
Commit
0c5cb38
1 Parent(s): 83b4101

First try at anonymizer app.

Browse files
Files changed (3) hide show
  1. README.md +3 -3
  2. app.py +69 -4
  3. requirements.txt +4 -0
README.md CHANGED
@@ -1,8 +1,8 @@
1
  ---
2
  title: Image Anonymizer
3
- emoji: 😻
4
- colorFrom: purple
5
- colorTo: red
6
  sdk: gradio
7
  sdk_version: 3.27.0
8
  app_file: app.py
 
1
  ---
2
  title: Image Anonymizer
3
+ emoji: 🕶
4
+ colorFrom: yellow
5
+ colorTo: black
6
  sdk: gradio
7
  sdk_version: 3.27.0
8
  app_file: app.py
app.py CHANGED
@@ -1,7 +1,72 @@
1
  import gradio as gr
 
 
 
 
2
 
3
- def greet(name):
4
- return "Hello " + name + "!!"
5
 
6
- interface = gr.Interface(fn=greet, inputs="text", outputs="text")
7
- interface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
import numpy as np
import torch
import yolov5
from PIL import Image, ImageDraw, ImageFilter
from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation
 
7
# OneFormer trained on Cityscapes: used for semantic segmentation to find people.
person_processor = OneFormerProcessor.from_pretrained("shi-labs/oneformer_cityscapes_swin_large")
person_model = OneFormerForUniversalSegmentation.from_pretrained("shi-labs/oneformer_cityscapes_swin_large")

# YOLOv5 checkpoint fine-tuned for license-plate detection.
lp_model = yolov5.load('keremberke/yolov5m-license-plate')
lp_model.conf = 0.25  # NMS confidence threshold
lp_model.iou = 0.45  # NMS IoU threshold
lp_model.agnostic = False  # NMS class-agnostic
lp_model.multi_label = False  # NMS multiple labels per box
lp_model.max_det = 1000  # maximum number of detections per image
16
+
17
def detect_person(image: Image):
    """Segment people in *image*.

    Returns an L-mode PIL mask of the same size as *image* where pixels
    predicted as the Cityscapes 'person' class (label id 11) are 0 and
    all other pixels are 255 (0 = region to anonymize).
    """
    semantic_inputs = person_processor(images=image, task_inputs=["semantic"], return_tensors="pt")
    with torch.no_grad():  # inference only; no gradients needed
        semantic_outputs = person_model(**semantic_inputs)
    # PIL's .size is (width, height); the processor wants (height, width).
    predicted_semantic_map = person_processor.post_process_semantic_segmentation(
        semantic_outputs, target_sizes=[image.size[::-1]])[0]
    # BUGFIX: the original called an undefined `transform` (and `torch` was
    # never imported). Convert the label map to a PIL image via numpy instead.
    mask = Image.fromarray(predicted_semantic_map.to(torch.uint8).cpu().numpy())
    mask = Image.eval(mask, lambda x: 0 if x == 11 else 255)
    return mask
24
+
25
+
26
def detect_license_plate(image: Image):
    """Detect license plates in *image* with the YOLOv5 model.

    Returns an L-mode PIL mask of the same size as *image*: detected
    plate bounding boxes are filled with 0, the rest is 255
    (0 = region to anonymize).
    """
    # Use the image width as the inference size so detection runs at
    # roughly native resolution.
    results = lp_model(image, size=image.size[0])
    predictions = results.pred[0]
    boxes = predictions[:, :4]  # x1, y1, x2, y2 per detection
    # (Removed the original's redundant local `from PIL import ...`;
    # these names are already imported at module level.)
    mask = Image.new(mode="L", size=image.size, color=255)
    draw = ImageDraw.Draw(mask)
    for box in boxes:
        draw.rectangle(list(box), fill=0)
    return mask
36
+
37
+
38
def detect_dummy(image: Image):
    """Fallback detector that finds nothing: a fully-white (all 255) mask,
    so no region of *image* gets anonymized."""
    blank = Image.new(mode="L", size=image.size, color=255)
    return blank
40
+
41
+
42
# Maps the UI's detector labels to their implementations.
# BUGFIX: renamed from `detectors` to `implemented_detectors` — this is the
# name `anonymize()` actually looks detectors up under (its own `detectors`
# parameter shadows any module-level `detectors`), so the original name
# caused a NameError at runtime.
implemented_detectors = {
    "Person": detect_person,
    "License Plate": detect_license_plate
}
46
+
47
+
48
def anonymize(path: str, detectors: list):
    """Load an image and blur the regions found by the requested detectors.

    Args:
        path: Filesystem path of the image to anonymize.
        detectors: Detector labels (keys of the module-level
            ``implemented_detectors`` dict); unknown labels fall back to
            ``detect_dummy`` (no-op mask).

    Returns:
        A PIL image with all detected regions Gaussian-blurred.
    """
    # Read image
    image = Image.open(path)
    # Run requested detectors; each returns an L-mode mask (0 = anonymize).
    masks = [implemented_detectors.get(det, detect_dummy)(image) for det in detectors]
    # Combine masks: pixel-wise minimum keeps 0 wherever ANY detector hit.
    combined = np.minimum.reduce([np.array(m) for m in masks])
    mask = Image.fromarray(combined)
    # Composite takes `image` where the mask is 255 and the blurred copy
    # where it is 0, i.e. only detected regions end up blurred.
    blurred = image.filter(ImageFilter.GaussianBlur(15))
    anonymized = Image.composite(image, blurred, mask)
    return anonymized
60
+
61
+
62
def test_gradio(image):
    """Gradio callback: blur every detected person and license plate in *image*."""
    person_mask = detect_person(image)
    plate_mask = detect_license_plate(image)
    # Pixel-wise minimum of the two masks: 0 wherever either detector hit.
    combined = np.minimum.reduce([np.array(person_mask), np.array(plate_mask)])
    keep_mask = Image.fromarray(combined)
    # Blend the original over a blurred copy, keeping the original only
    # where the mask is white.
    blurred = image.filter(ImageFilter.GaussianBlur(15))
    return Image.composite(image, blurred, keep_mask)
70
+
71
# Wire the demo up: a PIL image in, the anonymized image out.
interface = gr.Interface(fn=test_gradio, inputs=gr.Image(type="pil"), outputs="image")
# NOTE(review): share=True only matters for local runs; presumably ignored
# when hosted on Spaces — confirm before removing.
interface.launch(share=True)
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ yolov5
+ transformers
+ torch
+ Pillow
+ numpy