initial commit
- app.py +113 -0
- city_1.jpg +0 -0
- city_2.jpg +0 -0
- city_3.jpg +0 -0
- city_4.jpg +0 -0
- city_5.jpg +0 -0
- city_6.jpg +0 -0
- city_7.jpg +0 -0
- city_8.jpg +0 -0
- labels.txt +19 -0
- requirements.txt +6 -0
app.py
ADDED
@@ -0,0 +1,113 @@
import gradio as gr

from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import tensorflow as tf
from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation

feature_extractor = SegformerFeatureExtractor.from_pretrained(
    "nvidia/segformer-b4-finetuned-cityscapes-1024-1024"
)
model = TFSegformerForSemanticSegmentation.from_pretrained(
    "nvidia/segformer-b4-finetuned-cityscapes-1024-1024"
)

def ade_palette():
    """Palette mapping each of the 19 Cityscapes classes to an RGB color."""
    return [
        [0, 0, 0],        # black
        [140, 140, 140],  # gray
        [95, 0, 255],     # purple
        [221, 126, 255],  # light purple
        [1, 0, 255],      # blue
        [0, 216, 255],    # light blue
        [35, 164, 26],    # green
        [29, 219, 22],    # light green
        [255, 228, 0],    # yellow
        [255, 187, 0],    # light orange
        [255, 94, 0],     # orange
        [255, 0, 0],      # red
        [255, 167, 167],  # pink
        [153, 56, 0],     # brown
        [207, 166, 54],
        [180, 40, 180],
        [120, 56, 123],
        [45, 56, 28],
        [67, 56, 123],
    ]

# Class names, one per line, in the same order as the palette above.
labels_list = []

with open("labels.txt", "r") as fp:
    for line in fp:
        labels_list.append(line.rstrip("\n"))

colormap = np.asarray(ade_palette())

def label_to_color_image(label):
    """Map a 2-D array of class ids to an RGB image using the palette."""
    if label.ndim != 2:
        raise ValueError("Expect 2-D input label")

    if np.max(label) >= len(colormap):
        raise ValueError("label value too large.")
    return colormap[label]

def draw_plot(pred_img, seg):
    """Plot the blended prediction next to a legend of the classes present."""
    fig = plt.figure(figsize=(20, 15))

    grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])

    plt.subplot(grid_spec[0])
    plt.imshow(pred_img)
    plt.axis("off")

    LABEL_NAMES = np.asarray(labels_list)
    FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
    FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)

    unique_labels = np.unique(seg.numpy().astype("uint8"))
    ax = plt.subplot(grid_spec[1])
    plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation="nearest")
    ax.yaxis.tick_right()
    plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
    plt.xticks([], [])
    ax.tick_params(width=0.0, labelsize=25)
    return fig

def sepia(input_img):
    """Run SegFormer on the input image and return a figure with the overlay."""
    input_img = Image.fromarray(input_img)

    inputs = feature_extractor(images=input_img, return_tensors="tf")
    outputs = model(**inputs)
    logits = outputs.logits  # shape: (batch, num_labels, height/4, width/4)

    # Rescale logits to the input size. `input_img.size` returns (width, height),
    # so it is reversed to match the (height, width) order `tf.image.resize` expects.
    logits = tf.transpose(logits, [0, 2, 3, 1])
    logits = tf.image.resize(logits, input_img.size[::-1])
    seg = tf.math.argmax(logits, axis=-1)[0]

    # Color the predicted mask, then blend it 50/50 with the input image.
    color_seg = np.zeros(
        (seg.shape[0], seg.shape[1], 3), dtype=np.uint8
    )  # height, width, 3
    for label, color in enumerate(colormap):
        color_seg[seg.numpy() == label, :] = color

    pred_img = np.array(input_img) * 0.5 + color_seg * 0.5
    pred_img = pred_img.astype(np.uint8)

    fig = draw_plot(pred_img, seg)
    return fig

demo = gr.Interface(fn=sepia,
                    inputs=gr.Image(shape=(400, 600)),
                    outputs=["plot"],
                    examples=["city_1.jpg", "city_2.jpg", "city_3.jpg",
                              "city_4.jpg", "city_5.jpg", "city_6.jpg",
                              "city_7.jpg", "city_8.jpg"],
                    allow_flagging="never")

# Guard the launch so importing app.py (e.g. for testing) does not start the server.
if __name__ == "__main__":
    demo.launch()
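Because demo.launch() sits behind a __main__ guard, the pipeline can be imported and exercised without starting the UI. A minimal smoke test, assuming it runs from the repository root next to app.py and the example images (the output filename overlay.png is only illustrative):

import numpy as np
from PIL import Image

from app import sepia  # safe to import: the __main__ guard keeps the server from launching

# Push one of the committed example images through the full pipeline.
img = np.array(Image.open("city_1.jpg").convert("RGB"))
fig = sepia(img)            # returns a matplotlib Figure: blended mask plus class legend
fig.savefig("overlay.png")  # inspect the result offline

Note that importing app downloads the SegFormer checkpoint on first use, so the first run is slow.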
city_1.jpg
ADDED
city_2.jpg
ADDED
city_3.jpg
ADDED
city_4.jpg
ADDED
city_5.jpg
ADDED
city_6.jpg
ADDED
city_7.jpg
ADDED
city_8.jpg
ADDED
labels.txt
ADDED
@@ -0,0 +1,19 @@
road
sidewalk
building
wall
fence
pole
traffic light
traffic sign
vegetation
terrain
sky
person
rider
car
truck
bus
train
motorcycle
bicycle
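These are the 19 Cityscapes training classes in trainId order, and the palette in app.py is indexed the same way. The checkpoint records its own id-to-name mapping in its config, so a one-off sanity check (a sketch, reusing the same checkpoint name as app.py) can confirm the file has not drifted out of sync:

from transformers import AutoConfig

config = AutoConfig.from_pretrained("nvidia/segformer-b4-finetuned-cityscapes-1024-1024")
with open("labels.txt") as fp:
    labels = [line.rstrip("\n") for line in fp]

# config.id2label maps integer class ids to the names the model was trained with.
assert labels == [config.id2label[i] for i in range(len(labels))]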
requirements.txt
ADDED
@@ -0,0 +1,6 @@
torch
transformers
tensorflow
numpy
Pillow
matplotlib
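Two of the APIs used above have since moved: gr.Image(shape=...) exists only in the Gradio 3.x series (removed in 4.0), and SegformerFeatureExtractor is deprecated in recent transformers releases in favor of SegformerImageProcessor. With every dependency unpinned, a future rebuild of the Space can break silently. A pinned sketch (the version bounds are illustrative assumptions, not tested ranges; on Spaces the Gradio version itself is normally fixed via sdk_version in the README metadata, not here):

transformers<5     # SegformerFeatureExtractor still importable, though deprecated
tensorflow<2.16    # assumption: avoid the Keras 3 split that newer TF releases introduce
numpy
Pillow             # the app imports PIL, so Pillow is the intended package
matplotlib

torch is dropped from this sketch on the assumption it is unused: app.py runs the TensorFlow port of the model.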