Spaces: Running on T4
praeclarumjj3 committed
Commit: 3701f72
Parent: b4c4058

Fix runtime commands

Files changed:
- Dockerfile (+4, -5)
- gradio_app.py (+42, -19)
Dockerfile
CHANGED
@@ -30,18 +30,17 @@ RUN chmod -R 777 $WORKDIR
 
 
 COPY requirements.txt $WORKDIR/requirements.txt
-COPY . .
-
 RUN pip install --no-cache-dir --upgrade -r $WORKDIR/requirements.txt
 
-
+COPY . .
 
+ARG TORCH_CUDA_ARCH_LIST=7.5+PTX
 
 RUN pip install ninja
 
 USER root
-RUN chown -R user:user /usr
-RUN chmod -R 777 /usr
+# RUN chown -R user:user /usr
+# RUN chmod -R 777 /usr
 RUN chown -R user:user $HOME
 RUN chmod -R 777 $HOME
 RUN chown -R user:user $WORKDIR
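The new ARG TORCH_CUDA_ARCH_LIST=7.5+PTX pins the CUDA architectures that extensions compiled during the build (with ninja) will target: a T4 has compute capability 7.5, and "+PTX" additionally embeds forward-compatible PTX code. As a rough illustration only (not part of the commit), the sketch below derives the same value from whatever GPU PyTorch can see; torch.cuda.get_device_capability is a real PyTorch call, but it needs a visible GPU, which is presumably why the Dockerfile hard-codes the value as a build ARG instead of detecting it.

# Illustrative sketch (not from the commit): compute a TORCH_CUDA_ARCH_LIST
# value from the visible GPU. On a T4 this prints "7.5+PTX", matching the
# ARG added in the Dockerfile above.
import os
import torch

if torch.cuda.is_available():
    major, minor = torch.cuda.get_device_capability(0)  # T4 reports (7, 5)
    arch_list = f"{major}.{minor}+PTX"  # "+PTX" keeps kernels forward-compatible
else:
    # No GPU visible (e.g. at image-build time): fall back to the env var, if any.
    arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST", "7.5+PTX")

print(arch_list)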
gradio_app.py
CHANGED
@@ -77,11 +77,32 @@ PREDICTORS = {
     }
 }
 
-(5 lines removed here; their content is not visible in this view)
+METADATA = {
+    "DiNAT-L": {
+        "Cityscapes (19 classes)": None,
+        "COCO (133 classes)": None,
+        "ADE20K (150 classes)": None
+    },
+    "Swin-L": {
+        "Cityscapes (19 classes)": None,
+        "COCO (133 classes)": None,
+        "ADE20K (150 classes)": None
+    }
+}
+
+def setup_modules():
+    for dataset in ["Cityscapes (19 classes)", "COCO (133 classes)", "ADE20K (150 classes)"]:
+        for backbone in ["DiNAT-L", "Swin-L"]:
+            cfg = setup_cfg(dataset, backbone)
+            metadata = MetadataCatalog.get(
+                cfg.DATASETS.TEST_PANOPTIC[0] if len(cfg.DATASETS.TEST_PANOPTIC) else "__unused"
+            )
+            if 'cityscapes_fine_sem_seg_val' in cfg.DATASETS.TEST_PANOPTIC[0]:
+                from cityscapesscripts.helpers.labels import labels
+                stuff_colors = [k.color for k in labels if k.trainId != 255]
+                metadata = metadata.set(stuff_colors=stuff_colors)
+            PREDICTORS[backbone][dataset] = DefaultPredictor(cfg)
+            METADATA[backbone][dataset] = metadata
 
 def setup_cfg(dataset, backbone):
     # load config from file and command-line arguments
@@ -102,19 +123,19 @@ def setup_cfg(dataset, backbone):
     cfg.freeze()
     return cfg
 
-def setup_modules(dataset, backbone):
-    cfg = setup_cfg(dataset, backbone)
-    predictor = DefaultPredictor(cfg)
-    # predictor = PREDICTORS[backbone][dataset]
-    metadata = MetadataCatalog.get(
-        cfg.DATASETS.TEST_PANOPTIC[0] if len(cfg.DATASETS.TEST_PANOPTIC) else "__unused"
-    )
-    if 'cityscapes_fine_sem_seg_val' in cfg.DATASETS.TEST_PANOPTIC[0]:
-        from cityscapesscripts.helpers.labels import labels
-        stuff_colors = [k.color for k in labels if k.trainId != 255]
-        metadata = metadata.set(stuff_colors=stuff_colors)
+# def setup_modules(dataset, backbone):
+#     cfg = setup_cfg(dataset, backbone)
+#     predictor = DefaultPredictor(cfg)
+#     # predictor = PREDICTORS[backbone][dataset]
+#     metadata = MetadataCatalog.get(
+#         cfg.DATASETS.TEST_PANOPTIC[0] if len(cfg.DATASETS.TEST_PANOPTIC) else "__unused"
+#     )
+#     if 'cityscapes_fine_sem_seg_val' in cfg.DATASETS.TEST_PANOPTIC[0]:
+#         from cityscapesscripts.helpers.labels import labels
+#         stuff_colors = [k.color for k in labels if k.trainId != 255]
+#         metadata = metadata.set(stuff_colors=stuff_colors)
 
-    return predictor, metadata
+#     return predictor, metadata
 
 def panoptic_run(img, predictor, metadata):
     visualizer = Visualizer(img[:, :, ::-1], metadata=metadata, instance_mode=ColorMode.IMAGE)
@@ -153,7 +174,9 @@ def semantic_run(img, predictor, metadata):
 TASK_INFER = {"the task is panoptic": panoptic_run, "the task is instance": instance_run, "the task is semantic": semantic_run}
 
 def segment(path, task, dataset, backbone):
-    predictor, metadata = setup_modules(dataset, backbone)
+    # predictor, metadata = setup_modules(dataset, backbone)
+    predictor = PREDICTORS[backbone][dataset]
+    metadata = METADATA[backbone][dataset]
     img = cv2.imread(path)
     width = WIDTH_DICT[KEY_DICT[dataset]]
     img = imutils.resize(img, width=width)
@@ -175,7 +198,7 @@ description = "<p style='color: #E0B941; font-size: 16px; font-weight: w600; tex
 
 # css = ".image-preview {height: 32rem; width: auto;} .output-image {height: 32rem; width: auto;} .panel-buttons { display: flex; flex-direction: row;}"
 
-(1 line removed here; its content is not visible in this view)
+setup_modules()
 
 gradio_inputs = [gr.Image(source="upload", tool=None, label="Input Image",type="filepath"),
                  gr.Radio(choices=["the task is panoptic" ,"the task is instance", "the task is semantic"], type="value", value="the task is panoptic", label="Task Token Input"),
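The gradio_app.py change moves model construction out of the request path: setup_modules() now builds a DefaultPredictor and its metadata for every backbone/dataset pair once at startup, and segment() only looks them up in the PREDICTORS and METADATA dictionaries. The sketch below is illustrative only; load_model is a hypothetical stand-in for the expensive DefaultPredictor(setup_cfg(...)) construction, and it shows the same precompute-then-lookup pattern in plain Python.

# Illustrative sketch (not from the commit) of the precompute-then-lookup pattern.
# load_model is a hypothetical stand-in for building a DefaultPredictor from a config.
import itertools
import time

BACKBONES = ["DiNAT-L", "Swin-L"]
DATASETS = ["Cityscapes (19 classes)", "COCO (133 classes)", "ADE20K (150 classes)"]

def load_model(backbone, dataset):
    """Pretend this is an expensive model load (weights, config, CUDA init)."""
    time.sleep(0.1)
    return f"model[{backbone}/{dataset}]"

# Startup: populate the cache once, mirroring what setup_modules() does above.
CACHE = {
    (b, d): load_model(b, d)
    for b, d in itertools.product(BACKBONES, DATASETS)
}

def segment(path, backbone, dataset):
    # Per-request work is now a dictionary lookup, not a model construction.
    model = CACHE[(backbone, dataset)]
    return f"{model} applied to {path}"

print(segment("example.jpg", "Swin-L", "COCO (133 classes)"))

The trade-off is a slower startup and all six predictors resident in memory at once, in exchange for requests that no longer pay the model-construction cost on every call.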