Fabrice-TIERCELIN committed
Commit: 943f713
1 Parent(s): ba43c72
Downscale

Files changed: gradio_demo.py (+68 -60)
gradio_demo.py CHANGED
@@ -147,6 +147,67 @@ def stage2_process(
     output_format,
     allocation
 ):
+    print("noisy_image: " + str(noisy_image))
+    print("denoise_image: " + str(denoise_image))
+    print("prompt: " + str(prompt))
+    print("a_prompt: " + str(a_prompt))
+    print("n_prompt: " + str(n_prompt))
+    print("num_samples: " + str(num_samples))
+    print("min_size: " + str(min_size))
+    print("downscale: " + str(downscale))
+    print("upscale: " + str(upscale))
+    print("edm_steps: " + str(edm_steps))
+    print("s_stage1: " + str(s_stage1))
+    print("s_stage2: " + str(s_stage2))
+    print("s_cfg: " + str(s_cfg))
+    print("randomize_seed: " + str(randomize_seed))
+    print("seed: " + str(seed))
+    print("s_churn: " + str(s_churn))
+    print("s_noise: " + str(s_noise))
+    print("color_fix_type: " + str(color_fix_type))
+    print("diff_dtype: " + str(diff_dtype))
+    print("ae_dtype: " + str(ae_dtype))
+    print("gamma_correction: " + str(gamma_correction))
+    print("linear_CFG: " + str(linear_CFG))
+    print("linear_s_stage2: " + str(linear_s_stage2))
+    print("spt_linear_CFG: " + str(spt_linear_CFG))
+    print("spt_linear_s_stage2: " + str(spt_linear_s_stage2))
+    print("model_select: " + str(model_select))
+    print("output_format: " + str(output_format))
+    print("GPU time allocation: " + str(allocation) + " min")
+
+    if output_format == "input":
+        if noisy_image is None:
+            output_format = "png"
+        else:
+            output_format = noisy_image.format
+
+    if prompt is None:
+        prompt = ""
+
+    if a_prompt is None:
+        a_prompt = ""
+
+    if n_prompt is None:
+        n_prompt = ""
+
+    if prompt != "" and a_prompt != "":
+        a_prompt = prompt + ", " + a_prompt
+    else:
+        a_prompt = prompt + a_prompt
+    print("Final prompt: " + str(a_prompt))
+    noisy_image = noisy_image if denoise_image is None else denoise_image
+    if 1 < downscale:
+        input_height, input_width, input_channel = np.array(noisy_image).shape
+        print('downscale')
+        print(downscale)
+        print(input_width)
+        print(input_height)
+        print(input_width // downscale)
+        print(input_height // downscale)
+        print((input_width // downscale, input_height // downscale))
+        noisy_image = np.array(Image.fromarray(noisy_image).resize((input_width // downscale, input_height // downscale), Image.LANCZOS))
+
     if allocation == 1:
         return restore_in_1min(
             noisy_image, denoise_image, prompt, a_prompt, n_prompt, num_samples, min_size, downscale, upscale, edm_steps, s_stage1, s_stage2, s_cfg, randomize_seed, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction, linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select, output_format, allocation
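The core of this commit is the downscale block above: the input image is read as a NumPy array to get its shape, then shrunk with Pillow's LANCZOS filter before being handed to the restoration wrappers. As a reminder of the conventions involved (np.array(image).shape is (height, width, channels) while PIL's resize expects (width, height)), here is a minimal standalone sketch of the same step; the function name and the PIL-image input are illustrative, not part of gradio_demo.py.

```python
# Standalone sketch of the downscale step (illustrative names, not from gradio_demo.py)
import numpy as np
from PIL import Image

def downscale_image(image: Image.Image, factor: int) -> Image.Image:
    """Shrink an image by an integer factor using the LANCZOS filter."""
    if factor <= 1:
        return image
    # np.array(image).shape is (height, width, channels); resize() wants (width, height)
    height, width = np.array(image).shape[:2]
    return image.resize((width // factor, height // factor), Image.LANCZOS)

# Example: halve both dimensions before the expensive restoration pass
# small = downscale_image(Image.open("photo.png"), 2)
```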
@@ -229,7 +290,7 @@ def restore_in_10min(*args, **kwargs):
     return restore(*args, **kwargs)

 def restore(
-    noisy_image,
+    input_image,
     denoise_image,
     prompt,
     a_prompt,
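stage2_process now dispatches to restore_in_1min … restore_in_10min depending on the chosen allocation, and the hunk above shows that each wrapper simply forwards to restore. On a Hugging Face ZeroGPU Space the usual point of such wrappers is that each one carries a different @spaces.GPU(duration=...) budget; that decorator is not visible in this diff, so the sketch below is an assumption about the surrounding code, not a quote from it.

```python
# Hypothetical sketch of fixed-duration wrappers; the @spaces.GPU decorators and
# the dispatch helper are assumptions, not lines from this commit.
import spaces

def restore(*args, **kwargs):
    ...  # the actual restoration work

@spaces.GPU(duration=60)
def restore_in_1min(*args, **kwargs):
    return restore(*args, **kwargs)

@spaces.GPU(duration=600)
def restore_in_10min(*args, **kwargs):
    return restore(*args, **kwargs)

def dispatch(allocation, *args):
    # Pick the wrapper whose GPU budget matches the user's "GPU allocation time" choice
    wrappers = {1: restore_in_1min, 10: restore_in_10min}
    return wrappers[allocation](*args)
```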
@@ -260,63 +321,10 @@ def restore(
 ):
     start = time.time()
     print('stage2_process ==>>')
-    print("noisy_image: " + str(noisy_image))
-    print("denoise_image: " + str(denoise_image))
-    print("prompt: " + str(prompt))
-    print("a_prompt: " + str(a_prompt))
-    print("n_prompt: " + str(n_prompt))
-    print("num_samples: " + str(num_samples))
-    print("min_size: " + str(min_size))
-    print("downscale: " + str(downscale))
-    print("upscale: " + str(upscale))
-    print("edm_steps: " + str(edm_steps))
-    print("s_stage1: " + str(s_stage1))
-    print("s_stage2: " + str(s_stage2))
-    print("s_cfg: " + str(s_cfg))
-    print("randomize_seed: " + str(randomize_seed))
-    print("seed: " + str(seed))
-    print("s_churn: " + str(s_churn))
-    print("s_noise: " + str(s_noise))
-    print("color_fix_type: " + str(color_fix_type))
-    print("diff_dtype: " + str(diff_dtype))
-    print("ae_dtype: " + str(ae_dtype))
-    print("gamma_correction: " + str(gamma_correction))
-    print("linear_CFG: " + str(linear_CFG))
-    print("linear_s_stage2: " + str(linear_s_stage2))
-    print("spt_linear_CFG: " + str(spt_linear_CFG))
-    print("spt_linear_s_stage2: " + str(spt_linear_s_stage2))
-    print("model_select: " + str(model_select))
-    print("output_format: " + str(output_format))
-    print("GPU time allocation: " + str(allocation) + " min")

     if torch.cuda.device_count() == 0:
         gr.Warning('Set this space to GPU config to make it work.')
-        return [
-
-    if output_format == "input":
-        if noisy_image is None:
-            output_format = "png"
-        else:
-            output_format = noisy_image.format
-
-    if prompt is None:
-        prompt = ""
-
-    if a_prompt is None:
-        a_prompt = ""
-
-    if n_prompt is None:
-        n_prompt = ""
-
-    if prompt != "" and a_prompt != "":
-        a_prompt = prompt + ", " + a_prompt
-    else:
-        a_prompt = prompt + a_prompt
-    print("Final prompt: " + str(a_prompt))
-    input_image = noisy_image if denoise_image is None else denoise_image
-    if 1 < downscale:
-        input_height, input_width, input_channel = np.array(input_image).shape
-        input_image = input_image.resize((input_width // downscale, input_height // downscale), Image.LANCZOS)
+        return [input_image] * 2, [input_image] * 2, None, None
     torch.cuda.set_device(SUPIR_device)
     event_id = str(time.time_ns())
     event_dict = {'event_id': event_id, 'localtime': time.ctime(), 'prompt': prompt, 'a_prompt': a_prompt,
@@ -382,7 +390,7 @@ def restore(
     hours = math.floor(minutes / 60)
     minutes = minutes - (hours * 60)
     information = ("Start the process again if you want a different result. " if randomize_seed else "") + \
-        "Wait " + str(allocation) + " min before a new run to avoid
+        "Wait " + str(allocation) + " min before a new run to avoid quota penalty. " + \
         "The image(s) has(ve) been generated in " + \
         ((str(hours) + " h, ") if hours != 0 else "") + \
         ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
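The information string above is assembled from hours and minutes obtained with math.floor; the seconds-to-minutes step happens earlier in restore and is not part of this hunk, so the worked example below reconstructs the whole breakdown with illustrative variable names.

```python
# Worked example of the elapsed-time breakdown behind the information string
import math
import time

start = time.time()
time.sleep(1)                       # stand-in for the restoration work
elapsed = int(time.time() - start)  # whole seconds
minutes = math.floor(elapsed / 60)
seconds = elapsed - (minutes * 60)
hours = math.floor(minutes / 60)
minutes = minutes - (hours * 60)
print(f"{hours} h, {minutes} min, {seconds} sec")  # e.g. "0 h, 0 min, 1 sec"
```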
@@ -393,7 +401,7 @@ def restore(
     print(information)

     # Only one image can be shown in the slider
-    return [
+    return [input_image] + [results[0]], gr.update(format = output_format, value = [input_image] + results), gr.update(value = information, visible = True), event_id

 def load_and_reset(param_setting):
     print('load_and_reset ==>>')
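The new return above mixes a plain value (the before/after pair for the slider) with gr.update(...) calls that modify other components in place, plus the event id. A minimal, self-contained illustration of that pattern follows; the components and handler are illustrative, not the SUPIR interface itself, and gr.update simply forwards its keyword arguments to the target component.

```python
# Minimal sketch of returning a plain value plus gr.update(...) for several outputs
import gradio as gr

def process(prompt):
    images = []                                    # would hold the generated image paths
    info = "The image(s) has(ve) been generated."
    return images, gr.update(value=info, visible=True)

with gr.Blocks() as demo:
    prompt_box = gr.Textbox(label="Prompt")
    gallery = gr.Gallery(label="Results")
    info_box = gr.HTML(visible=False)
    prompt_box.submit(process, inputs=prompt_box, outputs=[gallery, info_box])

# demo.launch()
```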
@@ -463,7 +471,7 @@ title_html = """
 The content added by SUPIR is <b><u>imagination, not real-world information</u></b>.
 The aim of SUPIR is the beauty and the illustration.
 Most of the processes only last few minutes.
-This demo can handle huge images but the process will be aborted if it lasts more than
+This demo can handle huge images but the process will be aborted if it lasts more than 10 min.
 Please leave a message in discussion if you encounter issues.

 <p><center><a href="https://arxiv.org/abs/2401.13627">Paper</a>   <a href="http://supir.xpixel.group/">Project Page</a>   <a href="https://github.com/Fanghua-Yu/SUPIR/blob/master/assets/DemoGuide.png">How to play</a>   <a href="https://huggingface.co/blog/MonsterMMORPG/supir-sota-image-upscale-better-than-magnific-ai">Local Install Guide</a></center></p>
@@ -499,7 +507,7 @@ with gr.Blocks(title="SUPIR") as interface:
 prompt = gr.Textbox(label="Image description", info="Help the AI understand what the image represents; describe as much as possible; I advise you to write in English as other languages may not be handled", value="", placeholder="A person, walking, in a town, Summer, photorealistic", lines=3)
 prompt_hint = gr.HTML("You can use a <a href='"'https://huggingface.co/spaces/MaziyarPanahi/llava-llama-3-8b'"'>LlaVa space</a> to auto-generate the description of your image.")
 upscale = gr.Radio([["x1", 1], ["x2", 2], ["x3", 3], ["x4", 4], ["x5", 5], ["x6", 6], ["x7", 7], ["x8", 8]], label="Upscale factor", info="Resolution x1 to x8", value=2, interactive=True)
-allocation = gr.Radio([["1 min", 1], ["2 min", 2], ["3 min", 3], ["4 min", 4], ["5 min", 5], ["6 min", 6], ["7 min", 7], ["8 min", 8], ["9 min", 9], ["10 min
+allocation = gr.Radio([["1 min", 1], ["2 min", 2], ["3 min", 3], ["4 min", 4], ["5 min", 5], ["6 min", 6], ["7 min", 7], ["8 min", 8], ["9 min", 9], ["10 min", 10]], label="GPU allocation time", info="lower=May abort run, higher=Quota penalty for next runs", value=6, interactive=True)
 output_format = gr.Radio([["*.png", "png"], ["*.webp", "webp"], ["*.jpeg", "jpeg"], ["*.gif", "gif"], ["*.bmp", "bmp"]], label="Image format for result", info="File extention", value="png", interactive=True)

 with gr.Accordion("Pre-denoising (optional)", open=False):
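The upscale, allocation, and output_format controls above all use gr.Radio with [label, value] pairs, so the label shown to the user ("10 min") is decoupled from the value the handler receives (10). A minimal sketch of that behaviour, with an illustrative handler:

```python
# Minimal sketch of gr.Radio with [label, value] pairs (illustrative example)
import gradio as gr

def report(allocation):
    # `allocation` is the underlying value (an int), not the displayed label
    return "Selected GPU allocation: " + str(allocation) + " min"

with gr.Blocks() as demo:
    allocation = gr.Radio(
        [["1 min", 1], ["6 min", 6], ["10 min", 10]],
        label="GPU allocation time",
        value=6,
        interactive=True,
    )
    choice = gr.Textbox(label="Choice")
    allocation.change(report, inputs=allocation, outputs=choice)

# demo.launch()
```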
@@ -627,7 +635,7 @@ with gr.Blocks(title="SUPIR") as interface:
 "Group of people, walking, happy, in the street, photorealistic, 8k, extremely detailled",
 "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
 "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, bokeh, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
-
+2,
 1024,
 1,
 8,