Fabrice-TIERCELIN committed
Commit 320e40a
Parent(s): b0e514f
New layout

gradio_demo.py CHANGED (+90 -96)
@@ -83,7 +83,7 @@ def stage1_process(input_image, gamma_correction):
     print('Start stage1_process')
     if torch.cuda.device_count() == 0:
         gr.Warning('Set this space to GPU config to make it work.')
-        return None
+        return None, None
     torch.cuda.set_device(SUPIR_device)
     LQ = HWC3(input_image)
     LQ = fix_resize(LQ, 512)
@@ -98,7 +98,7 @@ def stage1_process(input_image, gamma_correction):
     LQ *= 255.0
     LQ = LQ.round().clip(0, 255).astype(np.uint8)
     print('End stage1_process')
-    return LQ
+    return LQ, gr.update(visible = True)

 @spaces.GPU(duration=240)
 def llave_process(input_image, temperature, top_p, qs=None):
@@ -145,7 +145,7 @@ def stage2_process(
     print('Start stage2_process')
     if torch.cuda.device_count() == 0:
         gr.Warning('Set this space to GPU config to make it work.')
-        return None, None
+        return None, None, None
     input_image = noisy_image if denoise_image is None else denoise_image
     torch.cuda.set_device(SUPIR_device)
     event_id = str(time.time_ns())
@@ -201,7 +201,7 @@ def stage2_process(
     for i, result in enumerate(results):
         Image.fromarray(result).save(f'./history/{event_id[:5]}/{event_id[5:]}/HQ_{i}.png')
     print('End stage2_process')
-    return [input_image] + results, [input_image] + results, event_id
+    return [input_image] + results, [input_image] + results, gr.update(visible = True), event_id

 def load_and_reset(param_setting):
     print('Start load_and_reset')
@@ -257,7 +257,7 @@ title_html = """
 <big><center>Upscale your images up to x8 freely, without account, without watermark and download it</center></big>
 <br/>

-<p>SUPIR is a practicing model scaling for photo-realistic image restoration. It is still a research project under tested and is not yet a stable commercial product.
+<p>SUPIR is a practicing model scaling for photo-realistic image restoration. It is still a research project under tested and is not yet a stable commercial product. LlaVa is not integrated in this demo. The content added by SUPIR is imagination, not real-world information. The aim of SUPIR is the beauty and the illustration.

 <p><center><a href="https://arxiv.org/abs/2401.13627">Paper</a>   <a href="http://supir.xpixel.group/">Project Page</a>   <a href="https://github.com/Fanghua-Yu/SUPIR/blob/master/assets/DemoGuide.png">How to play</a>   <a href="https://huggingface.co/blog/MonsterMMORPG/supir-sota-image-upscale-better-than-magnific-ai">Local Install Guide</a></center></p>
 """
@@ -283,96 +283,88 @@ with gr.Blocks(title="SUPIR") as interface:
     You can't use SUPIR directly here because this space runs on a CPU, which is not enough for SUPIR. This is a template space. Please provide feedback if you have issues.
     </big></big></big></p>
     """)
-    [old lines 286-367 of the previous layout are not shown in this diff view]
-    with gr.Column():
-        diffusion_button = gr.Button(value="🚀 Upscale/Restore", variant = "primary", elem_id="process_button")
-
-    with gr.Accordion("Feedback", open=True, visible=False):
-        fb_score = gr.Slider(label="Feedback Score", minimum=1, maximum=5, value=3, step=1,
-                             interactive=True)
-        fb_text = gr.Textbox(label="Feedback Text", value="", placeholder='Please enter your feedback here.')
-        submit_button = gr.Button(value="Submit Feedback")
+    gr.HTML(title_html)
+
+    input_image = gr.Image(label="Input", show_label=True, type="numpy", height=600, elem_id="image-input")
+    prompt = gr.Textbox(label="Image description for LlaVa", value="", placeholder="A person, walking, in a town, Summer, photorealistic", lines=3, visible=False)
+    upscale = gr.Radio([1, 2, 3, 4, 5, 6, 7, 8], label="Upscale factor", info="Resolution x1 to x8", value=2, interactive=True)
+    a_prompt = gr.Textbox(label="Image description",
+                          info="The prompt that describes what the image represents",
+                          value='Cinematic, High Contrast, highly detailed, taken using a Canon EOS R '
+                                'camera, hyper detailed photo - realistic maximum detail, 32k, Color '
+                                'Grading, ultra HD, extreme meticulous detailing, skin pore detailing, '
+                                'hyper sharpness, perfect without deformations.',
+                          lines=3)
+    a_prompt_hint = gr.HTML("You can use a <a href='"'https://huggingface.co/spaces/MaziyarPanahi/llava-llama-3-8b'"'>LlaVa space</a> to auto-generate the description of your image.")
+
+    with gr.Accordion("Pre-denoising (optional)", open=False):
+        gamma_correction = gr.Slider(label="Gamma Correction", minimum=0.1, maximum=2.0, value=1.0, step=0.1)
+        denoise_button = gr.Button(value="Pre-denoise")
+        denoise_image = gr.Image(label="Denoised image", show_label=True, type="numpy", height=600, elem_id="image-s1")
+        denoise_information = gr.HTML(value="If present, the denoised image will be used for the restoration instead of the input image.", visible=False)
+
+    with gr.Accordion("LLaVA options", open=False, visible=False):
+        temperature = gr.Slider(label="Temperature", info = "lower=Always similar, higher=More creative", minimum=0., maximum=1.0, value=0.2, step=0.1)
+        top_p = gr.Slider(label="Top P", info = "Percent of tokens shortlisted", minimum=0., maximum=1.0, value=0.7, step=0.1)
+        qs = gr.Textbox(label="Question", info="Ask LLaVa what description you want", value="Describe the image and its style in a very detailed manner. The image is a realistic photography, not an art painting.", lines=3)
+
+    with gr.Accordion("Advanced options", open=False):
+        n_prompt = gr.Textbox(label="Anti image description",
+                              info="Disambiguate by listing what the image does NOT represent",
+                              value='painting, oil painting, illustration, drawing, art, sketch, anime, '
+                                    'cartoon, CG Style, 3D render, unreal engine, blurring, bokeh, ugly, dirty, messy, '
+                                    'worst quality, low quality, frames, watermark, signature, jpeg artifacts, '
+                                    'deformed, lowres, over-smooth',
+                              lines=3)
+        edm_steps = gr.Slider(label="Steps", info="lower=faster, higher=more details", minimum=1, maximum=200, value=default_setting.edm_steps if torch.cuda.device_count() > 0 else 1, step=1)
+        num_samples = gr.Slider(label="Num Samples", info="Number of generated results; I discourage to increase because the process is limited to 4 min", minimum=1, maximum=4 if not args.use_image_slider else 1
+                                , value=1, step=1)
+        with gr.Row():
+            with gr.Column():
+                model_select = gr.Radio(["v0-Q", "v0-F"], label="Model Selection", info="Q=Quality, F=Fidelity", value="v0-Q",
+                                        interactive=True)
+            with gr.Column():
+                color_fix_type = gr.Radio(["None", "AdaIn", "Wavelet"], label="Color-Fix Type", info="AdaIn=Adaptive Instance Normalization, Wavelet=For JPEG artifacts", value="Wavelet",
+                                          interactive=True)
+        s_cfg = gr.Slider(label="Text Guidance Scale", info="lower=follow the image, higher=follow the prompt", minimum=1.0, maximum=15.0,
+                          value=default_setting.s_cfg_Quality if torch.cuda.device_count() > 0 else 1.0, step=0.1)
+        s_stage2 = gr.Slider(label="Restoring Guidance Strength", minimum=0., maximum=1., value=1., step=0.05)
+        s_stage1 = gr.Slider(label="Pre-denoising Guidance Strength", minimum=-1.0, maximum=6.0, value=-1.0, step=1.0)
+        s_churn = gr.Slider(label="S-Churn", minimum=0, maximum=40, value=5, step=1)
+        s_noise = gr.Slider(label="S-Noise", minimum=1.0, maximum=1.1, value=1.003, step=0.001)
+        with gr.Row():
+            with gr.Column():
+                linear_CFG = gr.Checkbox(label="Linear CFG", value=True)
+                spt_linear_CFG = gr.Slider(label="CFG Start", minimum=1.0,
+                                           maximum=9.0, value=default_setting.spt_linear_CFG_Quality if torch.cuda.device_count() > 0 else 1.0, step=0.5)
+            with gr.Column():
+                linear_s_stage2 = gr.Checkbox(label="Linear Restoring Guidance", value=False)
+                spt_linear_s_stage2 = gr.Slider(label="Guidance Start", minimum=0.,
+                                                maximum=1., value=0., step=0.05)
+            with gr.Column():
+                diff_dtype = gr.Radio(['fp32', 'fp16', 'bf16'], label="Diffusion Data Type", value="fp16",
+                                      interactive=True)
+            with gr.Column():
+                ae_dtype = gr.Radio(['fp32', 'bf16'], label="Auto-Encoder Data Type", value="bf16",
+                                    interactive=True)
+        randomize_seed = gr.Checkbox(label = "\U0001F3B2 Randomize seed", value = True, info = "If checked, result is always different")
+        seed = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True)
+        with gr.Group():
+            param_setting = gr.Radio(["Quality", "Fidelity"], interactive=True, label="Presetting", value="Quality")
+            restart_button = gr.Button(value="Apply presetting")
+
+
+    llave_button = gr.Button(value="Generate description by LlaVa (disabled)", visible=False)
+    diffusion_button = gr.Button(value="🚀 Upscale/Restore", variant = "primary", elem_id="process_button")
+    restore_information = gr.HTML(value="Restart the process to get another result.", visible=False)
+    result_slider = ImageSlider(label='Output', show_label=True, elem_id="slider1")
+    result_gallery = gr.Gallery(label='Output', show_label=True, elem_id="gallery1")
+
+    with gr.Accordion("Feedback", open=True, visible=False):
+        fb_score = gr.Slider(label="Feedback Score", minimum=1, maximum=5, value=3, step=1,
+                             interactive=True)
+        fb_text = gr.Textbox(label="Feedback Text", value="", placeholder='Please enter your feedback here.')
+        submit_button = gr.Button(value="Submit Feedback")
     with gr.Row():
         gr.Markdown(claim_md)
         event_id = gr.Textbox(label="Event ID", value="", visible=False)
@@ -383,7 +375,8 @@ with gr.Blocks(title="SUPIR") as interface:
         input_image,
         gamma_correction
     ], outputs=[
-        denoise_image
+        denoise_image,
+        denoise_information
     ])

     llave_button.click(fn = check, inputs = [
@@ -434,6 +427,7 @@ with gr.Blocks(title="SUPIR") as interface:
     ], outputs = [
         result_slider,
         result_gallery,
+        restore_information,
         event_id
     ])

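The pattern the return-value hunks rely on is a Gradio handler that returns gr.update(visible=True) alongside its data, so a previously hidden component (here denoise_information or restore_information) appears once processing finishes. Below is a minimal sketch of that pattern, not the Space's actual code: the handler body and component wiring are illustrative stand-ins.

import gradio as gr
import numpy as np

def denoise(image):
    # Placeholder processing; the real Space runs SUPIR's stage-1 denoiser here.
    if image is None:
        return None, gr.update(visible=False)
    result = np.clip(image, 0, 255).astype(np.uint8)
    # The second return value toggles the hidden HTML note on, mirroring
    # "return LQ, gr.update(visible = True)" in stage1_process above.
    return result, gr.update(visible=True)

with gr.Blocks() as demo:
    input_image = gr.Image(label="Input", type="numpy")
    denoise_button = gr.Button("Pre-denoise")
    denoised = gr.Image(label="Denoised image", type="numpy")
    note = gr.HTML(value="The denoised image will be used for the restoration.", visible=False)

    # One outputs entry per returned value, as in the edited click() calls.
    denoise_button.click(fn=denoise, inputs=[input_image], outputs=[denoised, note])

if __name__ == "__main__":
    demo.launch()

This is why the click() output lists in the last two hunks gain an extra component: each added gr.update return value needs a matching entry in outputs.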