Fabrice-TIERCELIN committed
Commit af4572f
Parent(s): 6f178a2
Rollback

app.py CHANGED
@@ -63,39 +63,6 @@ def update_seed(is_randomize_seed, seed):
         return random.randint(0, max_64_bit_int)
     return seed
 
-def reset():
-    return [
-        None,
-        0,
-        None,
-        None,
-        "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
-        "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, bokeh, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
-        1,
-        1024,
-        1,
-        2,
-        50,
-        -1.0,
-        1.,
-        default_setting.s_cfg_Quality if torch.cuda.device_count() > 0 else 1.0,
-        True,
-        random.randint(0, max_64_bit_int),
-        5,
-        1.003,
-        "Wavelet",
-        "fp32",
-        "fp32",
-        1.0,
-        True,
-        False,
-        default_setting.spt_linear_CFG_Quality if torch.cuda.device_count() > 0 else 1.0,
-        0.,
-        "v0-Q",
-        "input",
-        6
-    ]
-
 def check(input_image):
     if input_image is None:
         raise gr.Error("Please provide an image to restore.")
@@ -241,7 +208,7 @@ def restore_in_Xmin(
 
     if torch.cuda.device_count() == 0:
         gr.Warning('Set this space to GPU config to make it work.')
-        return [noisy_image, denoise_image], [denoise_image], None,
+        return [noisy_image, denoise_image], [denoise_image], None, None
 
     if model_select != model.current_model:
         print('load ' + model_select)
@@ -391,7 +358,6 @@ def restore(
     results = [x_samples[i] for i in range(num_samples)]
 
     # All the results have the same size
-    input_height, input_width, input_channel = np.array(input_image).shape
     result_height, result_width, result_channel = np.array(results[0]).shape
 
     print('<<== restore')
@@ -412,20 +378,9 @@
         " pixels large and " + str(result_height) + \
         " pixels high, so a resolution of " + f'{result_width * result_height:,}' + " pixels."
     print(information)
-    try:
-        print("Estimated minutes: " + str(math.log(result_width * result_height * input_width * input_height * edm_steps * num_samples)))
-    except Exception as e:
-        print('Exception of Estimation')
-    try:
-        unique_name = str(uuid.uuid4()) + "." + output_format
-        image_copy = Image.fromarray(np.array(results[0]))
-        image_copy.save(unique_name)
-        print(unique_name)
-    except Exception as e:
-        print('Exception printing the path: ' + str(e))
 
     # Only one image can be shown in the slider
-    return [noisy_image] + [results[0]], gr.update(label="Downloadable results in *." + output_format + " format", format = output_format, value = results), gr.update(value = information, visible = True)
+    return [noisy_image] + [results[0]], gr.update(label="Downloadable results in *." + output_format + " format", format = output_format, value = results), gr.update(value = information, visible = True)
 
 def load_and_reset(param_setting):
     print('load_and_reset ==>>')
@@ -470,9 +425,6 @@ def log_information(result_gallery):
 
 def on_select_result(result_slider, result_gallery, evt: gr.SelectData):
     print('on_select_result')
-    if result_gallery is not None:
-        for i, result in enumerate(result_gallery):
-            print(result[0])
     return [result_slider[0], result_gallery[evt.index][0]]
 
 title_html = """
@@ -558,7 +510,7 @@ with gr.Blocks() as interface:
                 model_select = gr.Radio([["💃 Quality (v0-Q)", "v0-Q"], ["🎯 Fidelity (v0-F)", "v0-F"]], label="Model Selection", info="Pretrained model", value="v0-Q",
                                         interactive=True)
             with gr.Column():
-                color_fix_type = gr.Radio([
+                color_fix_type = gr.Radio(["None", "AdaIn", "Wavelet"], label="Color-Fix Type", info="AdaIn=Improve following a style, Wavelet=For JPEG artifacts", value="Wavelet",
                                         interactive=True)
                 s_cfg = gr.Slider(label="Text Guidance Scale", info="lower=follow the image, higher=follow the prompt", minimum=1.0, maximum=15.0,
                                 value=default_setting.s_cfg_Quality if torch.cuda.device_count() > 0 else 1.0, step=0.1)
@@ -584,16 +536,15 @@ with gr.Blocks() as interface:
                     randomize_seed = gr.Checkbox(label = "\U0001F3B2 Randomize seed", value = True, info = "If checked, result is always different")
                     seed = gr.Slider(label="Seed", minimum=0, maximum=max_64_bit_int, step=1, randomize=True)
                 with gr.Group():
-                    param_setting = gr.Radio(["Quality", "Fidelity"], interactive=True, label="Presetting", value
+                    param_setting = gr.Radio(["Quality", "Fidelity"], interactive=True, label="Presetting", value="Quality")
                     restart_button = gr.Button(value="Apply presetting")
 
-        with gr.
-            diffusion_button = gr.Button(value="🚀 Upscale/Restore", variant = "primary", elem_id
-            reset_btn = gr.Button(value="🧹 Reinit page", variant="stop", elem_id="reset_button", visible = False)
+        with gr.Group():
+            diffusion_button = gr.Button(value="🚀 Upscale/Restore", variant = "primary", elem_id="process_button")
 
-        restore_information = gr.HTML(value
-        result_slider = ImageSlider(label
-        result_gallery = gr.Gallery(label
+        restore_information = gr.HTML(value="Restart the process to get another result.", visible = False)
+        result_slider = ImageSlider(label='Comparator', show_label=False, elem_id="slider1", show_download_button = False)
+        result_gallery = gr.Gallery(label='Downloadable results', show_label=True, elem_id="gallery1")
 
     gr.Examples(
         run_on_click = True,
@@ -632,8 +583,7 @@ with gr.Blocks() as interface:
         outputs = [
            result_slider,
            result_gallery,
-           restore_information
-           reset_btn
+           restore_information
        ],
        examples = [
            [
@@ -825,13 +775,11 @@ with gr.Blocks() as interface:
     ], outputs = [
         result_slider,
         result_gallery,
-        restore_information
-
-    ]).success(fn = log_information, inputs = [
+        restore_information
+    ]).then(fn = log_information, inputs = [
         result_gallery
     ], outputs = [], queue = False, show_progress = False)
 
-    result_gallery.change(on_select_result, [result_slider, result_gallery], result_slider)
     result_gallery.select(on_select_result, [result_slider, result_gallery], result_slider)
 
     restart_button.click(fn = load_and_reset, inputs = [
@@ -852,37 +800,5 @@ with gr.Blocks() as interface:
         spt_linear_s_stage2,
         model_select
     ])
-
-    reset_btn.click(fn = reset, inputs = [], outputs = [
-        input_image,
-        rotation,
-        denoise_image,
-        prompt,
-        a_prompt,
-        n_prompt,
-        num_samples,
-        min_size,
-        downscale,
-        upscale,
-        edm_steps,
-        s_stage1,
-        s_stage2,
-        s_cfg,
-        randomize_seed,
-        seed,
-        s_churn,
-        s_noise,
-        color_fix_type,
-        diff_dtype,
-        ae_dtype,
-        gamma_correction,
-        linear_CFG,
-        linear_s_stage2,
-        spt_linear_CFG,
-        spt_linear_s_stage2,
-        model_select,
-        output_format,
-        allocation
-    ], queue = False, show_progress = False)
 
     interface.queue(10).launch()
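The bulk of the rollback removes the reset feature: the reset() helper, the hidden reset_btn button, and the reset_btn.click(...) wiring that pushed a default value into every control. As a reminder of why all three pieces have to go together, here is a minimal sketch of the underlying Gradio pattern. It is illustrative only, with made-up component names rather than the Space's actual code: the callback returns one value per component listed in outputs, and Gradio assigns them positionally.

# Minimal sketch of the pattern the removed reset() relied on; illustrative
# component names, not the Space's actual code.
import gradio as gr

def reset_fields():
    # One return value per component in the outputs list, in the same order.
    return [None, "Wavelet", 50]

with gr.Blocks() as demo:
    input_image = gr.Image(label="Input")
    color_fix_type = gr.Radio(["None", "AdaIn", "Wavelet"], value="Wavelet", label="Color-Fix Type")
    edm_steps = gr.Slider(1, 200, value=50, label="Steps")
    reset_btn = gr.Button("🧹 Reinit page")

    # The returned list must match the outputs list in length and order, which
    # is why dropping the feature touches the function, the button, and every
    # outputs list that still mentioned reset_btn.
    reset_btn.click(fn=reset_fields, inputs=[], outputs=[input_image, color_fix_type, edm_steps])

demo.launch()

In the rolled-back app.py this wiring disappears entirely, so restore_information becomes the last entry in the remaining outputs lists.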
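The other wiring change is in the handler chained after the main upscale click: the rolled-back version registers it with .then(...) where the replaced version used .success(...). The sketch below shows the chaining pattern with illustrative components, not the Space's real ones: .then() fires once the previous handler finishes, whether or not it raised an error, while .success() fires only when it completed without one.

# Minimal sketch of Gradio event chaining; illustrative names only.
import gradio as gr

def process(text):
    # Main handler: one return value per output component below.
    return "processed: " + text, str(len(text)) + " characters"

def log_result(info):
    # Chained handler: runs after process() finishes because it is
    # registered with .then() on the event returned by .click().
    print("info panel now shows:", info)

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    result = gr.Textbox(label="Result")
    info = gr.Textbox(label="Info")
    run_btn = gr.Button("Run")

    run_btn.click(fn=process, inputs=[prompt], outputs=[result, info]).then(
        fn=log_result, inputs=[info], outputs=[], queue=False
    )

demo.launch()

In app.py the chained call also passes show_progress = False, matching the options shown in the diff above.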