Fabrice-TIERCELIN committed
Commit • 77c3bd5
1 Parent(s): f200691

denoise_image = upscale_image(denoise_image, upscale, unit_resolution=32, min_size=min_size)
app.py CHANGED
@@ -16,6 +16,8 @@ from PIL import Image
 from SUPIR.util import HWC3, upscale_image, fix_resize, convert_dtype, create_SUPIR_model, load_QF_ckpt
 from huggingface_hub import hf_hub_download
 
+max_64_bit_int = 2**63 - 1
+
 hf_hub_download(repo_id="laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", filename="open_clip_pytorch_model.bin", local_dir="laion_CLIP-ViT-bigG-14-laion2B-39B-b160k")
 hf_hub_download(repo_id="camenduru/SUPIR", filename="sd_xl_base_1.0_0.9vae.safetensors", local_dir="yushan777_SUPIR")
 hf_hub_download(repo_id="camenduru/SUPIR", filename="SUPIR-v0F.ckpt", local_dir="yushan777_SUPIR")
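Note on the new constant: 2**63 - 1 (9223372036854775807) is the largest value a signed 64-bit integer can hold, so any seed drawn below it stays representable as an int64. A quick standalone check, nothing SUPIR-specific:

    import random

    # Largest signed 64-bit integer; equals 9223372036854775807.
    max_64_bit_int = 2**63 - 1
    assert max_64_bit_int == 9223372036854775807

    # Any draw in [0, max_64_bit_int] fits in an int64 seed field.
    print(random.randint(0, max_64_bit_int))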
@@ -57,7 +59,7 @@ def check_upload(input_image):
 
 def update_seed(is_randomize_seed, seed):
     if is_randomize_seed:
-        return random.randint(0,
+        return random.randint(0, max_64_bit_int)
     return seed
 
 def check(input_image):
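For context, a randomize-seed helper like this is usually chained in front of the main handler so the seed widget is refreshed before the run uses it. A minimal sketch under that assumption (the button and event wiring are hypothetical, not copied from app.py):

    import random

    max_64_bit_int = 2**63 - 1

    def update_seed(is_randomize_seed, seed):
        # Fresh seed when the checkbox is ticked; otherwise keep the user's value.
        if is_randomize_seed:
            return random.randint(0, max_64_bit_int)
        return seed

    # Hypothetical wiring (Gradio event chaining):
    # run_button.click(update_seed, inputs=[randomize_seed, seed], outputs=[seed]) \
    #           .then(restore_in_Xmin, inputs=[...], outputs=[...])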
@@ -206,6 +208,7 @@ def restore_in_Xmin(
         return [noisy_image, denoise_image], [denoise_image], None, None
 
     denoise_image = HWC3(np.array(denoise_image))
+    denoise_image = upscale_image(denoise_image, upscale, unit_resolution=32, min_size=min_size)
 
     # Allocation
     if allocation == 1:
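This hunk is the point of the commit: the denoised image is now upscaled inside restore_in_Xmin, before the GPU-allocation branch, instead of later in restore. SUPIR's real upscale_image lives in SUPIR/util.py; purely as a mental model of what a resizer with unit_resolution and min_size parameters typically does (the body below is an assumption, not SUPIR's implementation):

    import numpy as np
    from PIL import Image

    def upscale_image_sketch(img, upscale, unit_resolution=32, min_size=1024):
        # Scale by the requested factor, enforce a minimum short edge, then snap
        # both edges to multiples of unit_resolution so the UNet sees valid sizes.
        h, w = img.shape[:2]
        scale = max(upscale, min_size / min(h, w))
        new_h = max(unit_resolution, int(round(h * scale / unit_resolution)) * unit_resolution)
        new_w = max(unit_resolution, int(round(w * scale / unit_resolution)) * unit_resolution)
        return np.array(Image.fromarray(img).resize((new_w, new_h), Image.LANCZOS))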
@@ -331,8 +334,6 @@ def restore(
     elif model_select == 'v0-F':
         model.load_state_dict(ckpt_F, strict=False)
     model.current_model = model_select
-    input_image = upscale_image(input_image, upscale, unit_resolution=32,
-                                min_size=min_size)
 
     LQ = np.array(input_image) / 255.0
     LQ = np.power(LQ, gamma_correction)
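The surviving LQ lines normalize pixels to [0, 1] and apply gamma correction; with gamma_correction = 2.0 (an illustrative value, not necessarily the app's default), mid-gray 128 maps to (128/255)**2 ≈ 0.252, darkening midtones before restoration:

    import numpy as np

    gamma_correction = 2.0                      # illustrative value only
    pixel = np.array([[[128, 128, 128]]], dtype=np.uint8)
    LQ = pixel / 255.0                          # ~0.502
    LQ = np.power(LQ, gamma_correction)         # ~0.252
    print(LQ[0, 0, 0])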
@@ -474,7 +475,7 @@ with gr.Blocks() as interface:
         prompt = gr.Textbox(label="Image description", info="Help the AI understand what the image represents; describe as much as possible, especially the details we can't see on the original image; you can write in any language", value="", placeholder="A 33 years old man, walking, in the street, Santiago, morning, Summer, photorealistic", lines=3)
         prompt_hint = gr.HTML("You can use a <a href='"'https://huggingface.co/spaces/MaziyarPanahi/llava-llama-3-8b'"'>LlaVa space</a> to auto-generate the description of your image.")
         upscale = gr.Radio([["x1", 1], ["x2", 2], ["x3", 3], ["x4", 4], ["x5", 5], ["x6", 6], ["x7", 7], ["x8", 8], ["x9", 9], ["x10", 10]], label="Upscale factor", info="Resolution x1 to x10", value=2, interactive=True)
-        allocation = gr.Radio([["1 min", 1], ["2 min", 2], ["3 min", 3], ["4 min", 4], ["5 min", 5], ["6 min", 6], ["7 min", 7], ["8 min", 8], ["9 min (discouraged)", 9], ["10 min (discouraged)", 10]], label="GPU allocation time", info="lower=May abort run, higher=Quota penalty for next runs", value=6, interactive=True)
+        allocation = gr.Radio([["1 min", 1], ["2 min", 2], ["3 min", 3], ["4 min", 4], ["5 min", 5], ["6 min", 6], ["7 min", 7], ["8 min (discouraged)", 8], ["9 min (discouraged)", 9], ["10 min (discouraged)", 10]], label="GPU allocation time", info="lower=May abort run, higher=Quota penalty for next runs", value=6, interactive=True)
         output_format = gr.Radio([["As input", "input"], ["*.png", "png"], ["*.webp", "webp"], ["*.jpeg", "jpeg"], ["*.gif", "gif"], ["*.bmp", "bmp"]], label="Image format for result", info="File extention", value="input", interactive=True)
 
         with gr.Accordion("Pre-denoising (optional)", open=False):
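These gr.Radio widgets use two-element choices: the first entry is the label shown to the user, the second is the value the handler receives, so the changed option still passes 8 and only its label gains "(discouraged)". A stripped-down illustration:

    import gradio as gr

    def report(allocation):
        # Receives the numeric value (e.g. 6), not the label "6 min".
        return f"Allocated {allocation} minute(s)"

    with gr.Blocks() as demo:
        allocation = gr.Radio([["1 min", 1], ["6 min", 6], ["10 min (discouraged)", 10]],
                              label="GPU allocation time", value=6, interactive=True)
        out = gr.Textbox()
        allocation.change(report, inputs=[allocation], outputs=[out])
    # demo.launch()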
@@ -526,13 +527,13 @@ with gr.Blocks() as interface:
                     spt_linear_s_stage2 = gr.Slider(label="Guidance Start", minimum=0.,
                                                     maximum=1., value=0., step=0.05)
                 with gr.Column():
-                    diff_dtype = gr.Radio([["fp32 (precision)", "fp32"], ["fp16 (medium)", "fp16"], ["bf16 (speed)", "bf16"]], label="Diffusion Data Type", value="
+                    diff_dtype = gr.Radio([["fp32 (precision)", "fp32"], ["fp16 (medium)", "fp16"], ["bf16 (speed)", "bf16"]], label="Diffusion Data Type", value="fp32",
                                           interactive=True)
                 with gr.Column():
                     ae_dtype = gr.Radio([["fp32 (precision)", "fp32"], ["bf16 (speed)", "bf16"]], label="Auto-Encoder Data Type", value="fp32",
                                         interactive=True)
             randomize_seed = gr.Checkbox(label = "\U0001F3B2 Randomize seed", value = True, info = "If checked, result is always different")
-            seed = gr.Slider(label="Seed", minimum=0, maximum=
+            seed = gr.Slider(label="Seed", minimum=0, maximum=max_64_bit_int, step=1, randomize=True)
             with gr.Group():
                 param_setting = gr.Radio(["Quality", "Fidelity"], interactive=True, label="Presetting", value="Quality")
                 restart_button = gr.Button(value="Apply presetting")
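The new seed slider leans on gr.Slider's randomize flag, which draws a uniform value in [minimum, maximum] on each page load, complementing the update_seed path above; minimal usage:

    import gradio as gr

    max_64_bit_int = 2**63 - 1

    with gr.Blocks() as demo:
        # randomize=True re-rolls the slider value each time the UI loads.
        seed = gr.Slider(label="Seed", minimum=0, maximum=max_64_bit_int, step=1, randomize=True)
    # demo.launch()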