import argparse
import ast
import copy
import os
import random
import time

import einops
import gradio as gr
import numpy as np
import spaces
import torch
from gradio_imageslider import ImageSlider
from huggingface_hub import hf_hub_download
from PIL import Image

from CKPT_PTH import LLAVA_MODEL_PATH
from llava.llava_agent import LLavaAgent
from SUPIR.util import HWC3, upscale_image, fix_resize, convert_dtype, create_SUPIR_model, load_QF_ckpt

# Download the checkpoints needed by SUPIR: the OpenCLIP ViT-bigG text encoder,
# the SDXL base model (with the 0.9 VAE), the two SUPIR checkpoints (Q and F),
# and the Juggernaut XL Lightning model.
hf_hub_download(repo_id="laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", filename="open_clip_pytorch_model.bin", local_dir="laion_CLIP-ViT-bigG-14-laion2B-39B-b160k")
hf_hub_download(repo_id="camenduru/SUPIR", filename="sd_xl_base_1.0_0.9vae.safetensors", local_dir="yushan777_SUPIR")
hf_hub_download(repo_id="camenduru/SUPIR", filename="SUPIR-v0F.ckpt", local_dir="yushan777_SUPIR")
hf_hub_download(repo_id="camenduru/SUPIR", filename="SUPIR-v0Q.ckpt", local_dir="yushan777_SUPIR")
hf_hub_download(repo_id="RunDiffusion/Juggernaut-XL-Lightning", filename="Juggernaut_RunDiffusionPhoto2_Lightning_4Steps.safetensors", local_dir="RunDiffusion_Juggernaut-XL-Lightning")

parser = argparse.ArgumentParser()
parser.add_argument("--opt", type=str, default='options/SUPIR_v0.yaml')
parser.add_argument("--ip", type=str, default='127.0.0.1')
parser.add_argument("--port", type=int, default=6688)
parser.add_argument("--no_llava", action='store_true', default=True)  # LLaVA is disabled in this space
parser.add_argument("--use_image_slider", action='store_true', default=True)
parser.add_argument("--log_history", action='store_true', default=False)
parser.add_argument("--loading_half_params", action='store_true', default=False)
parser.add_argument("--use_tile_vae", action='store_true', default=True)  # tiled VAE keeps VRAM usage bounded
parser.add_argument("--encoder_tile_size", type=int, default=512)
parser.add_argument("--decoder_tile_size", type=int, default=64)
parser.add_argument("--load_8bit_llava", action='store_true', default=False)
args = parser.parse_args()
use_llava = not args.no_llava
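# Example launch, assuming this file is saved as app.py (every flag above has a
# default, so a bare `python app.py` works too):
#   python app.py --opt options/SUPIR_v0.yaml --loading_half_params --log_history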
# Place SUPIR and LLaVA on separate GPUs when two are available; fall back to a
# shared GPU, then to CPU.
if torch.cuda.device_count() >= 2:
    SUPIR_device = 'cuda:0'
    LLaVA_device = 'cuda:1'
elif torch.cuda.device_count() == 1:
    SUPIR_device = 'cuda:0'
    LLaVA_device = 'cuda:0'
else:
    SUPIR_device = 'cpu'
    LLaVA_device = 'cpu'

# Load SUPIR
model, default_setting = create_SUPIR_model(args.opt, SUPIR_sign='Q', load_default_setting=True)
if args.loading_half_params:
    model = model.half()
if args.use_tile_vae:
    model.init_tile_vae(encoder_tile_size=args.encoder_tile_size, decoder_tile_size=args.decoder_tile_size)
model = model.to(SUPIR_device)
model.first_stage_model.denoise_encoder_s1 = copy.deepcopy(model.first_stage_model.denoise_encoder)
model.current_model = 'v0-Q'
ckpt_Q, ckpt_F = load_QF_ckpt(args.opt)

# Load LLaVA
if use_llava:
    llava_agent = LLavaAgent(LLAVA_MODEL_PATH, device=LLaVA_device, load_8bit=args.load_8bit_llava, load_4bit=False)
else:
    llava_agent = None


def update_seed(is_randomize_seed, seed):
    if is_randomize_seed:
        return random.randint(0, 2147483647)
    return seed


def check(input_image):
    if input_image is None:
        raise gr.Error("Please provide an image to restore.")


def reset_feedback():
    return 3, ''


@spaces.GPU(duration=240)
def stage1_process(input_image, gamma_correction):
    print('Start stage1_process')
    if torch.cuda.device_count() == 0:
        gr.Warning('Set this space to GPU config to make it work.')
        return None, None
    torch.cuda.set_device(SUPIR_device)
    LQ = HWC3(input_image)
    LQ = fix_resize(LQ, 512)
    # stage1: rescale to [-1, 1] and denoise
    LQ = np.array(LQ) / 255 * 2 - 1
    LQ = torch.tensor(LQ, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0).to(SUPIR_device)[:, :3, :, :]
    LQ = model.batchify_denoise(LQ, is_stage1=True)
    LQ = (LQ[0].permute(1, 2, 0) * 127.5 + 127.5).cpu().numpy().round().clip(0, 255).astype(np.uint8)
    # gamma correction
    LQ = LQ / 255.0
    LQ = np.power(LQ, gamma_correction)
    LQ *= 255.0
    LQ = LQ.round().clip(0, 255).astype(np.uint8)
    print('End stage1_process')
    return LQ, gr.update(visible=True)
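# Value conventions used by the two stages above and below:
#   - uint8 images live in [0, 255];
#   - the diffusion model works on float tensors in [-1, 1], hence `/ 255 * 2 - 1`
#     on the way in and `* 127.5 + 127.5` on the way out;
#   - gamma correction maps v to 255 * (v / 255) ** gamma, so gamma = 1.0 is the
#     identity, gamma > 1 darkens the midtones and gamma < 1 brightens them.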
@spaces.GPU(duration=240)
def llava_process(input_image, temperature, top_p, qs=None):
    print('Start llava_process')
    if torch.cuda.device_count() == 0:
        gr.Warning('Set this space to GPU config to make it work.')
        return 'Set this space to GPU config to make it work.'
    torch.cuda.set_device(LLaVA_device)
    if use_llava:
        LQ = HWC3(input_image)
        LQ = Image.fromarray(LQ.astype('uint8'))
        captions = llava_agent.gen_image_caption([LQ], temperature=temperature, top_p=top_p, qs=qs)
    else:
        captions = ['LLaVA is not available. Please add text manually.']
    print('End llava_process')
    return captions[0]


@spaces.GPU(duration=240)
def stage2_process(
        noisy_image,
        denoise_image,
        prompt,
        a_prompt,
        n_prompt,
        num_samples,
        upscale,
        edm_steps,
        s_stage1,
        s_stage2,
        s_cfg,
        randomize_seed,
        seed,
        s_churn,
        s_noise,
        color_fix_type,
        diff_dtype,
        ae_dtype,
        gamma_correction,
        linear_CFG,
        linear_s_stage2,
        spt_linear_CFG,
        spt_linear_s_stage2,
        model_select
):
    start = time.time()
    print('Start stage2_process')
    if torch.cuda.device_count() == 0:
        gr.Warning('Set this space to GPU config to make it work.')
        # one value per output: result_slider, result_gallery, restore_information, event_id
        return None, None, None, None
    input_image = noisy_image if denoise_image is None else denoise_image
    torch.cuda.set_device(SUPIR_device)
    event_id = str(time.time_ns())
    event_dict = {'event_id': event_id, 'localtime': time.ctime(), 'prompt': prompt, 'a_prompt': a_prompt,
                  'n_prompt': n_prompt, 'num_samples': num_samples, 'upscale': upscale, 'edm_steps': edm_steps,
                  's_stage1': s_stage1, 's_stage2': s_stage2, 's_cfg': s_cfg, 'seed': seed, 's_churn': s_churn,
                  's_noise': s_noise, 'color_fix_type': color_fix_type, 'diff_dtype': diff_dtype,
                  'ae_dtype': ae_dtype, 'gamma_correction': gamma_correction, 'linear_CFG': linear_CFG,
                  'linear_s_stage2': linear_s_stage2, 'spt_linear_CFG': spt_linear_CFG,
                  'spt_linear_s_stage2': spt_linear_s_stage2, 'model_select': model_select}

    # Swap between the Quality (v0-Q) and Fidelity (v0-F) checkpoints on demand.
    if model_select != model.current_model:
        print('load ' + model_select)
        if model_select == 'v0-Q':
            model.load_state_dict(ckpt_Q, strict=False)
        elif model_select == 'v0-F':
            model.load_state_dict(ckpt_F, strict=False)
        model.current_model = model_select

    input_image = HWC3(input_image)
    input_image = upscale_image(input_image, upscale, unit_resolution=32, min_size=1024)

    # Gamma-correct, then rescale to [-1, 1] for the diffusion model.
    LQ = np.array(input_image) / 255.0
    LQ = np.power(LQ, gamma_correction)
    LQ *= 255.0
    LQ = LQ.round().clip(0, 255).astype(np.uint8)
    LQ = LQ / 255 * 2 - 1
    LQ = torch.tensor(LQ, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0).to(SUPIR_device)[:, :3, :, :]
    if use_llava:
        captions = [prompt]
    else:
        # the prompt textbox is hidden when LLaVA is disabled, so it is ignored here
        captions = ['']

    model.ae_dtype = convert_dtype(ae_dtype)
    model.model.dtype = convert_dtype(diff_dtype)

    samples = model.batchify_sample(LQ, captions, num_steps=edm_steps, restoration_scale=s_stage1, s_churn=s_churn,
                                    s_noise=s_noise, cfg_scale=s_cfg, control_scale=s_stage2, seed=seed,
                                    num_samples=num_samples, p_p=a_prompt, n_p=n_prompt, color_fix_type=color_fix_type,
                                    use_linear_CFG=linear_CFG, use_linear_control_scale=linear_s_stage2,
                                    cfg_scale_start=spt_linear_CFG, control_scale_start=spt_linear_s_stage2)

    x_samples = (einops.rearrange(samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().round().clip(
        0, 255).astype(np.uint8)
    results = [x_samples[i] for i in range(num_samples)]

    if args.log_history:
        os.makedirs(f'./history/{event_id[:5]}/{event_id[5:]}', exist_ok=True)
        with open(f'./history/{event_id[:5]}/{event_id[5:]}/logs.txt', 'w') as f:
            f.write(str(event_dict))
        Image.fromarray(input_image).save(f'./history/{event_id[:5]}/{event_id[5:]}/LQ.png')
        for i, result in enumerate(results):
            Image.fromarray(result).save(f'./history/{event_id[:5]}/{event_id[5:]}/HQ_{i}.png')

    print('End stage2_process')
    end = time.time()
    # Format the elapsed time as "h, min, sec".
    secondes = int(end - start)
    hours, remainder = divmod(secondes, 3600)
    minutes, secondes = divmod(remainder, 60)
    information = ("Restart the process to get another result. " if randomize_seed else "") + \
        "The image(s) have been generated in " + \
        ((str(hours) + " h, ") if hours != 0 else "") + \
        ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
        str(secondes) + " sec."
    return [input_image] + results, [input_image] + results, gr.update(value=information, visible=True), event_id


def load_and_reset(param_setting):
    print('Start load_and_reset')
    if torch.cuda.device_count() == 0:
        gr.Warning('Set this space to GPU config to make it work.')
        return None, None, None, None, None, None, None, None, None, None, None, None, None, None
    edm_steps = default_setting.edm_steps
    s_stage2 = 1.0
    s_stage1 = -1.0
    s_churn = 5
    s_noise = 1.003
    a_prompt = 'Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - ' \
               'realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore ' \
               'detailing, hyper sharpness, perfect without deformations.'
    n_prompt = 'painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, ' \
               '3D render, unreal engine, blurring, dirty, messy, worst quality, low quality, frames, watermark, ' \
               'signature, jpeg artifacts, deformed, lowres, over-smooth'
    color_fix_type = 'Wavelet'
    spt_linear_s_stage2 = 0.0
    linear_s_stage2 = False
    linear_CFG = True
    if param_setting == "Quality":
        s_cfg = default_setting.s_cfg_Quality
        spt_linear_CFG = default_setting.spt_linear_CFG_Quality
        model_select = "v0-Q"
    elif param_setting == "Fidelity":
        s_cfg = default_setting.s_cfg_Fidelity
        spt_linear_CFG = default_setting.spt_linear_CFG_Fidelity
        model_select = "v0-F"
    else:
        raise NotImplementedError
    gr.Info('The parameters are reset.')
    print('End load_and_reset')
    return edm_steps, s_cfg, s_stage2, s_stage1, s_churn, s_noise, a_prompt, n_prompt, color_fix_type, linear_CFG, \
        linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select


def submit_feedback(event_id, fb_score, fb_text):
    if args.log_history:
        with open(f'./history/{event_id[:5]}/{event_id[5:]}/logs.txt', 'r') as f:
            # the log is a plain Python dict literal written with str(); parse it safely
            event_dict = ast.literal_eval(f.read())
        event_dict['feedback'] = {'score': fb_score, 'text': fb_text}
        with open(f'./history/{event_id[:5]}/{event_id[5:]}/logs.txt', 'w') as f:
            f.write(str(event_dict))
        return 'Feedback submitted successfully, thank you for your comments!'
    else:
        return 'Submit failed: the server is not set to log history.'
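# On-disk layout when --log_history is set (event_id is a 19-digit nanosecond
# timestamp split into its first 5 digits and the remainder; digits illustrative):
#   ./history/17123/45678901234567/logs.txt  - run parameters, later extended with feedback
#   ./history/17123/45678901234567/LQ.png    - the (upscaled) input image
#   ./history/17123/45678901234567/HQ_0.png  - the restored output(s)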
title_html = """
<p><b>SUPIR</b> applies model scaling to photo-realistic image restoration. It is still a
research project under testing and is not yet a stable commercial product. LLaVA is not
integrated in this demo. The content added by SUPIR is imagined, not real-world
information; the aim of SUPIR is beauty and illustration.</p>
"""

# Terms / credits markdown rendered under the results; the original text is not
# reproduced in this source.
claim_md = ""

with gr.Blocks(title="SUPIR") as interface:
    # Warn CPU-only deployments that SUPIR cannot run without a GPU.
    if torch.cuda.device_count() == 0:
        with gr.Row():
            gr.HTML("""
            ⚠️ To use SUPIR, duplicate this space and assign a GPU with 30 GB of VRAM.
            You can't use SUPIR directly here because this space runs on a CPU, which is
            not enough for SUPIR. This is a template space; please provide feedback if
            you run into issues.
            """)
""") gr.HTML(title_html) with gr.Group(): input_image = gr.Image(label="Input", show_label=True, type="numpy", height=600, elem_id="image-input") prompt = gr.Textbox(label="Image description for LlaVa", value="", placeholder="A person, walking, in a town, Summer, photorealistic", lines=3, visible=False) upscale = gr.Radio([1, 2, 3, 4, 5, 6, 7, 8], label="Upscale factor", info="Resolution x1 to x8", value=2, interactive=True) a_prompt = gr.Textbox(label="Image description", info="Help the AI to understand what the image represents", value='Cinematic, High Contrast, highly detailed, taken using a Canon EOS R ' 'camera, hyper detailed photo - realistic maximum detail, 32k, Color ' 'Grading, ultra HD, extreme meticulous detailing, skin pore detailing, ' 'hyper sharpness, perfect without deformations.', lines=3) a_prompt_hint = gr.HTML("You can use a LlaVa space to auto-generate the description of your image.") with gr.Accordion("Pre-denoising (optional)", open=False): gamma_correction = gr.Slider(label="Gamma Correction", minimum=0.1, maximum=2.0, value=1.0, step=0.1) denoise_button = gr.Button(value="Pre-denoise") denoise_image = gr.Image(label="Denoised image", show_label=True, type="numpy", height=600, elem_id="image-s1") denoise_information = gr.HTML(value="If present, the denoised image will be used for the restoration instead of the input image.", visible=False) with gr.Accordion("LLaVA options", open=False, visible=False): temperature = gr.Slider(label="Temperature", info = "lower=Always similar, higher=More creative", minimum=0., maximum=1.0, value=0.2, step=0.1) top_p = gr.Slider(label="Top P", info = "Percent of tokens shortlisted", minimum=0., maximum=1.0, value=0.7, step=0.1) qs = gr.Textbox(label="Question", info="Ask LLaVa what description you want", value="Describe the image and its style in a very detailed manner. 
The image is a realistic photography, not an art painting.", lines=3) with gr.Accordion("Advanced options", open=False): n_prompt = gr.Textbox(label="Anti image description", info="Disambiguate by listing what the image does NOT represent", value='painting, oil painting, illustration, drawing, art, sketch, anime, ' 'cartoon, CG Style, 3D render, unreal engine, blurring, bokeh, ugly, dirty, messy, ' 'worst quality, low quality, frames, watermark, signature, jpeg artifacts, ' 'deformed, lowres, over-smooth', lines=3) edm_steps = gr.Slider(label="Steps", info="lower=faster, higher=more details", minimum=1, maximum=200, value=default_setting.edm_steps if torch.cuda.device_count() > 0 else 1, step=1) num_samples = gr.Slider(label="Num Samples", info="Number of generated results; I discourage to increase because the process is limited to 4 min", minimum=1, maximum=4 if not args.use_image_slider else 1 , value=1, step=1) with gr.Row(): with gr.Column(): model_select = gr.Radio(["v0-Q", "v0-F"], label="Model Selection", info="Q=Quality, F=Fidelity", value="v0-Q", interactive=True) with gr.Column(): color_fix_type = gr.Radio(["None", "AdaIn", "Wavelet"], label="Color-Fix Type", info="AdaIn=Adaptive Instance Normalization, Wavelet=For JPEG artifacts", value="Wavelet", interactive=True) s_cfg = gr.Slider(label="Text Guidance Scale", info="lower=follow the image, higher=follow the prompt", minimum=1.0, maximum=15.0, value=default_setting.s_cfg_Quality if torch.cuda.device_count() > 0 else 1.0, step=0.1) s_stage2 = gr.Slider(label="Restoring Guidance Strength", minimum=0., maximum=1., value=1., step=0.05) s_stage1 = gr.Slider(label="Pre-denoising Guidance Strength", minimum=-1.0, maximum=6.0, value=-1.0, step=1.0) s_churn = gr.Slider(label="S-Churn", minimum=0, maximum=40, value=5, step=1) s_noise = gr.Slider(label="S-Noise", minimum=1.0, maximum=1.1, value=1.003, step=0.001) with gr.Row(): with gr.Column(): linear_CFG = gr.Checkbox(label="Linear CFG", value=True) spt_linear_CFG = gr.Slider(label="CFG Start", minimum=1.0, maximum=9.0, value=default_setting.spt_linear_CFG_Quality if torch.cuda.device_count() > 0 else 1.0, step=0.5) with gr.Column(): linear_s_stage2 = gr.Checkbox(label="Linear Restoring Guidance", value=False) spt_linear_s_stage2 = gr.Slider(label="Guidance Start", minimum=0., maximum=1., value=0., step=0.05) with gr.Column(): diff_dtype = gr.Radio(['fp32', 'fp16', 'bf16'], label="Diffusion Data Type", value="fp16", interactive=True) with gr.Column(): ae_dtype = gr.Radio(['fp32', 'bf16'], label="Auto-Encoder Data Type", value="bf16", interactive=True) randomize_seed = gr.Checkbox(label = "\U0001F3B2 Randomize seed", value = True, info = "If checked, result is always different") seed = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True) with gr.Group(): param_setting = gr.Radio(["Quality", "Fidelity"], interactive=True, label="Presetting", value="Quality") restart_button = gr.Button(value="Apply presetting") with gr.Group(): llave_button = gr.Button(value="Generate description by LlaVa (disabled)", visible=False) diffusion_button = gr.Button(value="🚀 Upscale/Restore", variant = "primary", elem_id="process_button") restore_information = gr.HTML(value="Restart the process to get another result.", visible=False) result_slider = ImageSlider(label='Output', show_label=True, elem_id="slider1") result_gallery = gr.Gallery(label='Output', show_label=True, elem_id="gallery1") with gr.Accordion("Feedback", open=True, visible=False): fb_score = gr.Slider(label="Feedback 
Score", minimum=1, maximum=5, value=3, step=1, interactive=True) fb_text = gr.Textbox(label="Feedback Text", value="", placeholder='Please enter your feedback here.') submit_button = gr.Button(value="Submit Feedback") with gr.Row(): gr.Markdown(claim_md) event_id = gr.Textbox(label="Event ID", value="", visible=False) denoise_button.click(fn = check, inputs = [ input_image ], outputs = [], queue = False, show_progress = False).success(fn = stage1_process, inputs = [ input_image, gamma_correction ], outputs=[ denoise_image, denoise_information ]) llave_button.click(fn = check, inputs = [ denoise_image ], outputs = [], queue = False, show_progress = False).success(fn = llave_process, inputs = [ denoise_image, temperature, top_p, qs ], outputs = [ prompt ]) diffusion_button.click(fn = update_seed, inputs = [ randomize_seed, seed ], outputs = [ seed ], queue = False, show_progress = False).then(fn = check, inputs = [ input_image ], outputs = [], queue = False, show_progress = False).success(fn = reset_feedback, inputs = [], outputs = [ fb_score, fb_text ], queue = False, show_progress = False).success(fn=stage2_process, inputs = [ input_image, denoise_image, prompt, a_prompt, n_prompt, num_samples, upscale, edm_steps, s_stage1, s_stage2, s_cfg, randomize_seed, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction, linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select ], outputs = [ result_slider, result_gallery, restore_information, event_id ]) restart_button.click(fn = load_and_reset, inputs = [ param_setting ], outputs = [ edm_steps, s_cfg, s_stage2, s_stage1, s_churn, s_noise, a_prompt, n_prompt, color_fix_type, linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select ]) submit_button.click(fn = submit_feedback, inputs = [ event_id, fb_score, fb_text ], outputs = [ fb_text ]) interface.queue(10).launch()