Commit 22d45a3 committed by Fabrice-TIERCELIN
1 Parent(s): 3e69253

Pre-downscale factor

gradio_demo.py CHANGED (+16 -10)
@@ -125,6 +125,7 @@ def stage2_process(
         a_prompt,
         n_prompt,
         num_samples,
+        downscale,
         upscale,
         edm_steps,
         s_stage1,
@@ -152,6 +153,9 @@ def stage2_process(
         gr.Warning('Set this space to GPU config to make it work.')
         return None, None, None
     input_image = noisy_image if denoise_image is None else denoise_image
+    if 1 < downscale:
+        input_height, input_width, input_channel = np.array(input_image).shape
+        input_image = input_image.resize((input_width // downscale, input_height // downscale), Image.LANCZOS)
     torch.cuda.set_device(SUPIR_device)
     event_id = str(time.time_ns())
     event_dict = {'event_id': event_id, 'localtime': time.ctime(), 'prompt': prompt, 'a_prompt': a_prompt,
@@ -320,18 +324,18 @@ with gr.Blocks(title="SUPIR") as interface:
     gr.HTML(title_html)
 
     input_image = gr.Image(label="Input", show_label=True, type="numpy", height=600, elem_id="image-input")
-    prompt = gr.Textbox(label="Image description for LlaVa", value="", placeholder="A person, walking, in a town, Summer, photorealistic", lines=3, visible=False)
-    upscale = gr.Radio([1, 2, 3, 4, 5, 6, 7, 8], label="Upscale factor", info="Resolution x1 to x8", value=2, interactive=True)
-    a_prompt = gr.Textbox(label="Image description (optional)",
-                          info="Help the AI understand what the image represents; describe as much as possible",
-                          value='Cinematic, High Contrast, highly detailed, taken using a Canon EOS R '
-                                'camera, hyper detailed photo - realistic maximum detail, 32k, Color '
-                                'Grading, ultra HD, extreme meticulous detailing, skin pore detailing, '
-                                'hyper sharpness, perfect without deformations.',
-                          lines=3)
     with gr.Group():
+        prompt = gr.Textbox(label="Image description for LlaVa", value="", placeholder="A person, walking, in a town, Summer, photorealistic", lines=3, visible=False)
+        upscale = gr.Radio([1, 2, 3, 4, 5, 6, 7, 8], label="Upscale factor", info="Resolution x1 to x8", value=2, interactive=True)
+        a_prompt = gr.Textbox(label="Image description",
+                              info="Help the AI understand what the image represents; describe as much as possible",
+                              value='Cinematic, High Contrast, highly detailed, taken using a Canon EOS R '
+                                    'camera, hyper detailed photo - realistic maximum detail, 32k, Color '
+                                    'Grading, ultra HD, extreme meticulous detailing, skin pore detailing, '
+                                    'hyper sharpness, perfect without deformations.',
+                              lines=3)
         a_prompt_hint = gr.HTML("You can use a <a href='"'https://huggingface.co/spaces/MaziyarPanahi/llava-llama-3-8b'"'>LlaVa space</a> to auto-generate the description of your image.")
-
+        output_format = gr.Radio(["png", "webp", "jpeg", "gif", "bmp"], label="Image format for result", info="File extention", value="png", interactive=True)
 
     with gr.Accordion("Pre-denoising (optional)", open=False):
         gamma_correction = gr.Slider(label="Gamma Correction", minimum=0.1, maximum=2.0, value=1.0, step=0.1)
@@ -355,6 +359,7 @@ with gr.Blocks(title="SUPIR") as interface:
         edm_steps = gr.Slider(label="Steps", info="lower=faster, higher=more details", minimum=1, maximum=200, value=default_setting.edm_steps if torch.cuda.device_count() > 0 else 1, step=1)
         num_samples = gr.Slider(label="Num Samples", info="Number of generated results", minimum=1, maximum=4 if not args.use_image_slider else 1
                                 , value=1, step=1)
+        downscale = gr.Radio([1, 2, 3, 4, 5, 6, 7, 8], label="Pre-downscale factor", info="Reducing blurred image reduce the process time", value=1, interactive=True)
         with gr.Row():
             with gr.Column():
                 model_select = gr.Radio(["v0-Q", "v0-F"], label="Model Selection", info="Q=Quality, F=Fidelity", value="v0-Q",
@@ -444,6 +449,7 @@ with gr.Blocks(title="SUPIR") as interface:
         a_prompt,
         n_prompt,
         num_samples,
+        downscale,
         upscale,
         edm_steps,
         s_stage1,