Fabrice-TIERCELIN committed
Commit 40f1ce7
1 Parent(s): 3f8059f

Format as input

Files changed (1):
  1. app.py +6 -5
app.py CHANGED
@@ -14,6 +14,7 @@ import math
 import time
 import random
 import spaces
+import re
 from huggingface_hub import hf_hub_download
 
 hf_hub_download(repo_id="laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", filename="open_clip_pytorch_model.bin", local_dir="laion_CLIP-ViT-bigG-14-laion2B-39B-b160k")
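Note: the context above fetches the CLIP checkpoint at import time. As a minimal standalone sketch (assuming only that huggingface_hub is installed), hf_hub_download returns the local path of the downloaded file and reuses an existing copy in local_dir rather than re-downloading:

    from huggingface_hub import hf_hub_download

    # Fetch the checkpoint once; later calls reuse the file in local_dir.
    ckpt_path = hf_hub_download(
        repo_id="laion/CLIP-ViT-bigG-14-laion2B-39B-b160k",
        filename="open_clip_pytorch_model.bin",
        local_dir="laion_CLIP-ViT-bigG-14-laion2B-39B-b160k",
    )
    print(ckpt_path)  # local path to open_clip_pytorch_model.bin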
@@ -86,7 +87,7 @@ def stage1_process(
         gr.Warning('Set this space to GPU config to make it work.')
         return None, None
     torch.cuda.set_device(SUPIR_device)
-    LQ = HWC3(np.array(Image.open(input_image)))
+    LQ = HWC3(Image.open(input_image))
     LQ = fix_resize(LQ, 512)
     # stage1
     LQ = np.array(LQ) / 255 * 2 - 1
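Note: the new line feeds a PIL image straight into HWC3. In the usual ControlNet-style utilities that helper asserts a uint8 NumPy array, so if this Space's HWC3 follows that convention the image still needs converting first. A simplified sketch of such a helper (the Space's own HWC3 may differ, e.g. in how it blends the alpha channel):

    import numpy as np
    from PIL import Image

    def hwc3(x: np.ndarray) -> np.ndarray:
        # Normalize a uint8 image to 3-channel height-width-channel layout.
        assert x.dtype == np.uint8
        if x.ndim == 2:
            x = x[:, :, None]                    # grayscale -> HWC, 1 channel
        if x.shape[2] == 1:
            x = np.concatenate([x] * 3, axis=2)  # replicate to RGB
        if x.shape[2] == 4:
            x = x[:, :, :3]                      # drop alpha (real HWC3 blends it)
        return x

    LQ = hwc3(np.array(Image.open("low_quality.png")))  # PIL -> array first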
@@ -170,7 +171,7 @@ def stage2_process(
     if noisy_image is None:
         output_format = "png"
     else:
-        output_format = noisy_image.format
+        output_format = re.sub(r"^.*\.([^\.]+)$", r"\1", noisy_image)
 
     if prompt is None:
         prompt = ""
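Note: the else branch now recovers the format from the file name rather than from PIL's .format metadata (which is None for images PIL did not read from a file), suggesting noisy_image arrives here as a path string. What the substitution does, on a hypothetical path (os.path.splitext is the stdlib equivalent):

    import os
    import re

    path = "uploads/noisy picture.JPEG"  # hypothetical input path

    # Capture everything after the last dot; r"\1" writes the group back.
    # A path with no dot comes back unchanged (the pattern does not match).
    ext = re.sub(r"^.*\.([^\.]+)$", r"\1", path)
    print(ext)                                    # JPEG
    print(os.path.splitext(path)[1].lstrip("."))  # JPEG, stdlib equivalent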
@@ -328,7 +329,7 @@ def restore(
     elif model_select == 'v0-F':
         model.load_state_dict(ckpt_F, strict=False)
     model.current_model = model_select
-    input_image = HWC3(np.array(input_image))
+    input_image = HWC3(input_image)
     input_image = upscale_image(input_image, upscale, unit_resolution=32,
                                 min_size=min_size)
 
@@ -379,7 +380,7 @@ def restore(
     print(information)
 
     # Only one image can be shown in the slider
-    return [input_image] + [results[0]], gr.update(format = output_format, value = results), gr.update(value = information, visible = True)
+    return [input_image] + [results[0]], gr.update(label="Downloadable results in *." + output_format + " format", format = output_format, value = results), gr.update(value = information, visible = True)
 
 def load_and_reset(param_setting):
     print('load_and_reset ==>>')
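Note: the return value now also relabels the results gallery so the chosen format is visible next to the download. gr.update builds a set of property changes that Gradio applies to whichever component is bound to that output slot; a minimal self-contained sketch, assuming a recent Gradio release where gr.Gallery accepts a format property controlling the file type of the served images:

    import gradio as gr

    def relabel(fmt):
        # Update the gallery's label and download format at runtime.
        return gr.update(label="Downloadable results in *." + fmt + " format",
                         format=fmt)

    with gr.Blocks() as demo:
        fmt = gr.Radio(["png", "webp", "jpeg"], value="png", label="Format")
        gallery = gr.Gallery(label="Results")
        fmt.change(relabel, inputs=fmt, outputs=gallery)

    demo.launch()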
@@ -474,7 +475,7 @@ with gr.Blocks() as interface:
     prompt_hint = gr.HTML("You can use a <a href='https://huggingface.co/spaces/MaziyarPanahi/llava-llama-3-8b'>LlaVa space</a> to auto-generate the description of your image.")
     upscale = gr.Radio([["x1", 1], ["x2", 2], ["x3", 3], ["x4", 4], ["x5", 5], ["x6", 6], ["x7", 7], ["x8", 8]], label="Upscale factor", info="Resolution x1 to x8", value=2, interactive=True)
     allocation = gr.Radio([["1 min", 1], ["2 min", 2], ["3 min", 3], ["4 min", 4], ["5 min", 5], ["6 min", 6], ["7 min", 7], ["8 min", 8]], label="GPU allocation time", info="lower=May abort run, higher=Quota penalty for next runs", value=6, interactive=True)
-    output_format = gr.Radio([["*.png", "png"], ["*.webp", "webp"], ["*.jpeg", "jpeg"], ["*.gif", "gif"], ["*.bmp", "bmp"]], label="Image format for result", info="File extension", value="png", interactive=True)
+    output_format = gr.Radio([["As input", "input"], ["*.png", "png"], ["*.webp", "webp"], ["*.jpeg", "jpeg"], ["*.gif", "gif"], ["*.bmp", "bmp"]], label="Image format for result", info="File extension", value="png", interactive=True)
 
     with gr.Accordion("Pre-denoising (optional)", open=False):
         gamma_correction = gr.Slider(label="Gamma Correction", info = "lower=lighter, higher=darker", minimum=0.1, maximum=2.0, value=1.0, step=0.1)
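Note: each [label, value] pair in gr.Radio shows the first element in the UI and passes the second to the handler, so the new "As input" choice delivers the sentinel value "input". Presumably the app resolves that sentinel to the extension recovered from the uploaded file, which is what the stage2_process change above provides; a hedged sketch of that glue (function and variable names here are illustrative, not the Space's actual ones):

    import re

    def resolve_format(choice, image_path):
        # "input" is the sentinel for "reuse the uploaded file's extension".
        if choice == "input":
            if image_path is None:
                return "png"  # nothing uploaded: fall back to a default
            return re.sub(r"^.*\.([^\.]+)$", r"\1", image_path).lower()
        return choice  # an explicit format picked in the radio

    print(resolve_format("input", "shot.WEBP"))  # webp
    print(resolve_format("jpeg", "shot.WEBP"))   # jpeg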
 