Fabrice-TIERCELIN committed on
Commit
b64e08d
1 Parent(s): 3fc78c3
Files changed (1) hide show
  1. app.py +10 -6
app.py CHANGED
@@ -139,10 +139,14 @@ def stage2_process(*args, **kwargs):
139
  return restore_in_Xmin(*args, **kwargs)
140
  except Exception as e:
141
  # NO_GPU_MESSAGE_INQUEUE
142
- print("gradio.exceptions.Error: 'No GPU is currently available for you after 60s'")
143
- print('str(type(e)) ' + str(type(e))) # <class 'gradio.exceptions.Error'>
144
- print('str(e) ' + str(e)) # 'No GPU is currently available for you after 60s'
145
- if str(e) == "'No GPU is currently available for you after 60s'":
 
 
 
 
146
  print('Exception identified!!!')
147
  #if str(type(e)) == "<class 'gradio.exceptions.Error'>":
148
  #print('Exception of name ' + type(e).__name__)
@@ -538,7 +542,7 @@ with gr.Blocks() as interface:
538
  with gr.Group():
539
  prompt = gr.Textbox(label="Image description", info="Help the AI understand what the image represents; describe as much as possible, especially the details we can't see on the original image; you can write in any language", value="", placeholder="A 33 years old man, walking, in the street, Santiago, morning, Summer, photorealistic", lines=3)
540
  prompt_hint = gr.HTML("You can use a <a href='"'https://huggingface.co/spaces/MaziyarPanahi/llava-llama-3-8b'"'>LlaVa space</a> to auto-generate the description of your image.")
541
- upscale = gr.Radio([["x1", 1], ["x2", 2], ["x3", 3], ["x4", 4], ["x5", 5], ["x6", 6], ["x7", 7], ["x8", 8], ["x9", 9], ["x10", 10], ["x12", 12]], label="Upscale factor", info="Resolution x1 to x10", value=2, interactive=True)
542
  output_format = gr.Radio([["As input", "input"], ["*.png", "png"], ["*.webp", "webp"], ["*.jpeg", "jpeg"], ["*.gif", "gif"], ["*.bmp", "bmp"]], label="Image format for result", info="File extention", value="input", interactive=True)
543
  allocation = gr.Radio([["1 min", 1], ["2 min", 2], ["3 min", 3], ["4 min", 4], ["5 min", 5], ["6 min", 6], ["7 min (discouraged)", 7], ["8 min (discouraged)", 8], ["9 min (discouraged)", 9], ["10 min (discouraged)", 10]], label="GPU allocation time", info="lower=May abort run, higher=Quota penalty for next runs", value=5, interactive=True)
544
 
@@ -573,7 +577,7 @@ with gr.Blocks() as interface:
573
  model_select = gr.Radio([["💃 Quality (v0-Q)", "v0-Q"], ["🎯 Fidelity (v0-F)", "v0-F"]], label="Model Selection", info="Pretrained model", value="v0-Q",
574
  interactive=True)
575
  with gr.Column():
576
- color_fix_type = gr.Radio([["None", "None"], ["AdaIn (improve as a photo)", "AdaIn"], ["Wavelet (for JPEG artifacts)", "Wavelet"]], label="Color-Fix Type", info="AdaIn=Improve following a style, Wavelet=For JPEG artifacts", value="Wavelet",
577
  interactive=True)
578
  s_cfg = gr.Slider(label="Text Guidance Scale", info="lower=follow the image, higher=follow the prompt", minimum=1.0, maximum=15.0,
579
  value=default_setting.s_cfg_Quality if torch.cuda.device_count() > 0 else 1.0, step=0.1)
 
139
  return restore_in_Xmin(*args, **kwargs)
140
  except Exception as e:
141
  # NO_GPU_MESSAGE_INQUEUE
142
+ print("gradio.exceptions.Error 'No GPU is currently available for you after 60s'")
143
+ print('str(type(e)): ' + str(type(e))) # <class 'gradio.exceptions.Error'>
144
+ print('str(e): ' + str(e)) # You have exceeded your GPU quota...
145
+ try:
146
+ print('e.message: ' + e.message) # No GPU is currently available for you after 60s
147
+ except Exception as e2:
148
+ print('Failure')
149
+ if str(e).startswith("No GPU is currently available for you after 60s"):
150
  print('Exception identified!!!')
151
  #if str(type(e)) == "<class 'gradio.exceptions.Error'>":
152
  #print('Exception of name ' + type(e).__name__)
 
542
  with gr.Group():
543
  prompt = gr.Textbox(label="Image description", info="Help the AI understand what the image represents; describe as much as possible, especially the details we can't see on the original image; you can write in any language", value="", placeholder="A 33 years old man, walking, in the street, Santiago, morning, Summer, photorealistic", lines=3)
544
  prompt_hint = gr.HTML("You can use a <a href='"'https://huggingface.co/spaces/MaziyarPanahi/llava-llama-3-8b'"'>LlaVa space</a> to auto-generate the description of your image.")
545
+ upscale = gr.Radio([["x1", 1], ["x2", 2], ["x3", 3], ["x4", 4], ["x5", 5], ["x6", 6], ["x7", 7], ["x8", 8], ["x9", 9], ["x10", 10]], label="Upscale factor", info="Resolution x1 to x10", value=2, interactive=True)
546
  output_format = gr.Radio([["As input", "input"], ["*.png", "png"], ["*.webp", "webp"], ["*.jpeg", "jpeg"], ["*.gif", "gif"], ["*.bmp", "bmp"]], label="Image format for result", info="File extention", value="input", interactive=True)
547
  allocation = gr.Radio([["1 min", 1], ["2 min", 2], ["3 min", 3], ["4 min", 4], ["5 min", 5], ["6 min", 6], ["7 min (discouraged)", 7], ["8 min (discouraged)", 8], ["9 min (discouraged)", 9], ["10 min (discouraged)", 10]], label="GPU allocation time", info="lower=May abort run, higher=Quota penalty for next runs", value=5, interactive=True)
548
 
 
577
  model_select = gr.Radio([["💃 Quality (v0-Q)", "v0-Q"], ["🎯 Fidelity (v0-F)", "v0-F"]], label="Model Selection", info="Pretrained model", value="v0-Q",
578
  interactive=True)
579
  with gr.Column():
580
+ color_fix_type = gr.Radio([["None", "None"], ["AdaIn (improve as a photo)", "AdaIn"], ["Wavelet (for JPEG artifacts)", "Wavelet"]], label="Color-Fix Type", info="AdaIn=Improve following a style, Wavelet=For JPEG artifacts", value="AdaIn",
581
  interactive=True)
582
  s_cfg = gr.Slider(label="Text Guidance Scale", info="lower=follow the image, higher=follow the prompt", minimum=1.0, maximum=15.0,
583
  value=default_setting.s_cfg_Quality if torch.cuda.device_count() > 0 else 1.0, step=0.1)