Fabrice-TIERCELIN
committed on
Commit
•
39d2e5b
1
Parent(s):
75a802d
@spaces.GPU(duration=120)
Browse files- gradio_demo.py +4 -4
gradio_demo.py
CHANGED
@@ -67,7 +67,7 @@ if torch.cuda.device_count() > 0:
|
|
67 |
else:
|
68 |
llava_agent = None
|
69 |
|
70 |
-
@spaces.GPU
|
71 |
def stage1_process(input_image, gamma_correction):
|
72 |
if torch.cuda.device_count() == 0:
|
73 |
gr.Warning('Set this space to GPU config to make it work.')
|
@@ -87,7 +87,7 @@ def stage1_process(input_image, gamma_correction):
|
|
87 |
LQ = LQ.round().clip(0, 255).astype(np.uint8)
|
88 |
return LQ
|
89 |
|
90 |
-
@spaces.GPU
|
91 |
def llave_process(input_image, temperature, top_p, qs=None):
|
92 |
if torch.cuda.device_count() == 0:
|
93 |
gr.Warning('Set this space to GPU config to make it work.')
|
@@ -101,7 +101,7 @@ def llave_process(input_image, temperature, top_p, qs=None):
|
|
101 |
captions = ['LLaVA is not available. Please add text manually.']
|
102 |
return captions[0]
|
103 |
|
104 |
-
@spaces.GPU
|
105 |
def stage2_process(input_image, prompt, a_prompt, n_prompt, num_samples, upscale, edm_steps, s_stage1, s_stage2,
|
106 |
s_cfg, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction,
|
107 |
linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select):
|
@@ -165,7 +165,7 @@ def stage2_process(input_image, prompt, a_prompt, n_prompt, num_samples, upscale
|
|
165 |
Image.fromarray(result).save(f'./history/{event_id[:5]}/{event_id[5:]}/HQ_{i}.png')
|
166 |
return [input_image] + results, event_id, 3, ''
|
167 |
|
168 |
-
@spaces.GPU
|
169 |
def load_and_reset(param_setting):
|
170 |
if torch.cuda.device_count() == 0:
|
171 |
gr.Warning('Set this space to GPU config to make it work.')
|
|
|
67 |
else:
|
68 |
llava_agent = None
|
69 |
|
70 |
+
@spaces.GPU(duration=120)
|
71 |
def stage1_process(input_image, gamma_correction):
|
72 |
if torch.cuda.device_count() == 0:
|
73 |
gr.Warning('Set this space to GPU config to make it work.')
|
|
|
87 |
LQ = LQ.round().clip(0, 255).astype(np.uint8)
|
88 |
return LQ
|
89 |
|
90 |
+
@spaces.GPU(duration=120)
|
91 |
def llave_process(input_image, temperature, top_p, qs=None):
|
92 |
if torch.cuda.device_count() == 0:
|
93 |
gr.Warning('Set this space to GPU config to make it work.')
|
|
|
101 |
captions = ['LLaVA is not available. Please add text manually.']
|
102 |
return captions[0]
|
103 |
|
104 |
+
@spaces.GPU(duration=120)
|
105 |
def stage2_process(input_image, prompt, a_prompt, n_prompt, num_samples, upscale, edm_steps, s_stage1, s_stage2,
|
106 |
s_cfg, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction,
|
107 |
linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select):
|
|
|
165 |
Image.fromarray(result).save(f'./history/{event_id[:5]}/{event_id[5:]}/HQ_{i}.png')
|
166 |
return [input_image] + results, event_id, 3, ''
|
167 |
|
168 |
+
@spaces.GPU(duration=120)
|
169 |
def load_and_reset(param_setting):
|
170 |
if torch.cuda.device_count() == 0:
|
171 |
gr.Warning('Set this space to GPU config to make it work.')
|