jon-pascal committed
Commit 45daab4
Parent(s): 932d159
Update app.py
fixing runtime error

app.py CHANGED
@@ -83,7 +83,6 @@ def update_seed(is_randomize_seed, seed):
 
 def reset():
     return [
-        None,
         None,
         "Cinematic, High Contrast, highly detailed, taken using a Canon EOS R camera, hyper detailed photo - realistic maximum detail, 32k, Color Grading, ultra HD, extreme meticulous detailing, skin pore detailing, hyper sharpness, perfect without deformations.",
         "painting, oil painting, illustration, drawing, art, sketch, anime, cartoon, CG Style, 3D render, unreal engine, blurring, aliasing, unsharp, weird textures, ugly, dirty, messy, worst quality, low quality, frames, watermark, signature, jpeg artifacts, deformed, lowres, over-smooth",
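reset() returns one default per output component of the reset button's event; the extra None is dropped here because the matching output no longer exists. A minimal sketch of that pattern with hypothetical components (not taken from this app):

import gradio as gr

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    upscale = gr.Slider(1, 8, value=2, label="Upscale factor")

    def reset_defaults():
        # One return value per component listed in `outputs`, in the same order.
        return ["", 2]

    gr.Button("Reset").click(fn=reset_defaults, inputs=[], outputs=[prompt, upscale])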
@@ -102,28 +101,81 @@ def reset():
         "Wavelet",
         "fp32",
         "fp32",
-        1.0,
         True,
         False,
         default_setting.spt_linear_CFG_Quality if torch.cuda.device_count() > 0 else 1.0,
         0.,
         "v0-Q",
-
+        4
     ]
 
 def check(input_image):
     if input_image is None:
         raise gr.Error("Please provide an image to restore.")
 
-def stage2_process(
+def stage2_process(
+    input_image,
+    prompt,
+    a_prompt,
+    n_prompt,
+    num_samples,
+    min_size,
+    downscale,
+    upscale,
+    edm_steps,
+    s_stage1,
+    s_stage2,
+    s_cfg,
+    randomize_seed,
+    seed,
+    s_churn,
+    s_noise,
+    color_fix_type,
+    diff_dtype,
+    ae_dtype,
+    gamma_correction,
+    linear_CFG,
+    linear_s_stage2,
+    spt_linear_CFG,
+    spt_linear_s_stage2,
+    model_select,
+    allocation
+):
     try:
-        return restore_in_Xmin(
+        return restore_in_Xmin(
+            input_image,
+            prompt,
+            a_prompt,
+            n_prompt,
+            num_samples,
+            min_size,
+            downscale,
+            upscale,
+            edm_steps,
+            s_stage1,
+            s_stage2,
+            s_cfg,
+            randomize_seed,
+            seed,
+            s_churn,
+            s_noise,
+            color_fix_type,
+            diff_dtype,
+            ae_dtype,
+            gamma_correction,
+            linear_CFG,
+            linear_s_stage2,
+            spt_linear_CFG,
+            spt_linear_s_stage2,
+            model_select,
+            allocation
+        )
     except Exception as e:
         print(f"Exception occurred: {str(e)}")
         raise e
 
 def restore_in_Xmin(
-
+    input_image,
     prompt,
     a_prompt,
     n_prompt,
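The rewritten stage2_process lists every UI value explicitly and forwards them unchanged to restore_in_Xmin inside a try/except, so the handler signature lines up one-to-one with the event's inputs list and any runtime error is printed before being re-raised. A trimmed, self-contained sketch of the same wrapper pattern (stand-in names, only a few parameters):

def restore_stub(image_path, prompt, upscale):
    # Stand-in for restore_in_Xmin, just to keep the sketch runnable.
    return f"would restore {image_path} at x{upscale} with prompt: {prompt}"

def stage2_sketch(image_path, prompt, upscale):
    # Gradio passes the `inputs` component values positionally, so the
    # parameter order here must match the order of that list.
    try:
        return restore_stub(image_path, prompt, upscale)
    except Exception as e:
        print(f"Exception occurred: {str(e)}")
        raise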
@@ -151,7 +203,7 @@ def restore_in_Xmin(
     allocation
 ):
     print("Starting image restoration process...")
-    input_format = re.sub(r"^.*\.([^\.]+)$", r"\1",
+    input_format = re.sub(r"^.*\.([^\.]+)$", r"\1", input_image)
 
     if input_format.lower() not in ['png', 'webp', 'jpg', 'jpeg', 'gif', 'bmp', 'heic']:
         gr.Warning('Invalid image format. Please use a supported image format.')
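The fix passes the input file path into the substitution so the format check has something to inspect: the regex keeps only the text after the last dot. A quick illustration of how that extraction behaves:

import re

def file_extension(path):
    # Keep only the text after the final dot; if there is no dot the path is
    # returned unchanged and then fails the allowed-format check.
    return re.sub(r"^.*\.([^\.]+)$", r"\1", path)

print(file_extension("photo.JPEG").lower())  # jpeg
print(file_extension("archive.tar.gz"))      # gz

os.path.splitext(path)[1] would be an equivalent way to get the suffix, if a dependency on re were undesirable.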
@@ -174,7 +226,7 @@ def restore_in_Xmin(
     a_prompt = prompt + a_prompt
     print("Final prompt: " + str(a_prompt))
 
-    denoise_image = np.array(Image.open(
+    denoise_image = np.array(Image.open(input_image))
 
     if 1 < downscale:
         input_height, input_width, input_channel = denoise_image.shape
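Here input_image is a file path (as a Gradio image input configured to return a filepath would supply), so the array is built by opening the file with PIL. A small standalone sketch of that loading step, with a hypothetical file name:

import numpy as np
from PIL import Image

def load_as_array(image_path):
    # Open the image from disk and convert it to an H x W x C uint8 array.
    with Image.open(image_path) as img:
        return np.array(img.convert("RGB"))

# pixels = load_as_array("example.png")  # hypothetical file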
@@ -184,7 +236,7 @@ def restore_in_Xmin(
 
     if torch.cuda.device_count() == 0:
         gr.Warning('Set this space to GPU config to make it work.')
-        return [
+        return [input_image, denoise_image], gr.update(label="Downloadable results", format=output_format, value=[denoise_image]), None, gr.update(visible=True)
 
     if model_select != model.current_model:
         print('Loading model: ' + model_select)
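On a CPU-only Space the handler now warns and returns early with one value per output: the before/after pair for the image slider, an update for the downloadable-results component, None for the information box, and a visibility update. A reduced sketch of an early return shaped like that (the actual output components are assumed, not shown in this diff):

import gradio as gr

def cpu_fallback(original_path, processed_path):
    # One value per output component of the event, in order.
    gr.Warning('Set this space to GPU config to make it work.')
    return (
        [original_path, processed_path],            # before/after image slider
        gr.update(label="Downloadable results",
                  value=[processed_path]),          # results gallery / files
        None,                                       # information box left empty
        gr.update(visible=True),                    # reveal a follow-up section
    )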
@@ -211,9 +263,9 @@ def restore_in_Xmin(
         10: restore_in_10min,
     }
 
-    restore_function = allocation_functions.get(allocation,
+    restore_function = allocation_functions.get(allocation, restore_in_4min)
     return restore_function(
-
+        input_image, denoise_image, prompt, a_prompt, n_prompt, num_samples, min_size, downscale, upscale,
         edm_steps, s_stage1, s_stage2, s_cfg, randomize_seed, seed, s_churn, s_noise, color_fix_type,
         diff_dtype, ae_dtype, gamma_correction, linear_CFG, linear_s_stage2, spt_linear_CFG,
         spt_linear_s_stage2, model_select, output_format, allocation
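The allocation value (now defaulting to 4 in reset()) selects a runner from the allocation_functions dict, with dict.get supplying restore_in_4min as the fallback for unexpected values. The same dispatch pattern in isolation, with stand-in functions:

def run_fast():
    return "fast path"

def run_slow():
    return "slow path"

# Map allocation minutes to runner functions; unknown keys fall back to run_fast.
allocation_functions = {1: run_fast, 10: run_slow}

def dispatch(allocation):
    return allocation_functions.get(allocation, run_fast)()

print(dispatch(10))  # slow path
print(dispatch(42))  # falls back to fast path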
@@ -260,8 +312,8 @@ def restore_in_10min(*args, **kwargs):
     return restore_on_gpu(*args, **kwargs)
 
 def restore_on_gpu(
-
-
+    input_image_path,
+    denoise_image,
     prompt,
     a_prompt,
     n_prompt,
@@ -295,6 +347,7 @@ def restore_on_gpu(
     torch.cuda.set_device(SUPIR_device)
 
     with torch.no_grad():
+        input_image = Image.open(input_image_path)
         input_image = upscale_image(input_image, upscale, unit_resolution=32, min_size=min_size)
         LQ = np.array(input_image) / 255.0
         LQ = np.power(LQ, gamma_correction)
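restore_on_gpu now receives the image path and opens the file itself inside torch.no_grad(), upscales it, then normalizes to [0, 1] and applies gamma correction with np.power. A standalone sketch of that preprocessing step (upscale_image and the sampler are omitted):

import numpy as np
from PIL import Image

def preprocess(image_path, gamma_correction=1.0):
    # Load, scale pixel values to [0, 1], and apply gamma correction,
    # mirroring the LQ preparation in restore_on_gpu.
    img = Image.open(image_path).convert("RGB")
    lq = np.array(img) / 255.0
    return np.power(lq, gamma_correction)

With the hidden component's default of 1.0, the power step leaves the values unchanged.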
@@ -331,7 +384,7 @@ def restore_on_gpu(
         "The image has been enhanced successfully."
 
     # Only one image can be shown in the slider
-    return [
+    return [input_image_path] + [results[0]], gr.update(label="Downloadable results", format=output_format, value=results), gr.update(value=information, visible=True), gr.update(visible=True)
 
 def load_and_reset(param_setting):
     print('Resetting parameters...')
@@ -418,6 +471,8 @@ with gr.Blocks() as interface:
             interactive=True
         )
 
+        gamma_correction = gr.Number(value=1.0, visible=False) # Hidden component with default value 1.0
+
         with gr.Accordion("Advanced options", open=False):
             a_prompt = gr.Textbox(
                 label="Additional image description",
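Since the gamma-correction control is no longer exposed in the UI, the commit keeps a hidden gr.Number(value=1.0, visible=False) so the existing handler signature still receives a gamma_correction value, and the two event bindings below pass it in the expected position. A minimal sketch of the same trick with a hypothetical handler:

import gradio as gr

def describe(image_path, gamma_correction):
    # The hidden component still supplies a value to the handler.
    return f"gamma = {gamma_correction}"

with gr.Blocks() as demo:
    image = gr.Image(type="filepath", label="Input")
    gamma_correction = gr.Number(value=1.0, visible=False)  # hidden, fixed default
    info = gr.Textbox(label="Info")
    gr.Button("Run").click(fn=describe, inputs=[image, gamma_correction], outputs=[info])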
@@ -655,7 +710,7 @@ with gr.Blocks() as interface:
             color_fix_type,
             diff_dtype,
             ae_dtype,
-
+            gamma_correction, # Use the hidden gamma_correction component
             linear_CFG,
             linear_s_stage2,
             spt_linear_CFG,
@@ -724,7 +779,7 @@ with gr.Blocks() as interface:
             color_fix_type,
             diff_dtype,
             ae_dtype,
-
+            gamma_correction, # Use the hidden gamma_correction component
             linear_CFG,
             linear_s_stage2,
             spt_linear_CFG,