Fabrice-TIERCELIN committed · Commit 2c1c611 · 1 Parent(s): 0ee5b24

Select model without GPU
app.py CHANGED
@@ -210,6 +210,19 @@ def restore_in_Xmin(
 
     denoise_image = HWC3(np.array(denoise_image))
 
+    torch.cuda.set_device(SUPIR_device)
+
+    if model_select != model.current_model:
+        print('load ' + model_select)
+        if model_select == 'v0-Q':
+            model.load_state_dict(ckpt_Q, strict=False)
+        elif model_select == 'v0-F':
+            model.load_state_dict(ckpt_F, strict=False)
+        model.current_model = model_select
+
+    model.ae_dtype = convert_dtype(ae_dtype)
+    model.model.dtype = convert_dtype(diff_dtype)
+
     # Allocation
     if allocation == 1:
         return restore_in_1min(
@@ -325,16 +338,6 @@ def restore(
     start = time.time()
     print('restore ==>>')
 
-    torch.cuda.set_device(SUPIR_device)
-
-    if model_select != model.current_model:
-        print('load ' + model_select)
-        if model_select == 'v0-Q':
-            model.load_state_dict(ckpt_Q, strict=False)
-        elif model_select == 'v0-F':
-            model.load_state_dict(ckpt_F, strict=False)
-        model.current_model = model_select
-
     input_image = upscale_image(input_image, upscale, unit_resolution=32, min_size=min_size)
     LQ = np.array(input_image) / 255.0
     LQ = np.power(LQ, gamma_correction)
@@ -344,9 +347,6 @@ def restore(
     LQ = torch.tensor(LQ, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0).to(SUPIR_device)[:, :3, :, :]
     captions = ['']
 
-    model.ae_dtype = convert_dtype(ae_dtype)
-    model.model.dtype = convert_dtype(diff_dtype)
-
     samples = model.batchify_sample(LQ, captions, num_steps=edm_steps, restoration_scale=s_stage1, s_churn=s_churn,
                                     s_noise=s_noise, cfg_scale=s_cfg, control_scale=s_stage2, seed=seed,
                                     num_samples=num_samples, p_p=a_prompt, n_p=n_prompt, color_fix_type=color_fix_type,
@@ -379,7 +379,7 @@ def restore(
     " pixels high, so a resolution of " + f'{result_width * result_height:,}' + " pixels."
     print(information)
     unique_name = str(uuid.uuid4()) + ".png"
-
+    results[0].save(unique_name)
     print(unique_name)
 
     # Only one image can be shown in the slider
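The block moved into restore_in_Xmin (and out of restore) is a lazy checkpoint swap: the v0-Q and v0-F weights are kept around as state dicts, and the model only reloads when the requested checkpoint differs from the one tracked in model.current_model. Below is a minimal, self-contained sketch of that pattern; only the names model_select, current_model, ckpt_Q, and ckpt_F come from the diff, while SimpleModel and the toy checkpoints are illustrative stand-ins, not the SUPIR implementation.

import torch
import torch.nn as nn

class SimpleModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 4)
        self.current_model = None  # which checkpoint is currently loaded

# Toy stand-ins for the real ckpt_Q / ckpt_F state dicts.
ckpt_Q = {'linear.weight': torch.zeros(4, 4), 'linear.bias': torch.zeros(4)}
ckpt_F = {'linear.weight': torch.ones(4, 4), 'linear.bias': torch.ones(4)}

model = SimpleModel()

def select_model(model_select: str) -> None:
    # Swap weights only when the requested checkpoint differs from the
    # one already loaded, exactly as the moved block does.
    if model_select != model.current_model:
        print('load ' + model_select)
        if model_select == 'v0-Q':
            model.load_state_dict(ckpt_Q, strict=False)
        elif model_select == 'v0-F':
            model.load_state_dict(ckpt_F, strict=False)
        model.current_model = model_select

select_model('v0-Q')  # loads the Q weights
select_model('v0-Q')  # no-op: already loaded
select_model('v0-F')  # swaps to the F weights

Because load_state_dict is called with strict=False, keys missing from a checkpoint are tolerated, so the two checkpoints can override only the weights they define and leave the rest of the model untouched.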
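The moved lines also apply convert_dtype to model.ae_dtype and model.model.dtype, but convert_dtype itself is not shown in this diff. A plausible reconstruction, assuming the UI passes dtype names such as 'fp32', 'fp16', and 'bf16' (the mapping below is an assumption, not the app's actual helper):

import torch

def convert_dtype(dtype_str: str) -> torch.dtype:
    # Hypothetical mapping from the UI's dtype strings to torch dtypes.
    mapping = {
        'fp32': torch.float32,
        'fp16': torch.float16,
        'bf16': torch.bfloat16,
    }
    if dtype_str not in mapping:
        raise ValueError(f'Unknown dtype: {dtype_str}')
    return mapping[dtype_str]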
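The "# Allocation" dispatch at the end of restore_in_Xmin picks one of several wrappers (restore_in_1min and so on). On a Hugging Face ZeroGPU Space, wrappers like these are typically decorated with spaces.GPU to reserve a GPU for a fixed duration, which would explain the commit title: the checkpoint swap now happens before any GPU time is claimed. The sketch below is a hedged reconstruction under that assumption; the durations and the do_restore stub are illustrative, not taken from the diff.

import spaces  # Hugging Face ZeroGPU helper; available on Spaces

def do_restore(*args, **kwargs):
    # Stand-in for the real restore() shown in the diff.
    print('restore ==>>')

# Each wrapper reserves a GPU for a different maximum duration (seconds).
@spaces.GPU(duration=60)
def restore_in_1min(*args, **kwargs):
    return do_restore(*args, **kwargs)

@spaces.GPU(duration=120)
def restore_in_2min(*args, **kwargs):
    return do_restore(*args, **kwargs)

def restore_in_Xmin(allocation, *args, **kwargs):
    # Dispatch mirrors the "# Allocation" block in the diff.
    if allocation == 1:
        return restore_in_1min(*args, **kwargs)
    return restore_in_2min(*args, **kwargs)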