Fabrice-TIERCELIN committed
Commit 9593509
1 Parent(s): a780a8d

log_information

Files changed (1)
  1. app.py +6 -6
app.py CHANGED
@@ -204,12 +204,12 @@ def restore_in_Xmin(
     input_height, input_width, input_channel = denoise_image.shape
     denoise_image = denoise_image.resize((input_width // downscale, input_height // downscale), Image.LANCZOS)
 
+    denoise_image = HWC3(np.array(denoise_image))
+
     if torch.cuda.device_count() == 0:
         gr.Warning('Set this space to GPU config to make it work.')
         return [noisy_image, denoise_image], [denoise_image], None, None
 
-    denoise_image = HWC3(np.array(denoise_image))
-
     if model_select != model.current_model:
         print('load ' + model_select)
         if model_select == 'v0-Q':
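
The first hunk moves the HWC3 conversion above the zero-GPU guard, so the early return on CPU-only Spaces already hands back the converted numpy array instead of the PIL image. HWC3 is defined elsewhere in the repository; as a point of reference only, ControlNet-style helpers of that name typically normalise any uint8 image to a 3-channel H x W x 3 array, roughly like the sketch below (an assumption for illustration, not the repository's exact code):

import numpy as np

def hwc3_sketch(x: np.ndarray) -> np.ndarray:
    # Accepts uint8 arrays of shape (H, W), (H, W, 1), (H, W, 3) or (H, W, 4)
    # and returns an (H, W, 3) uint8 array.
    assert x.dtype == np.uint8
    if x.ndim == 2:
        x = x[:, :, None]
    _, _, channels = x.shape
    assert channels in (1, 3, 4)
    if channels == 3:
        return x
    if channels == 1:
        return np.concatenate([x, x, x], axis=2)
    # channels == 4: composite RGBA over a white background
    color = x[:, :, 0:3].astype(np.float32)
    alpha = x[:, :, 3:4].astype(np.float32) / 255.0
    blended = color * alpha + 255.0 * (1.0 - alpha)
    return blended.clip(0, 255).astype(np.uint8)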
@@ -218,6 +218,9 @@ def restore_in_Xmin(
             model.load_state_dict(ckpt_F, strict=False)
         model.current_model = model_select
 
+    model.ae_dtype = convert_dtype(ae_dtype)
+    model.model.dtype = convert_dtype(diff_dtype)
+
     # Allocation
     if allocation == 1:
         return restore_in_1min(
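
This hunk sets the autoencoder and diffusion model precisions inside restore_in_Xmin(), immediately after the selected checkpoint is (re)loaded; the next hunk removes the old assignments from restore(). convert_dtype is the repository's helper for turning the precision strings exposed in the UI into torch dtypes; assuming the usual 'fp32' / 'fp16' / 'bf16' choices, a minimal sketch of such a mapping would look like this (illustrative, not the exact implementation):

import torch

def convert_dtype_sketch(dtype_str: str) -> torch.dtype:
    # Map a UI precision string to the corresponding torch dtype.
    mapping = {
        'fp32': torch.float32,
        'fp16': torch.float16,
        'bf16': torch.bfloat16,
    }
    if dtype_str not in mapping:
        raise ValueError(f'Unsupported dtype string: {dtype_str}')
    return mapping[dtype_str]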
@@ -335,9 +338,6 @@ def restore(
 
     torch.cuda.set_device(SUPIR_device)
 
-    model.ae_dtype = convert_dtype(ae_dtype)
-    model.model.dtype = convert_dtype(diff_dtype)
-
     input_image = upscale_image(input_image, upscale, unit_resolution=32, min_size=min_size)
     LQ = np.array(input_image) / 255.0
     LQ = np.power(LQ, gamma_correction)
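
For context, the unchanged lines around this removal show how restore() prepares the low-quality input: the image is upscaled to the working resolution, pixel values are scaled to [0, 1], and a per-request gamma correction is applied. A small standalone illustration of that preprocessing (the function name and default are illustrative, not the app's):

import numpy as np
from PIL import Image

def preprocess_lq(image: Image.Image, gamma_correction: float = 1.0) -> np.ndarray:
    # Scale 8-bit pixel values to [0, 1], then apply gamma correction,
    # mirroring the two context lines in restore().
    lq = np.array(image) / 255.0
    return np.power(lq, gamma_correction)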
@@ -776,7 +776,7 @@ with gr.Blocks() as interface:
         result_slider,
         result_gallery,
         restore_information
-    ]).success(fn = log_information, inputs = [
+    ]).then(fn = log_information, inputs = [
         result_gallery
     ], outputs = [], queue = False, show_progress = False)
 
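The final hunk implements what the commit message names: the log_information step is now chained with .then() instead of .success(). In Gradio's event chaining, .success() only runs when the preceding event finished without an error, whereas .then() runs regardless of the outcome, so the gallery is logged even when restoration fails. A minimal, self-contained sketch of the pattern (the button, gallery and placeholder functions below are illustrative, not the app's real components):

import gradio as gr

def fake_restore():
    # Placeholder for the app's restoration pipeline.
    return []

def log_information(result_gallery):
    # Runs after the click event above, whether or not it succeeded.
    print('Images in gallery:', len(result_gallery) if result_gallery else 0)

with gr.Blocks() as interface:
    restore_button = gr.Button('Restore')
    result_gallery = gr.Gallery()
    restore_button.click(
        fn = fake_restore, inputs = [], outputs = [result_gallery]
    ).then(
        fn = log_information, inputs = [result_gallery], outputs = [], queue = False, show_progress = False
    )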
782