Fabrice-TIERCELIN committed
Commit • da3b33e
1 Parent(s): 1f33c65
Remove Llava
app.py
CHANGED
@@ -1,13 +1,8 @@
 import os
-
 import gradio as gr
-from gradio_imageslider import ImageSlider
 import argparse
-from SUPIR.util import HWC3, upscale_image, fix_resize, convert_dtype
 import numpy as np
 import torch
-from SUPIR.util import create_SUPIR_model, load_QF_ckpt
-from PIL import Image
 import einops
 import copy
 import math
@@ -15,6 +10,10 @@ import time
 import random
 import spaces
 import re
+
+from gradio_imageslider import ImageSlider
+from PIL import Image
+from SUPIR.util import HWC3, upscale_image, fix_resize, convert_dtype, create_SUPIR_model, load_QF_ckpt
 from huggingface_hub import hf_hub_download

 hf_hub_download(repo_id="laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", filename="open_clip_pytorch_model.bin", local_dir="laion_CLIP-ViT-bigG-14-laion2B-39B-b160k")
@@ -36,20 +35,11 @@ parser.add_argument("--encoder_tile_size", type=int, default=512)
 parser.add_argument("--decoder_tile_size", type=int, default=64)
 parser.add_argument("--load_8bit_llava", action='store_true', default=False)
 args = parser.parse_args()
-use_llava = not args.no_llava

 if torch.cuda.device_count() > 0:
-
-    SUPIR_device = 'cuda:0'
-    LLaVA_device = 'cuda:1'
-elif torch.cuda.device_count() == 1:
-    SUPIR_device = 'cuda:0'
-    LLaVA_device = 'cuda:0'
-else:
-    SUPIR_device = 'cpu'
-    LLaVA_device = 'cpu'
+    SUPIR_device = 'cuda:0'

-#
+# Load SUPIR
 model, default_setting = create_SUPIR_model(args.opt, SUPIR_sign='Q', load_default_setting=True)
 if args.loading_half_params:
     model = model.half()
@@ -59,7 +49,6 @@ if torch.cuda.device_count() > 0:
 model.first_stage_model.denoise_encoder_s1 = copy.deepcopy(model.first_stage_model.denoise_encoder)
 model.current_model = 'v0-Q'
 ckpt_Q, ckpt_F = load_QF_ckpt(args.opt)
-llava_agent = None

 def check_upload(input_image):
     if input_image is None:
@@ -349,10 +338,7 @@ def restore(
     LQ = LQ.round().clip(0, 255).astype(np.uint8)
     LQ = LQ / 255 * 2 - 1
     LQ = torch.tensor(LQ, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0).to(SUPIR_device)[:, :3, :, :]
-
-        captions = [prompt]
-    else:
-        captions = ['']
+    captions = ['']

     model.ae_dtype = convert_dtype(ae_dtype)
     model.model.dtype = convert_dtype(diff_dtype)
@@ -390,7 +376,7 @@ def restore(
     print(information)

     # Only one image can be shown in the slider
-    return [
+    return [noisy_image] + [results[0]], gr.update(label="Downloadable results in *." + output_format + " format", format = output_format, value = results), gr.update(value = information, visible = True)

 def load_and_reset(param_setting):
     print('load_and_reset ==>>')
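
A note on the device setup after this change, with a minimal sketch that is not part of the commit: the removed branches also handled the single-GPU and CPU cases for LLaVA, while the new code only assigns SUPIR_device inside the `if torch.cuda.device_count() > 0:` branch. If a CPU fallback were still wanted, a one-line equivalent (assumed here, not taken from the Space) could look like this:

# Sketch only (not part of this commit): pick the SUPIR device with a CPU fallback,
# mirroring the removed else branch.
import torch

SUPIR_device = 'cuda:0' if torch.cuda.device_count() > 0 else 'cpu'

As far as this diff shows, SUPIR_device is only defined when at least one GPU is visible.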
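For the new return line in restore(): the first returned value is the two-element list [noisy_image] + [results[0]], which matches the "Only one image can be shown in the slider" comment above it. Below is a self-contained sketch of that wiring; the component names and layout are illustrative assumptions, not copied from app.py.

# Sketch (assumption, not part of this commit): a two-element list drives a
# gradio_imageslider.ImageSlider as a before/after pair.
import gradio as gr
from gradio_imageslider import ImageSlider

def fake_restore(image):
    # Stand-in for restore(): returns the input twice as (before, after).
    return [image, image]

with gr.Blocks() as demo:
    input_image = gr.Image(type="numpy")
    result_slider = ImageSlider(label="Before / After")
    gr.Button("Run").click(fake_restore, inputs=input_image, outputs=result_slider)

if __name__ == "__main__":
    demo.launch()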