Update app.py
app.py
CHANGED
@@ -15,10 +15,29 @@ import gradio as gr
 import os
 import random
 import gc
-# Added memory management settings
-import torch.backends.cuda
-torch.backends.cuda.max_split_size_mb = 128  # limit the memory split size
 
+# Constant definitions (moved to the top)
+MAX_SEED = 2**32 - 1
+BASE_MODEL = "black-forest-labs/FLUX.1-dev"
+MODEL_LORA_REPO = "Motas/Flux_Fashion_Photography_Style"
+CLOTHES_LORA_REPO = "prithivMLmods/Canopus-Clothing-Flux-LoRA"
+
+# Memory management settings
+torch.cuda.empty_cache()
+gc.collect()
+os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:128'
+torch.backends.cudnn.benchmark = True
+torch.backends.cuda.matmul.allow_tf32 = True
+torch.backends.cuda.max_split_size_mb = 128
+
+# Hugging Face token setup
+HF_TOKEN = os.getenv("HF_TOKEN")
+if HF_TOKEN is None:
+    raise ValueError("Please set the HF_TOKEN environment variable")
+login(token=HF_TOKEN)
+
+# CUDA setup
+device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # Declare the models as global variables
 fashion_pipe = None
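Two details in this hunk are easy to miss. PYTORCH_CUDA_ALLOC_CONF is read once, when the CUDA context is first initialized, so it only takes effect if nothing has touched CUDA yet; here it is set after the torch.cuda.empty_cache() call above it, which can already be too late. And torch.backends.cuda does not document a max_split_size_mb attribute, so that assignment (both the deleted old line and the re-added new one) appears to be a no-op that merely creates an unused module attribute; the environment variable is what actually configures the allocator. (login is assumed to come from an existing `from huggingface_hub import login` higher up in app.py, outside these hunks.) A minimal sketch of the setting taking effect, assuming it runs before anything initializes CUDA:

    import os

    # Must be set before the first CUDA allocation; the caching allocator
    # reads this variable only once, at CUDA initialization.
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"

    import torch

    if torch.cuda.is_available():
        _ = torch.zeros(1, device="cuda")   # first allocation initializes the allocator
        print(torch.cuda.memory_summary())  # inspect allocator statistics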
@@ -30,6 +49,12 @@ pt_model = None
 vt_inference = None
 pt_inference = None
 
+def clear_memory():
+    if torch.cuda.is_available():
+        torch.cuda.empty_cache()
+        torch.cuda.synchronize()
+    gc.collect()
+
 # Initialization function
 def initialize_models():
     global fashion_pipe
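clear_memory() pairs the CUDA cache flush with a gc.collect(). Since torch.cuda.empty_cache() can only return blocks that Python no longer references, collecting garbage before the flush (not only after it) tends to free more; the order used here still works, just less aggressively for reference cycles. A hypothetical usage sketch, where the decorator and generate() are illustrations rather than names from app.py, that guarantees cleanup even when a generation call raises:

    import functools

    def with_memory_cleanup(fn):
        # Run clear_memory() (the helper added in this hunk) after every
        # call, whether it returns normally or raises.
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            try:
                return fn(*args, **kwargs)
            finally:
                clear_memory()
        return wrapper

    @with_memory_cleanup
    def generate(prompt):
        return fashion_pipe(prompt).images[0]  # placeholder pipeline call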
@@ -41,20 +66,10 @@ def initialize_models():
     )
     fashion_pipe.to(device)
 
-#
+# Call initialize_models here
 initialize_models()
 
-
-torch.cuda.empty_cache()
-gc.collect()
-os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:128'
-torch.backends.cudnn.benchmark = True
-torch.backends.cuda.matmul.allow_tf32 = True
-def clear_memory():
-    if torch.cuda.is_available():
-        torch.cuda.empty_cache()
-        torch.cuda.synchronize()
-    gc.collect()
+
 
 # Free memory after using the models
 def unload_models():
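This hunk deletes the stray module-level config block and clear_memory() definition that the first hunk re-homed at the top of the file, and replaces the bare `#` with a comment naming the initialize_models() call. Note that the call still runs at import time, so FLUX.1-dev is loaded as soon as the Space boots. A lazy-loading alternative, sketched as a different trade-off rather than what this commit does:

    def get_fashion_pipe():
        # Load on first use instead of at import time; initialize_models()
        # is the function defined above, which sets the fashion_pipe global.
        global fashion_pipe
        if fashion_pipe is None:
            initialize_models()
        return fashion_pipe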
@@ -66,11 +81,6 @@ def unload_models():
     vt_model = None
     pt_model = None
     clear_memory()
-# Constant definitions
-MAX_SEED = 2**32 - 1
-BASE_MODEL = "black-forest-labs/FLUX.1-dev"
-MODEL_LORA_REPO = "Motas/Flux_Fashion_Photography_Style"
-CLOTHES_LORA_REPO = "prithivMLmods/Canopus-Clothing-Flux-LoRA"
 
 # Hugging Face token setup
 HF_TOKEN = os.getenv("HF_TOKEN")
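The constants deleted here are exactly the ones re-added at the top of the file in the first hunk, so nothing is lost. The trailing token block survives, though: after this commit, `HF_TOKEN = os.getenv("HF_TOKEN")` runs both near new line 34 and near new line 86. That is harmless but redundant; a follow-up could collapse the two lookups into one validated helper, e.g. (hypothetical):

    def require_hf_token() -> str:
        # Single validated lookup replacing the two scattered os.getenv calls.
        token = os.getenv("HF_TOKEN")
        if token is None:
            raise ValueError("Please set the HF_TOKEN environment variable")
        return token

    HF_TOKEN = require_hf_token()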
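For reference, only the tail of unload_models() appears in the hunks above. Its full body presumably looks like the sketch below; the `global` statement and the fashion_pipe reset are assumptions, not lines shown in this diff:

    def unload_models():
        # Assumed full body; only the last three lines appear in the diff.
        global fashion_pipe, vt_model, pt_model  # assumption: these globals
        fashion_pipe = None                      # assumption: also unloaded here
        vt_model = None
        pt_model = None
        clear_memory()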