docker fix

Files changed:
- .gitignore +3 -1
- Dockerfile.backend +1 -1
- app.py +3 -3
- docker-compose.yml +18 -0
- hf_utils.py +12 -0
- model.py +1 -1
- process_utils.py +18 -21
- requirements.txt +2 -0
.gitignore
CHANGED
@@ -2,4 +2,6 @@ models/
 __pycache__/
 venv/
 output/
-hf_gradio/
+hf_gradio/
+hf_cache/
+wd14_tagger_model/
Dockerfile.backend
CHANGED
@@ -32,4 +32,4 @@ RUN pip install --no-dependencies transformers
 
 EXPOSE 5000
 
-CMD ["python", "app.py"]
+CMD ["python", "app.py", "--use_gpu"]
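Note: baking --use_gpu into CMD only requests CUDA; initialize() in process_utils.py still falls back to CPU when torch.cuda.is_available() returns False, so the same image also starts on hosts without a GPU.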
app.py
CHANGED
@@ -185,9 +185,9 @@ def server_error(e):
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='Server options.')
-    parser.add_argument('--
-    parser.add_argument('--use_gpu',
+    parser.add_argument('--use_local', action='store_true', help='Use local model')
+    parser.add_argument('--use_gpu', action='store_true', help='Set to True to use GPU but if not available, it will use CPU')
     args = parser.parse_args()
 
-    initialize(
+    initialize(args.use_local, args.use_gpu)
     socketio.run(app, debug=True, host='0.0.0.0', port=5000)
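Both new flags are argparse store_true booleans, so they are enabled by presence alone: `python app.py` runs with Hub downloads on CPU, `python app.py --use_gpu` requests CUDA (the form the Dockerfile CMD above uses), and `python app.py --use_local --use_gpu` resolves model files from local subfolders instead of downloading them.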
docker-compose.yml
ADDED
@@ -0,0 +1,18 @@
+services:
+  image2body_backend:
+    build:
+      context: .
+      dockerfile: Dockerfile.backend
+    ports:
+      - "5000:5000"
+    volumes:
+      - .:/app
+    env_file:
+      - .env
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - capabilities: [ "gpu" ]
+              count: 1
+              driver: nvidia
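The deploy.resources.reservations.devices block is the standard Compose syntax for reserving one NVIDIA GPU (it requires the NVIDIA Container Toolkit on the host), and env_file expects a local .env next to the compose file. A minimal sanity-check sketch for that .env; the keys listed are exactly the ones this commit reads via os.environ in hf_utils.py and process_utils.py:

import os
from dotenv import load_dotenv

load_dotenv()  # same mechanism initialize() in process_utils.py relies on

# keys consumed by hf_utils.download_file() and the model loaders
required = [
    "REPO_ID", "HF_TOKEN", "CACHE_DIR",
    "sotai_sd_model_name", "sd_models_dir",
    "controlnet_name1", "controlnet_name2",
    "controlnet_dir1", "controlnet_dir2",
    "wd_model_name",
]
missing = [key for key in required if key not in os.environ]
if missing:
    raise SystemExit(f"missing keys in .env: {missing}")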
hf_utils.py
ADDED
@@ -0,0 +1,12 @@
+import os
+from huggingface_hub import hf_hub_download
+
+def download_file(filename, subfolder=None):
+    print(f'Downloading {filename} from Hugging Face Hub...')
+    return hf_hub_download(
+        repo_id=os.environ['REPO_ID'],
+        filename=filename,
+        subfolder=subfolder,
+        token=os.environ['HF_TOKEN'],
+        cache_dir=os.environ['CACHE_DIR']
+    )
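A minimal usage sketch for the new helper, assuming REPO_ID, HF_TOKEN, and CACHE_DIR are set in the environment; the filename and subfolder below are hypothetical, for illustration only:

import os
from dotenv import load_dotenv
from hf_utils import download_file

load_dotenv()  # hf_utils itself does not load .env; the caller must

# hf_hub_download returns the local path of the cached file
local_path = download_file("model.safetensors", subfolder="sd_models")  # hypothetical names
print(local_path)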
model.py
CHANGED
@@ -2,7 +2,7 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
 import functools
-from 
+from hf_utils import download_file
 
 class UnetGenerator(nn.Module):
     """Create a Unet-based generator"""
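With this change model.py resolves its weights through the shared hf_utils.download_file helper rather than its own (truncated) import, so repo, token, and cache settings live in one place.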
process_utils.py
CHANGED
@@ -10,30 +10,19 @@ import torch
 from diffusers import StableDiffusionPipeline, StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler, AutoencoderKL
 import gc
 from peft import PeftModel
-from huggingface_hub import hf_hub_download
 from dotenv import load_dotenv
-
-load_dotenv()
+from hf_utils import download_file
 
 # グローバル変数 (global variables)
-
+use_local = False
 model = None
 device = None
 torch_dtype = None # torch.float16 if device == "cuda" else torch.float32
 sotai_gen_pipe = None
 refine_gen_pipe = None
 
-def 
-
-        repo_id=os.environ['REPO_ID'],
-        filename=filename,
-        subfolder=subfolder,
-        token=os.environ['HF_TOKEN'],
-        cache_dir=os.environ['CACHE_DIR']
-    )
-
-def get_file_path(filename, subfolder=None):
-    if local_model:
+def get_file_path(filename, subfolder):
+    if use_local:
         return os.path.join(subfolder, filename)
     else:
         return download_file(filename, subfolder)
@@ -43,11 +32,15 @@ def ensure_rgb(image):
         return image.convert('RGB')
     return image
 
-def initialize(
-
+def initialize(_use_local, use_gpu):
+    load_dotenv()
+    global model, sotai_gen_pipe, refine_gen_pipe, use_local, device, torch_dtype
     device = "cuda" if use_gpu and torch.cuda.is_available() else "cpu"
     torch_dtype = torch.float16 if device == "cuda" else torch.float32
-
+    use_local = _use_local
+    print('')
+    print(f"Device: {device}, Local model: {_use_local}")
+    print('')
     model = load_wd14_tagger_model()
     sotai_gen_pipe = initialize_sotai_model()
     refine_gen_pipe = initialize_refine_model()
@@ -62,6 +55,7 @@ def initialize_sotai_model():
     sotai_sd_model_path = get_file_path(os.environ["sotai_sd_model_name"], subfolder=os.environ["sd_models_dir"])
     controlnet_path1 = get_file_path(os.environ["controlnet_name1"], subfolder=os.environ["controlnet_dir2"])
     controlnet_path2 = get_file_path(os.environ["controlnet_name2"], subfolder=os.environ["controlnet_dir1"])
+    print(use_local, controlnet_path1)
 
     # Load the Stable Diffusion model
     sd_pipe = StableDiffusionPipeline.from_single_file(
@@ -156,7 +150,8 @@ def initialize_refine_model():
 def get_wd_tags(images: list) -> list:
     global model
     if model is None:
-
+        raise ValueError("Model is not initialized")
+        # initialize()
     preprocessed_images = [wd14_preprocess_image(img) for img in images]
     preprocessed_images = np.array(preprocessed_images)
     return generate_tags(preprocessed_images, os.environ["wd_model_name"], model)
@@ -207,7 +202,8 @@ def generate_sotai_image(input_image: Image.Image, output_width: int, output_hei
     input_image = ensure_rgb(input_image)
     global sotai_gen_pipe
     if sotai_gen_pipe is None:
-
+        raise ValueError("Model is not initialized")
+        # initialize()
 
     prompt = "anime pose, girl, (white background:1.5), (monochrome:1.5), full body, sketch, eyes, breasts, (slim legs, skinny legs:1.2)"
     try:
@@ -250,7 +246,8 @@ def generate_refined_image(prompt: str, original_image: Image.Image, output_widt
     original_image = ensure_rgb(original_image)
     global refine_gen_pipe
     if refine_gen_pipe is None:
-
+        raise ValueError("Model is not initialized")
+        # initialize()
 
     try:
         original_image_np = np.array(original_image)
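The net effect is an explicit initialization contract: initialize() must run once (app.py now calls it at startup) before any tagging or generation function. A minimal sketch of the new entry point:

from process_utils import initialize

# (_use_local, use_gpu): download from the Hub, request CUDA (CPU fallback)
initialize(False, True)

# After this, get_wd_tags() and the generate_* functions are usable; calling
# them before initialize() now raises ValueError("Model is not initialized")
# instead of lazily initializing, as the commented-out # initialize() hints.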
requirements.txt
CHANGED
@@ -5,6 +5,7 @@ torchaudio==2.2.0
 diffusers==0.29.1
 Flask==3.0.3
 Flask-Cors==4.0.0
+Flask-SocketIO==5.3.6
 gradio==4.36.1
 huggingface_hub==0.23.2
 kornia==0.7.1
@@ -17,3 +18,4 @@ transforms==0.2.1
 tokenizers
 pytorch_lightning
 python-dotenv
+peft==0.11.1
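Both new pins back imports that already exist in the code: Flask-SocketIO provides socketio.run() used in app.py, and peft provides the `from peft import PeftModel` import in process_utils.py.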
|