Update ip_adapter_inpainting.py
ip_adapter_inpainting.py (CHANGED: +11 -14)
@@ -5,10 +5,10 @@ import torch
 
 from segmentation import get_cropped, get_blurred_mask, init_body as init_body_seg, init_face as init_face_seg
 from img2txt import derive_caption, init as init_img2txt
-from utils import
+from utils import alpha_composite
 from adapter_model import MODEL
 
-
+init_face_seg()
 init_body_seg()
 init_img2txt()
 
@@ -16,23 +16,20 @@ ip_model = MODEL("inpaint")
 
 
 def generate(img_openpose_gen: Image, img_human: Image, img_clothes: Image, segment_id: int):
-    cropped_clothes = get_cropped(img_openpose_gen, segment_id, False, False).resize(
-    cropped_body = get_cropped(img_human, segment_id, True, False).resize(
+    cropped_clothes = get_cropped(img_openpose_gen, segment_id, False, False).resize((512, 768))
+    cropped_body = get_cropped(img_human, segment_id, True, False).resize((512, 768))
 
     composite = Image.alpha_composite(cropped_body.convert('RGBA'),
                                       cropped_clothes.convert('RGBA')
     )
     composite = alpha_composite_manuel(composite)
 
-
-    input_clothes = alpha_composite_manuel(input_clothes)
-
-    mask = get_blurred_mask(img_openpose_gen, segment_id)
+    mask = get_blurred_mask(composite, segment_id, False, False)
     prompt = derive_caption(img_clothes)
 
     ip_gen = ip_model.model.generate(
         prompt=prompt,
-        pil_image=
+        pil_image=img_clothes,
         num_samples=1,
         num_inference_steps=50,
         seed=42,
@@ -43,9 +40,9 @@ def generate(img_openpose_gen: Image, img_human: Image, img_clothes: Image, segm
         scale=0.8
     )[0]
 
-
-
-
-
+    cropped_head = get_cropped(composite, 13, False, True)
+    ip_gen_final = Image.alpha_composite(ip_gen.convert("RGBA"),
+                                         cropped_head.convert("RGBA")
+    )
     torch.cuda.empty_cache()
-    return
+    return ip_gen_final
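For context on the helper the diff keeps calling: alpha_composite_manuel flattens the RGBA composite before it is passed to the mask extraction and the inpainting model. What follows is a minimal sketch of what such a flattening helper typically looks like, assuming an opaque white background; the actual implementation lives in this repo's utils.py and may differ.

from PIL import Image

def alpha_composite_manuel(img: Image.Image) -> Image.Image:
    # Hypothetical stand-in for the utils helper: composite the RGBA image
    # onto an opaque white canvas, then drop the alpha channel so downstream
    # models receive plain RGB input.
    background = Image.new("RGBA", img.size, (255, 255, 255, 255))
    return Image.alpha_composite(background, img.convert("RGBA")).convert("RGB")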
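And a usage sketch of the updated generate entry point. The file names and the segment_id value below are placeholders, not part of the commit; the diff only shows that id 13 is used for the head crop, so the id assumed for the clothes region is illustrative.

from PIL import Image
from ip_adapter_inpainting import generate

# Placeholder inputs: an OpenPose-conditioned generation, a photo of the
# person, and a reference image of the garment to transfer.
img_openpose_gen = Image.open("openpose_gen.png")
img_human = Image.open("human.png")
img_clothes = Image.open("clothes.png")

# segment_id selects the body-parsing class to crop and inpaint; 5 is a
# placeholder for the clothes region.
result = generate(img_openpose_gen, img_human, img_clothes, segment_id=5)
result.convert("RGB").save("result.png")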
|