from PIL import Image
import os
import torch

from segmentation import get_cropped, get_blurred_mask, init_body as init_body_seg, init_face as init_face_seg
from img2txt import derive_caption, init as init_img2txt
from utils import alpha_composite_manuel
from adapter_model import MODEL

# Initialise the segmentation and captioning models once at import time.
# init_face_seg()
init_body_seg()
init_img2txt()

# IP-Adapter model configured for inpainting.
ip_model = MODEL("inpaint")


def generate(img_openpose_gen: Image.Image, img_human: Image.Image, img_clothes: Image.Image, segment_id: int):
    # Crop the garment region from the OpenPose-generated image and the body region
    # from the human photo, resizing both to the generated image's size.
    cropped_clothes = get_cropped(img_openpose_gen, segment_id, False, False).resize(img_openpose_gen.size)
    cropped_body = get_cropped(img_human, segment_id, True, False).resize(img_openpose_gen.size)

    # Overlay the garment crop on top of the body crop, then flatten the alpha channel.
    composite = Image.alpha_composite(cropped_body.convert('RGBA'),
                                      cropped_clothes.convert('RGBA'))
    composite = alpha_composite_manuel(composite)

    # Crop the reference clothing image to the same segment and flatten it.
    input_clothes = get_cropped(img_clothes, segment_id, False, False).resize(img_openpose_gen.size)
    input_clothes = alpha_composite_manuel(input_clothes)

    # Blurred segmentation mask restricts inpainting to the garment region.
    mask = get_blurred_mask(img_openpose_gen, segment_id)

    # Caption the clothing image to use as the text prompt.
    prompt = derive_caption(img_clothes)

    # Run IP-Adapter inpainting guided by the clothing image and the derived prompt.
    ip_gen = ip_model.model.generate(
        prompt=prompt,
        pil_image=input_clothes,
        num_samples=1,
        num_inference_steps=50,
        seed=42,
        image=composite,
        mask_image=mask,
        strength=0.8,
        guidance_scale=7,
        scale=0.8,
    )[0]

    # Optionally paste the original head back over the generated result.
    # cropped_head = get_cropped(img_human.resize(img_openpose_gen.size), 13, False, True)
    # ip_gen_final = Image.alpha_composite(ip_gen.convert("RGBA"),
    #                                      cropped_head.convert("RGBA"))

    torch.cuda.empty_cache()
    return ip_gen
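

if __name__ == "__main__":
    # Example usage: a minimal sketch. The file names and the segment id below are
    # hypothetical; the id must match the label map used by `segmentation.get_cropped`.
    pose_img = Image.open("openpose_gen.png")
    human_img = Image.open("human.png")
    clothes_img = Image.open("clothes.png")
    result = generate(pose_img, human_img, clothes_img, segment_id=4)
    result.save("result.png")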