from PIL import Image
import torch

from openpose import get_openpose, init as init_openpose
from adapter_model import MODEL
from img2txt import derive_caption, init as init_img2txt

# Load the pose-estimation and captioning models once at import time.
init_openpose()
init_img2txt()

# Image-prompt adapter model conditioned on an OpenPose control image.
ip_model = MODEL("pose")
def generate(img_human: Image.Image, img_clothes: Image.Image) -> Image.Image:
    """Render the person from img_human wearing the garment from img_clothes."""
    # Extract the pose skeleton from the human photo to use as the control image.
    img_openpose = get_openpose(img_human)
    # Caption the clothing image to obtain a text prompt for generation.
    prompt = derive_caption(img_clothes)
    img_openpose_gen = ip_model.model.generate(
        prompt=prompt,
        pil_image=img_clothes,  # image prompt: the garment
        image=img_openpose,     # control image: the extracted pose
        width=512,
        height=768,
        num_samples=1,
        num_inference_steps=30,
        seed=42,  # 123
    )[0]
    # Release cached GPU memory before handing the result back.
    torch.cuda.empty_cache()
    return img_openpose_gen.convert("RGB")
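

# Minimal usage sketch. The file names below are hypothetical examples; in the
# actual Space, generate() is presumably wired into a UI rather than run directly.
if __name__ == "__main__":
    human = Image.open("human.jpg")      # assumed input: photo of a person
    clothes = Image.open("clothes.jpg")  # assumed input: photo of a garment
    result = generate(human, clothes)
    result.save("result.png")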