from PIL import Image
import torch

from openpose import get_openpose, init as init_openpose
from adapter_model import MODEL
from img2txt import derive_caption, init as init_img2txt

# Load the OpenPose detector and the image-captioning model once at import time.
init_openpose()
init_img2txt()

# Adapter model that conditions generation on a pose map.
ip_model = MODEL("pose")
negative_prompt = "(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime, mutated hands and fingers:1.4), (deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, disconnected limbs, mutation, mutated, ugly, disgusting, amputation"


def generate(img_human: Image.Image, img_clothes: Image.Image) -> Image.Image:
    """Generate an image combining the pose of img_human with the clothing in img_clothes."""
    # Extract a pose skeleton from the person photo and derive a text prompt
    # describing the clothing image.
    img_openpose = get_openpose(img_human)
    prompt = derive_caption(img_clothes)
    
    # Run the pose-conditioned adapter pipeline: the clothing photo serves as the
    # image prompt, and the OpenPose map constrains the generated pose.
    img_openpose_gen = ip_model.model.generate(
        prompt=prompt,
        negative_prompt=negative_prompt,
        pil_image=img_clothes,   # image prompt (clothing reference)
        image=img_openpose,      # pose-conditioning image
        width=512,
        height=768,
        num_samples=1,
        num_inference_steps=30,
        seed=123
    )[0]

    # Free cached GPU memory after generation.
    torch.cuda.empty_cache()
    return img_openpose_gen.convert("RGB")
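

# A minimal usage sketch, not part of the original module: the file names below are
# hypothetical, and the inputs are assumed to be a photo of a person and a photo of
# a garment.
if __name__ == "__main__":
    person = Image.open("person.jpg")
    clothes = Image.open("clothes.jpg")
    result = generate(person, clothes)
    result.save("result.png")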