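"""Gradio demo for IP-Adapter-FaceID-Plus (SD 1.5).

Detects a face in an uploaded photo with InsightFace, then generates new
images of that person conditioned on a text prompt via IPAdapterFaceIDPlus.
"""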
import torch
from diffusers import StableDiffusionPipeline, DDIMScheduler, AutoencoderKL
from PIL import Image
from ip_adapter.ip_adapter_faceid import IPAdapterFaceIDPlus
import cv2
from insightface.app import FaceAnalysis
from insightface.utils import face_align
import gradio as gr
from huggingface_hub import hf_hub_download
from datetime import datetime
def download_models():
    # Fetch the FaceID-Plus checkpoint and the CLIP image encoder it depends on.
    hf_hub_download(
        repo_id='h94/IP-Adapter-FaceID',
        filename='ip-adapter-faceid-plus_sd15.bin',
        local_dir='IP-Adapter-FaceID')
    hf_hub_download(
        repo_id='h94/IP-Adapter',
        filename='models/image_encoder/config.json',
        local_dir='IP-Adapter')
    hf_hub_download(
        repo_id='h94/IP-Adapter',
        filename='models/image_encoder/pytorch_model.bin',
        local_dir='IP-Adapter')
def get_ip_model():
    download_models()
    base_model_path = "SG161222/Realistic_Vision_V4.0_noVAE"
    vae_model_path = "stabilityai/sd-vae-ft-mse"
    image_encoder_path = "IP-Adapter/models/image_encoder"
    ip_ckpt = "IP-Adapter-FaceID/ip-adapter-faceid-plus_sd15.bin"
    if torch.cuda.is_available():
        device = 'cuda'
        torch_dtype = torch.float16
    else:
        device = 'cpu'
        torch_dtype = torch.float32
    print(f'Using device: {device}')
    # DDIM scheduler configured to match the SD 1.5 training schedule.
    noise_scheduler = DDIMScheduler(
        num_train_timesteps=1000,
        beta_start=0.00085,
        beta_end=0.012,
        beta_schedule="scaled_linear",
        clip_sample=False,
        set_alpha_to_one=False,
        steps_offset=1,
    )
    # The base model ships without a VAE, so load the ft-MSE VAE separately.
    vae = AutoencoderKL.from_pretrained(vae_model_path).to(dtype=torch_dtype)
    pipe = StableDiffusionPipeline.from_pretrained(
        base_model_path,
        torch_dtype=torch_dtype,
        scheduler=noise_scheduler,
        vae=vae,
        feature_extractor=None,
        safety_checker=None,
    )
    ip_model = IPAdapterFaceIDPlus(pipe, image_encoder_path, ip_ckpt, device, num_tokens=4, torch_dtype=torch_dtype)
    return ip_model
# Build the pipeline and the InsightFace detector once at startup.
ip_model = get_ip_model()
app = FaceAnalysis(name="buffalo_l", providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
# Low det_thresh so the detector also accepts small or partially visible faces.
app.prepare(ctx_id=0, det_size=(640, 640), det_thresh=0.2)
def generate_images(prompt, img_filepath,
                    negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality, blurry",
                    img_prompt_scale=0.5,
                    num_inference_steps=30,
                    seed=None, n_images=1):
    print(f'{datetime.now().strftime("%Y/%m/%d %H:%M:%S")}: {prompt}')
    image = cv2.imread(img_filepath)
    faces = app.get(image)
    if not faces:
        raise gr.Error('No face detected in the image prompt. Please upload a photo with a clearly visible face.')
    # The FaceID embedding carries identity; the aligned crop feeds the CLIP image encoder.
    faceid_embeds = torch.from_numpy(faces[0].normed_embedding).unsqueeze(0)
    face_image = face_align.norm_crop(image, landmark=faces[0].kps, image_size=224)  # you can also segment the face
    images = ip_model.generate(
        prompt=prompt, negative_prompt=negative_prompt, face_image=face_image, faceid_embeds=faceid_embeds,
        num_samples=n_images, width=512, height=512, num_inference_steps=num_inference_steps, seed=seed,
        scale=img_prompt_scale,  # with scale=1 I get weird images
    )
    # The crop is BGR (OpenCV); reverse the channel order for display.
    return [images[0], Image.fromarray(face_image[..., [2, 1, 0]])]
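# Example call outside the UI (the file path below is hypothetical):
#   gen_img, face_crop = generate_images('An astronaut in space', 'photo_of_me.jpg', seed=42)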
with gr.Blocks() as demo:
    gr.Markdown(
        """
# IP-Adapter-FaceID-Plus
Generate images conditioned on an image prompt and a text prompt. Learn more here: https://huggingface.co/h94/IP-Adapter-FaceID
This demo is intended to run on a GPU. It also works on CPU, but generating one image can take around 900 seconds, compared to a few seconds on a GPU.
""")
    with gr.Row():
        with gr.Column():
            demo_inputs = []
            demo_inputs.append(gr.Textbox(label='text prompt', value='Linkedin profile picture'))
            demo_inputs.append(gr.Image(type='filepath', label='image prompt'))
            with gr.Accordion(label='Advanced options', open=False):
                demo_inputs.append(gr.Textbox(label='negative text prompt', value="monochrome, lowres, bad anatomy, worst quality, low quality, blurry"))
                demo_inputs.append(gr.Slider(maximum=1, minimum=0, value=0.5, step=0.05, label='image prompt scale'))
            btn = gr.Button("Generate")
        with gr.Column():
            demo_outputs = []
            demo_outputs.append(gr.Image(label='generated image'))
            demo_outputs.append(gr.Image(label='detected face', height=224, width=224))
    btn.click(generate_images, inputs=demo_inputs, outputs=demo_outputs)
    sample_prompts = [
        'Linkedin profile picture',
        'A singer on stage',
        'A politician talking to the people',
        'An astronaut in space',
    ]
    gr.Examples(sample_prompts, inputs=demo_inputs[0], label='Sample prompts')
demo.launch(share=True, debug=True)
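# Rough local setup (assumed, not pinned by the original Space): besides torch,
# diffusers, gradio, insightface, opencv-python and huggingface_hub, the
# ip_adapter package comes from https://github.com/tencent-ailab/IP-Adapter.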