import gc

import gradio as gr
import jax
import jax.numpy as jnp
import numpy as np
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from PIL import Image
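
# Gradio demo for the Animal Pose ControlNet: a keypoint image, drawn with the
# embedded p5.js sketch, conditions a Flax Stable Diffusion ControlNet pipeline
# to generate matching animal images.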
report_url = 'https://wandb.ai/john-fozard/dog-cat-pose/runs/kmwcvae5'
sketch_url = 'https://editor.p5js.org/kfahn/full/OshQky7RS'


def create_key(seed=0):
    return jax.random.PRNGKey(seed)


def addp5sketch(url):
    # Embed the p5.js keypoint-drawing sketch in an iframe.
    iframe = f'<iframe src="{url}" style="border:none;height:495px;width:100%"></iframe>'
    return gr.HTML(iframe)


def wandb_report(url):
    # Embed a Weights & Biases report in an iframe.
    iframe = f'<iframe src="{url}" style="border:none;height:1024px;width:100%"></iframe>'
    return gr.HTML(iframe)


control_img = 'myimage.jpg'

# Each example row is (prompt, negative prompt, conditioning image path).
examples = [["a yellow dog in grass", "lowres, two heads, bad muzzle, bad anatomy, missing ears, missing paws", "example1.jpg"]]


controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
    "JFoz/dog-cat-pose", dtype=jnp.bfloat16
)
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    revision="flax",
    dtype=jnp.bfloat16,
    safety_checker=None,
)
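
# The models above are loaded once at import time so that every request reuses
# the same weights; bfloat16 parameters take half the memory of float32.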
def infer(prompts, negative_prompts, image):
    # Attach the trained ControlNet parameters to the pipeline parameters.
    params["controlnet"] = controlnet_params

    num_samples = 1
    rng = create_key(0)
    # One PRNG key per device for the pmapped sampling loop.
    rng = jax.random.split(rng, jax.device_count())
    # Gradio hands the conditioning image over as a numpy array.
    image = Image.fromarray(image)

    prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
    negative_prompt_ids = pipe.prepare_text_inputs([negative_prompts] * num_samples)
    processed_image = pipe.prepare_image_inputs([image] * num_samples)

    # Replicate the parameters to every device and shard the inputs across them.
    p_params = replicate(params)
    prompt_ids = shard(prompt_ids)
    negative_prompt_ids = shard(negative_prompt_ids)
    processed_image = shard(processed_image)

    # Outputs are shaped (devices, batch, height, width, channels);
    # take the first image from the first device.
    output = pipe(
        prompt_ids=prompt_ids,
        image=processed_image,
        params=p_params,
        prng_seed=rng,
        num_inference_steps=50,
        neg_prompt_ids=negative_prompt_ids,
        jit=True,
    ).images[0, 0]

    del image
    del prompt_ids
    del negative_prompt_ids
    gc.collect()

    # Cast from bfloat16 to float32 so Gradio can display the array.
    output = np.array(output, dtype=np.float32)
    return output
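
# Minimal smoke test for infer(), assuming a keypoint conditioning image such
# as the "example1.jpg" listed in `examples` is on disk (hypothetical call,
# not part of the original app):
#
#     result = infer("a yellow dog in grass",
#                    "lowres, two heads, bad muzzle, bad anatomy",
#                    np.array(Image.open("example1.jpg")))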
with gr.Blocks(css=".gradio-container {background-image: linear-gradient(to bottom, #206dff 10%, #f8d0ab 90%)}") as demo:
    gr.Markdown(
        """
        <h1 style="text-align: center; font-size: 32px; color: white;">
        Animal Pose Control Net
        </h1>
        <h3 style="text-align: left; font-size: 20px; color: white;">This is a demo of Animal Pose ControlNet, a model trained on runwayml/stable-diffusion-v1-5 with a new type of conditioning.</h3>
        <h3 style="text-align: left; font-size: 20px; color: white;">While this is definitely a work in progress, you can still try it out by using the p5 sketch to create a keypoint image and using it as the conditioning image.</h3>
        <h3 style="text-align: left; font-size: 20px; color: white;">The model was trained as part of the Hugging Face JAX Diffusers sprint. Thank you to both Hugging Face and Google Cloud, who provided the TPUs for training!</h3>
        <h3 style="text-align: left; font-size: 20px; color: white;">The dataset was built using the OpenPifPaf Animalpose plugin.</h3>
        """)

    with gr.Row():
        with gr.Column():
            prompts = gr.Textbox(label="Prompt", placeholder="animal standing, best quality, highres")
            negative_prompts = gr.Textbox(label="Negative Prompt", value="lowres, two heads, bad muzzle, bad anatomy, missing ears, missing paws")
            conditioning_image = gr.Image(label="Conditioning Image")

            run_btn = gr.Button("Run")
            output = gr.Image(label="Result")

        with gr.Column():
            keypoint_tool = addp5sketch(sketch_url)
            gr.Markdown(
                """
                <h3 style="text-align: left; font-size: 24px;">Additional Information</h3>
                <a style="color: black; font-size: 20px" href="https://openpifpaf.github.io/plugins_animalpose.html">OpenPifPaf Animalpose</a><br/>
                <a style="color: black; font-size: 20px" href="https://huggingface.co/datasets/JFoz/dog-cat-pose">Dataset</a><br/>
                <a style="color: black; font-size: 20px" href="https://huggingface.co/JFoz/dog-cat-pose">Diffusers model</a><br/>
                <a style="color: black; font-size: 20px" href="https://wandb.ai/john-fozard/dog-cat-pose/runs/kmwcvae5">WANDB Training Report</a><br/>
                <a style="color: black; font-size: 20px" href="https://github.com/fi4cr/animalpose/tree/main/scripts">Training Scripts</a><br/>
                <a style="color: black; font-size: 20px" href="https://p5js.org">p5.js</a>
                """)

    run_btn.click(fn=infer, inputs=[prompts, negative_prompts, conditioning_image], outputs=output)
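    # `examples` is defined above but never wired into the UI; one way to surface
    # it here (an assumption, not in the original code) would be:
    #
    #     gr.Examples(examples=examples,
    #                 inputs=[prompts, negative_prompts, conditioning_image])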
# debug=True blocks the main thread and prints errors to the console, which is
# useful when developing locally or running in a notebook.
demo.launch(debug=True)