# This file is adapted from https://github.com/lllyasviel/ControlNet/blob/f4748e3630d8141d7765e2bd9b1e348f47847707/gradio_pose2image.py
# The original license file is LICENSE.ControlNet in this repo.
import gradio as gr


def create_demo(process):
    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown('## Control Stable Diffusion with Human Pose')
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(source='upload', type='numpy')
                prompt = gr.Textbox(label='Prompt')
                run_button = gr.Button(label='Run')
                with gr.Accordion('Advanced options', open=False):
                    num_samples = gr.Slider(label='Images',
                                            minimum=1,
                                            maximum=12,
                                            value=1,
                                            step=1)
                    image_resolution = gr.Slider(label='Image Resolution',
                                                 minimum=256,
                                                 maximum=768,
                                                 value=512,
                                                 step=256)
                    detect_resolution = gr.Slider(label='OpenPose Resolution',
                                                  minimum=128,
                                                  maximum=1024,
                                                  value=512,
                                                  step=1)
                    ddim_steps = gr.Slider(label='Steps',
                                           minimum=1,
                                           maximum=100,
                                           value=20,
                                           step=1)
                    scale = gr.Slider(label='Guidance Scale',
                                      minimum=0.1,
                                      maximum=30.0,
                                      value=9.0,
                                      step=0.1)
                    seed = gr.Slider(label='Seed',
                                     minimum=-1,
                                     maximum=2147483647,
                                     step=1,
                                     randomize=True)
                    eta = gr.Number(label='eta (DDIM)', value=0.0)
                    a_prompt = gr.Textbox(
                        label='Added Prompt',
                        value='best quality, extremely detailed')
                    n_prompt = gr.Textbox(
                        label='Negative Prompt',
                        value=
                        'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
                    )
            with gr.Column():
                result_gallery = gr.Gallery(label='Output',
                                            show_label=False,
                                            elem_id='gallery').style(
                                                grid=2, height='auto')
        ips = [
            input_image, prompt, a_prompt, n_prompt, num_samples,
            image_resolution, detect_resolution, ddim_steps, scale, seed, eta
        ]
        run_button.click(fn=process,
                         inputs=ips,
                         outputs=[result_gallery],
                         api_name='pose')
    return demo
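

# A minimal usage sketch, not part of the original file: `create_demo` only
# builds the UI, so it expects a `process` callable whose positional arguments
# match `ips` (input_image, prompt, a_prompt, n_prompt, num_samples,
# image_resolution, detect_resolution, ddim_steps, scale, seed, eta) and which
# returns a list of images for the output gallery. `dummy_process` below is a
# hypothetical stand-in, not the real pose-conditioned inference function.
if __name__ == '__main__':

    def dummy_process(input_image, prompt, a_prompt, n_prompt, num_samples,
                      image_resolution, detect_resolution, ddim_steps, scale,
                      seed, eta):
        # Placeholder: echo the uploaded image `num_samples` times instead of
        # running ControlNet + Stable Diffusion.
        return [input_image] * num_samples

    create_demo(dummy_process).launch()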