# This file is adapted from https://github.com/lllyasviel/ControlNet/blob/f4748e3630d8141d7765e2bd9b1e348f47847707/gradio_hed2image.py
# The original license file is LICENSE.ControlNet in this repo.
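# Note: `create_demo` only builds the UI. The `process` callback is assumed to
# accept, in order, the values of the `inputs` list defined below
# (input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution,
# detect_resolution, num_steps, guidance_scale, seed) and to return a list of
# images for the output gallery; the real callback is provided by
# `model.Model.process_hed`.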
import gradio as gr


def create_demo(process, max_images=12, default_num_images=3):
    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown('## Control Stable Diffusion with HED Maps')
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(source='upload', type='numpy')
                prompt = gr.Textbox(label='Prompt')
                run_button = gr.Button(label='Run')
                with gr.Accordion('Advanced options', open=False):
                    num_samples = gr.Slider(label='Images',
                                            minimum=1,
                                            maximum=max_images,
                                            value=default_num_images,
                                            step=1)
                    image_resolution = gr.Slider(label='Image Resolution',
                                                 minimum=256,
                                                 maximum=512,
                                                 value=512,
                                                 step=256)
                    detect_resolution = gr.Slider(label='HED Resolution',
                                                  minimum=128,
                                                  maximum=512,
                                                  value=512,
                                                  step=1)
                    num_steps = gr.Slider(label='Steps',
                                          minimum=1,
                                          maximum=100,
                                          value=20,
                                          step=1)
                    guidance_scale = gr.Slider(label='Guidance Scale',
                                               minimum=0.1,
                                               maximum=30.0,
                                               value=9.0,
                                               step=0.1)
                    seed = gr.Slider(label='Seed',
                                     minimum=-1,
                                     maximum=2147483647,
                                     step=1,
                                     randomize=True)
                    a_prompt = gr.Textbox(
                        label='Added Prompt',
                        value='best quality, extremely detailed')
                    n_prompt = gr.Textbox(
                        label='Negative Prompt',
                        value=
                        'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
                    )
            with gr.Column():
                result = gr.Gallery(label='Output',
                                    show_label=False,
                                    elem_id='gallery').style(grid=2,
                                                             height='auto')
        # The argument order here must match the signature of the `process`
        # callback passed to `create_demo`.
        inputs = [
            input_image,
            prompt,
            a_prompt,
            n_prompt,
            num_samples,
            image_resolution,
            detect_resolution,
            num_steps,
            guidance_scale,
            seed,
        ]
        # Pressing Enter in the prompt box and clicking Run both trigger generation.
        prompt.submit(fn=process, inputs=inputs, outputs=result)
        run_button.click(fn=process,
                         inputs=inputs,
                         outputs=result,
                         api_name='hed')
    return demo


if __name__ == '__main__':
    from model import Model
    model = Model()
    demo = create_demo(model.process_hed)
    demo.queue().launch()
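

# ---------------------------------------------------------------------------
# Optional: a minimal sketch of a stand-in `process` callback for smoke-testing
# the UI without the ControlNet weights. The signature and return type are
# assumptions inferred from the `inputs` list and the Gallery output above;
# the real callback is `model.Model.process_hed`. Uncomment to try it.
# ---------------------------------------------------------------------------
# import numpy as np
#
# def dummy_process(input_image, prompt, a_prompt, n_prompt, num_samples,
#                   image_resolution, detect_resolution, num_steps,
#                   guidance_scale, seed):
#     # Echo the uploaded image `num_samples` times so the gallery shows output.
#     return [np.asarray(input_image)] * int(num_samples)
#
# if __name__ == '__main__':
#     create_demo(dummy_process).queue().launch()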