import gradio as gr
from model import Model
import gradio_utils
import os
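
# True when running on the official PAIR Hugging Face Space (SPACE_AUTHOR_NAME
# is set by Spaces); used below to hide the advanced sliders and cache examples.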
on_huggingspace = os.environ.get("SPACE_AUTHOR_NAME") == "PAIR"
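
# Each example row is [DreamBooth model name, motion (canny-edge) sequence, prompt].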
examples = [
    ['Anime DB', "woman1", "Portrait of detailed 1girl, feminine, soldier cinematic shot on canon 5d ultra realistic skin intricate clothes accurate hands Rory Lewis Artgerm WLOP Jeremy Lipking Jane Ansell studio lighting"],
    ['Arcane DB', "woman1", "Oil painting of a beautiful girl arcane style, masterpiece, a high-quality, detailed, and professional photo"],
    ['GTA-5 DB', "man1", "gtav style"],
    ['GTA-5 DB', "woman3", "gtav style"],
    ['Avatar DB', "woman2", "oil painting of a beautiful girl avatar style"],
]
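

# Gallery .select() callbacks: gradio_utils maps the clicked thumbnail index to
# the corresponding DreamBooth model / motion name, which is then written into
# a hidden textbox inside the demo.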
def load_db_model(evt: gr.SelectData):
    db_name = gradio_utils.get_db_name_from_id(evt.index)
    return db_name


def canny_select(evt: gr.SelectData):
    canny_name = gradio_utils.get_canny_name_from_id(evt.index)
    return canny_name
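

# Assemble the Gradio Blocks UI for the canny-edge + DreamBooth tab. `model`
# must expose process_controlnet_canny_db, which is wired to both the Run
# button and the cached examples.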
def create_demo(model: Model):
    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown(
                '## Text, Canny-Edge and DreamBooth Conditional Video Generation')
        with gr.Row():
            gr.HTML(
                """
                <div style="text-align: left; auto;">
                <h2 style="font-weight: 450; font-size: 1rem; margin: 0rem">
                Description: Our current release supports only four predefined DreamBooth models and four "motion edge" sequences. Choose one DreamBooth model and one motion edge from the galleries below, or use the examples. The keywords <b>1girl</b>, <b>arcane style</b>, <b>gtav</b>, and <b>avatar style</b> correspond to the models from left to right.
                </h2>
                </div>
                """)
        with gr.Row():
            with gr.Column():
                # input_video_path = gr.Video(source='upload', format="mp4", visible=False)
                gr.Markdown("## Selection")
                db_text_field = gr.Markdown('DB Model: **Anime DB** ')
                canny_text_field = gr.Markdown('Motion: **woman1**')
                prompt = gr.Textbox(label='Prompt')
                run_button = gr.Button(label='Run')
                with gr.Accordion('Advanced options', open=False):
                    watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
                                          "None"], label="Watermark", value='Picsart AI Research')
                    chunk_size = gr.Slider(
                        label="Chunk size", minimum=2, maximum=16, value=2, step=1, visible=not on_huggingspace,
                        info="Number of frames processed at once. Reduce for lower memory usage.")
                    merging_ratio = gr.Slider(
                        label="Merging ratio", minimum=0.0, maximum=0.9, step=0.1, value=0.0, visible=not on_huggingspace,
                        info="Ratio of how many tokens are merged. The higher the more compression (less memory and faster inference).")
            with gr.Column():
                result = gr.Image(label="Generated Video").style(height=400)

        with gr.Row():
            gallery_db = gr.Gallery(label="Db models",
                                    value=[('__assets__/db_files/anime.jpg', "anime"),
                                           ('__assets__/db_files/arcane.jpg', "Arcane"),
                                           ('__assets__/db_files/gta.jpg', "GTA-5 (Man)"),
                                           ('__assets__/db_files/avatar.jpg', "Avatar DB")]).style(grid=[4], height=50)
        with gr.Row():
            gallery_canny = gr.Gallery(label="Motions",
                                       value=[('__assets__/db_files/woman1.gif', "woman1"),
                                              ('__assets__/db_files/woman2.gif', "woman2"),
                                              ('__assets__/db_files/man1.gif', "man1"),
                                              ('__assets__/db_files/woman3.gif', "woman3")]).style(grid=[4], height=50)

        db_selection = gr.Textbox(label="DB Model", visible=False)
        canny_selection = gr.Textbox(
            label="One of the above defined motions", visible=False)

        gallery_db.select(load_db_model, None, db_selection)
        gallery_canny.select(canny_select, None, canny_selection)

        db_selection.change(on_db_selection_update, None, db_text_field)
        canny_selection.change(on_canny_selection_update, None, canny_text_field)

        inputs = [
            db_selection,
            canny_selection,
            prompt,
            chunk_size,
            watermark,
            merging_ratio,
        ]

        gr.Examples(examples=examples,
                    inputs=inputs,
                    outputs=result,
                    fn=model.process_controlnet_canny_db,
                    cache_examples=on_huggingspace,
                    )

        run_button.click(fn=model.process_controlnet_canny_db,
                         inputs=inputs,
                         outputs=result)
    return demo
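

# Change callbacks for the hidden selection textboxes: refresh the Markdown
# labels in the "Selection" column whenever a gallery click updates a selection.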
def on_db_selection_update(evt: gr.EventData):
    return f"DB model: **{evt._data}**"


def on_canny_selection_update(evt: gr.EventData):
    return f"Motion: **{evt._data}**"
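

# Example wiring (a minimal sketch, not part of this module): the repo's actual
# entry point builds one Model instance and launches the demo roughly along the
# lines below. The Model constructor arguments shown here are assumptions.
#
#     from model import Model
#     model = Model(device="cuda", dtype="float16")
#     demo = create_demo(model)
#     demo.launch()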