HikariDawn777 committed · Commit 10b6682 · Parent(s): 99e8912
update
app.py CHANGED

```diff
@@ -77,10 +77,11 @@ MARKDOWN = \
 Only GestureNet is provided in this Gradio Demo, you can check the full test code for all pretrained weight available.
 
 ### Note: The index we put the gesture point by default here is [4, 10] for two gesture points or [4] for one gesture point.
-### Note: The result now only support is 256x384
+### Note: The result now only support is 256x384.
 ### Note: Click "Clear All" to restart everything; Click "Undo Point" to cancel the point you put
+### Note: The first run may be long. Click "Clear All" for each run is the safest choice.
 
-If This&That is helpful, please help star the [GitHub Repo](https://github.com/Kiteretsu77/This_and_That_VDM). Thanks!
+If **This&That** is helpful, please help star the [GitHub Repo](https://github.com/Kiteretsu77/This_and_That_VDM). Thanks!
 """
 
 
@@ -383,7 +384,7 @@ if __name__ == '__main__':
         original_image = gr.State(value=None)  # store original input image
         with gr.Row():
             with gr.Column():
-                gr.Markdown("""<p style="text-align: center; font-size:
+                gr.Markdown("""<p style="text-align: center; font-size: 30px">Click two Points</p>""")
                 input_image = gr.Image(label="Input Image", height=HEIGHT, width=WIDTH, interactive=False, elem_id="input_img")
                 # gr.Image(type="numpy", label="Click Points", height=HEIGHT, width=WIDTH, interactive=False)  # for points clicking
                 undo_button = gr.Button("Undo point")
@@ -393,7 +394,7 @@ if __name__ == '__main__':
                 prompt = gr.Textbox(label="Text Prompt")
 
             with gr.Column():
-                gr.Markdown("""<p style="text-align: center; font-size:
+                gr.Markdown("""<p style="text-align: center; font-size: 30px">Results</p>""")
                 frames = gr.Video(value=None, label="Generate Video", show_label=True, height=HEIGHT, width=WIDTH)
                 with gr.Row():
                     run_button = gr.Button("Run")
```
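For orientation, here is a minimal, self-contained sketch of the Blocks layout these hunks touch, showing where the two new column headers ("Click two Points" and "Results") sit relative to the input image, prompt, and output video. The HEIGHT/WIDTH values, the `demo` name, and the omitted event wiring are assumptions for illustration, not code taken from the Space.

```python
# Minimal layout sketch (assumed names and values; not the Space's full app.py).
import gradio as gr

HEIGHT, WIDTH = 256, 384  # assumed from the "256x384" note in the MARKDOWN above

with gr.Blocks() as demo:
    original_image = gr.State(value=None)  # store original input image
    with gr.Row():
        with gr.Column():
            gr.Markdown("""<p style="text-align: center; font-size: 30px">Click two Points</p>""")
            input_image = gr.Image(label="Input Image", height=HEIGHT, width=WIDTH,
                                   interactive=False, elem_id="input_img")
            undo_button = gr.Button("Undo point")
            prompt = gr.Textbox(label="Text Prompt")
        with gr.Column():
            gr.Markdown("""<p style="text-align: center; font-size: 30px">Results</p>""")
            frames = gr.Video(value=None, label="Generate Video", show_label=True,
                              height=HEIGHT, width=WIDTH)
            with gr.Row():
                run_button = gr.Button("Run")

if __name__ == "__main__":
    demo.launch()
```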
svd/pipeline_stable_video_diffusion_controlnet.py CHANGED

```diff
@@ -622,7 +622,7 @@ class StableVideoDiffusionControlNetPipeline(DiffusionPipeline):
         self._num_timesteps = len(timesteps)
         with self.progress_bar(total=num_inference_steps) as progress_bar:
             for i, t in enumerate(timesteps):
-                print("This is timestep ", t)
+                # print("This is timestep ", t)
 
                 # expand the latents if we are doing classifier free guidance
                 if use_instructpix2pix:
```
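This hunk only comments out a per-step debug print inside the denoising loop. A sketch of an alternative, assuming nothing beyond the standard library: routing the same message through Python's logging module keeps it out of normal runs but recoverable for debugging. The loop below is a stand-in, not the pipeline's actual denoising body.

```python
# Sketch: gate the per-timestep message behind logging (stand-in loop,
# not the pipeline's real denoising code).
import logging

logger = logging.getLogger(__name__)

def run_denoising_loop(timesteps):
    for i, t in enumerate(timesteps):
        logger.debug("This is timestep %s", t)  # hidden unless DEBUG level is enabled
        # ... the actual denoising step (latent expansion, UNet call, etc.) would go here ...

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)  # switch to logging.DEBUG to see each timestep
    run_denoising_loop(range(25))
```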