saicharan1234 committed
Commit 7b988f1 · 1 Parent(s): 2e42d43

Update app.py
app.py CHANGED
@@ -9,10 +9,10 @@ import uuid
 
 is_shared_ui = True if "fudan-generative-ai/hallo" in os.environ['SPACE_ID'] else False
 
-if
+if not is_shared_ui:
     hallo_dir = snapshot_download(repo_id="fudan-generative-ai/hallo", local_dir="pretrained_models")
 
-def run_inference(source_image, driving_audio, progress=gr.Progress(track_tqdm=True)):
+def run_inference(source_image, driving_audio, pose_weight, face_weight, lip_weight, face_expand_ratio, progress=gr.Progress(track_tqdm=True)):
     if is_shared_ui:
         raise gr.Error("This Space only works in duplicated instances")
 
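The gate in this hunk controls startup cost: only a duplicated Space, where `is_shared_ui` is False, downloads the fudan-generative-ai/hallo weights, since the shared UI refuses to run inference anyway. Below is a minimal runnable sketch of the same pattern; reading SPACE_ID with os.environ.get and a default is an assumption added here so the snippet also works outside Spaces, where the app's direct os.environ['SPACE_ID'] lookup would raise KeyError.

```python
import os

from huggingface_hub import snapshot_download

# SPACE_ID is set by the Spaces runtime; the empty-string default is an
# assumption so this sketch also runs outside Hugging Face Spaces.
is_shared_ui = "fudan-generative-ai/hallo" in os.environ.get("SPACE_ID", "")

if not is_shared_ui:
    # Duplicated Spaces (and local runs) fetch the full model repo into
    # pretrained_models/; the shared UI never needs the weights because
    # run_inference raises gr.Error before doing any work there.
    hallo_dir = snapshot_download(
        repo_id="fudan-generative-ai/hallo",
        local_dir="pretrained_models",
    )
```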
@@ -23,30 +23,44 @@ def run_inference(source_image, driving_audio, progress=gr.Progress(track_tqdm=True)):
         source_image=source_image,
         driving_audio=driving_audio,
         output=f'output-{unique_id}.mp4',
-        pose_weight=
-        face_weight=
-        lip_weight=
-        face_expand_ratio=
+        pose_weight=pose_weight,
+        face_weight=face_weight,
+        lip_weight=lip_weight,
+        face_expand_ratio=face_expand_ratio,
         checkpoint=None
     )
 
     inference_process(args)
     return f'output-{unique_id}.mp4'
 
-
-
+with gr.Blocks() as demo:
+    gr.Markdown(
+        """
+        # Talking Head Generation
+        Upload a face image and driving audio, and adjust the weights to generate a talking head video.
+        """
+    )
+
     with gr.Row():
         with gr.Column():
-            avatar_face = gr.Image(type="filepath", label="Face")
-            driving_audio = gr.Audio(type="filepath", label="Driving audio")
-
+            avatar_face = gr.Image(type="filepath", label="Face", elem_id="face-input")
+            driving_audio = gr.Audio(type="filepath", label="Driving Audio", elem_id="audio-input")
+
+            with gr.Accordion("Advanced Settings", open=False):
+                pose_weight = gr.Slider(minimum=0.0, value=1.5, label="Pose Weight")
+                face_weight = gr.Slider(minimum=0.0, value=1.0, label="Face Weight")
+                lip_weight = gr.Slider(minimum=0.0, value=1.1, label="Lip Weight")
+                face_expand_ratio = gr.Slider(minimum=0.0, value=1.2, label="Face Expand Ratio")
+
+            generate = gr.Button("Generate", elem_id="generate-button")
+
         with gr.Column():
-            output_video = gr.Video(label="Your
+            output_video = gr.Video(label="Your Talking Head", elem_id="output-video")
 
     generate.click(
         fn=run_inference,
-        inputs=[avatar_face, driving_audio],
+        inputs=[avatar_face, driving_audio, pose_weight, face_weight, lip_weight, face_expand_ratio],
         outputs=output_video
     )
 
-demo.launch()
+demo.launch()
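The rest of the change threads the four new controls through to the handler: each slider value arrives as a positional argument of run_inference, bound by its position in the `inputs` list passed to generate.click. The sketch below is a self-contained version of that wiring which runs without the hallo checkpoint: the argparse.Namespace and the stub return value are stand-ins for the app's args object (whose constructor sits above this hunk and is not shown) and for inference_process.

```python
import uuid
from argparse import Namespace

import gradio as gr


def run_inference(source_image, driving_audio, pose_weight, face_weight,
                  lip_weight, face_expand_ratio,
                  progress=gr.Progress(track_tqdm=True)):
    """Stub handler: builds the args object the way the diff does."""
    unique_id = uuid.uuid4()
    # Namespace is a stand-in for the app's args constructor; the field
    # names mirror the keyword arguments shown in the diff.
    args = Namespace(
        source_image=source_image,
        driving_audio=driving_audio,
        output=f"output-{unique_id}.mp4",
        pose_weight=pose_weight,
        face_weight=face_weight,
        lip_weight=lip_weight,
        face_expand_ratio=face_expand_ratio,
        checkpoint=None,
    )
    # The real app calls inference_process(args) and returns args.output;
    # this stub returns None so the demo runs without the model.
    return None


with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            avatar_face = gr.Image(type="filepath", label="Face")
            driving_audio = gr.Audio(type="filepath", label="Driving Audio")
            with gr.Accordion("Advanced Settings", open=False):
                # Defaults copied from the diff; ranges are left at
                # Gradio's defaults since the diff sets only `minimum`.
                pose_weight = gr.Slider(minimum=0.0, value=1.5, label="Pose Weight")
                face_weight = gr.Slider(minimum=0.0, value=1.0, label="Face Weight")
                lip_weight = gr.Slider(minimum=0.0, value=1.1, label="Lip Weight")
                face_expand_ratio = gr.Slider(minimum=0.0, value=1.2, label="Face Expand Ratio")
            generate = gr.Button("Generate")
        with gr.Column():
            output_video = gr.Video(label="Your Talking Head")

    # Component order in `inputs` binds each slider to the matching
    # positional parameter of run_inference.
    generate.click(
        fn=run_inference,
        inputs=[avatar_face, driving_audio, pose_weight, face_weight,
                lip_weight, face_expand_ratio],
        outputs=output_video,
    )

demo.launch()
```

Note that progress=gr.Progress(track_tqdm=True) is deliberately absent from `inputs`: Gradio recognizes a gr.Progress default value and injects the tracker itself, mirroring any tqdm progress bars raised while the function runs.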