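"""Gradio demo for SadTalker: audio-driven talking-face video generation.

The app downloads the pretrained SadTalker weights from the Hugging Face Hub,
takes a source portrait image plus driving audio (uploaded directly,
synthesized offline from text with pyttsx3, or replaced by an idle animation),
and renders a talking-head video.
"""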
import os
import uuid

import pyttsx3
import gradio as gr
from src.gradio_demo import SadTalker
# from src.utils.text2speech import TTSTalker
from huggingface_hub import snapshot_download

def get_source_image(image):
    return image

try:
    import webui  # running inside stable-diffusion-webui
    in_webui = True
except ImportError:
    in_webui = False

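# Gradio UI callbacks: each returns gr.update(...) payloads that Gradio applies
# to the components listed in the corresponding `outputs` argument.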
def toggle_audio_file(choice):
    # Idle mode off: show the regular audio input; idle mode on: show the
    # hidden placeholder input instead.
    if not choice:
        return gr.update(visible=True), gr.update(visible=False)
    else:
        return gr.update(visible=False), gr.update(visible=True)

def ref_video_fn(path_of_ref_video):
    # Tick the "Use Reference Video" checkbox automatically once a video is uploaded.
    if path_of_ref_video is not None:
        return gr.update(value=True)
    else:
        return gr.update(value=False)

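# snapshot_download fetches every file in the Hub repo and caches it locally,
# so repeated calls after the first download are effectively no-ops.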
def download_model():
    REPO_ID = 'vinthony/SadTalker-V002rc'
    snapshot_download(REPO_ID)

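# Voice selection: collect every installed pyttsx3 voice whose language tag,
# id, or name contains the requested language, then prefer the one whose
# gender matches, falling back to the first language match.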
# language : en_US, de_DE, ...
# gender : VoiceGenderFemale, VoiceGenderMale
def change_voice(engine, language='ru_ru', gender='male'):
    selected_voices = []
    language = language.lower() if language else ''
    gender = gender.lower() if gender else ''
    for voice in engine.getProperty('voices'):
        voice_appended = False
        for lang in voice.languages:
            # Language tags may arrive as bytes or str depending on the backend.
            lang_str = lang.decode('utf-8') if isinstance(lang, bytes) else str(lang)
            print("lang", lang_str)
            if lang_str and language in lang_str.lower():
                selected_voices.append(voice)
                print("voice appended by lang", voice, lang_str)
                voice_appended = True
                break
        if voice_appended:
            continue
        if voice.id and language in voice.id.lower():
            selected_voices.append(voice)
            print("voice appended by id", voice.id)
            continue
        if voice.name and language in voice.name.lower():
            selected_voices.append(voice)
            print("voice appended by name", voice.name)
            continue
    for voice in selected_voices:
        # Compare the bare gender token exactly: a plain substring test would
        # wrongly match 'male' inside 'VoiceGenderFemale'.
        if voice.gender and voice.gender.lower().replace('voicegender', '') == gender:
            engine.setProperty('voice', voice.id)
            print("voice selected by gender", voice.gender)
            return True
        if voice.id and gender in voice.id.lower():
            engine.setProperty('voice', voice.id)
            print("voice selected by id", voice.id)
            return True
        if voice.name and gender in voice.name.lower():
            engine.setProperty('voice', voice.id)
            print("voice selected by name", voice.name)
            return True
    if selected_voices:
        engine.setProperty('voice', selected_voices[0].id)
        print("voice selected by default", selected_voices[0].id)
        return True
    return False

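# pyttsx3 is fully offline: it wraps the platform speech engine (espeak on
# Linux, SAPI5 on Windows, NSSpeechSynthesizer on macOS), so text-to-speech
# works without any network access.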
def play_text_to_speech(text_input, voice_option):
    engine = pyttsx3.init()
    change_voice(engine, 'ru', voice_option)
    print("text_input", text_input)
    print("voice_option", voice_option)
    file_tag = str(uuid.uuid4())  # unique name so concurrent requests cannot collide
    save_dir = './results/voice_input'
    os.makedirs(save_dir, exist_ok=True)
    file_name = os.path.join(save_dir, file_tag + '.wav')
    # save_to_file queues the synthesis; runAndWait drains the queue and writes
    # the wav (engine.say would additionally play it, which a server cannot do).
    engine.save_to_file(text_input, file_name)
    engine.runAndWait()
    print("file saved to", file_name)
    return file_name

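# UI layout: the left panel holds the inputs (source image plus one of three
# driving methods: uploaded audio, text-to-speech, or idle animation, with an
# optional reference video); the right panel holds the generation settings and
# the output video.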
def sadtalker_demo():
    download_model()

    sad_talker = SadTalker(lazy_load=True)
    # tts_talker = TTSTalker()

    with gr.Blocks(analytics_enabled=False) as sadtalker_interface:
        with gr.Row():
            with gr.Column(variant='panel'):
                with gr.Tabs(elem_id="sadtalker_source_image"):
                    with gr.TabItem('Source image'):
                        with gr.Row():
                            source_image = gr.Image(label="Source image", source="upload", type="filepath", elem_id="img2img_image")
with gr.Tabs(elem_id="sadtalker_driven_audio"): | |
with gr.TabItem('Driving Methods'): | |
with gr.Row(): | |
driven_audio = gr.Audio(label="Input audio", source="upload", type="filepath") | |
driven_audio_no = gr.Audio(label="Use IDLE mode, no audio is required", source="upload", type="filepath", visible=False) | |
with gr.Column(visible=False): | |
use_idle_mode = gr.Checkbox(label="Use Idle Animation", visible=False) | |
length_of_audio = gr.Number(value=5, label="The length(seconds) of the generated video.") | |
use_idle_mode.change(toggle_audio_file, inputs=use_idle_mode, outputs=[driven_audio, driven_audio_no]) # todo | |
with gr.Row(): | |
text_input = gr.Textbox(label="Enter text", multiline=True) | |
voice_option = gr.Radio(['Male', 'Female'], label='Voice Option', value='Female') | |
with gr.Row(): | |
play_button = gr.Button('Text To Speech', variant='primary') | |
play_button.click( | |
fn=play_text_to_speech, | |
inputs=[text_input, voice_option], | |
outputs=[driven_audio] | |
) | |
with gr.Row(): | |
ref_video = gr.Video(label="Reference Video", source="upload", type="filepath", elem_id="vidref") | |
with gr.Column(): | |
use_ref_video = gr.Checkbox(label="Use Reference Video") | |
ref_info = gr.Radio(['pose', 'blink','pose+blink', 'all'], value='pose', label='Reference Video',info="How to borrow from reference Video?((fully transfer, aka, video driving mode))") | |
ref_video.change(ref_video_fn, inputs=ref_video, outputs=[use_ref_video]) # todo | |
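            # Right-hand panel: generation settings and the rendered result.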
            with gr.Column(variant='panel'):
                with gr.Tabs(elem_id="sadtalker_checkbox"):
                    with gr.TabItem('Settings'):
                        with gr.Column(variant='panel'):
                            # width = gr.Slider(minimum=64, elem_id="img2img_width", maximum=2048, step=8, label="Manually Crop Width", value=512) # img2img_width
                            # height = gr.Slider(minimum=64, elem_id="img2img_height", maximum=2048, step=8, label="Manually Crop Height", value=512) # img2img_height
                            with gr.Row():
                                pose_style = gr.Slider(minimum=0, maximum=45, step=1, label="Pose style", value=0)
                                exp_weight = gr.Slider(minimum=0, maximum=3, step=0.1, label="Expression scale", value=1)
                                blink_every = gr.Checkbox(label="Use eye blink", value=True)

                            with gr.Row():
                                size_of_image = gr.Radio([256, 512], value=256, label='Face model resolution', info="Use the 256 or 512 model?")
                                preprocess_type = gr.Radio(['crop', 'resize', 'full', 'extcrop', 'extfull'], value='crop', label='Preprocess', info="How to handle the input image?")

                            with gr.Row():
                                is_still_mode = gr.Checkbox(label="Still Mode (less head motion; works with preprocess `full`)")
                                facerender = gr.Radio(['facevid2vid', 'pirender'], value='facevid2vid', label='Face renderer', info="Which face renderer to use?")

                            with gr.Row():
                                batch_size = gr.Slider(label="Batch size in generation", step=1, minimum=1, maximum=10, value=1)
                                enhancer = gr.Checkbox(label="GFPGAN as face enhancer")

                            submit = gr.Button('Generate', elem_id="sadtalker_generate", variant='primary')

                with gr.Tabs(elem_id="sadtalker_generated"):
                    gen_video = gr.Video(label="Generated video", format="mp4")
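        # Wire the Generate button; the positional order of `inputs` must
        # match the parameter order of SadTalker.test.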
        submit.click(
            fn=sad_talker.test,
            inputs=[
                source_image,
                driven_audio,
                preprocess_type,
                is_still_mode,
                enhancer,
                batch_size,
                size_of_image,
                pose_style,
                facerender,
                exp_weight,
                use_ref_video,
                ref_video,
                ref_info,
                use_idle_mode,
                length_of_audio,
                blink_every
            ],
            outputs=[gen_video]
        )

    return sadtalker_interface

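# queue(max_size=10) caps how many requests can wait in line; launch(debug=True)
# keeps the process attached and prints tracebacks to the console.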
if __name__ == "__main__":
    demo = sadtalker_demo()
    demo.queue(max_size=10)
    demo.launch(debug=True)