# NOTE(review): the following lines are page-scrape residue from the Hugging Face
# Spaces UI (runtime status, file size, commit hashes, line-number gutter), not
# source code. Preserved here as comments so the file parses as valid Python.
# Spaces: Runtime error / Runtime error
# File size: 1,313 Bytes
# 7f940ea c3f8843 7f940ea b5c66de 9465c88 7f940ea 9465c88 7f940ea b8a12ed 18d2e66 241a580 cdc5171 241a580 a34f94d 724f77f
import os
import gradio as gr
import numpy as np
# Remote inference endpoints for the four fairseq speech-to-speech translation
# models (English <-> Hokkien), loaded from the Hugging Face model hub.
# s2ut = direct speech-to-unit translation; unity = two-pass UnitY models.
# NOTE(review): gr.Interface.load is deprecated in Gradio 3.x and removed in
# 4.x (replaced by gr.load) -- confirm the pinned gradio version before upgrading.
io1 = gr.Interface.load("huggingface/facebook/xm_transformer_s2ut_en-hk")
io2 = gr.Interface.load("huggingface/facebook/xm_transformer_s2ut_hk-en")
io3 = gr.Interface.load("huggingface/facebook/xm_transformer_unity_en-hk")
io4 = gr.Interface.load("huggingface/facebook/xm_transformer_unity_hk-en")
def inference(audio, model):
    """Translate speech with the selected model.

    Args:
        audio: Filepath of the recorded input audio (Gradio `type="filepath"`).
        model: Name of the model to dispatch to; one of the dropdown choices.

    Returns:
        The translated audio produced by the matching remote interface, or
        ``None`` when ``model`` matches no known name.
    """
    # Log the incoming audio path for debugging on the Space.
    print(audio)
    # Guard-clause dispatch: return as soon as the requested model matches.
    if model == "xm_transformer_s2ut_en-hk":
        return io1(audio)
    if model == "xm_transformer_s2ut_hk-en":
        return io2(audio)
    if model == "xm_transformer_unity_en-hk":
        return io3(audio)
    if model == "xm_transformer_unity_hk-en":
        return io4(audio)
    return None
# UI inputs: a dropdown to pick the translation model and a microphone
# recorder that hands the recording to `inference` as a filepath.
model = gr.Dropdown(choices=["xm_transformer_unity_en-hk", "xm_transformer_unity_hk-en", "xm_transformer_s2ut_en-hk", "xm_transformer_s2ut_hk-en"])
audio = gr.Audio(source="microphone", type="filepath", label="Input")
# Wire the inputs into a Gradio Interface with bundled example recordings
# (the .wav files are expected to sit next to this script in the Space repo).
demo = gr.Interface(fn=inference, inputs=[audio, model], outputs=["audio"], examples=[
    ['audio1.wav', 'xm_transformer_unity_hk-en'],
    ['audio2.wav', 'xm_transformer_unity_hk-en'],
    ['audio3.wav', 'xm_transformer_unity_hk-en'],
    ['en_audio1.wav', 'xm_transformer_unity_en-hk'],
    ['en_audio2.wav', 'xm_transformer_unity_en-hk']
])
# Fix: the original line ended with a stray " |" (scrape artifact), which made
# "demo.launch() |" a SyntaxError.
demo.launch()