# Hugging Face Space: WhisperSpeech speaker-embedding extractor.
# NOTE(review): the Space page reported "Runtime error" when this code was captured.
import gradio as gr
import numpy as np
import scipy.io.wavfile
import torch
import torch.nn.functional as F
from whisperspeech.pipeline import Pipeline
def process_audio(audio_elem):
    """Extract a WhisperSpeech speaker embedding from a recorded clip.

    Parameters
    ----------
    audio_elem : tuple
        Value of a Gradio ``"audio"`` input: ``(sample_rate, samples)``
        where ``samples`` is a numpy array of the waveform.

    Returns
    -------
    str
        Path of the compressed ``.npz`` file holding the embedding
        under the key ``"features"``.
    """
    sample_rate, samples = audio_elem
    # Fix: use the clip's actual sample rate (audio_elem[0]) instead of a
    # hard-coded 48000, and name the file .wav — scipy.io.wavfile writes
    # WAV data regardless of extension, so 'test.mp3' was mislabelled.
    scipy.io.wavfile.write('test.wav', sample_rate, samples)
    # Build the pipeline once and reuse it: model loading is expensive and
    # the original reconstructed it on every request.
    if not hasattr(process_audio, '_pipe'):
        process_audio._pipe = Pipeline(
            s2a_ref='collabora/whisperspeech:s2a-q4-base-en+pl.model')
    speaker = process_audio._pipe.extract_spk_emb('test.wav')
    # Move the embedding tensor off the GPU (if any) and to numpy.
    speaker = speaker.cpu().numpy()
    print(speaker)
    # savez_compressed appends ".npz" to the bare stem automatically.
    np.savez_compressed('speaker', features=speaker)
    return 'speaker.npz'
# Build the Gradio app: audio (microphone/upload) in, downloadable file out.
# Constructing the Interface and calling launch() at top level is the
# documented pattern; the original launched from inside the Interface's own
# `with` (Blocks) context, which launches a partially-built app and is the
# likely cause of the Space's runtime error.
iface = gr.Interface(fn=process_audio, inputs="audio", outputs="file")

if __name__ == "__main__":
    iface.launch()