import gradio as gr
import requests
import numpy as np
from pydub import AudioSegment
import io

# Hugging Face Inference API endpoints for English text-to-speech and
# Hausa-to-English text translation
TTS_API_URL = "https://api-inference.huggingface.co/models/Baghdad99/english_voice_tts"
TRANSLATION_API_URL = "https://api-inference.huggingface.co/models/Baghdad99/saad-hausa-text-to-english-text"
headers = {"Authorization": "Bearer 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"}

# Load the Hausa speech-recognition model as a callable Gradio interface
asr_model = gr.load("models/Baghdad99/saad-speech-recognition-hausa-audio-to-text")


def query(api_url, payload):
    """POST a JSON payload to a Hugging Face Inference API endpoint and return the parsed response."""
    response = requests.post(api_url, headers=headers, json=payload)
    return response.json()


def translate_speech(audio_file):
    print(f"Type of audio: {type(audio_file)}, Value of audio: {audio_file}")

    # Transcribe the Hausa audio; a model loaded with gr.load can be called like a function
    transcription = asr_model(audio_file)

    # Translate the Hausa transcription to English. The Inference API returns a
    # list of dicts; the text key is usually "translation_text" (or
    # "generated_text" for text2text models).
    translation_response = query(TRANSLATION_API_URL, {"inputs": transcription})
    translated_text = translation_response[0].get("translation_text") or translation_response[0].get("generated_text", "")

    # Synthesise English speech from the translated text; the TTS endpoint returns MP3 bytes
    response = requests.post(TTS_API_URL, headers=headers, json={"inputs": translated_text})
    audio_bytes = response.content

    # Decode the MP3 bytes into raw samples
    audio_segment = AudioSegment.from_mp3(io.BytesIO(audio_bytes))
    audio_data = np.array(audio_segment.get_array_of_samples())
    if audio_segment.channels == 2:
        audio_data = audio_data.reshape((-1, 2))

    # Gradio's numpy audio output expects a (sample_rate, samples) tuple
    return audio_segment.frame_rate, audio_data


iface = gr.Interface(
    fn=translate_speech,
    inputs=gr.Audio(type="filepath"),
    outputs=gr.Audio(type="numpy"),
    title="Hausa to English Translation",
    description="Realtime demo for Hausa to English translation using speech recognition and text-to-speech synthesis.",
)

iface.launch()