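# Hausa-to-English speech-to-speech translation demo:
# Hausa ASR (wav2vec2) -> Hausa-to-English text translation -> English TTS,
# served through a Gradio interface.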
import torch
import gradio as gr
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor, pipeline
import numpy as np
import librosa

# Load the models and processors
asr_model = Wav2Vec2ForCTC.from_pretrained("Akashpb13/Hausa_xlsr")
asr_processor = Wav2Vec2Processor.from_pretrained("Akashpb13/Hausa_xlsr")
translator = pipeline("text2text-generation", model="Baghdad99/saad-hausa-text-to-english-text")
tts = pipeline("text-to-speech", model="Baghdad99/english_voice_tts")
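# Note: on the first run, from_pretrained/pipeline will download the model
# weights from the Hugging Face Hub and cache them locally.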

def translate_speech(audio_input):
    # Load the audio file as a floating point time series
    audio_data, sample_rate = librosa.load(audio_input, sr=16000)

    # Prepare the input dictionary
    input_dict = asr_processor(audio_data, sampling_rate=sample_rate, return_tensors="pt", padding=True)

    # Run the ASR model in inference mode (no gradients needed) to get the logits
    with torch.no_grad():
        logits = asr_model(input_dict.input_values).logits

    # Get the predicted IDs
    pred_ids = torch.argmax(logits, dim=-1)[0]

    # Decode the predicted IDs to get the transcription
    transcription = asr_processor.decode(pred_ids)
    print(f"Transcription: {transcription}")  # Print the transcription

    # Use the translation pipeline to translate the transcription
    translated_text = translator(transcription, return_tensors=True)
    print(f"Translated text: {translated_text}")  # Print the raw pipeline output

    # Check that the pipeline returned token IDs before decoding
    if 'generated_token_ids' in translated_text[0]:
        # Decode the token IDs into text, dropping <pad> and other special tokens
        translated_text_str = translator.tokenizer.decode(
            translated_text[0]['generated_token_ids'], skip_special_tokens=True
        ).strip()
        print(f"Translated text string: {translated_text_str}")  # Print the decoded text
    else:
        print("The translated text does not contain 'generated_token_ids'")
        return

    # Use the text-to-speech pipeline to synthesise the translated text
    synthesised_speech = tts(translated_text_str)

    # Check that the synthesised speech contains 'audio'
    if 'audio' in synthesised_speech:
        synthesised_speech_data = synthesised_speech['audio']
    else:
        print("The synthesised speech does not contain 'audio'")
        return

    # Flatten the (1, n_samples) array to 1-D audio samples
    synthesised_speech_data = synthesised_speech_data.flatten()

    # Scale the float audio to the int16 range expected by Gradio's numpy output
    synthesised_speech_int16 = (synthesised_speech_data * 32767).astype(np.int16)

    # Return the sample rate reported by the TTS pipeline instead of a
    # hard-coded 16000, so playback speed matches the synthesised audio
    return synthesised_speech['sampling_rate'], synthesised_speech_int16

# Define the Gradio interface (gr.inputs/gr.outputs are deprecated; use gr.Audio directly)
iface = gr.Interface(
    fn=translate_speech,
    inputs=gr.Audio(type="filepath"),  # the recording is passed to librosa as a file path
    outputs=gr.Audio(type="numpy"),    # expects a (sample_rate, int16 array) tuple
    title="Hausa to English Translation",
    description="Real-time demo for Hausa to English translation using speech recognition and text-to-speech synthesis."
)

iface.launch()
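
# For quick testing without the UI, the function can also be called directly.
# A minimal sketch; "sample_hausa.wav" is a hypothetical local Hausa recording
# you would supply yourself:
#
#   sample_rate, audio = translate_speech("sample_hausa.wav")
#   import soundfile as sf
#   sf.write("translated.wav", audio, sample_rate)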