Baghdad99 committed
Commit d822bd4
1 Parent(s): ab7bc1a

Update app.py

Files changed (1):
app.py +11 -12
app.py CHANGED
@@ -1,5 +1,6 @@
 import gradio as gr
-from transformers import pipeline
+from transformers import pipeline, VitsModel, AutoTokenizer
+import torch
 import numpy as np
 
 # Load the pipeline for speech recognition and translation
@@ -9,7 +10,10 @@ pipe = pipeline(
     tokenizer="Baghdad99/saad-speech-recognition-hausa-audio-to-text"
 )
 translator = pipeline("text2text-generation", model="Baghdad99/saad-hausa-text-to-english-text")
-tts = pipeline("text-to-speech", model="Baghdad99/english_voice_tts")
+
+# Load the VITS model for text-to-speech synthesis
+tts_model = VitsModel.from_pretrained("Baghdad99/english_voice_tts")
+tts_tokenizer = AutoTokenizer.from_pretrained("Baghdad99/english_voice_tts")
 
 # Define the function to translate speech
 def translate_speech(audio):
@@ -39,20 +43,15 @@ def translate_speech(audio):
         print("The translated text does not contain 'generated_token_ids'")
         return
 
-    # Use the text-to-speech pipeline to synthesize the translated text
-    synthesised_speech = tts(translated_text_str)
+    # Use the VITS model to synthesize the translated text
+    tts_inputs = tts_tokenizer(translated_text_str, return_tensors="pt")
+    with torch.no_grad():
+        synthesised_speech = tts_model(**tts_inputs).waveform
     print(f"Synthesised speech: {synthesised_speech}") # Print the synthesised speech to see what it contains
 
-    # Check if the synthesised speech contains 'audio'
-    if 'audio' in synthesised_speech:
-        synthesised_speech_data = synthesised_speech['audio']
-    else:
-        print("The synthesised speech does not contain 'audio'")
-        return
-
     # Define the max_range variable
     max_range = 1.0 # You can adjust this value based on your requirements
-    synthesised_speech = (synthesised_speech_data * max_range).astype(np.float32)
+    synthesised_speech = (synthesised_speech.numpy() * max_range).astype(np.float32)
 
     return 16000, synthesised_speech
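For context, below is a minimal sketch of the new text-to-speech path this commit introduces, isolated from the rest of app.py. The model repo, the VitsModel/AutoTokenizer loading, and the `.waveform` usage are taken from the diff above; the standalone `synthesise` helper, the `squeeze(0)` call, and the `__main__` usage are illustrative additions, and the Gradio wiring is assumed rather than shown by this commit.

```python
import numpy as np
import torch
from transformers import VitsModel, AutoTokenizer

# Load the VITS model and tokenizer used in the updated app.py
tts_model = VitsModel.from_pretrained("Baghdad99/english_voice_tts")
tts_tokenizer = AutoTokenizer.from_pretrained("Baghdad99/english_voice_tts")

def synthesise(text):
    """Turn English text into (sample_rate, waveform), the format gr.Audio accepts."""
    inputs = tts_tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        waveform = tts_model(**inputs).waveform  # tensor of shape (1, num_samples)
    # Convert to a 1-D float32 NumPy array, mirroring the diff's numpy()/astype step
    audio = (waveform.squeeze(0).numpy() * 1.0).astype(np.float32)
    return 16000, audio  # app.py returns a fixed 16 kHz rate for the Gradio output

if __name__ == "__main__":
    rate, audio = synthesise("Hello from the updated text-to-speech path.")
    print(rate, audio.shape, audio.dtype)
```

In practice the sample rate could also be read from `tts_model.config.sampling_rate` instead of being hard-coded, but the commit keeps the fixed 16000 already returned elsewhere in app.py.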