Update app.py
app.py CHANGED
@@ -21,6 +21,8 @@ def query(api_url, payload=None, data=None):
 
 # Define the function to translate speech
 # Define the function to translate speech
+def translate_speech(audio_file):
+# Define the function to translate speech
 def translate_speech(audio_file):
     print(f"Type of audio: {type(audio_file)}, Value of audio: {audio_file}") # Debug line
 
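A note on this hunk: the two added lines leave the top of the function reading

    def translate_speech(audio_file):
    # Define the function to translate speech
    def translate_speech(audio_file):

and because a bare comment does not count as a function body, Python raises IndentationError ("expected an indented block") on the first def before the app can even start. The duplicated header and comment look like leftovers from an earlier edit; dropping the two added lines would restore a single valid definition.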
@@ -29,6 +31,14 @@ def translate_speech(audio_file):
     output = query(ASR_API_URL, data=data)
     print(f"Output: {output}") # Debug line
 
+    # Check if 'error' key exists in the output
+    if 'error' in output:
+        print(f"Error: {output['error']}")
+        estimated_time = output.get('estimated_time')
+        if estimated_time:
+            print(f"Estimated time for the model to load: {estimated_time} seconds")
+        return
+
     # Check if 'text' key exists in the output
     if 'text' in output:
         transcription = output["text"]
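Context for this hunk: while a hosted model is still cold, the Inference API answers with a JSON body carrying an 'error' message and an 'estimated_time' hint instead of a result, which is what the new branch catches before the 'text' lookup can fail. As written it only logs and gives up. Below is a minimal sketch, not part of this commit, of a wrapper that waits out the load and retries instead; it assumes the requests-based query() signature shown in the hunk header and the Authorization header app.py already sends (the name query_with_retry, the retry count, and the 10-second fallback are invented here):

    import time
    import requests

    headers = {"Authorization": "Bearer hf_xxx"}  # placeholder for the HF token header app.py already builds

    def query_with_retry(api_url, payload=None, data=None, max_retries=3):
        # Hypothetical helper, not part of this commit: re-issue the request,
        # sleeping for the API's own 'estimated_time' hint between attempts.
        output = {}
        for _ in range(max_retries):
            response = requests.post(api_url, headers=headers, json=payload, data=data)
            output = response.json()
            if 'error' not in output:
                return output
            wait = output.get('estimated_time', 10)  # fall back to 10 s if no hint
            print(f"Model loading ({output['error']}), retrying in {wait:.0f} s")
            time.sleep(wait)
        return output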
@@ -46,6 +56,30 @@ def translate_speech(audio_file):
     # Display the audio output
     return Audio(audio_bytes)
 
+    # print(f"Type of audio: {type(audio_file)}, Value of audio: {audio_file}") # Debug line
+
+    # # Use the ASR pipeline to transcribe the audio
+    # data = audio_file.read()
+    # output = query(ASR_API_URL, data=data)
+    # print(f"Output: {output}") # Debug line
+
+    # # Check if 'text' key exists in the output
+    # if 'text' in output:
+    #     transcription = output["text"]
+    # else:
+    #     print("Key 'text' does not exist in the output.")
+    #     return
+
+    # # Use the translation pipeline to translate the transcription
+    # translated_text = query(TRANSLATION_API_URL, {"inputs": transcription})
+
+    # # Use the TTS pipeline to synthesize the translated text
+    # response = requests.post(TTS_API_URL, headers=headers, json={"inputs": translated_text})
+    # audio_bytes = response.content
+
+    # # Display the audio output
+    # return Audio(audio_bytes)
+
 
 # Define the Gradio interface
 iface = gr.Interface(
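Worth noting on this last hunk: the entire commented-out block is appended after return Audio(audio_bytes), so it is doubly dead (unreachable and commented out); it just preserves the original ASR → translation → TTS flow for reference. For the JSON-based calls in that flow, the Inference API also accepts a wait_for_model option, which makes the request block until the model has loaded rather than returning the 'error'/'estimated_time' payload handled above. A sketch, assuming query() forwards its payload argument as the JSON body:

    # Sketch, not part of this commit: ask the API to wait out a cold start
    # instead of handling 'error'/'estimated_time' by hand.
    translated_text = query(
        TRANSLATION_API_URL,
        payload={"inputs": transcription, "options": {"wait_for_model": True}},
    )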