Create app.py
app.py
ADDED
@@ -0,0 +1,68 @@
import os
import subprocess

import openai
import gradio as gr

# The OpenAI API key is read from the environment; set OPENAI_API_KEY before launching.
openai.api_key = os.getenv("OPENAI_API_KEY")


def transcribe(audio):
    # Transcribe the recorded question with OpenAI Whisper (pre-1.0 openai SDK call).
    with open(audio, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)
    return transcript["text"]


def generate_response(transcribed_text):
    # Answer with gpt-3.5-turbo, using a few-shot prompt that keeps replies in Swahili
    # and on the topic of viazi lishe (vitamin-A-rich orange-fleshed sweet potatoes).
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "All your answers should be in Swahili only; the users understand Swahili only, so here we start... Wewe ni mtaalamu wa viazi lishe na utajibu maswali yote kwa Kiswahili tu!"},
            {"role": "user", "content": "Mambo vipi?"},
            {"role": "assistant", "content": "Salama, je una swali lolote kuhusu viazi lishe?"},
            {"role": "user", "content": "Nini maana ya viazi lishe?"},
            {"role": "assistant", "content": "Viazi lishe ni viazi vitamu vyenye rangi ya karoti kwa ndani ambavyo vina vitamin A kwa wingi."},
            {"role": "user", "content": "Nini matumizi ya viazi lishe?"},
            {"role": "assistant", "content": """Viazi lishe vinaweza kutengenezea chakula kama keki,
maandazi, kalimati na tambi: ukisaga unga wa viazi lishe,
unaweza kutumika kupika vyakula ambavyo huwa watu
hutumia unga wa ngano kupika. Unga wa viazi lishe una
virutubisho vingi zaidi kuliko unga wa ngano, na
ukitumika kupikia vyakula tajwa hapo juu watumiaji
watakuwa wanakula vyakula vyenye virutubisho zaidi."""},
            {"role": "user", "content": transcribed_text},
        ],
    )
    return response["choices"][0]["message"]["content"]


def inference(text):
    # Convert the Swahili answer to speech by shelling out to the `tts` command-line
    # tool (Coqui TTS); the synthesized audio is written to a WAV file.
    output_file = "tts_output.wav"
    cmd = ["tts", "--text", text, "--out_path", output_file]
    subprocess.run(cmd, check=True)
    return output_file


def process_audio_and_respond(audio):
    # Full pipeline: speech-to-text, Swahili chat completion, then text-to-speech.
    text = transcribe(audio)
    response_text = generate_response(text)
    output_file = inference(response_text)
    return response_text, output_file


# Gradio UI: record a question with the microphone, get the answer back as text and audio.
# (Uses the legacy gr.inputs / gr.outputs Interface API.)
demo = gr.Interface(
    process_audio_and_respond,
    gr.inputs.Audio(source="microphone", type="filepath", label="Bonyeza kitufe cha kurekodi na uliza swali lako"),
    [
        gr.outputs.Textbox(label="Jibu (kwa njia ya maandishi)"),
        gr.outputs.Audio(type="filepath", label="Jibu kwa njia ya sauti (Bofya kusikiliza Jibu)"),
    ],
    title="Mtaalamu wa Viazi Lishe",
    description="Uliza Mtaalamu wetu swali lolote kuhusu viazi lishe",
    theme="compact",
    layout="vertical",
    allow_flagging=False,
    live=True,
)

demo.launch()
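For reference, a minimal sketch of the dependencies the code above appears to rely on; this file is not part of the commit, the package names are inferred from the imports and the `tts` CLI call, and the `openai<1` constraint reflects the pre-1.0 `openai.Audio` / `openai.ChatCompletion` calls used here:

# requirements.txt (illustrative sketch only, assumptions noted per line)
openai<1    # pre-1.0 SDK exposing openai.Audio.transcribe and openai.ChatCompletion
gradio      # a release that still ships the legacy gr.inputs / gr.outputs API
TTS         # Coqui TTS, which provides the `tts` command-line tool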