File size: 9,085 Bytes
d58f539
f7de418
d58f539
f7de418
120d632
f7de418
120d632
f7de418
 
dec22aa
518275a
6e9cbe5
dec22aa
ce67688
 
d58f539
f7de418
 
f8bd65e
 
a0f34aa
f7de418
ef46ff0
913a139
a0f34aa
ef46ff0
1926927
c2d4418
 
 
1926927
ef46ff0
 
1926927
ef46ff0
16020a5
1926927
ce67688
 
 
 
 
 
 
 
 
a0f34aa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1926927
f8bd65e
 
 
 
 
 
 
a0f34aa
 
913a139
a0f34aa
f8bd65e
a0f34aa
 
f8bd65e
1926927
518275a
 
 
 
 
 
 
 
f8bd65e
d6247a0
 
 
913a139
 
d6247a0
0b682de
 
 
 
 
 
 
 
a0f34aa
d6247a0
 
 
 
 
 
1926927
d6247a0
 
0b682de
913a139
 
d6247a0
 
 
f8bd65e
120d632
f8bd65e
518275a
d6247a0
 
f8bd65e
 
 
1926927
120d632
 
 
518275a
f8bd65e
 
518275a
f8bd65e
 
9f81a69
1926927
f8bd65e
518275a
9f81a69
d6247a0
f8bd65e
d58f539
1926927
f8bd65e
913a139
 
a0f34aa
f8bd65e
 
 
 
 
 
 
 
ef46ff0
e65a834
a0f34aa
518275a
e65a834
518275a
 
 
 
 
 
a0f34aa
913a139
e65a834
a0f34aa
 
1926927
ef46ff0
1926927
ce67688
 
 
 
 
 
1926927
ce67688
 
 
 
 
 
 
 
 
 
 
 
2ec09b1
 
 
 
ce67688
 
 
 
 
 
2ec09b1
ce67688
 
 
 
 
 
 
 
 
 
 
ef46ff0
f8bd65e
f7de418
d00dc38
1926927
2ec09b1
1926927
ef46ff0
1926927
 
2279da8
 
 
 
1926927
 
2279da8
1926927
 
 
 
a0f34aa
1926927
2279da8
 
 
 
 
1926927
a0f34aa
f8bd65e
 
ce67688
 
 
 
 
 
 
1926927
 
 
ce67688
1926927
f8bd65e
 
 
 
 
913a139
 
d6247a0
a0f34aa
f8bd65e
1926927
f8bd65e
a0f34aa
 
 
f8bd65e
120d632
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
import gradio as gr
import numpy as np
import io
from pydub import AudioSegment
import tempfile
import openai
import time
from dataclasses import dataclass, field
from threading import Lock
import base64
import uuid
import os

# Log only whether the key is configured; printing the raw value of API_KEY
# would leak a secret into stdout / process logs.
print("API_KEY configured:", os.getenv("API_KEY") is not None)


@dataclass
class AppState:
    """Mutable per-session state shared across the Gradio callbacks."""

    stream: np.ndarray | None = None  # accumulated microphone samples
    sampling_rate: int = 0  # sample rate of `stream` in Hz (0 until first chunk)
    pause_detected: bool = False  # True once trailing silence is detected
    conversation: list = field(default_factory=list)  # chat entries: {id, role, content}
    # Fixed annotation: the default is None, so the field is optional.
    client: openai.OpenAI | None = None
    output_format: str = "mp3"  # TTS audio container requested from the server
    stopped: bool = False  # NOTE(review): never set in this file — confirm it is used elsewhere


# Global lock for thread safety.
# NOTE(review): this lock is never acquired anywhere in this file — either wire
# it into the handlers that mutate shared state or remove it; confirm usage.
state_lock = Lock()


def create_client(api_key):
    """Build an OpenAI-compatible client aimed at the Lepton llama3-1-8b endpoint."""
    endpoint = "https://llama3-1-8b.lepton.run/api/v1/"
    return openai.OpenAI(base_url=endpoint, api_key=api_key)


def test_api_key(client):
    # Try making a simple request to check if the API key works
    try:
        # Attempt to retrieve available models as a test
        client.models.list()
    except Exception as e:
        raise e


def determine_pause(audio, sampling_rate, state):
    """Return True when the trailing second of *audio* is effectively silent."""
    tail_samples = int(sampling_rate * 1)  # number of samples in the last 1 second
    if len(audio) < tail_samples:
        # Not enough audio captured yet to judge a pause.
        return False

    tail = audio[-tail_samples:]
    mean_level = np.mean(np.abs(tail))

    quiet_cutoff = 0.01  # tune for the expected microphone/noise floor
    return bool(mean_level < quiet_cutoff)


def process_audio(audio: tuple, state: AppState):
    """Accumulate a streamed microphone chunk and stop recording on a pause.

    *audio* is Gradio's (sampling_rate, samples) tuple. Returns an Audio
    component update that stops recording once a pause is detected, otherwise
    None; the (possibly mutated) state is always returned second.
    """
    rate, samples = audio
    if state.stream is None:
        # First chunk: start the rolling buffer and remember the sample rate.
        state.stream = samples
        state.sampling_rate = rate
    else:
        state.stream = np.concatenate((state.stream, samples))

    state.pause_detected = determine_pause(state.stream, state.sampling_rate, state)

    if not state.pause_detected:
        return None, state
    return gr.Audio(recording=False), state


def update_or_append_conversation(conversation, id, role, new_content):
    """Overwrite the content of the (id, role) entry in-place, appending if absent."""
    existing = next(
        (e for e in conversation if e["id"] == id and e["role"] == role),
        None,
    )
    if existing is not None:
        existing["content"] = new_content
    else:
        conversation.append({"id": id, "role": role, "content": new_content})


def generate_response_and_audio(audio_bytes: bytes, state: AppState):
    """Stream a chat completion (text + TTS audio) for the recorded utterance.

    Sends *audio_bytes* base64-encoded as the newest user message and yields
    ``(id, full_response, asr_result, final_audio, state)`` tuples as chunks
    arrive. ``final_audio`` is None for intermediate yields; the concatenated
    decoded audio is yielded once after the stream ends.

    Raises:
        gr.Error: if no client is configured or the streaming call fails.
    """
    if state.client is None:
        raise gr.Error("Please enter a valid API key first.")

    format_ = state.output_format
    bitrate = 128 if format_ == "mp3" else 32  # Higher bitrate for MP3, lower for OPUS
    audio_data = base64.b64encode(audio_bytes).decode()
    old_messages = []

    # Replay the prior conversation (text only) ahead of the new audio turn.
    for item in state.conversation:
        old_messages.append({"role": item["role"], "content": item["content"]})

    old_messages.append(
        {"role": "user", "content": [{"type": "audio", "data": audio_data}]}
    )

    try:
        stream = state.client.chat.completions.create(
            extra_body={
                # Lepton-specific extensions: request TTS audio alongside text.
                "require_audio": True,
                "tts_preset_id": "jessica",
                "tts_audio_format": format_,
                "tts_audio_bitrate": bitrate,
            },
            model="llama3.1-8b",
            messages=old_messages,
            temperature=0.7,
            max_tokens=256,
            stream=True,
        )

        full_response = ""
        asr_result = ""
        audios = []
        # One id per exchange so the UI updates (rather than appends) entries.
        id = uuid.uuid4()

        for chunk in stream:
            if not chunk.choices:
                continue
            content = chunk.choices[0].delta.content
            # audio / asr_results are non-standard fields; default to [] when absent.
            audio = getattr(chunk.choices[0], "audio", [])
            asr_results = getattr(chunk.choices[0], "asr_results", [])
            if asr_results:
                asr_result += "".join(asr_results)
                yield id, full_response, asr_result, None, state
            if content:
                full_response += content
                yield id, full_response, asr_result, None, state
            if audio:
                audios.extend(audio)

        # Audio chunks arrive base64-encoded; join the decoded bytes at the end.
        final_audio = b"".join([base64.b64decode(a) for a in audios])

        yield id, full_response, asr_result, final_audio, state

    except Exception as e:
        raise gr.Error(f"Error during audio streaming: {e}")


def response(state: AppState):
    """Encode the recorded stream to WAV, stream the model reply, update the chat.

    Generator wired to Gradio's stop_recording: yields
    ``(conversation, audio_bytes, state)`` as partial ASR/text/audio results
    arrive, then clears the input stream for the next turn.
    """
    if state.stream is None or len(state.stream) == 0:
        # NOTE(review): this function contains `yield`, so it is a generator and
        # this `return` value becomes StopIteration.value rather than an output
        # triple — confirm Gradio handles the empty-recording case as intended.
        return None, None, state

    audio_buffer = io.BytesIO()
    # Wrap the raw numpy samples in a pydub segment so they can be exported as WAV.
    segment = AudioSegment(
        state.stream.tobytes(),
        frame_rate=state.sampling_rate,
        sample_width=state.stream.dtype.itemsize,
        channels=(1 if len(state.stream.shape) == 1 else state.stream.shape[1]),
    )
    segment.export(audio_buffer, format="wav")

    generator = generate_response_and_audio(audio_buffer.getvalue(), state)

    for id, text, asr, audio, updated_state in generator:
        state = updated_state
        if asr:
            update_or_append_conversation(state.conversation, id, "user", asr)
        if text:
            update_or_append_conversation(state.conversation, id, "assistant", text)
        chatbot_output = state.conversation
        yield chatbot_output, audio, state

    # Reset the audio stream for the next interaction
    state.stream = None
    state.pause_detected = False


def set_api_key(api_key, state):
    """Validate a user-supplied key; hide the key-entry widgets on success.

    Returns updates for (status textbox, key input, set button) plus state.
    On failure, the input and button updates are None (left untouched).
    """
    try:
        # Assign first, then probe — mirrors how the client is stored even
        # while being validated.
        state.client = create_client(api_key)
        test_api_key(state.client)
    except Exception:
        failure_status = gr.update(
            value="Invalid API key. Please try again.", visible=True
        )
        return failure_status, None, None, state

    success_status = gr.update(value="API key set successfully!", visible=True)
    hide_input = gr.update(visible=False)
    hide_button = gr.update(visible=False)
    return success_status, hide_input, hide_button, state


def initial_setup(state):
    """Try the API_KEY environment variable at startup.

    On success the key-entry UI is hidden; on failure (or when no env key is
    set) the input box and button are shown. Returns updates for
    (status textbox, key input, set button) plus state.
    """
    env_key = os.getenv("API_KEY")

    if not env_key:
        # No API key in environment variable: prompt the user for one.
        return (
            gr.update(visible=False),
            gr.update(visible=True),
            gr.update(visible=True),
            state,
        )

    try:
        state.client = create_client(env_key)
        test_api_key(state.client)  # confirm the environment key actually works
    except Exception:
        # Env key failed: surface the error and show the manual-entry widgets.
        return (
            gr.update(
                value="Failed to use default API key. Please enter a valid API key.",
                visible=True,
            ),
            gr.update(visible=True),
            gr.update(visible=True),
            state,
        )

    # Env key works: announce the default-key rate limit and hide the inputs.
    return (
        gr.update(
            value="You are using default Lepton API key, which have 10 requests/min limit",
            visible=True,
        ),
        gr.update(visible=False),
        gr.update(visible=False),
        state,
    )


# --- UI construction and event wiring -------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# Lepton AI LLM Voice Mode")
    gr.Markdown(
        "You can find Lepton AI LLM voice doc [here](https://www.lepton.ai/playground/chat/llama-3.2-3b) and serverless endpoint API Key [here](https://dashboard.lepton.ai/workspace-redirect/settings/api-tokens)"
    )
    # API-key entry row: textbox plus button, hidden once a working key is set.
    with gr.Row():
        with gr.Column(scale=3):
            api_key_input = gr.Textbox(
                type="password",
                placeholder="Enter your Lepton API Key",
                show_label=False,
                container=False,
            )
        with gr.Column(scale=1):
            set_key_button = gr.Button("Set API Key", scale=2, variant="primary")

    # Status line used for both success and error messages; hidden by default.
    api_key_status = gr.Textbox(
        show_label=False, container=False, interactive=False, visible=False
    )

    with gr.Blocks():
        with gr.Row():
            input_audio = gr.Audio(
                label="Input Audio", sources="microphone", type="numpy"
            )
            output_audio = gr.Audio(label="Output Audio", autoplay=True)
        chatbot = gr.Chatbot(label="Conversation", type="messages")

    # Per-session state object shared across all callbacks below.
    state = gr.State(AppState())

    # Initial setup to set API key from environment variable
    demo.load(
        initial_setup,
        inputs=state,
        outputs=[api_key_status, api_key_input, set_key_button, state],
    )

    set_key_button.click(
        set_api_key,
        inputs=[api_key_input, state],
        outputs=[api_key_status, api_key_input, set_key_button, state],
    )

    # Feed microphone chunks into process_audio; it stops recording on pause.
    stream = input_audio.stream(
        process_audio,
        [input_audio, state],
        [input_audio, state],
        stream_every=0.25,  # Reduced to make it more responsive
        time_limit=60,  # Increased to allow for longer messages
    )

    # When recording stops, stream the model's reply into the chatbot/audio.
    respond = input_audio.stop_recording(
        response, [state], [chatbot, output_audio, state]
    )
    # Update the chatbot with the final conversation
    respond.then(lambda s: s.conversation, [state], [chatbot])


demo.launch()