akhaliq HF staff committed on
Commit
9f81a69
1 Parent(s): 913a139

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -18
app.py CHANGED
@@ -84,15 +84,16 @@ def generate_response_and_audio(audio_bytes: bytes, state: AppState):
84
  audio = getattr(chunk.choices[0], 'audio', [])
85
  if content:
86
  full_response += content
87
- yield full_response, None, state
88
  if audio:
89
  audios.extend(audio)
90
- audio_data = b''.join([base64.b64decode(a) for a in audios])
91
- yield full_response, audio_data, state
92
 
93
  state.conversation.append({"role": "user", "content": "Audio input"})
94
  state.conversation.append({"role": "assistant", "content": full_response})
95
 
 
 
96
  except Exception as e:
97
  raise gr.Error(f"Error during audio streaming: {e}")
98
 
@@ -109,25 +110,17 @@ def response(state: AppState):
109
  )
110
  segment.export(audio_buffer, format="wav")
111
 
112
- generator = generate_response_and_audio(audio_buffer.getvalue(), state)
113
-
114
- # Process the generator to get the final results
115
- final_text = ""
116
- final_audio = None
117
- for text, audio, updated_state in generator:
118
- final_text = text if text else final_text
119
- final_audio = audio if audio else final_audio
120
- state = updated_state
121
 
122
  # Update the chatbot with the final conversation
123
- chatbot_output = state.conversation[-2:] # Get the last two messages (user input and AI response)
124
 
125
  # Reset the audio stream for the next interaction
126
- state.stream = None
127
- state.pause_start = None
128
- state.last_speech = 0
129
 
130
- return chatbot_output, final_audio, state
131
 
132
  def set_api_key(api_key, state):
133
  if not api_key:
@@ -154,7 +147,7 @@ with gr.Blocks() as demo:
154
  input_audio = gr.Audio(label="Input Audio", sources="microphone", type="numpy")
155
  with gr.Column():
156
  chatbot = gr.Chatbot(label="Conversation", type="messages")
157
- output_audio = gr.Audio(label="Output Audio", streaming=True, autoplay=True)
158
 
159
  state = gr.State(AppState())
160
 
 
84
  audio = getattr(chunk.choices[0], 'audio', [])
85
  if content:
86
  full_response += content
 
87
  if audio:
88
  audios.extend(audio)
89
+
90
+ final_audio = b''.join([base64.b64decode(a) for a in audios])
91
 
92
  state.conversation.append({"role": "user", "content": "Audio input"})
93
  state.conversation.append({"role": "assistant", "content": full_response})
94
 
95
+ return full_response, final_audio, state
96
+
97
  except Exception as e:
98
  raise gr.Error(f"Error during audio streaming: {e}")
99
 
 
110
  )
111
  segment.export(audio_buffer, format="wav")
112
 
113
+ full_response, final_audio, updated_state = generate_response_and_audio(audio_buffer.getvalue(), state)
 
 
 
 
 
 
 
 
114
 
115
  # Update the chatbot with the final conversation
116
+ chatbot_output = updated_state.conversation[-2:] # Get the last two messages (user input and AI response)
117
 
118
  # Reset the audio stream for the next interaction
119
+ updated_state.stream = None
120
+ updated_state.pause_start = None
121
+ updated_state.last_speech = 0
122
 
123
+ return chatbot_output, final_audio, updated_state
124
 
125
  def set_api_key(api_key, state):
126
  if not api_key:
 
147
  input_audio = gr.Audio(label="Input Audio", sources="microphone", type="numpy")
148
  with gr.Column():
149
  chatbot = gr.Chatbot(label="Conversation", type="messages")
150
+ output_audio = gr.Audio(label="Output Audio", autoplay=True)
151
 
152
  state = gr.State(AppState())
153