Spaces: Runtime error

ariankhalfani committed
Commit: ab8ff28
Parent(s): b4df677

Update chatbot.py

chatbot.py CHANGED (+6 -3)
@@ -211,7 +211,6 @@ def cleanup_response(response):
         response = response[answer_start + len("Answer:"):].strip()
     return response
 
-# Gradio interface for the chatbot
 def chatbot(audio, input_type, text):
     if input_type == "Voice":
         transcription = query_whisper(audio.name)
@@ -221,15 +220,20 @@ def chatbot(audio, input_type, text):
     else:
         query = text
 
+    # Extract details from the prompt
     details = extract_details_from_prompt(query)
+
+    # Get aggregated patient history based on the extracted details
     patient_history = get_aggregated_patient_history(patient_data, details)
 
+    # Create the payload with the patient history and the user's query
     payload = {
         "inputs": f"role: ophthalmologist assistant patient history: {patient_history} question: {query}"
     }
 
     logging.debug(f"Raw input to the LLM: {payload['inputs']}")
 
+    # Query the Hugging Face model with the payload
     response = query_huggingface(payload)
     if isinstance(response, list):
         raw_response = response[0].get("generated_text", "Sorry, I couldn't generate a response.")
@@ -238,8 +242,7 @@ def chatbot(audio, input_type, text):
 
     logging.debug(f"Raw output from the LLM: {raw_response}")
 
-    clean_response = cleanup_response(raw_response)
-    return clean_response, None
+    return raw_response, None
 
 # Gradio interface for generating voice response
 def generate_voice_response(tts_model, text_response):
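
For context on the hunks above: the isinstance(response, list) guard matches the shape of Hugging Face Inference API responses, which for text-generation models arrive as a JSON list like [{"generated_text": "..."}]. A minimal sketch of what query_huggingface might look like under that assumption; the model id, endpoint URL, and token handling below are illustrative guesses, not code from this Space:

import os

import requests

# Assumed endpoint and auth; the Space's actual query_huggingface may differ.
API_URL = "https://api-inference.huggingface.co/models/google/flan-t5-base"
HEADERS = {"Authorization": f"Bearer {os.environ['HF_API_TOKEN']}"}

def query_huggingface(payload):
    """POST the payload to the Inference API and return the decoded JSON.

    Text-generation endpoints typically answer with a list such as
    [{"generated_text": "..."}], which is why chatbot() checks
    isinstance(response, list) before indexing response[0].
    """
    resp = requests.post(API_URL, headers=HEADERS, json=payload, timeout=60)
    resp.raise_for_status()
    return resp.json()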
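
Net effect of the commit: chatbot() now returns the model's generated_text unmodified (raw_response) instead of running it through cleanup_response, and the new comments document each step of the pipeline. For reference, the first hunk shows the tail of the cleanup step that is no longer applied; a plausible reconstruction of the whole helper, with the search-and-guard lines inferred rather than taken from this diff:

def cleanup_response(response):
    # Inferred reconstruction: keep only the text after an "Answer:" marker.
    # Only the last two lines of this function are visible in the diff above.
    answer_start = response.find("Answer:")
    if answer_start != -1:
        response = response[answer_start + len("Answer:"):].strip()
    return response

With the cleanup bypassed, any "Answer:" scaffolding the model echoes back now reaches the caller as-is.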