# Streamlit chat app: Together-AI-backed mental health / faith-based advisor.
# Standard library
import os

# Third-party
import requests
import streamlit as st
from dotenv import load_dotenv

# Pull TOGETHER_API_KEY (and any other settings) from a local .env file.
load_dotenv()
def reset_conversation():
    """Restore the chat session to its initial, empty state."""
    initial_state = {
        "messages": [],
        "message_count": 0,
        "ask_intervention": False,
        "diagnosis": None,
    }
    for key, value in initial_state.items():
        st.session_state[key] = value
# Together-hosted model used for every API call (chat and diagnosis).
model_link = "NousResearch/Nous-Hermes-2-Yi-34B"

# System prompts that prime the model for each selectable persona.
# The dict keys double as the options in the sidebar model selector.
model_pre_instructions = {
    "Biblical Religious Advisor": "You are a bible AI called hope that helps people. You are a therapy AI. when users asks for advice be friendly in a biblical way.",
    "Mental health AI": "From now on, you are an AI Therapist called Therax. When the user asks for advice, be very friendly and empathize with them if necessary, never say something conclusive always do your best to keep the user hooked by trying to know more as you find a solution. When the user asks your name, just tell them you are Dave, created by SIST Kisii University. You were built to be very friendly and compassionate. Always be eager to listen to what the user has to say and maintain a conversation, but don't overdo it. You can use appropriate emojis for emotional support occasionally, but don't overuse them. Keep your responses concise to maintain a conversational flow. Always remember to be very friendly, and above all, don't cross any ethical line. From time to time, assure the user that you do not store any of their data. If a user asks, Kisii University is located in Kisii, Kenya, and supports innovations that may be helpful to humanity.",
}
# Function to interact with the selected model via the Together API | |
def interact_with_together_api(messages, model_link, diagnostic=False):
    """Call the Together chat-completions API and return the reply text.

    Args:
        messages: Chat history as (role, text) tuples, where role is
            "user" or "assistant".
        model_link: Together model identifier to query.
        diagnostic: When True, send a diagnosis request built from only the
            user's messages instead of the normal conversation.

    Returns:
        The assistant's reply, or a fallback message if the request fails.
    """
    if diagnostic:
        # Diagnosis mode: dedicated system prompt plus only the user's side
        # of the conversation. (The original built the normal message list
        # first and then discarded it; that wasted work is removed.)
        all_messages = [
            {"role": "system", "content": "You are an AI model specialized in mental health diagnosis."},
            {"role": "user", "content": "Analyze the following conversation and predict the mental issue the user might be suffering from:"},
        ]
        all_messages += [
            {"role": "user", "content": text} for role, text in messages if role == "user"
        ]
    else:
        # Normal chat: persona pre-instruction followed by the full history.
        # NOTE(review): the original `any("role" in msg ...)` test was always
        # false for (role, text) tuples, so the pre-instruction was always
        # prepended; the "Switched to model" branch was dead code and is gone.
        all_messages = [{"role": "system", "content": model_pre_instructions[selected_model]}]
        for role, text in messages:
            all_messages.append(
                {"role": "user" if role == "user" else "assistant", "content": text}
            )

    url = "https://api.together.xyz/v1/chat/completions"
    payload = {
        "model": model_link,
        "temperature": 1.05,
        "top_p": 0.9,
        "top_k": 50,
        "repetition_penalty": 1,
        "n": 1,
        "messages": all_messages,
    }
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "Authorization": f"Bearer {os.getenv('TOGETHER_API_KEY')}",
    }
    try:
        # Timeout added so a stalled API call cannot hang the Streamlit app.
        response = requests.post(url, json=payload, headers=headers, timeout=60)
        response.raise_for_status()
        # Guard response parsing too: a malformed body previously raised an
        # uncaught KeyError/ValueError/IndexError.
        assistant_response = response.json()["choices"][0]["message"]["content"]
    except (requests.exceptions.RequestException, KeyError, IndexError, ValueError):
        assistant_response = "Sorry, I couldn't connect to the server. Please try again later."
    return assistant_response
# Function to produce a tentative mental-health diagnosis from the chat history.
def diagnose_mental_health(messages):
    """Ask the model to infer a possible mental-health issue from the chat.

    Args:
        messages: Chat history as (role, text) tuples; only "user"
            messages are forwarded to the model.

    Returns:
        The model's diagnosis text, or a fallback message if the
        request fails.
    """
    diagnostic_messages = [
        {"role": "system", "content": "You are an AI model specialized in mental health diagnosis."},
        {"role": "user", "content": "Analyze the following conversation and predict the mental issue the user might be suffering from:"},
    ]
    # Only the user's side of the conversation is relevant for diagnosis.
    diagnostic_messages += [
        {"role": "user", "content": text} for role, text in messages if role == "user"
    ]
    url = "https://api.together.xyz/v1/chat/completions"
    payload = {
        "model": model_link,  # module-level default model
        "temperature": 0.7,
        "top_p": 0.9,
        "top_k": 50,
        "repetition_penalty": 1.2,
        "n": 1,
        "messages": diagnostic_messages,
    }
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "Authorization": f"Bearer {os.getenv('TOGETHER_API_KEY')}",
    }
    try:
        # Timeout keeps a stalled request from freezing the UI.
        response = requests.post(url, json=payload, headers=headers, timeout=60)
        response.raise_for_status()
        # Guard against an unexpected response shape as well; previously a
        # malformed body raised an uncaught KeyError/ValueError.
        diagnosis = response.json()["choices"][0]["message"]["content"]
    except (requests.exceptions.RequestException, KeyError, IndexError, ValueError):
        diagnosis = "Sorry, I couldn't perform the diagnosis. Please try again later."
    return diagnosis
# --- Sidebar ---------------------------------------------------------------
selected_model = st.sidebar.selectbox("Select Model", list(model_pre_instructions.keys()))
st.sidebar.button('Reset Chat', on_click=reset_conversation)
# Beta-phase disclaimer, branding image, and credit line.
st.sidebar.markdown("**Note**: This model is still in the beta phase. Responses may be inaccurate or undesired. Use it cautiously, especially for critical issues.")
st.sidebar.image("https://assets.isu.pub/document-structure/221118065013-a6029cf3d563afaf9b946bb9497d45d4/v1/2841525b232adaef7bd0efe1da81a4c5.jpeg", width=200)
st.sidebar.write("A product proudly developed by Kisii University")

# --- Session state ---------------------------------------------------------
# First run of this session: start with an empty conversation.
if "messages" not in st.session_state:
    st.session_state.messages = []
    st.session_state.message_count = 0
    st.session_state.ask_intervention = False
    st.session_state.diagnosis = None

# After a few exchanges, show a cached diagnosis in the sidebar.
if st.session_state.message_count >= 4:
    if st.session_state.get("diagnosis") is None:
        st.session_state.diagnosis = diagnose_mental_health(st.session_state.messages)
    st.sidebar.markdown(f"### Diagnosis:\n**{st.session_state.diagnosis}**")

# Replay the stored conversation on every Streamlit rerun.
for role, text in st.session_state.messages:
    with st.chat_message(role):
        st.markdown(text)

# Words that suggest the user may need a human professional.
intervention_keywords = [
    "human", "therapist", "someone", "died", "death", "help", "suicide", "suffering", "sucidal", "depression",
    "crisis", "emergency", "support", "depressed", "anxiety", "lonely", "desperate",
    "struggling", "counseling", "distressed", "hurt", "pain", "grief", "trauma", "die", "Kill",
    "abuse", "danger", "risk", "urgent", "need assistance", "mental health", "talk to",
]
# --- Chat input & response -------------------------------------------------
if prompt := st.chat_input(f"Hi, I'm {selected_model}, ask me a question"):
    # Echo the user's message and record it in the session history.
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append(("user", prompt))
    st.session_state.message_count += 1

    # Case-insensitive keyword scan. FIX: the original compared raw keywords
    # against a lowercased prompt, so capitalized entries (e.g. "Kill") could
    # never match; lowering both sides makes every keyword effective.
    lowered_prompt = prompt.lower()
    if any(keyword.lower() in lowered_prompt for keyword in intervention_keywords):
        st.markdown("<span style='color:red;'>I have a feeling you may need to talk to a therapist. If you agree with me please contact +254793609747; Name: Davis. If you don't, then keep talking to me as we figure this out.</span>", unsafe_allow_html=True)

    # Query the model, showing a transient typing indicator meanwhile.
    placeholder = st.empty()
    with placeholder:
        st.markdown("AI is typing..")
    assistant_response = interact_with_together_api(st.session_state.messages, model_link)
    placeholder.empty()

    # Show and store the assistant's reply.
    with st.chat_message("assistant"):
        st.markdown(assistant_response)
    st.session_state.messages.append(("assistant", assistant_response))

    # Refresh the sidebar diagnosis once enough messages have accumulated.
    if st.session_state.message_count >= 4:
        st.session_state.diagnosis = diagnose_mental_health(st.session_state.messages)
        st.sidebar.markdown(f"### Diagnosis:\n**{st.session_state.diagnosis}**")