import streamlit as st
import requests
import os
from dotenv import load_dotenv
from requests.exceptions import RequestException

load_dotenv()


def reset_conversation():
    '''Resets the conversation.'''
    st.session_state.messages = []
    st.session_state.ask_intervention = False
    return None


# Define model links for the Addiction Recovery and Mental Health models
model_links = {
    "Addiction recovery AI": "NousResearch/Nous-Hermes-2-Yi-34B",
    "Mental health AI": "NousResearch/Nous-Hermes-2-Yi-34B"
}

# Define pre-instructions for each model
model_pre_instructions = {
    "Addiction recovery AI": "From now on, you're an AI Companion named Hope. When someone reaches out for guidance, be compassionate and supportive, acknowledging their struggle and offering encouragement. When asked, share that you're Hope, developed by SIST in Kisii University to aid in addiction recovery. Your purpose is to provide a listening ear and gentle guidance. Engage warmly but refrain from sending long texts; keep the generated text short and conversational. Emojis can add a touch of empathy, but use them judiciously. Reassure users that their data isn't stored. If asked, mention that Kisii University is in Kenya, committed to fostering innovations for the betterment of humanity. Your goal is to help the user overcome the addiction they are facing.",
    "Mental health AI": "From now on, you are an AI Therapist called Dave. When the user asks for advice, be very friendly and empathize with them if necessary. When the user asks your name, just tell them you are Dave, created by SIST in Kisii University. You were built to be very friendly and compassionate. Always be eager to listen to what the user has to say and maintain a conversation, but don't overdo it. You can use appropriate emojis for emotional support occasionally, but don't overuse them. Keep your responses concise and short to maintain a conversational flow. Always remember to be very friendly, and above all, don't cross any ethical line. From time to time, assure the user that you do not store any of their data. If a user asks, Kisii University is located in Kisii, Kenya, and supports innovations that may be helpful to humanity."
}


# Function to interact with the selected model via the Together API
def interact_with_together_api(messages, model_link):
    all_messages = []

    # The stored history is a list of (role, content) tuples and never contains a
    # system entry, so prepend the pre-instructions for the selected model;
    # otherwise note the model switch.
    if not any(role == "system" for role, _ in messages):
        all_messages.append({"role": "system", "content": model_pre_instructions[selected_model]})
    else:
        all_messages.append({"role": "system", "content": f"Switched to model: {selected_model}"})

    # Replay the stored conversation; the latest user prompt is already the last entry
    for role, content in messages:
        all_messages.append({"role": role, "content": content})

    url = "https://api.together.xyz/v1/chat/completions"
    payload = {
        "model": model_link,
        "temperature": 1.05,
        "top_p": 0.9,
        "top_k": 50,
        "repetition_penalty": 1,
        "n": 1,
        "messages": all_messages,
    }
    TOGETHER_API_KEY = os.getenv('TOGETHER_API_KEY')
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "Authorization": f"Bearer {TOGETHER_API_KEY}",
    }

    try:
        response = requests.post(url, json=payload, headers=headers)
        response.raise_for_status()  # Ensure the HTTP request was successful
        # Extract the assistant's reply from the JSON response
        response_data = response.json()
        assistant_response = response_data["choices"][0]["message"]["content"]
        return assistant_response
    except RequestException as e:
        st.error(f"Error communicating with the API: {e}")
        return None


# Initialize chat history and session state attributes
if "messages" not in st.session_state:
    st.session_state.messages = []
    st.session_state.ask_intervention = False

# Create sidebar with model selection dropdown and reset button
selected_model = st.sidebar.selectbox("Select Model", list(model_links.keys()))
reset_button = st.sidebar.button('Reset Chat', on_click=reset_conversation)

# Re-render the stored conversation so it persists across Streamlit reruns
for role, content in st.session_state.messages:
    with st.chat_message(role):
        st.markdown(content)

# Keywords in the user's prompt that suggest a human intervention may be needed
INTERVENTION_KEYWORDS = [
    "human", "therapist", "someone", "died", "death", "help", "suicide", "suffering",
    "crisis", "emergency", "support", "depressed", "anxiety", "lonely", "desperate",
    "struggling", "counseling", "distressed", "hurt", "pain", "grief", "trauma",
    "abuse", "danger", "risk", "urgent", "need assistance"
]

# Accept user input
if prompt := st.chat_input(f"Hi, I'm {selected_model}, let's chat"):
    # Display the user message in a chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add the user message to the chat history
    st.session_state.messages.append(("user", prompt))

    # Interact with the selected model
    assistant_response = interact_with_together_api(st.session_state.messages, model_links[selected_model])

    if assistant_response is not None:
        # Display the assistant response in a chat message container
        with st.chat_message("assistant"):
            placeholder = st.empty()
            placeholder.markdown("AI is typing...")
            placeholder.markdown(assistant_response)

        # Check whether intervention is needed based on the user's prompt
        if any(keyword in prompt.lower() for keyword in INTERVENTION_KEYWORDS):
            if not st.session_state.ask_intervention:
                st.session_state.ask_intervention = True  # only offer the referral once per conversation
                if st.button("After analysing our session, you may need some extra help. You can reach out to a certified therapist, Ogega, at +25493609747. Feel free to talk."):
                    st.write("You can reach out to a certified therapist at +25493609747.")

        # Add the assistant response to the chat history
        st.session_state.messages.append(("assistant", assistant_response))
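
# Usage sketch (assumed setup, filenames are illustrative): the script expects a `.env`
# file next to it so that load_dotenv() can populate TOGETHER_API_KEY before the first
# request, e.g.
#   TOGETHER_API_KEY=<your Together API key>
# The app is then launched with Streamlit's CLI, e.g.
#   streamlit run app.py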