"""Streamlit chat app ("나만의 챗GPT") backed by a LangChain LCEL chain.

Keeps the running conversation in ``st.session_state["messages"]`` so the
history survives Streamlit's rerun-on-interaction model, and streams the
model's answer token by token into the UI.
"""
import streamlit as st
from langchain_core.messages.chat import ChatMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_teddynote.prompts import load_prompt
from dotenv import load_dotenv
from langchain import hub

# Load OPENAI_API_KEY (and friends) from a local .env file.
load_dotenv()

st.title("나만의 챗GPT💬")

# Initialize conversation history exactly once per browser session.
# (Streamlit reruns the whole script on every interaction.)
if "messages" not in st.session_state:
    st.session_state["messages"] = []

# Sidebar: reset button + prompt-mode selector.
with st.sidebar:
    clear_btn = st.button("대화 초기화")
    # BUGFIX: ("기본모드") is just a string, so selectbox would iterate it
    # character-by-character and show one option per character.
    # A one-element tuple needs the trailing comma.
    selected_prompt = st.selectbox(
        "프롬프트를 선택해 주세요", ("기본모드",), index=0
    )


def print_messages():
    """Replay the stored conversation into the chat UI on each rerun."""
    for chat_message in st.session_state["messages"]:
        st.chat_message(chat_message.role).write(chat_message.content)


def add_message(role, message):
    """Append one turn (role + content) to the session-state history.

    Args:
        role: Chat role, e.g. "user" or "assistant".
        message: The text content of the turn.
    """
    st.session_state["messages"].append(ChatMessage(role=role, content=message))


def create_chain(prompt_type="기본모드"):
    """Build the LCEL chain ``prompt | llm | StrOutputParser``.

    Args:
        prompt_type: Name of the prompt mode selected in the sidebar.
            Only "기본모드" (default mode) is implemented today; the
            parameter is kept (and now actually consulted) so additional
            modes can be added without changing callers.

    Returns:
        A runnable chain that maps {"question": str} to a plain string.
    """
    # Default-mode prompt; extend this branch table for new prompt_type values.
    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "당신은 친절한 AI 어시스턴트입니다. 다음의 질문에 간결하게 답변해 주세요.",
            ),
            ("user", "#Question:\n{question}"),
        ]
    )
    # temperature=0 for deterministic, focused answers.
    llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
    return prompt | llm | StrOutputParser()


# Reset the stored history when the sidebar button is pressed.
if clear_btn:
    st.session_state["messages"] = []

# Re-render prior turns before handling new input.
print_messages()

user_input = st.chat_input("궁금한 내용을 물어보세요!")
if user_input:
    # Echo the user's message immediately.
    st.chat_message("user").write(user_input)

    chain = create_chain(selected_prompt)
    response = chain.stream({"question": user_input})

    # Stream tokens into a single placeholder so the answer grows in place.
    with st.chat_message("assistant"):
        container = st.empty()
        ai_answer = ""
        for token in response:
            ai_answer += token
            container.markdown(ai_answer)

    # Persist both turns only after the full answer has arrived.
    add_message("user", user_input)
    add_message("assistant", ai_answer)