from langchain.prompts.prompt import PromptTemplate
from langchain.chains import ChatVectorDBChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
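# System prompt: instructs the model to answer only from the retrieved context.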
system_template = """Use the following pieces of context to answer the user's question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
----------------
{context}"""
messages = [
    SystemMessagePromptTemplate.from_template(system_template),
    HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(messages)
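# Prompt for condensing the chat history plus a follow-up into a standalone question.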
_template = """Given the following conversation and a follow-up question, rephrase the follow-up question to be a standalone question.
You can assume the question is about the syllabus of the H2 Economics, H2 History and H2 Geography A-Level Examinations in Singapore.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
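# Alternative single-string QA prompt, kept commented out for reference (unused):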
#template = """You are an AI assistant for answering questions about history, geography or economics for the H2 A-Levels.
#You are given the following extracted parts of a long document and a question. Provide a conversational answer.
#If you don't know the answer, just say "Hmm, I'm not sure." Don't try to make up an answer.
#If the question is not about history, geography or economics, politely inform them that you are tuned to only answer questions about it.
#Question: {question}
#=========
#{context}
#=========
#Answer in Markdown:"""
#QA_PROMPT = PromptTemplate(template=template, input_variables=["question", "context"])
def get_chain(vectorstore):
    """Build a conversational retrieval chain over the given vectorstore."""
    llm = ChatOpenAI(temperature=0)
    qa_chain = ChatVectorDBChain.from_llm(
        llm,
        vectorstore,
        qa_prompt=prompt,
        condense_question_prompt=CONDENSE_QUESTION_PROMPT,
    )
    return qa_chain
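
# Usage sketch: a minimal example of wiring get_chain to a vectorstore.
# Assumptions (not part of the original code): a FAISS index saved under the
# hypothetical path "syllabus_index", and OPENAI_API_KEY set in the environment.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import FAISS

    # Load a previously built index of the syllabus documents.
    vectorstore = FAISS.load_local("syllabus_index", OpenAIEmbeddings())
    chain = get_chain(vectorstore)

    # ChatVectorDBChain expects the question plus the running chat history.
    result = chain({
        "question": "What topics are covered in H2 Economics?",
        "chat_history": [],
    })
    print(result["answer"])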