updated to use gpt-turbo-3.5 instead of davinci

query_data.py CHANGED  (+37 -15)
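In short: the completion-model wrapper OpenAI (whose default model at the time was text-davinci-003) is swapped for the chat-model wrapper ChatOpenAI (default gpt-3.5-turbo), and the plain-string QA_PROMPT is replaced by a ChatPromptTemplate built from a system message carrying the retrieved {context} and a human message carrying the {question}. The old QA template is kept as a comment block; the duplicated prompt = ChatPromptTemplate.from_messages(messages) assignment and the unused langchain.schema imports are redundant but harmless.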
@@ -2,34 +2,56 @@ from langchain.prompts.prompt import PromptTemplate
 from langchain.llms import OpenAI
 from langchain.chains import ChatVectorDBChain
 from langchain.chat_models import ChatOpenAI
+from langchain.prompts.chat import (
+    ChatPromptTemplate,
+    SystemMessagePromptTemplate,
+    AIMessagePromptTemplate,
+    HumanMessagePromptTemplate,
+)
+
+from langchain.schema import (
+    AIMessage,
+    HumanMessage,
+    SystemMessage
+)
+
+system_template = """Use the following pieces of context to answer the users question.
+If you don't know the answer, just say that you don't know, don't try to make up an answer.
+----------------
+{context}"""
+
+messages = [
+    SystemMessagePromptTemplate.from_template(system_template),
+    HumanMessagePromptTemplate.from_template("{question}")
+]
+prompt = ChatPromptTemplate.from_messages(messages)
 
 _template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
 You can assume the question about the syllabus of the H2 Economics, H2 History and H2 Geography A-Level Examinations in Singapore.
-
 Chat History:
 {chat_history}
 Follow Up Input: {question}
 Standalone question:"""
 CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
 
-template = """You are an AI assistant for answering questions about history, geography or economics for the H2 A-Levels.
-You are given the following extracted parts of a long document and a question. Provide a conversational answer.
-If you don't know the answer, just say "Hmm, I'm not sure." Don't try to make up an answer.
-If the question is not about history, geography or economics, politely inform them that you are tuned to only answer questions about it.
-Question: {question}
-=========
-{context}
-=========
-Answer in Markdown:"""
-QA_PROMPT = PromptTemplate(template=template, input_variables=["question", "context"])
-
+#template = """You are an AI assistant for answering questions about history, geography or economics for the H2 A-Levels.
+#You are given the following extracted parts of a long document and a question. Provide a conversational answer.
+#If you don't know the answer, just say "Hmm, I'm not sure." Don't try to make up an answer.
+#If the question is not about history, geography or economics, politely inform them that you are tuned to only answer questions about it.
+#Question: {question}
+#=========
+#{context}
+#=========
+#Answer in Markdown:"""
+#QA_PROMPT = PromptTemplate(template=template, input_variables=["question", "context"])
+prompt = ChatPromptTemplate.from_messages(messages)
 
 def get_chain(vectorstore):
-    llm = OpenAI(temperature=0)
+    llm = ChatOpenAI(temperature=0)
     qa_chain = ChatVectorDBChain.from_llm(
         llm,
         vectorstore,
-        qa_prompt=QA_PROMPT,
-        condense_question_prompt=CONDENSE_QUESTION_PROMPT
+        qa_prompt=prompt,
+        condense_question_prompt = CONDENSE_QUESTION_PROMPT
     )
     return qa_chain
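For orientation, here is a minimal driver for the updated module. It is a sketch, not part of the commit: it assumes the era-appropriate LangChain that still ships ChatVectorDBChain, an OPENAI_API_KEY in the environment, and a FAISS vectorstore standing in for whatever the Space actually builds during ingestion.

# Hypothetical usage sketch; none of this file is part of the commit above.
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

from query_data import get_chain

# Stand-in corpus; the real Space presumably ingests the H2 syllabus documents.
texts = [
    "H2 Economics covers market failure, macroeconomic policy and trade.",
    "H2 History covers the Cold War and independent Southeast Asia.",
]
vectorstore = FAISS.from_texts(texts, OpenAIEmbeddings())

qa_chain = get_chain(vectorstore)

# ChatVectorDBChain takes the question plus prior (question, answer) turns
# and returns a dict whose "answer" key holds the model's reply.
chat_history = []
result = qa_chain({"question": "What does H2 Economics cover?",
                   "chat_history": chat_history})
print(result["answer"])

One caveat on the call shape: later LangChain releases deprecate ChatVectorDBChain in favor of ConversationalRetrievalChain, so this sketch only matches the older API pinned here.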