import os

import sqlalchemy
from conversation_retrieval_chain import CustomConversationalRetrievalChain
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.embeddings import GPT4AllEmbeddings
from langchain.llms import OpenAI
from vector_store import CustomVectorStore


def get_chain(conn: sqlalchemy.engine.Connection):
    """Build a conversational retrieval chain over the article abstracts."""
    # Local GPT4All embeddings are used to embed the incoming question.
    embeddings = GPT4AllEmbeddings()

    # Vector store backed by the precomputed abstract embeddings in the database.
    db = CustomVectorStore(
        embedding_function=embeddings,
        table_name="article",
        column_name="abstract_embedding",
        connection=conn,
    )
    retriever = db.as_retriever()

    # Deterministic completion model for answering over the retrieved abstracts.
    llm = OpenAI(
        temperature=0,
        openai_api_key=os.environ["OPENAI_API_KEY"],
        model="text-davinci-003",
    )

    # Buffer memory keeps the running conversation under the "chat_history" key.
    memory = ConversationBufferMemory(
        output_key="answer", memory_key="chat_history", return_messages=True
    )

    return CustomConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=retriever,
        verbose=True,
        memory=memory,
        return_source_documents=True,
        max_tokens_limit=3700,
    )
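

# A minimal usage sketch, not part of the original module. It assumes the
# database URL is provided via a DATABASE_URL environment variable and that
# CustomConversationalRetrievalChain follows the standard
# ConversationalRetrievalChain input/output schema ("question" in, "answer"
# and "source_documents" out, given the options set above).
if __name__ == "__main__":
    engine = sqlalchemy.create_engine(os.environ["DATABASE_URL"])
    with engine.connect() as conn:
        chain = get_chain(conn)
        result = chain({"question": "What topics do the stored abstracts cover?"})
        print(result["answer"])
        # With return_source_documents=True, the retrieved articles are returned too.
        for doc in result["source_documents"]:
            print(doc.metadata)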