from langchain.agents import initialize_agent, Tool
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.agents import AgentType
from langchain.tools import BaseTool
from langchain.llms import OpenAI
from langchain import SerpAPIWrapper, LLMChain
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.agents import ZeroShotAgent, Tool, AgentExecutor
from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
from langchain.document_loaders import TextLoader, DirectoryLoader
from langchain.vectorstores import Chroma
import os
import arxiv
import chainlit as cl
from chainlit import user_session
@cl.langchain_factory(use_async=True)
async def init():
    """Build the LangChain agent used by this Chainlit app.

    Loads a persisted Chroma vector store of *Alice in Wonderland*, wraps it
    in a RetrievalQA tool, and assembles a ZeroShotAgent with conversation
    memory. Returns the AgentExecutor that Chainlit will invoke per message.
    """
    # Embeddings model used to query the persisted Chroma store.
    # (fix: was a redundant double assignment `embeddings = embeddings = ...`)
    embeddings = OpenAIEmbeddings()

    # Load the Chroma vector store persisted on disk under ./vector_db.
    persist_directory = "vector_db"
    vectordb = Chroma(persist_directory=persist_directory, embedding_function=embeddings)

    # Retrieval-augmented QA chain over the book; "stuff" packs all retrieved
    # chunks into a single prompt (16k-context model leaves room for that).
    alice_qa = RetrievalQA.from_chain_type(
        ChatOpenAI(
            model_name="gpt-3.5-turbo-16k",
            temperature=0,
        ),
        chain_type="stuff",
        retriever=vectordb.as_retriever(),
    )

    # Conversation memory keyed to the {chat_history} slot in the prompt.
    # (fix: removed unused `search = SerpAPIWrapper()` — it was never added to
    # `tools` and needlessly required a SerpAPI key at startup — and the
    # unused `readonlymemory` wrapper.)
    memory = ConversationBufferMemory(memory_key="chat_history")

    tools = [
        Tool(
            name="Alice in Wonderland QA System",
            func=alice_qa.run,
            description="useful for when you need to answer questions about Alice in Wonderland. Input should be a fully formed question.",
        )
    ]

    # Prompt scaffolding for the zero-shot ReAct agent; the suffix's template
    # slots must stay at column 0 — they are literal prompt text.
    prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
    suffix = """Begin!"
{chat_history}
Question: {input}
{agent_scratchpad}"""
    prompt = ZeroShotAgent.create_prompt(
        tools,
        prefix=prefix,
        suffix=suffix,
        input_variables=["input", "chat_history", "agent_scratchpad"],
    )

    llm_chain = LLMChain(
        llm=ChatOpenAI(
            model_name="gpt-3.5-turbo-16k",
            temperature=0,
        ),
        prompt=prompt,
    )
    agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
    agent_chain = AgentExecutor.from_agent_and_tools(
        agent=agent, tools=tools, verbose=True, memory=memory
    )

    # Let the user know that the system is ready.
    # (fix: was an f-string with no placeholders)
    await cl.Message(
        content="You can begin by asking any questions about Alice in Wonderland!"
    ).send()

    return agent_chain
@cl.langchain_run
async def run(agent, input_str):
    """Relay one user message through the agent and post the answer back.

    Chainlit calls this per incoming message; the agent chain is the object
    returned by the factory above.
    """
    handler = cl.LangchainCallbackHandler()
    async_agent = cl.make_async(agent)
    res = await async_agent(input_str, callbacks=[handler])
    print(res)  # debug: dump the full chain result to stdout
    reply = cl.Message(content=res["output"])
    await reply.send()