from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai.chat_models import ChatOpenAI
model = ChatOpenAI()
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're an assistant who's good at {ability}. Respond in 20 words or fewer",
),
MessagesPlaceholder(variable_name="history"),
("human", "{input}"),
]
)
runnable = prompt | model
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
store = {}
def get_session_history(session_id: str) -> BaseChatMessageHistory:
if session_id not in store:
store[session_id] = ChatMessageHistory()
return store[session_id]
with_message_history = RunnableWithMessageHistory(
runnable,
get_session_history,
input_messages_key="input",
history_messages_key="history",
)
with_message_history.invoke(
{"ability": "math", "input": "What does cosine mean?"},
config={"configurable": {"session_id": "abc123"}},
)
with_message_history.invoke(
{"ability": "math", "input": "What?"},
config={"configurable": {"session_id": "abc123"}},
)
with_message_history.invoke(
{"ability": "math", "input": "What?"},
config={"configurable": {"session_id": "def234"}},
)
from langchain_core.runnables import ConfigurableFieldSpec
store = {}
def get_session_history(user_id: str, conversation_id: str) -> BaseChatMessageHistory:
if (user_id, conversation_id) not in store:
store[(user_id, conversation_id)] = ChatMessageHistory()
return store[(user_id, conversation_id)]
with_message_history = RunnableWithMessageHistory(
runnable,
get_session_history,
input_messages_key="input",
history_messages_key="history",
history_factory_config=[
ConfigurableFieldSpec(
id="user_id",
annotation=str,
name="User ID",
description="Unique identifier for the user.",
default="",
is_shared=True,
),
ConfigurableFieldSpec(
id="conversation_id",
annotation=str,
name="Conversation ID",
description="Unique identifier for the conversation.",
default="",
is_shared=True,
),
],
)
with_message_history.invoke(
{"ability": "math", "input": "Hello"},
config={"configurable": {"user_id": "123", "conversation_id": "1"}},
)
from langchain_core.messages import HumanMessage
from langchain_core.runnables import RunnableParallel
chain = RunnableParallel({"output_message": | ChatOpenAI() | langchain_openai.chat_models.ChatOpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet redis redisvl langchain-openai tiktoken')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from operator import itemgetter
from langchain.output_parsers import JsonOutputToolsParser
from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
@tool
def count_emails(last_n_days: int) -> int:
"""Multiply two integers together."""
return last_n_days * 2
@tool
def send_email(message: str, recipient: str) -> str:
"Add two integers."
return f"Successfully sent email to {recipient}."
tools = [count_emails, send_email]
model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0).bind_tools(tools)
def call_tool(tool_invocation: dict) -> Runnable:
"""Function for dynamically constructing the end of the chain based on the model-selected tool."""
tool_map = {tool.name: tool for tool in tools}
tool = tool_map[tool_invocation["type"]]
return RunnablePassthrough.assign(output=itemgetter("args") | tool)
call_tool_list = RunnableLambda(call_tool).map()
chain = model | JsonOutputToolsParser() | call_tool_list
chain.invoke("how many emails did i get in the last 5 days?")
import json
def human_approval(tool_invocations: list) -> Runnable:
tool_strs = "\n\n".join(
json.dumps(tool_call, indent=2) for tool_call in tool_invocations
)
msg = (
f"Do you approve of the following tool invocations\n\n{tool_strs}\n\n"
"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no."
)
resp = input(msg)
if resp.lower() not in ("yes", "y"):
raise ValueError(f"Tool invocations not approved:\n\n{tool_strs}")
return tool_invocations
chain = model | JsonOutputToolsParser() | human_approval | call_tool_list
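# Invoking the gated chain now pauses for keyboard approval before any tool
# runs; reusing the question from the earlier, ungated invocation:
chain.invoke("how many emails did i get in the last 5 days?")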
import os
import yaml
get_ipython().system('wget https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml -O openai_openapi.yaml')
get_ipython().system('wget https://www.klarna.com/us/shopping/public/openai/v0/api-docs -O klarna_openapi.yaml')
get_ipython().system('wget https://raw.githubusercontent.com/APIs-guru/openapi-directory/main/APIs/spotify.com/1.0.0/openapi.yaml -O spotify_openapi.yaml')
from langchain_community.agent_toolkits.openapi.spec import reduce_openapi_spec
with open("openai_openapi.yaml") as f:
raw_openai_api_spec = yaml.load(f, Loader=yaml.Loader)
openai_api_spec = reduce_openapi_spec(raw_openai_api_spec)
with open("klarna_openapi.yaml") as f:
raw_klarna_api_spec = yaml.load(f, Loader=yaml.Loader)
klarna_api_spec = reduce_openapi_spec(raw_klarna_api_spec)
with open("spotify_openapi.yaml") as f:
raw_spotify_api_spec = yaml.load(f, Loader=yaml.Loader)
spotify_api_spec = reduce_openapi_spec(raw_spotify_api_spec)
import spotipy.util as util
from langchain.requests import RequestsWrapper
def construct_spotify_auth_headers(raw_spec: dict):
scopes = list(
raw_spec["components"]["securitySchemes"]["oauth_2_0"]["flows"][
"authorizationCode"
]["scopes"].keys()
)
access_token = util.prompt_for_user_token(scope=",".join(scopes))
return {"Authorization": f"Bearer {access_token}"}
headers = construct_spotify_auth_headers(raw_spotify_api_spec)
requests_wrapper = RequestsWrapper(headers=headers)
endpoints = [
(route, operation)
for route, operations in raw_spotify_api_spec["paths"].items()
for operation in operations
if operation in ["get", "post"]
]
len(endpoints)
import tiktoken
enc = tiktoken.encoding_for_model("gpt-4")
def count_tokens(s):
return len(enc.encode(s))
count_tokens(yaml.dump(raw_spotify_api_spec))
from langchain_community.agent_toolkits.openapi import planner
from langchain_openai import OpenAI
llm = OpenAI(model_name="gpt-4", temperature=0.0)
spotify_agent = planner.create_openapi_agent(spotify_api_spec, requests_wrapper, llm)
user_query = (
"make me a playlist with the first song from kind of blue. call it machine blues."
)
spotify_agent.run(user_query)
user_query = "give me a song I'd like, make it blues-ey"
spotify_agent.run(user_query)
headers = {"Authorization": f"Bearer {os.getenv('OPENAI_API_KEY')}"}
openai_requests_wrapper = RequestsWrapper(headers=headers)
llm = OpenAI(model_name="gpt-4", temperature=0.25)
openai_agent = planner.create_openapi_agent(
openai_api_spec, openai_requests_wrapper, llm
)
user_query = "generate a short piece of advice"
openai_agent.run(user_query)
from langchain.agents import create_openapi_agent
from langchain_community.agent_toolkits import OpenAPIToolkit
from langchain_community.tools.json.tool import JsonSpec
from langchain_openai import OpenAI
with open("openai_openapi.yaml") as f:
data = yaml.load(f, Loader=yaml.FullLoader)
json_spec = JsonSpec(dict_=data, max_value_length=4000)
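# The imports above suggest the snippet continues by building a "raw" OpenAPI
# agent over this spec; a sketch reusing the authenticated requests wrapper
# created earlier (the run() query is illustrative):
openapi_toolkit = OpenAPIToolkit.from_llm(
    OpenAI(temperature=0), json_spec, openai_requests_wrapper, verbose=True
)
openapi_agent_executor = create_openapi_agent(
    llm=OpenAI(temperature=0), toolkit=openapi_toolkit, verbose=True
)
openapi_agent_executor.run(
    "Make a post request to openai /completions. The prompt should be 'tell me a joke.'"
)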
get_ipython().run_line_magic('pip', "install --upgrade --quiet langchain-openai 'deeplake[enterprise]' tiktoken")
from langchain_community.vectorstores import DeepLake
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
activeloop_token = getpass.getpass("activeloop token:")
embeddings = OpenAIEmbeddings()
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
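# Presumably the split documents are then indexed into a Deep Lake dataset;
# a sketch with an illustrative local dataset path:
db = DeepLake(dataset_path="./my_deeplake/", embedding=embeddings, overwrite=True)
db.add_documents(docs)
query = "What did the president say about Ketanji Brown Jackson"
print(db.similarity_search(query)[0].page_content)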
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain fleet-context langchain-openai pandas faiss-cpu # faiss-gpu for CUDA supported GPU')
from operator import itemgetter
from typing import Any, Optional, Type
import pandas as pd
from langchain.retrievers import MultiVectorRetriever
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_core.stores import BaseStore
from langchain_core.vectorstores import VectorStore
from langchain_openai import OpenAIEmbeddings
def load_fleet_retriever(
df: pd.DataFrame,
*,
vectorstore_cls: Type[VectorStore] = FAISS,
docstore: Optional[BaseStore] = None,
**kwargs: Any,
):
vectorstore = _populate_vectorstore(df, vectorstore_cls)
if docstore is None:
return vectorstore.as_retriever(**kwargs)
else:
_populate_docstore(df, docstore)
return MultiVectorRetriever(
vectorstore=vectorstore, docstore=docstore, id_key="parent", **kwargs
)
def _populate_vectorstore(
df: pd.DataFrame,
vectorstore_cls: Type[VectorStore],
) -> VectorStore:
if not hasattr(vectorstore_cls, "from_embeddings"):
raise ValueError(
f"Incompatible vector store class {vectorstore_cls}."
"Must implement `from_embeddings` class method."
)
texts_embeddings = []
metadatas = []
for _, row in df.iterrows():
texts_embeddings.append((row.metadata["text"], row["dense_embeddings"]))
metadatas.append(row.metadata)
return vectorstore_cls.from_embeddings(
texts_embeddings,
        OpenAIEmbeddings(model="text-embedding-ada-002"),
        metadatas=metadatas,
    )
get_ipython().run_line_magic('pip', 'install --upgrade --quiet alibabacloud_ha3engine_vector')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.vectorstores import (
AlibabaCloudOpenSearch,
AlibabaCloudOpenSearchSettings,
)
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../../state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
get_ipython().run_line_magic('pip', 'install --upgrade --quiet beautifulsoup4')
from langchain_community.document_loaders import ReadTheDocsLoader
loader = ReadTheDocsLoader("rtdocs", features="html.parser")
get_ipython().system(' pip install langchain unstructured[all-docs] pydantic lxml')
path = "/Users/rlm/Desktop/Papers/LLaVA/"
from typing import Any
from pydantic import BaseModel
from unstructured.partition.pdf import partition_pdf
raw_pdf_elements = partition_pdf(
filename=path + "LLaVA.pdf",
extract_images_in_pdf=True,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
category_counts = {}
for element in raw_pdf_elements:
category = str(type(element))
if category in category_counts:
category_counts[category] += 1
else:
category_counts[category] = 1
unique_categories = set(category_counts.keys())
category_counts
class Element(BaseModel):
type: str
text: Any
categorized_elements = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
categorized_elements.append(Element(type="table", text=str(element)))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
categorized_elements.append(Element(type="text", text=str(element)))
table_elements = [e for e in categorized_elements if e.type == "table"]
print(len(table_elements))
text_elements = [e for e in categorized_elements if e.type == "text"]
print(len(text_elements))
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
prompt_text = """You are an assistant tasked with summarizing tables and text. \
Give a concise summary of the table or text. Table or text chunk: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOpenAI(temperature=0, model="gpt-4")
summarize_chain = {"element": lambda x: x} | prompt | model | | StrOutputParser() | langchain_core.output_parsers.StrOutputParser |
from typing import List
from langchain.prompts.chat import (
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.schema import (
AIMessage,
BaseMessage,
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
class CAMELAgent:
def __init__(
self,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.system_message = system_message
self.model = model
self.init_messages()
def reset(self) -> None:
self.init_messages()
return self.stored_messages
def init_messages(self) -> None:
self.stored_messages = [self.system_message]
def update_messages(self, message: BaseMessage) -> List[BaseMessage]:
self.stored_messages.append(message)
return self.stored_messages
def step(
self,
input_message: HumanMessage,
) -> AIMessage:
messages = self.update_messages(input_message)
output_message = self.model(messages)
self.update_messages(output_message)
return output_message
import os
os.environ["OPENAI_API_KEY"] = ""
assistant_role_name = "Python Programmer"
user_role_name = "Stock Trader"
task = "Develop a trading bot for the stock market"
word_limit = 50 # word limit for task brainstorming
task_specifier_sys_msg = SystemMessage(content="You can make a task more specific.")
task_specifier_prompt = """Here is a task that {assistant_role_name} will help {user_role_name} to complete: {task}.
Please make it more specific. Be creative and imaginative.
Please reply with the specified task in {word_limit} words or less. Do not add anything else."""
task_specifier_template = HumanMessagePromptTemplate.from_template(
template=task_specifier_prompt
)
task_specify_agent = CAMELAgent(task_specifier_sys_msg, ChatOpenAI(temperature=1.0))
task_specifier_msg = task_specifier_template.format_messages(
assistant_role_name=assistant_role_name,
user_role_name=user_role_name,
task=task,
word_limit=word_limit,
)[0]
specified_task_msg = task_specify_agent.step(task_specifier_msg)
print(f"Specified task: {specified_task_msg.content}")
specified_task = specified_task_msg.content
assistant_inception_prompt = """Never forget you are a {assistant_role_name} and I am a {user_role_name}. Never flip roles! Never instruct me!
We share a common interest in collaborating to successfully complete a task.
You must help me to complete the task.
Here is the task: {task}. Never forget our task!
I must instruct you based on your expertise and my needs to complete the task.
I must give you one instruction at a time.
You must write a specific solution that appropriately completes the requested instruction.
You must decline my instruction honestly if you cannot perform the instruction due to physical, moral, legal reasons or your capability and explain the reasons.
Do not add anything else other than your solution to my instruction.
You are never supposed to ask me any questions; you only answer questions.
You are never supposed to reply with a flake solution. Explain your solutions.
Your solution must be declarative sentences and simple present tense.
Unless I say the task is completed, you should always start with:
Solution: <YOUR_SOLUTION>
<YOUR_SOLUTION> should be specific and provide preferable implementations and examples for task-solving.
Always end <YOUR_SOLUTION> with: Next request."""
user_inception_prompt = """Never forget you are a {user_role_name} and I am a {assistant_role_name}. Never flip roles! You will always instruct me.
We share a common interest in collaborating to successfully complete a task.
I must help you to complete the task.
Here is the task: {task}. Never forget our task!
You must instruct me based on my expertise and your needs to complete the task ONLY in the following two ways:
1. Instruct with a necessary input:
Instruction: <YOUR_INSTRUCTION>
Input: <YOUR_INPUT>
2. Instruct without any input:
Instruction: <YOUR_INSTRUCTION>
Input: None
The "Instruction" describes a task or question. The paired "Input" provides further context or information for the requested "Instruction".
You must give me one instruction at a time.
I must write a response that appropriately completes the requested instruction.
I must decline your instruction honestly if I cannot perform the instruction due to physical, moral, legal reasons or my capability and explain the reasons.
You should instruct me, not ask me questions.
Now you must start to instruct me using the two ways described above.
Do not add anything else other than your instruction and the optional corresponding input!
Keep giving me instructions and necessary inputs until you think the task is completed.
When the task is completed, you must only reply with a single word <CAMEL_TASK_DONE>.
Never say <CAMEL_TASK_DONE> unless my responses have solved your task."""
def get_sys_msgs(assistant_role_name: str, user_role_name: str, task: str):
assistant_sys_template = SystemMessagePromptTemplate.from_template(
template=assistant_inception_prompt
)
assistant_sys_msg = assistant_sys_template.format_messages(
assistant_role_name=assistant_role_name,
user_role_name=user_role_name,
task=task,
)[0]
user_sys_template = SystemMessagePromptTemplate.from_template(
template=user_inception_prompt
)
user_sys_msg = user_sys_template.format_messages(
assistant_role_name=assistant_role_name,
user_role_name=user_role_name,
task=task,
)[0]
return assistant_sys_msg, user_sys_msg
assistant_sys_msg, user_sys_msg = get_sys_msgs(
assistant_role_name, user_role_name, specified_task
)
assistant_agent = CAMELAgent(assistant_sys_msg, ChatOpenAI(temperature=0.2))
user_agent = CAMELAgent(user_sys_msg, ChatOpenAI(temperature=0.2))
get_ipython().run_line_magic('pip', 'install --upgrade --quiet rockset')
import os
import rockset
ROCKSET_API_KEY = os.environ.get(
"ROCKSET_API_KEY"
) # Verify ROCKSET_API_KEY environment variable
ROCKSET_API_SERVER = rockset.Regions.usw2a1 # Verify Rockset region
rockset_client = rockset.RocksetClient(ROCKSET_API_SERVER, ROCKSET_API_KEY)
COLLECTION_NAME = "langchain_demo"
TEXT_KEY = "description"
EMBEDDING_KEY = "description_embedding"
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Rockset
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
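# A sketch of wiring the pieces above into the Rockset vector store and
# adding the split documents:
docsearch = Rockset(
    client=rockset_client,
    embeddings=embeddings,
    collection_name=COLLECTION_NAME,
    text_key=TEXT_KEY,
    embedding_key=EMBEDDING_KEY,
)
ids = docsearch.add_texts(
    texts=[d.page_content for d in docs],
    metadatas=[d.metadata for d in docs],
)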
get_ipython().run_line_magic('pip', 'install --upgrade --quiet titan-iris')
from langchain_community.llms import TitanTakeoff
llm = TitanTakeoff(
base_url="http://localhost:8000", generate_max_length=128, temperature=1.0
)
prompt = "What is the largest planet in the solar system?"
llm(prompt)
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
llm = TitanTakeoff(
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
    streaming=True,
)
from typing import List
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0)
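# The imports above suggest the standard JsonOutputParser flow; a sketch with
# an illustrative Joke schema:
class Joke(BaseModel):
    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")

parser = JsonOutputParser(pydantic_object=Joke)
prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
chain = prompt | model | parser
chain.invoke({"query": "Tell me a joke."})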
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().system('poetry run pip install replicate')
from getpass import getpass
REPLICATE_API_TOKEN = getpass()
import os
os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import Replicate
llm = Replicate(
model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
model_kwargs={"temperature": 0.75, "max_length": 500, "top_p": 1},
)
prompt = """
User: Answer the following yes/no question by reasoning step by step. Can a dog drive a car?
Assistant:
"""
llm(prompt)
llm = Replicate(
model="replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5"
)
prompt = """
Answer the following yes/no question by reasoning step by step.
Can a dog drive a car?
"""
llm(prompt)
text2image = Replicate(
model="stability-ai/stable-diffusion:db21e45d3f7023abc2a46ee38a23973f6dce16bb082a930b0c49861f96d1e5bf",
model_kwargs={"image_dimensions": "512x512"},
)
image_output = text2image("A cat riding a motorcycle by Picasso")
image_output
get_ipython().system('poetry run pip install Pillow')
from io import BytesIO
import requests
from PIL import Image
response = requests.get(image_output)
img = Image.open(BytesIO(response.content))
img
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
llm = Replicate(
streaming=True,
callbacks=[StreamingStdOutCallbackHandler()],
model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
model_kwargs={"temperature": 0.75, "max_length": 500, "top_p": 1},
)
prompt = """
User: Answer the following yes/no question by reasoning step by step. Can a dog drive a car?
Assistant:
"""
_ = llm(prompt)
import time
llm = Replicate(
model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
model_kwargs={"temperature": 0.01, "max_length": 500, "top_p": 1},
)
prompt = """
User: What is the best way to learn python?
Assistant:
"""
start_time = time.perf_counter()
raw_output = llm(prompt) # raw output, no stop
end_time = time.perf_counter()
print(f"Raw output:\n {raw_output}")
print(f"Raw output runtime: {end_time - start_time} seconds")
start_time = time.perf_counter()
stopped_output = llm(prompt, stop=["\n\n"]) # stop on double newlines
end_time = time.perf_counter()
print(f"Stopped output:\n {stopped_output}")
print(f"Stopped output runtime: {end_time - start_time} seconds")
from langchain.chains import SimpleSequentialChain
dolly_llm = Replicate(
model="replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5"
)
text2image = Replicate(
model="stability-ai/stable-diffusion:db21e45d3f7023abc2a46ee38a23973f6dce16bb082a930b0c49861f96d1e5bf"
)
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
chain = LLMChain(llm=dolly_llm, prompt=prompt)
second_prompt = PromptTemplate(
input_variables=["company_name"],
template="Write a description of a logo for this company: {company_name}",
)
chain_two = LLMChain(llm=dolly_llm, prompt=second_prompt)
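# SimpleSequentialChain is imported above, so the two chains are presumably
# composed and run end to end; the input product is illustrative:
overall_chain = SimpleSequentialChain(chains=[chain, chain_two], verbose=True)
catchphrase = overall_chain.run("colorful socks")
print(catchphrase)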
get_ipython().run_line_magic('pip', 'install --upgrade --quiet gpt4all > /dev/null')
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import GPT4All
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
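# A minimal sketch of wiring the imports above together; the model path is a
# placeholder for a local GPT4All weights file:
llm = GPT4All(
    model="./models/gpt4all-model.bin",
    callbacks=[StreamingStdOutCallbackHandler()],
    verbose=True,
)
chain = LLMChain(prompt=prompt, llm=llm)
chain.run("What NFL team won the Super Bowl in the year Justin Bieber was born?")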
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai.chat_models import ChatOpenAI
model = ChatOpenAI()
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pandoc')
from langchain_community.document_loaders import UnstructuredEPubLoader
loader = UnstructuredEPubLoader("winter-sports.epub")
data = loader.load()
loader = UnstructuredEPubLoader("winter-sports.epub", mode="elements")
from typing import Callable, List
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
class DialogueAgent:
def __init__(
self,
name: str,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.name = name
self.system_message = system_message
self.model = model
self.prefix = f"{self.name}: "
self.reset()
def reset(self):
self.message_history = ["Here is the conversation so far."]
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
message = self.model(
[
self.system_message,
HumanMessage(content="\n".join(self.message_history + [self.prefix])),
]
)
return message.content
def receive(self, name: str, message: str) -> None:
"""
Concatenates {message} spoken by {name} into message history
"""
self.message_history.append(f"{name}: {message}")
class DialogueSimulator:
def __init__(
self,
agents: List[DialogueAgent],
selection_function: Callable[[int, List[DialogueAgent]], int],
) -> None:
self.agents = agents
self._step = 0
self.select_next_speaker = selection_function
def reset(self):
for agent in self.agents:
agent.reset()
def inject(self, name: str, message: str):
"""
Initiates the conversation with a {message} from {name}
"""
for agent in self.agents:
agent.receive(name, message)
self._step += 1
def step(self) -> tuple[str, str]:
speaker_idx = self.select_next_speaker(self._step, self.agents)
speaker = self.agents[speaker_idx]
message = speaker.send()
for receiver in self.agents:
receiver.receive(speaker.name, message)
self._step += 1
return speaker.name, message
character_names = ["Harry Potter", "Ron Weasley", "Hermione Granger", "Argus Filch"]
storyteller_name = "Dungeon Master"
quest = "Find all of Lord Voldemort's seven horcruxes."
word_limit = 50 # word limit for task brainstorming
game_description = f"""Here is the topic for a Dungeons & Dragons game: {quest}.
The characters are: {*character_names,}.
The story is narrated by the storyteller, {storyteller_name}."""
player_descriptor_system_message = SystemMessage(
content="You can add detail to the description of a Dungeons & Dragons player."
)
def generate_character_description(character_name):
character_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(
content=f"""{game_description}
Please reply with a creative description of the character, {character_name}, in {word_limit} words or less.
Speak directly to {character_name}.
Do not add anything else."""
),
]
character_description = ChatOpenAI(temperature=1.0)(
character_specifier_prompt
).content
return character_description
def generate_character_system_message(character_name, character_description):
return SystemMessage(
content=(
f"""{game_description}
Your name is {character_name}.
Your character description is as follows: {character_description}.
You will propose actions you plan to take and {storyteller_name} will explain what happens when you take those actions.
Speak in the first person from the perspective of {character_name}.
For describing your own body movements, wrap your description in '*'.
Do not change roles!
Do not speak from the perspective of anyone else.
Remember you are {character_name}.
Stop speaking the moment you finish speaking from your perspective.
Never forget to keep your response to {word_limit} words!
Do not add anything else.
"""
)
)
character_descriptions = [
generate_character_description(character_name) for character_name in character_names
]
character_system_messages = [
generate_character_system_message(character_name, character_description)
for character_name, character_description in zip(
character_names, character_descriptions
)
]
storyteller_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(
content=f"""{game_description}
Please reply with a creative description of the storyteller, {storyteller_name}, in {word_limit} words or less.
Speak directly to {storyteller_name}.
Do not add anything else."""
),
]
storyteller_description = ChatOpenAI(temperature=1.0)(
storyteller_specifier_prompt
).content
storyteller_system_message = SystemMessage(
content=(
f"""{game_description}
You are the storyteller, {storyteller_name}.
Your description is as follows: {storyteller_description}.
The other players will propose actions to take and you will explain what happens when they take those actions.
Speak in the first person from the perspective of {storyteller_name}.
Do not change roles!
Do not speak from the perspective of anyone else.
Remember you are the storyteller, {storyteller_name}.
Stop speaking the moment you finish speaking from your perspective.
Never forget to keep your response to {word_limit} words!
Do not add anything else.
"""
)
)
print("Storyteller Description:")
print(storyteller_description)
for character_name, character_description in zip(
character_names, character_descriptions
):
print(f"{character_name} Description:")
print(character_description)
quest_specifier_prompt = [
SystemMessage(content="You can make a task more specific."),
HumanMessage(
content=f"""{game_description}
You are the storyteller, {storyteller_name}.
Please make the quest more specific. Be creative and imaginative.
Please reply with the specified quest in {word_limit} words or less.
Speak directly to the characters: {*character_names,}.
Do not add anything else."""
),
]
specified_quest = ChatOpenAI(temperature=1.0)(quest_specifier_prompt).content
print(f"Original quest:\n{quest}\n")
print(f"Detailed quest:\n{specified_quest}\n")
characters = []
for character_name, character_system_message in zip(
character_names, character_system_messages
):
characters.append(
DialogueAgent(
name=character_name,
system_message=character_system_message,
model=ChatOpenAI(temperature=0.2),
)
)
storyteller = DialogueAgent(
name=storyteller_name,
system_message=storyteller_system_message,
    model=ChatOpenAI(temperature=0.2),
)
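# A sketch of a simple round-robin speaker selector and the simulator wiring,
# using the classes defined above (the storyteller speaks on even steps):
def select_next_speaker(step: int, agents: List[DialogueAgent]) -> int:
    if step % 2 == 0:
        return 0  # storyteller's turn
    return (step // 2) % (len(agents) - 1) + 1  # rotate through characters

simulator = DialogueSimulator(
    agents=[storyteller] + characters,
    selection_function=select_next_speaker,
)
simulator.reset()
simulator.inject(storyteller_name, specified_quest)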
get_ipython().system('pip install -U openai langchain langchain-experimental')
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=256)
chat.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "What is this image showing"},
{
"type": "image_url",
"image_url": {
"url": "https://raw.githubusercontent.com/langchain-ai/langchain/master/docs/static/img/langchain_stack.png",
"detail": "auto",
},
},
]
)
]
)
from langchain.agents.openai_assistant import OpenAIAssistantRunnable
interpreter_assistant = OpenAIAssistantRunnable.create_assistant(
name="langchain assistant",
instructions="You are a personal math tutor. Write and run code to answer math questions.",
tools=[{"type": "code_interpreter"}],
model="gpt-4-1106-preview",
)
output = interpreter_assistant.invoke({"content": "What's 10 - 4 raised to the 2.7"})
output
get_ipython().system('pip install e2b duckduckgo-search')
from langchain.tools import DuckDuckGoSearchRun, E2BDataAnalysisTool
tools = [E2BDataAnalysisTool(api_key="..."), DuckDuckGoSearchRun()]
agent = OpenAIAssistantRunnable.create_assistant(
name="langchain assistant e2b tool",
instructions="You are a personal math tutor. Write and run code to answer math questions. You can also search the internet.",
tools=tools,
model="gpt-4-1106-preview",
as_agent=True,
)
from langchain.agents import AgentExecutor
agent_executor = AgentExecutor(agent=agent, tools=tools)
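# A minimal usage sketch; the assistant agent takes its input under the
# "content" key (the math question is illustrative):
agent_executor.invoke({"content": "What's 10 - 4 raised to the 2.7"})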
from langchain_community.llms import Ollama
llm = Ollama(model="llama2")
llm("The first man on the moon was ...")
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
llm = Ollama(
model="llama2", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])
)
llm("The first man on the moon was ...")
from langchain_community.llms import Ollama
llm = Ollama(model="llama2:13b")
llm("The first man on the moon was ... think step by step")
get_ipython().run_line_magic('env', 'CMAKE_ARGS="-DLLAMA_METAL=on"')
get_ipython().run_line_magic('env', 'FORCE_CMAKE=1')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet llama-cpp-python --no-cache-dir')
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_community.llms import LlamaCpp
llm = LlamaCpp(
model_path="/Users/rlm/Desktop/Code/llama.cpp/models/openorca-platypus2-13b.gguf.q4_0.bin",
n_gpu_layers=1,
n_batch=512,
n_ctx=2048,
f16_kv=True,
callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
verbose=True,
)
llm("The first man on the moon was ... Let's think step by step")
get_ipython().run_line_magic('pip', 'install gpt4all')
from langchain_community.llms import GPT4All
llm = GPT4All(
model="/Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin"
)
llm("The first man on the moon was ... Let's think step by step")
from langchain_community.llms.llamafile import Llamafile
llm = Llamafile()
llm.invoke("The first man on the moon was ... Let's think step by step.")
llm = LlamaCpp(
model_path="/Users/rlm/Desktop/Code/llama.cpp/models/openorca-platypus2-13b.gguf.q4_0.bin",
n_gpu_layers=1,
n_batch=512,
n_ctx=2048,
f16_kv=True,
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
SOURCE = "test" # @param {type:"Query"|"CollectionGroup"|"DocumentReference"|"string"}
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-firestore')
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
from google.colab import auth
auth.authenticate_user()
get_ipython().system('gcloud services enable firestore.googleapis.com')
from langchain_core.documents.base import Document
from langchain_google_firestore import FirestoreSaver
saver = FirestoreSaver()
data = [Document(page_content="Hello, World!")]
saver.upsert_documents(data)
saver = FirestoreSaver("Collection")
saver.upsert_documents(data)
doc_ids = ["AnotherCollection/doc_id", "foo/bar"]
saver = FirestoreSaver()
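# Presumably the ids above target specific documents; a sketch assuming
# upsert_documents accepts explicit document ids:
saver.upsert_documents(documents=data, document_ids=doc_ids)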
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-api-python-client > /dev/null')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-auth-oauthlib > /dev/null')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-auth-httplib2 > /dev/null')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet beautifulsoup4 > /dev/null # This is optional but is useful for parsing HTML messages')
from langchain_community.agent_toolkits import GmailToolkit
toolkit = GmailToolkit()
from langchain_community.tools.gmail.utils import (
build_resource_service,
get_gmail_credentials,
)
credentials = get_gmail_credentials(
token_file="token.json",
scopes=["https://mail.google.com/"],
client_secrets_file="credentials.json",
)
api_resource = build_resource_service(credentials=credentials)
toolkit = GmailToolkit(api_resource=api_resource)
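# A quick way to inspect the tools the authenticated toolkit exposes:
tools = toolkit.get_tools()
tools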
from langchain_community.llms import AmazonAPIGateway
api_url = "https://<api_gateway_id>.execute-api.<region>.amazonaws.com/LATEST/HF"
llm = AmazonAPIGateway(api_url=api_url)
parameters = {
"max_new_tokens": 100,
"num_return_sequences": 1,
"top_k": 50,
"top_p": 0.95,
"do_sample": False,
"return_full_text": True,
"temperature": 0.2,
}
prompt = "what day comes after Friday?"
llm.model_kwargs = parameters
llm(prompt)
from langchain.agents import AgentType, initialize_agent, load_tools
parameters = {
"max_new_tokens": 50,
"num_return_sequences": 1,
"top_k": 250,
"top_p": 0.25,
"do_sample": False,
"temperature": 0.1,
}
llm.model_kwargs = parameters
tools = load_tools(["python_repl", "llm-math"], llm=llm)
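# The imports above suggest the snippet continues by building an agent over
# these tools; a minimal sketch (the question is illustrative):
agent = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("If today is Monday, what day of the week is it in three days?")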
get_ipython().run_line_magic('pip', 'install --upgrade --quiet promptlayer')
import promptlayer # Don't forget this 🍰
from langchain.callbacks import PromptLayerCallbackHandler
from langchain.schema import (
HumanMessage,
)
from langchain_openai import ChatOpenAI
chat_llm = ChatOpenAI(
temperature=0,
callbacks=[PromptLayerCallbackHandler(pl_tags=["chatopenai"])],
)
llm_results = chat_llm(
[
HumanMessage(content="What comes after 1,2,3 ?"),
HumanMessage(content="Tell me another joke?"),
]
)
print(llm_results)
import promptlayer # Don't forget this 🍰
from langchain.callbacks import PromptLayerCallbackHandler
from langchain_community.llms import GPT4All
model = GPT4All(model="./models/gpt4all-model.bin", n_ctx=512, n_threads=8)
response = model(
"Once upon a time, ",
    callbacks=[PromptLayerCallbackHandler(pl_tags=["langchain", "gpt4all"])],
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.evaluation import load_evaluator
evaluator = load_evaluator("trajectory")
import subprocess
from urllib.parse import urlparse
from langchain.agents import AgentType, initialize_agent
from langchain.tools import tool
from langchain_openai import ChatOpenAI
from pydantic import HttpUrl
@tool
def ping(url: HttpUrl, return_error: bool) -> str:
"""Ping the fully specified url. Must include https:// in the url."""
hostname = urlparse(str(url)).netloc
completed_process = subprocess.run(
["ping", "-c", "1", hostname], capture_output=True, text=True
)
output = completed_process.stdout
if return_error and completed_process.returncode != 0:
return completed_process.stderr
return output
@tool
def trace_route(url: HttpUrl, return_error: bool) -> str:
"""Trace the route to the specified url. Must include https:// in the url."""
hostname = urlparse(str(url)).netloc
completed_process = subprocess.run(
["traceroute", hostname], capture_output=True, text=True
)
output = completed_process.stdout
if return_error and completed_process.returncode != 0:
return completed_process.stderr
return output
llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
agent = initialize_agent(
llm=llm,
tools=[ping, trace_route],
agent=AgentType.OPENAI_MULTI_FUNCTIONS,
return_intermediate_steps=True, # IMPORTANT!
)
result = agent("What's the latency like for https://langchain.com?")
evaluation_result = evaluator.evaluate_agent_trajectory(
prediction=result["output"],
input=result["input"],
agent_trajectory=result["intermediate_steps"],
)
evaluation_result
get_ipython().run_line_magic('pip', 'install --upgrade --quiet anthropic')
from langchain_community.chat_models import ChatAnthropic
eval_llm = ChatAnthropic(temperature=0)
evaluator = load_evaluator("trajectory", llm=eval_llm)
evaluation_result = evaluator.evaluate_agent_trajectory(
prediction=result["output"],
input=result["input"],
agent_trajectory=result["intermediate_steps"],
)
evaluation_result
from langchain.evaluation import load_evaluator
evaluator = load_evaluator("trajectory", agent_tools=[ping, trace_route])
get_ipython().run_line_magic('pip', 'install --upgrade --quiet cos-python-sdk-v5')
from langchain_community.document_loaders import TencentCOSDirectoryLoader
from qcloud_cos import CosConfig
conf = CosConfig(
Region="your cos region",
SecretId="your cos secret_id",
SecretKey="your cos secret_key",
)
loader = TencentCOSDirectoryLoader(conf=conf, bucket="you_cos_bucket")
loader.load()
loader = TencentCOSDirectoryLoader(conf=conf, bucket="you_cos_bucket", prefix="fake")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet cohere')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss-cpu')
import getpass
import os
os.environ["COHERE_API_KEY"] = getpass.getpass("Cohere API Key:")
def pretty_print_docs(docs):
print(
f"\n{'-' * 100}\n".join(
[f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
)
)
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import CohereEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_text_splitters import RecursiveCharacterTextSplitter
documents = TextLoader("../../modules/state_of_the_union.txt").load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
retriever = FAISS.from_documents(texts, CohereEmbeddings()).as_retriever(
search_kwargs={"k": 20}
)
query = "What did the president say about Ketanji Brown Jackson"
docs = retriever.get_relevant_documents(query)
pretty_print_docs(docs)
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import CohereRerank
from langchain_community.llms import Cohere
llm = Cohere(temperature=0)
compressor = CohereRerank()
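# ContextualCompressionRetriever is imported above, so the reranker is
# presumably wrapped around the base retriever; a sketch:
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(query)
pretty_print_docs(compressed_docs)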
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../state_of_the_union.txt", encoding="utf-8")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
for i, text in enumerate(texts):
text.metadata["source"] = f"{i}-pl"
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(texts, embeddings)
from langchain.chains import create_qa_with_sources_chain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
qa_chain = create_qa_with_sources_chain(llm)
doc_prompt = PromptTemplate(
template="Content: {page_content}\nSource: {source}",
input_variables=["page_content", "source"],
)
final_qa_chain = StuffDocumentsChain(
llm_chain=qa_chain,
document_variable_name="context",
document_prompt=doc_prompt,
)
retrieval_qa = RetrievalQA(
retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain
)
query = "What did the president say about russia"
retrieval_qa.run(query)
qa_chain_pydantic = create_qa_with_sources_chain(llm, output_parser="pydantic")
final_qa_chain_pydantic = StuffDocumentsChain(
llm_chain=qa_chain_pydantic,
document_variable_name="context",
document_prompt=doc_prompt,
)
retrieval_qa_pydantic = RetrievalQA(
retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain_pydantic
)
retrieval_qa_pydantic.run(query)
from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.\
Make sure to avoid using any unclear pronouns.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
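# A sketch of assembling the conversational chain from the pieces above,
# assuming the ChatOpenAI llm and the Chroma docsearch from the earlier cells:
qa = ConversationalRetrievalChain.from_llm(
    llm,
    retriever=docsearch.as_retriever(),
    memory=memory,
    condense_question_prompt=CONDENSE_QUESTION_PROMPT,
)
qa({"question": "What did the president say about Ketanji Brown Jackson"})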
get_ipython().run_line_magic('pip', 'install -qU chromadb langchain langchain-community langchain-openai')
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = TextLoader("../../state_of_the_union.txt")
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
for i, doc in enumerate(texts):
doc.metadata["page_chunk"] = i
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(texts, embeddings, collection_name="state-of-union")
retriever = vectorstore.as_retriever()
from langchain.tools.retriever import create_retriever_tool
retriever_tool = create_retriever_tool(
retriever,
"state-of-union-retriever",
"Query a retriever to get information about state of the union address",
)
from typing import List
from langchain_core.pydantic_v1 import BaseModel, Field
class Response(BaseModel):
"""Final response to the question being asked"""
    answer: str = Field(description="The final answer to respond to the user")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet marqo')
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Marqo
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
import marqo
marqo_url = "http://localhost:8882" # if using marqo cloud replace with your endpoint (console.marqo.ai)
marqo_api_key = "" # if using marqo cloud replace with your api key (console.marqo.ai)
client = marqo.Client(url=marqo_url, api_key=marqo_api_key)
index_name = "langchain-demo"
docsearch = Marqo.from_documents(docs, index_name=index_name)
query = "What did the president say about Ketanji Brown Jackson"
result_docs = docsearch.similarity_search(query)
print(result_docs[0].page_content)
result_docs = docsearch.similarity_search_with_score(query)
print(result_docs[0][0].page_content, result_docs[0][1], sep="\n")
index_name = "langchain-multimodal-demo"
try:
client.delete_index(index_name)
except Exception:
print(f"Creating {index_name}")
settings = {"treat_urls_and_pointers_as_images": True, "model": "ViT-L/14"}
client.create_index(index_name, **settings)
client.index(index_name).add_documents(
[
{
"caption": "Bus",
"image": "https://raw.githubusercontent.com/marqo-ai/marqo/mainline/examples/ImageSearchGuide/data/image4.jpg",
},
{
"caption": "Plane",
"image": "https://raw.githubusercontent.com/marqo-ai/marqo/mainline/examples/ImageSearchGuide/data/image2.jpg",
},
],
)
def get_content(res):
"""Helper to format Marqo's documents into text to be used as page_content"""
return f"{res['caption']}: {res['image']}"
docsearch = Marqo(client, index_name, page_content_builder=get_content)
query = "vehicles that fly"
doc_results = docsearch.similarity_search(query)
for doc in doc_results:
print(doc.page_content)
index_name = "langchain-byo-index-demo"
try:
client.delete_index(index_name)
except Exception:
print(f"Creating {index_name}")
client.create_index(index_name)
client.index(index_name).add_documents(
[
{
"Title": "Smartphone",
"Description": "A smartphone is a portable computer device that combines mobile telephone "
"functions and computing functions into one unit.",
},
{
"Title": "Telephone",
"Description": "A telephone is a telecommunications device that permits two or more users to"
"conduct a conversation when they are too far apart to be easily heard directly.",
},
],
)
def get_content(res):
"""Helper to format Marqo's documents into text to be used as page_content"""
if "text" in res:
return res["text"]
return res["Description"]
docsearch = Marqo(client, index_name, page_content_builder=get_content)
docsearch.add_texts(["This is a document that is about elephants"])
query = "modern communications devices"
doc_results = docsearch.similarity_search(query)
print(doc_results[0].page_content)
query = "elephants"
doc_results = docsearch.similarity_search(query, page_content_builder=get_content)
print(doc_results[0].page_content)
query = {"communications devices": 1.0}
doc_results = docsearch.similarity_search(query)
print(doc_results[0].page_content)
query = {"communications devices": 1.0, "technology post 2000": -1.0}
doc_results = docsearch.similarity_search(query)
print(doc_results[0].page_content)
import getpass
import os
from langchain.chains import RetrievalQAWithSourcesChain
from langchain_openai import OpenAI
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
with open("../../modules/state_of_the_union.txt") as f:
state_of_the_union = f.read()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_text(state_of_the_union)
index_name = "langchain-qa-with-retrieval"
docsearch = Marqo.from_documents(docs, index_name=index_name)
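# The imports above suggest a sources-aware QA chain over the Marqo index;
# a sketch mirroring the retrieval pattern used elsewhere in this document:
chain = RetrievalQAWithSourcesChain.from_chain_type(
    OpenAI(temperature=0), chain_type="stuff", retriever=docsearch.as_retriever()
)
chain(
    {"question": "What did the president say about Justice Breyer"},
    return_only_outputs=True,
)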
from langchain_community.document_loaders import ArcGISLoader
URL = "https://maps1.vcgov.org/arcgis/rest/services/Beaches/MapServer/7"
loader = ArcGISLoader(URL)
docs = loader.load()
get_ipython().run_cell_magic('time', '', '\ndocs = loader.load()\n')
docs[0].metadata
loader_geom = ArcGISLoader(URL, return_geometry=True)
import getpass
import os
os.environ["TAVILY_API_KEY"] = getpass.getpass()
from langchain.retrievers.tavily_search_api import TavilySearchAPIRetriever
retriever = TavilySearchAPIRetriever(k=3)
retriever.invoke("what year was breath of the wild released?")
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_template(
"""Answer the question based only on the context provided.
Context: {context}
Question: {question}"""
)
chain = (
    RunnablePassthrough.assign(context=(lambda x: x["question"]) | retriever)
    | prompt
    | ChatOpenAI()
    | StrOutputParser()
)
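# A minimal usage sketch, reusing the question from the retriever call above:
chain.invoke({"question": "what year was breath of the wild released?"})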
get_ipython().run_line_magic('pip', 'install --upgrade --quiet wikibase-rest-api-client mediawikiapi')
from langchain_community.tools.wikidata.tool import WikidataAPIWrapper, WikidataQueryRun
wikidata = WikidataQueryRun(api_wrapper=WikidataAPIWrapper())
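# A minimal usage sketch (the query subject is illustrative):
print(wikidata.run("Alan Turing"))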
from langchain.callbacks import HumanApprovalCallbackHandler
from langchain.tools import ShellTool
tool = ShellTool()
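# A minimal usage sketch of the plain tool, then the approval-gated variant
# suggested by the HumanApprovalCallbackHandler import above:
print(tool.run("echo Hello World!"))
tool = ShellTool(callbacks=[HumanApprovalCallbackHandler()])
print(tool.run("ls /usr"))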
get_ipython().run_line_magic('pip', 'install --upgrade --quiet scann')
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import ScaNN
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = HuggingFaceEmbeddings()
db = ScaNN.from_documents(docs, embeddings)
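# A minimal usage sketch, mirroring the other vector-store examples here:
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
docs[0]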
get_ipython().run_line_magic('pip', 'install --upgrade --quiet arxiv')
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent, load_tools
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0.0)
tools = load_tools(
["arxiv"],
)
prompt = hub.pull("hwchase17/react")
agent = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
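# A minimal usage sketch (the paper id is illustrative):
agent_executor.invoke(
    {"input": "What's the paper 1605.08386 about?"}
)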
get_ipython().system('pip install boto3')
from langchain_experimental.recommenders import AmazonPersonalize
recommender_arn = "<insert_arn>"
client = AmazonPersonalize(
credentials_profile_name="default",
region_name="us-west-2",
recommender_arn=recommender_arn,
)
client.get_recommendations(user_id="1")
from langchain.llms.bedrock import Bedrock
from langchain_experimental.recommenders import AmazonPersonalizeChain
bedrock_llm = Bedrock(model_id="anthropic.claude-v2", region_name="us-west-2")
chain = AmazonPersonalizeChain.from_llm(
llm=bedrock_llm, client=client, return_direct=False
)
response = chain({"user_id": "1"})
print(response)
from langchain.prompts.prompt import PromptTemplate
RANDOM_PROMPT_QUERY = """
You are a skilled publicist. Write a high-converting marketing email advertising several movies available in a video-on-demand streaming platform next week,
given the movie and user information below. Your email will leverage the power of storytelling and persuasive language.
The movies to recommend and their information is contained in the <movie> tag.
All movies in the <movie> tag must be recommended. Give a summary of the movies and why the human should watch them.
Put the email between <email> tags.
<movie>
{result}
</movie>
Assistant:
"""
RANDOM_PROMPT = PromptTemplate(input_variables=["result"], template=RANDOM_PROMPT_QUERY)
chain = AmazonPersonalizeChain.from_llm(
llm=bedrock_llm, client=client, return_direct=False, prompt_template=RANDOM_PROMPT
)
chain.run({"user_id": "1", "item_id": "234"})
from langchain.chains import LLMChain, SequentialChain
RANDOM_PROMPT_QUERY_2 = """
You are a skilled publicist. Write a high-converting marketing email advertising several movies available in a video-on-demand streaming platform next week,
given the movie and user information below. Your email will leverage the power of storytelling and persuasive language.
You want the email to impress the user, so make it appealing to them.
The movies to recommend and their information is contained in the <movie> tag.
All movies in the <movie> tag must be recommended. Give a summary of the movies and why the human should watch them.
Put the email between <email> tags.
<movie>
{result}
</movie>
Assistant:
"""
RANDOM_PROMPT_2 = PromptTemplate(
input_variables=["result"], template=RANDOM_PROMPT_QUERY_2
)
personalize_chain_instance = AmazonPersonalizeChain.from_llm(
llm=bedrock_llm, client=client, return_direct=True
)
random_chain_instance = LLMChain(llm=bedrock_llm, prompt=RANDOM_PROMPT_2)
overall_chain = SequentialChain(
chains=[personalize_chain_instance, random_chain_instance],
input_variables=["user_id"],
verbose=True,
)
overall_chain.run({"user_id": "1", "item_id": "234"})
recommender_arn = "<insert_arn>"
metadata_column_names = [
"<insert metadataColumnName-1>",
"<insert metadataColumnName-2>",
]
metadataMap = {"ITEMS": metadata_column_names}
client = AmazonPersonalize(
credentials_profile_name="default",
region_name="us-west-2",
recommender_arn=recommender_arn,
)
client.get_recommendations(user_id="1", metadataColumns=metadataMap)
bedrock_llm = Bedrock(model_id="anthropic.claude-v2", region_name="us-west-2")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
runnable = RunnableParallel(
passed=RunnablePassthrough(),
    extra=RunnablePassthrough.assign(mult=lambda x: x["num"] * 3),
)
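# A minimal usage sketch: `passed` echoes the input unchanged while `extra`
# adds a mult key equal to num * 3:
runnable.invoke({"num": 1})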
get_ipython().run_line_magic('pip', "install --upgrade --quiet faiss-gpu # For CUDA 7.5+ Supported GPU's.")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss-cpu # For CPU Installation')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../../extras/modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
from langchain.callbacks import FileCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
from loguru import logger
logfile = "output.log"
logger.add(logfile, colorize=True, enqueue=True)
handler = FileCallbackHandler(logfile)
llm = OpenAI()
prompt = PromptTemplate.from_template("1 + {number} = ")
chain = LLMChain(llm=llm, prompt=prompt, callbacks=[handler], verbose=True)
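# Running the chain prints the verbose output and, via the handler, also
# appends it to output.log; a minimal sketch:
answer = chain.run(number=2)
logger.info(answer)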
get_ipython().system(' pip install langchain replicate')
from langchain_community.chat_models import ChatOllama
llama2_chat = ChatOllama(model="llama2:13b-chat")
llama2_code = ChatOllama(model="codellama:7b-instruct")
from langchain_community.llms import Replicate
replicate_id = "meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d"
llama2_chat_replicate = Replicate(
model=replicate_id, input={"temperature": 0.01, "max_length": 500, "top_p": 1}
)
llm = llama2_chat
from langchain_community.utilities import SQLDatabase
db = SQLDatabase.from_uri("sqlite:///nba_roster.db", sample_rows_in_table_info=0)
def get_schema(_):
return db.get_table_info()
def run_query(query):
return db.run(query)
from langchain_core.prompts import ChatPromptTemplate
template = """Based on the table schema below, write a SQL query that would answer the user's question:
{schema}
Question: {question}
SQL Query:"""
prompt = ChatPromptTemplate.from_messages(
[
("system", "Given an input question, convert it to a SQL query. No pre-amble."),
("human", template),
]
)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
sql_response = (
RunnablePassthrough.assign(schema=get_schema)
| prompt
| llm.bind(stop=["\nSQLResult:"])
| | StrOutputParser() | langchain_core.output_parsers.StrOutputParser |
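# A minimal invocation sketch (the question is illustrative for the
# nba_roster.db schema): the chain injects the schema, prompts the model, and
# the stop sequence keeps it from hallucinating a SQLResult.
sql_response.invoke({"question": "What team is Klay Thompson on?"})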
get_ipython().run_line_magic('pip', 'install --upgrade --quiet weaviate-client')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
WEAVIATE_URL = getpass.getpass("WEAVIATE_URL:")
os.environ["WEAVIATE_API_KEY"] = getpass.getpass("WEAVIATE_API_KEY:")
WEAVIATE_API_KEY = os.environ["WEAVIATE_API_KEY"]
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Weaviate
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = Weaviate.from_documents(docs, embeddings, weaviate_url=WEAVIATE_URL, by_text=False)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].page_content)
import weaviate
client = weaviate.Client(
url=WEAVIATE_URL, auth_client_secret=weaviate.AuthApiKey(WEAVIATE_API_KEY)
)
vectorstore = Weaviate.from_documents(
documents, embeddings, client=client, by_text=False
)
docs = db.similarity_search_with_score(query, by_text=False)
docs[0]
retriever = db.as_retriever(search_type="mmr")
retriever.get_relevant_documents(query)[0]
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
llm.predict("What did the president say about Justice Breyer")
from langchain.chains import RetrievalQAWithSourcesChain
from langchain_openai import OpenAI
with open("../../modules/state_of_the_union.txt") as f:
state_of_the_union = f.read()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_text(state_of_the_union)
docsearch = Weaviate.from_texts(
texts,
embeddings,
weaviate_url=WEAVIATE_URL,
by_text=False,
metadatas=[{"source": f"{i}-pl"} for i in range(len(texts))],
)
chain = RetrievalQAWithSourcesChain.from_chain_type(
OpenAI(temperature=0), chain_type="stuff", retriever=docsearch.as_retriever()
)
chain(
{"question": "What did the president say about Justice Breyer"},
return_only_outputs=True,
)
with open("../../modules/state_of_the_union.txt") as f:
state_of_the_union = f.read()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_text(state_of_the_union)
docsearch = Weaviate.from_texts(
texts,
embeddings,
weaviate_url=WEAVIATE_URL,
by_text=False,
metadatas=[{"source": f"{i}-pl"} for i in range(len(texts))],
)
retriever = docsearch.as_retriever()
from langchain_core.prompts import ChatPromptTemplate
template = """You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.
Question: {question}
Context: {context}
Answer:
"""
prompt = | ChatPromptTemplate.from_template(template) | langchain_core.prompts.ChatPromptTemplate.from_template |
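# A RAG chain sketch wiring the retriever and prompt above into the chat model;
# `combine_docs` is a hypothetical helper that joins page contents with blank
# lines.
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

def combine_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)

rag_chain = (
    {"context": retriever | combine_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
rag_chain.invoke("What did the president say about Justice Breyer")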
get_ipython().run_line_magic('pip', 'install --upgrade --quiet azureml-mlflow')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pandas')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet textstat')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet spacy')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-search-results')
get_ipython().system('python -m spacy download en_core_web_sm')
import os
os.environ["MLFLOW_TRACKING_URI"] = ""
os.environ["OPENAI_API_KEY"] = ""
os.environ["SERPAPI_API_KEY"] = ""
from langchain.callbacks import MlflowCallbackHandler
from langchain_openai import OpenAI
"""Main function.
This function is used to try the callback handler.
Scenarios:
1. OpenAI LLM
2. Chain with multiple SubChains on multiple generations
3. Agent with Tools
"""
mlflow_callback = MlflowCallbackHandler()
llm = OpenAI(
model_name="gpt-3.5-turbo", temperature=0, callbacks=[mlflow_callback], verbose=True
)
llm_result = llm.generate(["Tell me a joke"])
mlflow_callback.flush_tracker(llm)
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
template = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title.
Title: {title}
Playwright: This is a synopsis for the above play:"""
prompt_template = PromptTemplate(input_variables=["title"], template=template)
synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callbacks=[mlflow_callback])
test_prompts = [
{
"title": "documentary about good video games that push the boundary of game design"
},
]
synopsis_chain.apply(test_prompts)
mlflow_callback.flush_tracker(synopsis_chain)
from langchain.agents import AgentType, initialize_agent, load_tools
tools = | load_tools(["serpapi", "llm-math"], llm=llm, callbacks=[mlflow_callback]) | langchain.agents.load_tools |
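# Scenario 3 sketch: initialize a zero-shot agent with the tools above, run a
# query, and flush the tracker to close out the MLflow run.
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    callbacks=[mlflow_callback],
    verbose=True,
)
agent.run("Who is the wife of the person who sang summer of 69?")
mlflow_callback.flush_tracker(agent, finish=True)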
from langchain_community.document_loaders import WebBaseLoader
loader = WebBaseLoader("https://www.espn.com/")
data = loader.load()
data
"""
import requests
from bs4 import BeautifulSoup
html_doc = requests.get("{INSERT_NEW_URL_HERE}")
soup = BeautifulSoup(html_doc.text, 'html.parser')
"""
loader = WebBaseLoader(["https://www.espn.com/", "https://google.com"])
docs = loader.load()
docs
get_ipython().run_line_magic('pip', 'install --upgrade --quiet nest_asyncio')
import nest_asyncio
nest_asyncio.apply()
loader = WebBaseLoader(["https://www.espn.com/", "https://google.com"])
loader.requests_per_second = 1
docs = loader.aload()
docs
loader = | WebBaseLoader(
"https://www.govinfo.gov/content/pkg/CFR-2018-title10-vol3/xml/CFR-2018-title10-vol3-sec431-86.xml"
) | langchain_community.document_loaders.WebBaseLoader |
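# XML pages need the XML parser; a minimal sketch switching the underlying
# BeautifulSoup parser before loading:
loader.default_parser = "xml"
docs = loader.load()
docs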
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-openai langchain-anthropic langchain-community wikipedia')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()
from langchain_community.retrievers import WikipediaRetriever
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
llm = | ChatOpenAI(model="gpt-3.5-turbo", temperature=0) | langchain_openai.ChatOpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-nvidia-ai-endpoints')
import getpass
import os
if not os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"):
nvapi_key = getpass.getpass("Enter your NVIDIA API key: ")
assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key"
os.environ["NVIDIA_API_KEY"] = nvapi_key
from langchain_nvidia_ai_endpoints import ChatNVIDIA
llm = ChatNVIDIA(model="mixtral_8x7b")
result = llm.invoke("Write a ballad about LangChain.")
print(result.content)
print(llm.batch(["What's 2*3?", "What's 2*6?"]))
for chunk in llm.stream("How far can a seagull fly in one day?"):
print(chunk.content, end="|")
async for chunk in llm.astream(
"How long does it take for monarch butterflies to migrate?"
):
print(chunk.content, end="|")
ChatNVIDIA.get_available_models()
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_nvidia_ai_endpoints import ChatNVIDIA
prompt = ChatPromptTemplate.from_messages(
[("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")]
)
chain = prompt | ChatNVIDIA(model="llama2_13b") | StrOutputParser()
for txt in chain.stream({"input": "What's your name?"}):
print(txt, end="")
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are an expert coding AI. Respond only in valid python; no narration whatsoever.",
),
("user", "{input}"),
]
)
chain = prompt | ChatNVIDIA(model="llama2_code_70b") | | StrOutputParser() | langchain_core.output_parsers.StrOutputParser |
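# A streaming sketch for the code-tuned chain above (the request is
# illustrative):
for txt in chain.stream({"input": "How do I solve this fizz buzz problem?"}):
    print(txt, end="")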
meals = [
"Beef Enchiladas with Feta cheese. Mexican-Greek fusion",
"Chicken Flatbreads with red sauce. Italian-Mexican fusion",
"Veggie sweet potato quesadillas with vegan cheese",
"One-Pan Tortelonni bake with peppers and onions",
]
from langchain_openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-instruct")
from langchain.prompts import PromptTemplate
PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}".
Embed the meal into the given text: "{text_to_personalize}".
Prepend a personalized message including the user's name "{user}"
and their preference "{preference}".
Make it sound good.
"""
PROMPT = PromptTemplate(
input_variables=["meal", "text_to_personalize", "user", "preference"],
template=PROMPT_TEMPLATE,
)
import langchain_experimental.rl_chain as rl_chain
chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs \
believe you will love it!",
)
print(response["response"])
for _ in range(5):
try:
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the week's specialty dish, our master chefs believe you will love it!",
)
except Exception as e:
print(e)
print(response["response"])
print()
scoring_criteria_template = (
"Given {preference} rank how good or bad this selection is {meal}"
)
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=rl_chain.AutoSelectionScorer(
llm=llm, scoring_criteria_template_str=scoring_criteria_template
),
)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the week's specialty dish, our master chefs believe you will love it!",
)
print(response["response"])
selection_metadata = response["selection_metadata"]
print(
f"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}"
)
class CustomSelectionScorer(rl_chain.SelectionScorer):
def score_response(
self, inputs, llm_response: str, event: rl_chain.PickBestEvent
) -> float:
print(event.based_on)
print(event.to_select_from)
selected_meal = event.to_select_from["meal"][event.selected.index]
print(f"selected meal: {selected_meal}")
if "Tom" in event.based_on["user"]:
if "Vegetarian" in event.based_on["preference"]:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 0.0
else:
return 1.0
else:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 1.0
else:
return 0.0
else:
raise NotImplementedError("I don't know how to score this user")
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=CustomSelectionScorer(),
)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference= | rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]) | langchain_experimental.rl_chain.BasedOn |
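# A sketch of inspecting the scored pick (assuming the truncated chain.run call
# above is closed with the same text_to_personalize argument as earlier runs):
print(response["response"])
selection_metadata = response["selection_metadata"]
print(
    f"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}"
)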
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-openai langchain-anthropic langchain-community wikipedia')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()
from langchain_community.retrievers import WikipediaRetriever
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
wiki = WikipediaRetriever(top_k_results=6, doc_content_chars_max=2000)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, answer the user question. If none of the articles answer the question, just say you don't know.\n\nHere are the Wikipedia articles:{context}",
),
("human", "{question}"),
]
)
prompt.pretty_print()
from operator import itemgetter
from typing import List
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import (
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
)
def format_docs(docs: List[Document]) -> str:
"""Convert Documents to a single string.:"""
formatted = [
f"Article Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for doc in docs
]
return "\n\n" + "\n\n".join(formatted)
format = itemgetter("docs") | RunnableLambda(format_docs)
answer = prompt | llm | StrOutputParser()
chain = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format)
.assign(answer=answer)
.pick(["answer", "docs"])
)
chain.invoke("How fast are cheetahs?")
from langchain_core.pydantic_v1 import BaseModel, Field
class cited_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[int] = Field(
...,
description="The integer IDs of the SPECIFIC sources which justify the answer.",
)
llm_with_tool = llm.bind_tools(
[cited_answer],
tool_choice="cited_answer",
)
example_q = """What Brian's height?
Source: 1
Information: Suzy is 6'2"
Source: 2
Information: Jeremiah is blonde
Source: 3
Information: Brian is 3 inches shorter than Suzy"""
llm_with_tool.invoke(example_q)
from langchain.output_parsers.openai_tools import JsonOutputKeyToolsParser
output_parser = JsonOutputKeyToolsParser(key_name="cited_answer", return_single=True)
(llm_with_tool | output_parser).invoke(example_q)
def format_docs_with_id(docs: List[Document]) -> str:
formatted = [
f"Source ID: {i}\nArticle Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for i, doc in enumerate(docs)
]
return "\n\n" + "\n\n".join(formatted)
format_1 = itemgetter("docs") | RunnableLambda(format_docs_with_id)
answer_1 = prompt | llm_with_tool | output_parser
chain_1 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_1)
.assign(cited_answer=answer_1)
.pick(["cited_answer", "docs"])
)
chain_1.invoke("How fast are cheetahs?")
class Citation(BaseModel):
source_id: int = Field(
...,
description="The integer ID of a SPECIFIC source which justifies the answer.",
)
quote: str = Field(
...,
description="The VERBATIM quote from the specified source that justifies the answer.",
)
class quoted_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[Citation] = Field(
..., description="Citations from the given sources that justify the answer."
)
output_parser_2 = JsonOutputKeyToolsParser(key_name="quoted_answer", return_single=True)
llm_with_tool_2 = llm.bind_tools(
[quoted_answer],
tool_choice="quoted_answer",
)
format_2 = itemgetter("docs") | RunnableLambda(format_docs_with_id)
answer_2 = prompt | llm_with_tool_2 | output_parser_2
chain_2 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_2)
.assign(quoted_answer=answer_2)
.pick(["quoted_answer", "docs"])
)
chain_2.invoke("How fast are cheetahs?")
from langchain_anthropic import ChatAnthropicMessages
anthropic = ChatAnthropicMessages(model_name="claude-instant-1.2")
system = """You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, \
answer the user question and provide citations. If none of the articles answer the question, just say you don't know.
Remember, you must return both an answer and citations. A citation consists of a VERBATIM quote that \
justifies the answer and the ID of the quote article. Return a citation for every quote across all articles \
that justify the answer. Use the following format for your final output:
<cited_answer>
<answer></answer>
<citations>
<citation><source_id></source_id><quote></quote></citation>
<citation><source_id></source_id><quote></quote></citation>
...
</citations>
</cited_answer>
Here are the Wikipedia articles:{context}"""
prompt_3 = ChatPromptTemplate.from_messages(
[("system", system), ("human", "{question}")]
)
from langchain_core.output_parsers import XMLOutputParser
def format_docs_xml(docs: List[Document]) -> str:
formatted = []
for i, doc in enumerate(docs):
doc_str = f"""\
<source id=\"{i}\">
<title>{doc.metadata['title']}</title>
<article_snippet>{doc.page_content}</article_snippet>
</source>"""
formatted.append(doc_str)
return "\n\n<sources>" + "\n".join(formatted) + "</sources>"
format_3 = itemgetter("docs") | RunnableLambda(format_docs_xml)
answer_3 = prompt_3 | anthropic | XMLOutputParser() | itemgetter("cited_answer")
chain_3 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_3)
.assign(cited_answer=answer_3)
.pick(["cited_answer", "docs"])
)
chain_3.invoke("How fast are cheetahs?")
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
splitter = RecursiveCharacterTextSplitter(
chunk_size=400,
chunk_overlap=0,
separators=["\n\n", "\n", ".", " "],
keep_separator=False,
)
compressor = EmbeddingsFilter(embeddings= | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
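# A post-processing sketch (assuming the truncated EmbeddingsFilter call above
# is closed, e.g. with a similarity_threshold around 0.8): split each retrieved
# document and keep only the chunks the filter deems relevant to the question.
def split_and_filter(input) -> List[Document]:
    docs = input["docs"]
    question = input["question"]
    split_docs = splitter.split_documents(docs)
    stateful_docs = compressor.compress_documents(split_docs, question)
    return list(stateful_docs)

retrieve = (
    RunnableParallel(question=RunnablePassthrough(), docs=wiki) | split_and_filter
)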
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sagemaker')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-search-results')
import os
os.environ["OPENAI_API_KEY"] = "<ADD-KEY-HERE>"
os.environ["SERPAPI_API_KEY"] = "<ADD-KEY-HERE>"
from langchain.agents import initialize_agent, load_tools
from langchain.callbacks import SageMakerCallbackHandler
from langchain.chains import LLMChain, SimpleSequentialChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
from sagemaker.analytics import ExperimentAnalytics
from sagemaker.experiments.run import Run
from sagemaker.session import Session
HPARAMS = {
"temperature": 0.1,
"model_name": "gpt-3.5-turbo-instruct",
}
BUCKET_NAME = None
EXPERIMENT_NAME = "langchain-sagemaker-tracker"
session = Session(default_bucket=BUCKET_NAME)
RUN_NAME = "run-scenario-1"
PROMPT_TEMPLATE = "tell me a joke about {topic}"
INPUT_VARIABLES = {"topic": "fish"}
with Run(
experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session
) as run:
sagemaker_callback = SageMakerCallbackHandler(run)
llm = | OpenAI(callbacks=[sagemaker_callback], **HPARAMS) | langchain_openai.OpenAI |
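    # Scenario 1 sketch (continuing the Run context above): run a single
    # LLMChain generation and flush the callback so metrics land in the
    # SageMaker experiment.
    chain = LLMChain(
        llm=llm,
        prompt=PromptTemplate.from_template(PROMPT_TEMPLATE),
        callbacks=[sagemaker_callback],
    )
    chain.run(**INPUT_VARIABLES)
    sagemaker_callback.flush_tracker()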
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain label-studio label-studio-sdk langchain-openai')
import os
os.environ["LABEL_STUDIO_URL"] = "<YOUR-LABEL-STUDIO-URL>" # e.g. http://localhost:8080
os.environ["LABEL_STUDIO_API_KEY"] = "<YOUR-LABEL-STUDIO-API-KEY>"
os.environ["OPENAI_API_KEY"] = "<YOUR-OPENAI-API-KEY>"
from langchain.callbacks import LabelStudioCallbackHandler
from langchain_openai import OpenAI
llm = OpenAI(
temperature=0, callbacks=[LabelStudioCallbackHandler(project_name="My Project")]
)
print(llm("Tell me a joke"))
from langchain.callbacks import LabelStudioCallbackHandler
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
chat_llm = ChatOpenAI(
callbacks=[
LabelStudioCallbackHandler(
mode="chat",
project_name="New Project with Chat",
)
]
)
llm_results = chat_llm(
[
| SystemMessage(content="Always use a lot of emojis") | langchain_core.messages.SystemMessage |
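# A sketch of the full chat call (assuming the truncated message list above is
# completed with a human turn):
llm_results = chat_llm(
    [
        SystemMessage(content="Always use a lot of emojis"),
        HumanMessage(content="Tell me a joke"),
    ]
)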
from langchain_community.chat_models import ChatDatabricks
from langchain_core.messages import HumanMessage
from mlflow.deployments import get_deploy_client
client = get_deploy_client("databricks")
secret = "secrets/<scope>/openai-api-key" # replace `<scope>` with your scope
name = "my-chat" # rename this if my-chat already exists
client.create_endpoint(
name=name,
config={
"served_entities": [
{
"name": "my-chat",
"external_model": {
"name": "gpt-4",
"provider": "openai",
"task": "llm/v1/chat",
"openai_config": {
"openai_api_key": "{{" + secret + "}}",
},
},
}
],
},
)
chat = ChatDatabricks(
target_uri="databricks",
endpoint=name,
temperature=0.1,
)
chat([HumanMessage(content="hello")])
from langchain_community.embeddings import DatabricksEmbeddings
embeddings = DatabricksEmbeddings(endpoint="databricks-bge-large-en")
embeddings.embed_query("hello")[:3]
from langchain_community.llms import Databricks
llm = Databricks(endpoint_name="dolly")
llm("How are you?")
llm("How are you?", stop=["."])
import os
import dbutils
os.environ["DATABRICKS_TOKEN"] = dbutils.secrets.get("myworkspace", "api_token")
llm = Databricks(host="myworkspace.cloud.databricks.com", endpoint_name="dolly")
llm("How are you?")
llm = Databricks(endpoint_name="dolly", model_kwargs={"temperature": 0.1})
llm("How are you?")
def transform_input(**request):
full_prompt = f"""{request["prompt"]}
Be Concise.
"""
request["prompt"] = full_prompt
return request
llm = | Databricks(endpoint_name="dolly", transform_input_fn=transform_input) | langchain_community.llms.Databricks |
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)')
get_ipython().system(' pip install "unstructured[all-docs]==0.10.19" pillow pydantic lxml pillow matplotlib tiktoken open_clip_torch torch')
path = "/Users/rlm/Desktop/cpi/"
from langchain_community.document_loaders import PyPDFLoader
loader = PyPDFLoader(path + "cpi.pdf")
pdf_pages = loader.load()
from langchain_text_splitters import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits_pypdf = text_splitter.split_documents(pdf_pages)
all_splits_pypdf_texts = [d.page_content for d in all_splits_pypdf]
from unstructured.partition.pdf import partition_pdf
raw_pdf_elements = partition_pdf(
filename=path + "cpi.pdf",
extract_images_in_pdf=True,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
tables = []
texts = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
tables.append(str(element))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
texts.append(str(element))
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
baseline = Chroma.from_texts(
texts=all_splits_pypdf_texts,
collection_name="baseline",
embedding=OpenAIEmbeddings(),
)
retriever_baseline = baseline.as_retriever()
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \
These summaries will be embedded and used to retrieve the raw text or table elements. \
Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOpenAI(temperature=0, model="gpt-4")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
import base64
import io
import os
from io import BytesIO
from langchain_core.messages import HumanMessage
from PIL import Image
def encode_image(image_path):
"""Getting the base64 string"""
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
def image_summarize(img_base64, prompt):
"""Image summary"""
chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=1024)
msg = chat.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": prompt},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
},
]
)
]
)
return msg.content
img_base64_list = []
image_summaries = []
prompt = """You are an assistant tasked with summarizing images for retrieval. \
These summaries will be embedded and used to retrieve the raw image. \
Give a concise summary of the image that is well optimized for retrieval."""
for img_file in sorted(os.listdir(path)):
if img_file.endswith(".jpg"):
img_path = os.path.join(path, img_file)
base64_image = encode_image(img_path)
img_base64_list.append(base64_image)
image_summaries.append(image_summarize(base64_image, prompt))
import uuid
from base64 import b64decode
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_core.documents import Document
def create_multi_vector_retriever(
vectorstore, text_summaries, texts, table_summaries, tables, image_summaries, images
):
store = InMemoryStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
def add_documents(retriever, doc_summaries, doc_contents):
doc_ids = [str(uuid.uuid4()) for _ in doc_contents]
summary_docs = [
Document(page_content=s, metadata={id_key: doc_ids[i]})
for i, s in enumerate(doc_summaries)
]
retriever.vectorstore.add_documents(summary_docs)
retriever.docstore.mset(list(zip(doc_ids, doc_contents)))
if text_summaries:
add_documents(retriever, text_summaries, texts)
if table_summaries:
add_documents(retriever, table_summaries, tables)
if image_summaries:
add_documents(retriever, image_summaries, images)
return retriever
multi_vector_img = Chroma(
collection_name="multi_vector_img", embedding_function=OpenAIEmbeddings()
)
retriever_multi_vector_img = create_multi_vector_retriever(
multi_vector_img,
text_summaries,
texts,
table_summaries,
tables,
image_summaries,
img_base64_list,
)
query = "What percentage of CPI is dedicated to Housing, and how does it compare to the combined percentage of Medical Care, Apparel, and Other Goods and Services?"
suffix_for_images = " Include any pie charts, graphs, or tables."
docs = retriever_multi_vector_img.get_relevant_documents(query + suffix_for_images)
from IPython.display import HTML, display
def plt_img_base64(img_base64):
image_html = f'<img src="data:image/jpeg;base64,{img_base64}" />'
display(HTML(image_html))
plt_img_base64(docs[1])
multi_vector_text = Chroma(
collection_name="multi_vector_text", embedding_function=OpenAIEmbeddings()
)
retriever_multi_vector_img_summary = create_multi_vector_retriever(
multi_vector_text,
text_summaries,
texts,
table_summaries,
tables,
image_summaries,
image_summaries,
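    # note: image_summaries appears twice on purpose: it is embedded for search
    # and also stands in for the raw document in this text-only variant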
)
from langchain_experimental.open_clip import OpenCLIPEmbeddings
multimodal_embd = Chroma(
collection_name="multimodal_embd", embedding_function=OpenCLIPEmbeddings()
)
image_uris = sorted(
[
os.path.join(path, image_name)
for image_name in os.listdir(path)
if image_name.endswith(".jpg")
]
)
if image_uris:
multimodal_embd.add_images(uris=image_uris)
if texts:
multimodal_embd.add_texts(texts=texts)
if tables:
multimodal_embd.add_texts(texts=tables)
retriever_multimodal_embd = multimodal_embd.as_retriever()
from operator import itemgetter
from langchain_core.runnables import RunnablePassthrough
template = """Answer the question based only on the following context, which can include text and tables:
{context}
Question: {question}
"""
rag_prompt_text = ChatPromptTemplate.from_template(template)
def text_rag_chain(retriever):
"""RAG chain"""
model = | ChatOpenAI(temperature=0, model="gpt-4") | langchain_openai.ChatOpenAI |
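    # A sketch completing this helper (assumed from the original notebook): wire
    # the retriever into the prompt/model pipeline and parse the output to text.
    chain = (
        {"context": retriever, "question": RunnablePassthrough()}
        | rag_prompt_text
        | model
        | StrOutputParser()
    )
    return chain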
import nest_asyncio
nest_asyncio.apply()
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import SurrealDBStore
from langchain_text_splitters import CharacterTextSplitter
documents = | TextLoader("../../modules/state_of_the_union.txt") | langchain_community.document_loaders.TextLoader |
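# A continuation sketch (assuming the truncated loader call above ends with
# `.load()` and a SurrealDB server is reachable at ws://localhost:8000/rpc):
docs = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(documents)
embeddings = HuggingFaceEmbeddings()
db = SurrealDBStore(dburl="ws://localhost:8000/rpc", embedding_function=embeddings)
await db.initialize()
await db.aadd_documents(docs)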
get_ipython().system('pip install -U openai langchain langchain-experimental')
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=256)
chat.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "What is this image showing"},
{
"type": "image_url",
"image_url": {
"url": "https://raw.githubusercontent.com/langchain-ai/langchain/master/docs/static/img/langchain_stack.png",
"detail": "auto",
},
},
]
)
]
)
from langchain.agents.openai_assistant import OpenAIAssistantRunnable
interpreter_assistant = OpenAIAssistantRunnable.create_assistant(
name="langchain assistant",
instructions="You are a personal math tutor. Write and run code to answer math questions.",
tools=[{"type": "code_interpreter"}],
model="gpt-4-1106-preview",
)
output = interpreter_assistant.invoke({"content": "What's 10 - 4 raised to the 2.7"})
output
get_ipython().system('pip install e2b duckduckgo-search')
from langchain.tools import DuckDuckGoSearchRun, E2BDataAnalysisTool
tools = [E2BDataAnalysisTool(api_key="..."), DuckDuckGoSearchRun()]
agent = OpenAIAssistantRunnable.create_assistant(
name="langchain assistant e2b tool",
instructions="You are a personal math tutor. Write and run code to answer math questions. You can also search the internet.",
tools=tools,
model="gpt-4-1106-preview",
as_agent=True,
)
from langchain.agents import AgentExecutor
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"content": "What's the weather in SF today divided by 2.7"})
agent = OpenAIAssistantRunnable.create_assistant(
name="langchain assistant e2b tool",
instructions="You are a personal math tutor. Write and run code to answer math questions.",
tools=tools,
model="gpt-4-1106-preview",
as_agent=True,
)
from langchain_core.agents import AgentFinish
def execute_agent(agent, tools, input):
tool_map = {tool.name: tool for tool in tools}
response = agent.invoke(input)
while not isinstance(response, AgentFinish):
tool_outputs = []
for action in response:
tool_output = tool_map[action.tool].invoke(action.tool_input)
print(action.tool, action.tool_input, tool_output, end="\n\n")
tool_outputs.append(
{"output": tool_output, "tool_call_id": action.tool_call_id}
)
response = agent.invoke(
{
"tool_outputs": tool_outputs,
"run_id": action.run_id,
"thread_id": action.thread_id,
}
)
return response
response = execute_agent(agent, tools, {"content": "What's 10 - 4 raised to the 2.7"})
print(response.return_values["output"])
next_response = execute_agent(
agent, tools, {"content": "now add 17.241", "thread_id": response.thread_id}
)
print(next_response.return_values["output"])
chat = ChatOpenAI(model="gpt-3.5-turbo-1106").bind(
response_format={"type": "json_object"}
)
output = chat.invoke(
[
SystemMessage(
content="Extract the 'name' and 'origin' of any companies mentioned in the following statement. Return a JSON list."
),
HumanMessage(
content="Google was founded in the USA, while Deepmind was founded in the UK"
),
]
)
print(output.content)
import json
json.loads(output.content)
chat = | ChatOpenAI(model="gpt-3.5-turbo-1106") | langchain_openai.ChatOpenAI |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-community langchainhub gpt4all chromadb')
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
from langchain_community.embeddings import GPT4AllEmbeddings
from langchain_community.vectorstores import Chroma
vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())
question = "What are the approaches to Task Decomposition?"
docs = vectorstore.similarity_search(question)
len(docs)
docs[0]
get_ipython().run_line_magic('pip', 'install --upgrade --quiet llama-cpp-python')
get_ipython().system(' CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 /Users/rlm/miniforge3/envs/llama/bin/pip install -U llama-cpp-python --no-cache-dir')
from langchain_community.llms import LlamaCpp
n_gpu_layers = 1 # Metal set to 1 is enough.
n_batch = 512 # Should be between 1 and n_ctx, consider the amount of RAM of your Apple Silicon Chip.
llm = LlamaCpp(
model_path="/Users/rlm/Desktop/Code/llama.cpp/models/llama-2-13b-chat.ggufv3.q4_0.bin",
n_gpu_layers=n_gpu_layers,
n_batch=n_batch,
n_ctx=2048,
f16_kv=True, # MUST set to True, otherwise you will run into problem after a couple of calls
verbose=True,
)
llm.invoke("Simulate a rap battle between Stephen Colbert and John Oliver")
from langchain_community.llms import GPT4All
gpt4all = GPT4All(
model="/Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin",
max_tokens=2048,
)
from langchain_community.llms.llamafile import Llamafile
llamafile = | Llamafile() | langchain_community.llms.llamafile.Llamafile |
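# A quick smoke test, assuming a llamafile server is already running on its
# default local port:
llamafile.invoke("Here is my grandmother's beloved recipe for spaghetti and meatballs:")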
get_ipython().system(' pip install langchain unstructured[all-docs] pydantic lxml')
from typing import Any
from pydantic import BaseModel
from unstructured.partition.pdf import partition_pdf
path = "/Users/rlm/Desktop/Papers/LLaVA/"
raw_pdf_elements = partition_pdf(
filename=path + "LLaVA.pdf",
extract_images_in_pdf=True,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
category_counts = {}
for element in raw_pdf_elements:
category = str(type(element))
if category in category_counts:
category_counts[category] += 1
else:
category_counts[category] = 1
unique_categories = set(category_counts.keys())
category_counts
class Element(BaseModel):
type: str
text: Any
categorized_elements = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
categorized_elements.append(Element(type="table", text=str(element)))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
categorized_elements.append(Element(type="text", text=str(element)))
table_elements = [e for e in categorized_elements if e.type == "table"]
print(len(table_elements))
text_elements = [e for e in categorized_elements if e.type == "text"]
print(len(text_elements))
from langchain_community.chat_models import ChatOllama
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
prompt_text = """You are an assistant tasked with summarizing tables and text. \
Give a concise summary of the table or text. Table or text chunk: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = | ChatOllama(model="llama2:13b-chat") | langchain_community.chat_models.ChatOllama |
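# A summarization sketch matching the prompt above: run each element through
# the prompt/model pipeline and batch over the extracted tables and texts.
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
table_summaries = summarize_chain.batch(
    [i.text for i in table_elements], {"max_concurrency": 5}
)
text_summaries = summarize_chain.batch(
    [i.text for i in text_elements], {"max_concurrency": 5}
)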
import os
os.environ["SERPER_API_KEY"] = ""
os.environ["OPENAI_API_KEY"] = ""
from typing import Any, List
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_community.utilities import GoogleSerperAPIWrapper
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_openai import ChatOpenAI, OpenAI
class SerperSearchRetriever(BaseRetriever):
search: GoogleSerperAPIWrapper = None
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any
) -> List[Document]:
return [Document(page_content=self.search.run(query))]
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
raise NotImplementedError()
retriever = SerperSearchRetriever(search= | GoogleSerperAPIWrapper() | langchain_community.utilities.GoogleSerperAPIWrapper |
arthur_url = "https://app.arthur.ai"
arthur_login = "your-arthur-login-username-here"
arthur_model_id = "your-arthur-model-id-here"
from langchain.callbacks import ArthurCallbackHandler
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI
def make_langchain_chat_llm():
return ChatOpenAI(
streaming=True,
temperature=0.1,
callbacks=[
| StreamingStdOutCallbackHandler() | langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler |
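            # A sketch completing this factory (assumed from the original
            # notebook): pair stdout streaming with an Arthur handler built
            # from the credentials defined above.
            ArthurCallbackHandler.from_credentials(
                arthur_model_id, arthur_url=arthur_url, arthur_login=arthur_login
            ),
        ],
    )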
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain fleet-context langchain-openai pandas faiss-cpu # faiss-gpu for CUDA supported GPU')
from operator import itemgetter
from typing import Any, Optional, Type
import pandas as pd
from langchain.retrievers import MultiVectorRetriever
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_core.stores import BaseStore
from langchain_core.vectorstores import VectorStore
from langchain_openai import OpenAIEmbeddings
def load_fleet_retriever(
df: pd.DataFrame,
*,
vectorstore_cls: Type[VectorStore] = FAISS,
docstore: Optional[BaseStore] = None,
**kwargs: Any,
):
vectorstore = _populate_vectorstore(df, vectorstore_cls)
if docstore is None:
return vectorstore.as_retriever(**kwargs)
else:
_populate_docstore(df, docstore)
return MultiVectorRetriever(
vectorstore=vectorstore, docstore=docstore, id_key="parent", **kwargs
)
def _populate_vectorstore(
df: pd.DataFrame,
vectorstore_cls: Type[VectorStore],
) -> VectorStore:
if not hasattr(vectorstore_cls, "from_embeddings"):
raise ValueError(
f"Incompatible vector store class {vectorstore_cls}."
"Must implement `from_embeddings` class method."
)
texts_embeddings = []
metadatas = []
for _, row in df.iterrows():
texts_embeddings.append((row.metadata["text"], row["dense_embeddings"]))
metadatas.append(row.metadata)
return vectorstore_cls.from_embeddings(
texts_embeddings,
OpenAIEmbeddings(model="text-embedding-ada-002"),
metadatas=metadatas,
)
def _populate_docstore(df: pd.DataFrame, docstore: BaseStore) -> None:
parent_docs = []
df = df.copy()
df["parent"] = df.metadata.apply(itemgetter("parent"))
for parent_id, group in df.groupby("parent"):
sorted_group = group.iloc[
group.metadata.apply(itemgetter("section_index")).argsort()
]
text = "".join(sorted_group.metadata.apply(itemgetter("text")))
metadata = {
k: sorted_group.iloc[0].metadata[k] for k in ("title", "type", "url")
}
text = metadata["title"] + "\n" + text
metadata["id"] = parent_id
parent_docs.append(Document(page_content=text, metadata=metadata))
docstore.mset(((d.metadata["id"], d) for d in parent_docs))
from context import download_embeddings
df = download_embeddings("langchain")
vecstore_retriever = load_fleet_retriever(df)
vecstore_retriever.get_relevant_documents("How does the multi vector retriever work")
from langchain.storage import InMemoryStore
parent_retriever = load_fleet_retriever(
"https://www.dropbox.com/scl/fi/4rescpkrg9970s3huz47l/libraries_langchain_release.parquet?rlkey=283knw4wamezfwiidgpgptkep&dl=1",
docstore=InMemoryStore(),
)
parent_retriever.get_relevant_documents("How does the multi vector retriever work")
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"""You are a great software engineer who is very familiar \
with Python. Given a user question or request about a new Python library called LangChain and \
parts of the LangChain documentation, answer the question or generate the requested code. \
Your answers must be accurate, should include code whenever possible, and shouldn't assume anything \
about LangChain which is not explicitly stated in the LangChain documentation. If the required \
information is not available, just say so.
LangChain Documentation
------------------
{context}""",
),
("human", "{question}"),
]
)
model = ChatOpenAI(model="gpt-3.5-turbo-16k")
chain = (
{
"question": RunnablePassthrough(),
"context": parent_retriever
| (lambda docs: "\n\n".join(d.page_content for d in docs)),
}
| prompt
| model
| | StrOutputParser() | langchain_core.output_parsers.StrOutputParser |
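# A usage sketch for the documentation-QA chain above (the request is
# illustrative):
chain.invoke(
    "How do I create a FAISS vector store retriever that returns 10 documents per search query"
)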
get_ipython().system('pip install pettingzoo pygame rlcard')
import collections
import inspect
import tenacity
from langchain.output_parsers import RegexParser
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
class GymnasiumAgent:
@classmethod
def get_docs(cls, env):
return env.unwrapped.__doc__
def __init__(self, model, env):
self.model = model
self.env = env
self.docs = self.get_docs(env)
self.instructions = """
Your goal is to maximize your return, i.e. the sum of the rewards you receive.
I will give you an observation, reward, termination flag, truncation flag, and the return so far, formatted as:
Observation: <observation>
Reward: <reward>
Termination: <termination>
Truncation: <truncation>
Return: <sum_of_rewards>
You will respond with an action, formatted as:
Action: <action>
where you replace <action> with your actual action.
Do nothing else but return the action.
"""
self.action_parser = RegexParser(
regex=r"Action: (.*)", output_keys=["action"], default_output_key="action"
)
self.message_history = []
self.ret = 0
def random_action(self):
action = self.env.action_space.sample()
return action
def reset(self):
self.message_history = [
SystemMessage(content=self.docs),
SystemMessage(content=self.instructions),
]
def observe(self, obs, rew=0, term=False, trunc=False, info=None):
self.ret += rew
obs_message = f"""
Observation: {obs}
Reward: {rew}
Termination: {term}
Truncation: {trunc}
Return: {self.ret}
"""
self.message_history.append(HumanMessage(content=obs_message))
return obs_message
def _act(self):
act_message = self.model(self.message_history)
self.message_history.append(act_message)
action = int(self.action_parser.parse(act_message.content)["action"])
return action
def act(self):
try:
for attempt in tenacity.Retrying(
stop=tenacity.stop_after_attempt(2),
wait=tenacity.wait_none(), # No waiting time between retries
retry=tenacity.retry_if_exception_type(ValueError),
before_sleep=lambda retry_state: print(
f"ValueError occurred: {retry_state.outcome.exception()}, retrying..."
),
):
with attempt:
action = self._act()
except tenacity.RetryError:
action = self.random_action()
return action
def main(agents, env):
env.reset()
for name, agent in agents.items():
agent.reset()
for agent_name in env.agent_iter():
observation, reward, termination, truncation, info = env.last()
obs_message = agents[agent_name].observe(
observation, reward, termination, truncation, info
)
print(obs_message)
if termination or truncation:
action = None
else:
action = agents[agent_name].act()
print(f"Action: {action}")
env.step(action)
env.close()
class PettingZooAgent(GymnasiumAgent):
@classmethod
def get_docs(cls, env):
return inspect.getmodule(env.unwrapped).__doc__
def __init__(self, name, model, env):
super().__init__(model, env)
self.name = name
def random_action(self):
action = self.env.action_space(self.name).sample()
return action
from pettingzoo.classic import rps_v2
env = rps_v2.env(max_cycles=3, render_mode="human")
agents = {
name: PettingZooAgent(name=name, model= | ChatOpenAI(temperature=1) | langchain_openai.ChatOpenAI |
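# A sketch completing the truncated dict above and running the rock-paper-
# scissors match (restated in full for clarity; assumed from the original
# notebook):
agents = {
    name: PettingZooAgent(name=name, model=ChatOpenAI(temperature=1), env=env)
    for name in env.possible_agents
}
main(agents, env)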
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken langchain-openai python-dotenv datasets langchain deeplake beautifulsoup4 html2text ragas')
ORG_ID = "..."
import getpass
import os
from langchain.chains import RetrievalQA
from langchain.vectorstores.deeplake import DeepLake
from langchain_openai import OpenAIChat, OpenAIEmbeddings
os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API token: ")
os.environ["ACTIVELOOP_TOKEN"] = getpass.getpass(
"Enter your ActiveLoop API token: "
) # Get your API token from https://app.activeloop.ai, click on your profile picture in the top right corner, and select "API Tokens"
token = os.getenv("ACTIVELOOP_TOKEN")
openai_embeddings = OpenAIEmbeddings()
db = DeepLake(
dataset_path=f"hub://{ORG_ID}/deeplake-docs-deepmemory", # org_id stands for your username or organization from activeloop
embedding=openai_embeddings,
runtime={"tensor_db": True},
token=token,
read_only=False,
)
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
def get_all_links(url):
response = requests.get(url)
if response.status_code != 200:
print(f"Failed to retrieve the page: {url}")
return []
soup = BeautifulSoup(response.content, "html.parser")
links = [
urljoin(url, a["href"]) for a in soup.find_all("a", href=True) if a["href"]
]
return links
base_url = "https://docs.deeplake.ai/en/latest/"
all_links = get_all_links(base_url)
from langchain.document_loaders import AsyncHtmlLoader
loader = AsyncHtmlLoader(all_links)
docs = loader.load()
from langchain.document_transformers import Html2TextTransformer
html2text = Html2TextTransformer()
docs_transformed = html2text.transform_documents(docs)
from langchain_text_splitters import RecursiveCharacterTextSplitter
chunk_size = 4096
docs_new = []
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size,
)
for doc in docs_transformed:
if len(doc.page_content) < chunk_size:
docs_new.append(doc)
else:
docs = text_splitter.create_documents([doc.page_content])
docs_new.extend(docs)
docs = db.add_documents(docs_new)
from typing import List
from langchain.chains.openai_functions import (
create_structured_output_chain,
)
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
docs = db.vectorstore.dataset.text.data(fetch_chunks=True, aslist=True)["value"]
ids = db.vectorstore.dataset.id.data(fetch_chunks=True, aslist=True)["value"]
llm = | ChatOpenAI(model="gpt-3.5-turbo", temperature=0) | langchain_openai.ChatOpenAI |
import os
os.environ["EXA_API_KEY"] = "..."
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-exa')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_exa import ExaSearchRetriever, TextContentsOptions
from langchain_openai import ChatOpenAI
retriever = ExaSearchRetriever(
k=5, text_contents_options= | TextContentsOptions(max_length=200) | langchain_exa.TextContentsOptions |
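# A sketch wiring the retriever above into a simple RAG chain (assuming the
# truncated ExaSearchRetriever call is closed; the prompt and query are
# illustrative):
prompt = PromptTemplate.from_template(
    """Answer the following query based on the following context:
query: {query}
<context>
{context}
</context>"""
)
llm = ChatOpenAI()
chain = (
    RunnableParallel({"context": retriever, "query": RunnablePassthrough()})
    | prompt
    | llm
)
chain.invoke("When is the best time to visit Japan?")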
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
def initialize_chain(instructions, memory=None):
if memory is None:
memory = | ConversationBufferWindowMemory() | langchain.memory.ConversationBufferWindowMemory |
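    # A sketch completing this helper (assumed from the original notebook):
    # build a conversational LLMChain whose window memory carries the running
    # dialogue alongside the task instructions.
    memory.ai_prefix = "Assistant"
    template = (
        "Instructions: " + instructions + "\n"
        "{chat_history}\n"
        "Human: {human_input}\n"
        "Assistant:"
    )
    prompt = PromptTemplate(
        input_variables=["chat_history", "human_input"], template=template
    )
    return LLMChain(
        llm=OpenAI(temperature=0), prompt=prompt, verbose=True, memory=memory
    )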
import xorbits.pandas as pd
from langchain_experimental.agents.agent_toolkits import create_xorbits_agent
from langchain_openai import OpenAI
data = pd.read_csv("titanic.csv")
agent = create_xorbits_agent( | OpenAI(temperature=0) | langchain_openai.OpenAI |
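# A sketch completing the truncated call above (assumed from the original
# notebook) and asking the agent a first question:
agent = create_xorbits_agent(OpenAI(temperature=0), data, verbose=True)
agent.run("How many rows are there?")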
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 nltk')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain_experimental')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pydantic')
import os
import boto3
comprehend_client = boto3.client("comprehend", region_name="us-east-1")
from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain
comprehend_moderation = AmazonComprehendModerationChain(
client=comprehend_client,
verbose=True, # optional
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comprehend_moderation
| {"input": (lambda x: x["output"]) | llm}
| comprehend_moderation
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?"
}
)
except ModerationPiiError as e:
print(str(e))
else:
print(response["output"])
from langchain_experimental.comprehend_moderation import (
BaseModerationConfig,
ModerationPiiConfig,
ModerationPromptSafetyConfig,
ModerationToxicityConfig,
)
pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5)
moderation_config = BaseModerationConfig(
filters=[pii_config, toxicity_config, prompt_safety_config]
)
comp_moderation_with_config = AmazonComprehendModerationChain(
moderation_config=moderation_config, # specify the configuration
client=comprehend_client, # optionally pass the Boto3 Client
verbose=True,
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comp_moderation_with_config
| {"input": (lambda x: x["output"]) | llm}
| comp_moderation_with_config
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-45-7890. Can you give me some more samples?"
}
)
except Exception as e:
print(str(e))
else:
print(response["output"])
from langchain_experimental.comprehend_moderation import BaseModerationCallbackHandler
class MyModCallback(BaseModerationCallbackHandler):
async def on_after_pii(self, output_beacon, unique_id):
import json
moderation_type = output_beacon["moderation_type"]
chain_id = output_beacon["moderation_chain_id"]
with open(f"output-{moderation_type}-{chain_id}.json", "w") as file:
data = {"beacon_data": output_beacon, "unique_id": unique_id}
json.dump(data, file)
"""
async def on_after_toxicity(self, output_beacon, unique_id):
pass
async def on_after_prompt_safety(self, output_beacon, unique_id):
pass
"""
my_callback = MyModCallback()
pii_config = | ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") | langchain_experimental.comprehend_moderation.ModerationPiiConfig |
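# A sketch wiring the callback into a new moderation chain (assumed from the
# original notebook): the unique_id below is illustrative, and the handler
# makes on_after_pii fire after PII moderation.
toxicity_config = ModerationToxicityConfig(threshold=0.5)
moderation_config = BaseModerationConfig(filters=[pii_config, toxicity_config])
comp_mod_with_callback = AmazonComprehendModerationChain(
    moderation_config=moderation_config,
    client=comprehend_client,
    unique_id="john.doe@email.com",
    moderation_callback=my_callback,
    verbose=True,
)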
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
template = """Answer the users question based only on the following context:
<context>
{context}
</context>
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI(temperature=0)
search = DuckDuckGoSearchAPIWrapper()
def retriever(query):
return search.run(query)
chain = (
{"context": retriever, "question": RunnablePassthrough()}
| prompt
| model
| StrOutputParser()
)
simple_query = "what is langchain?"
chain.invoke(simple_query)
distracted_query = "man that sam bankman fried trial was crazy! what is langchain?"
chain.invoke(distracted_query)
retriever(distracted_query)
template = """Provide a better search query for \
a web search engine to answer the given question, end \
the queries with ’**’. Question: \
{x} Answer:"""
rewrite_prompt = ChatPromptTemplate.from_template(template)
from langchain import hub
rewrite_prompt = | hub.pull("langchain-ai/rewrite") | langchain.hub.pull |
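# A rewrite-retrieve-read sketch (assumed from the original notebook): strip
# the model's quoting and the trailing '**' marker, then feed the rewritten
# query into the retriever-backed chain defined above.
def _parse(text):
    return text.strip('"').strip("**")

rewriter = rewrite_prompt | ChatOpenAI(temperature=0) | StrOutputParser() | _parse

rewrite_retrieve_read_chain = (
    {
        "context": {"x": RunnablePassthrough()} | rewriter | retriever,
        "question": RunnablePassthrough(),
    }
    | prompt
    | model
    | StrOutputParser()
)
rewrite_retrieve_read_chain.invoke(distracted_query)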
from langchain_community.document_loaders import WebBaseLoader
loader = | WebBaseLoader("https://www.espn.com/") | langchain_community.document_loaders.WebBaseLoader |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet infinopy')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet matplotlib')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken')
import datetime as dt
import json
import time
import matplotlib.dates as md
import matplotlib.pyplot as plt
from infinopy import InfinoClient
from langchain.callbacks import InfinoCallbackHandler
from langchain_openai import OpenAI
get_ipython().system('docker run --rm --detach --name infino-example -p 3000:3000 infinohq/infino:latest')
client = InfinoClient()
data = """In what country is Normandy located?
When were the Normans in Normandy?
From which countries did the Norse originate?
Who was the Norse leader?
What century did the Normans first gain their separate identity?
Who gave their name to Normandy in the 1000's and 1100's
What is France a region of?
Who did King Charles III swear fealty to?
When did the Frankish identity emerge?
Who was the duke in the battle of Hastings?
Who ruled the duchy of Normandy
What religion were the Normans
What type of major impact did the Norman dynasty have on modern Europe?
Who was famed for their Christian spirit?
Who assimilated the Roman language?
Who ruled the country of Normandy?
What principality did William the conquerer found?
What is the original meaning of the word Norman?
When was the Latin version of the word Norman first recorded?
What name comes from the English words Normans/Normanz?"""
questions = data.split("\n")
handler = InfinoCallbackHandler(
model_id="test_openai", model_version="0.1", verbose=False
)
llm = OpenAI(temperature=0.1)
num_questions = 10
questions = questions[0:num_questions]
for question in questions:
print(question)
llm_result = llm.generate([question], callbacks=[handler])
print(llm_result)
def plot(data, title):
data = json.loads(data)
timestamps = [item["time"] for item in data]
dates = [dt.datetime.fromtimestamp(ts) for ts in timestamps]
y = [item["value"] for item in data]
plt.rcParams["figure.figsize"] = [6, 4]
plt.subplots_adjust(bottom=0.2)
plt.xticks(rotation=25)
ax = plt.gca()
xfmt = md.DateFormatter("%Y-%m-%d %H:%M:%S")
ax.xaxis.set_major_formatter(xfmt)
plt.plot(dates, y)
plt.xlabel("Time")
plt.ylabel("Value")
plt.title(title)
plt.show()
response = client.search_ts("__name__", "latency", 0, int(time.time()))
plot(response.text, "Latency")
response = client.search_ts("__name__", "error", 0, int(time.time()))
plot(response.text, "Errors")
response = client.search_ts("__name__", "prompt_tokens", 0, int(time.time()))
plot(response.text, "Prompt Tokens")
response = client.search_ts("__name__", "completion_tokens", 0, int(time.time()))
plot(response.text, "Completion Tokens")
response = client.search_ts("__name__", "total_tokens", 0, int(time.time()))
plot(response.text, "Total Tokens")
query = "normandy"
response = client.search_log(query, 0, int(time.time()))
print("Results for", query, ":", response.text)
print("===")
query = "king charles III"
response = client.search_log("king charles III", 0, int(time.time()))
print("Results for", query, ":", response.text)
from langchain.chains.summarize import load_summarize_chain
from langchain_community.document_loaders import WebBaseLoader
from langchain_openai import ChatOpenAI
handler = InfinoCallbackHandler(
model_id="test_chatopenai", model_version="0.1", verbose=False
)
urls = [
"https://lilianweng.github.io/posts/2023-06-23-agent/",
"https://medium.com/lyft-engineering/lyftlearn-ml-model-training-infrastructure-built-on-kubernetes-aef8218842bb",
"https://blog.langchain.dev/week-of-10-2-langchain-release-notes/",
]
for url in urls:
loader = WebBaseLoader(url)
docs = loader.load()
llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo-16k", callbacks=[handler])
chain = load_summarize_chain(llm, chain_type="stuff", verbose=False)
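    chain.run(docs)  # hedged: assumed final loop step that summarizes each loaded page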
from langchain_community.vectorstores import Bagel
texts = ["hello bagel", "hello langchain", "I love salad", "my car", "a dog"]
cluster = Bagel.from_texts(cluster_name="testing", texts=texts)
cluster.similarity_search("bagel", k=3)
cluster.similarity_search_with_score("bagel", k=3)
cluster.delete_cluster()
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = | CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) | langchain_text_splitters.CharacterTextSplitter |
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().system('poetry run pip install replicate')
from getpass import getpass
REPLICATE_API_TOKEN = getpass()
import os
os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import Replicate
llm = Replicate(
model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
model_kwargs={"temperature": 0.75, "max_length": 500, "top_p": 1},
)
prompt = """
User: Answer the following yes/no question by reasoning step by step. Can a dog drive a car?
Assistant:
"""
llm(prompt)
llm = Replicate(
model="replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5"
)
prompt = """
Answer the following yes/no question by reasoning step by step.
Can a dog drive a car?
"""
llm(prompt)
text2image = Replicate(
model="stability-ai/stable-diffusion:db21e45d3f7023abc2a46ee38a23973f6dce16bb082a930b0c49861f96d1e5bf",
model_kwargs={"image_dimensions": "512x512"},
)
image_output = text2image("A cat riding a motorcycle by Picasso")
image_output
get_ipython().system('poetry run pip install Pillow')
from io import BytesIO
import requests
from PIL import Image
response = requests.get(image_output)
img = Image.open(BytesIO(response.content))
img
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
llm = Replicate(
streaming=True,
callbacks=[StreamingStdOutCallbackHandler()],
model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
model_kwargs={"temperature": 0.75, "max_length": 500, "top_p": 1},
)
prompt = """
User: Answer the following yes/no question by reasoning step by step. Can a dog drive a car?
Assistant:
"""
_ = llm(prompt)
import time
llm = Replicate(
model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
model_kwargs={"temperature": 0.01, "max_length": 500, "top_p": 1},
)
prompt = """
User: What is the best way to learn python?
Assistant:
"""
start_time = time.perf_counter()
raw_output = llm(prompt) # raw output, no stop
end_time = time.perf_counter()
print(f"Raw output:\n {raw_output}")
print(f"Raw output runtime: {end_time - start_time} seconds")
start_time = time.perf_counter()
stopped_output = llm(prompt, stop=["\n\n"]) # stop on double newlines
end_time = time.perf_counter()
print(f"Stopped output:\n {stopped_output}")
print(f"Stopped output runtime: {end_time - start_time} seconds")
from langchain.chains import SimpleSequentialChain
dolly_llm = Replicate(
    model="replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5"
)
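# Hedged sketch (assumption): chain two LLMChains with SimpleSequentialChain,
# e.g. name a company, then write its catchphrase. Prompts here are illustrative.
name_prompt = PromptTemplate(
    input_variables=["product"],
    template="What is a good name for a company that makes {product}?",
)
name_chain = LLMChain(llm=dolly_llm, prompt=name_prompt)
slogan_prompt = PromptTemplate(
    input_variables=["company"],
    template="Write a catchphrase for the following company: {company}",
)
slogan_chain = LLMChain(llm=dolly_llm, prompt=slogan_prompt)
overall_chain = SimpleSequentialChain(chains=[name_chain, slogan_chain], verbose=True)
overall_chain.run("colorful socks")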
from langchain_core.pydantic_v1 import BaseModel, Field
class Joke(BaseModel):
    setup: str = Field(description="The setup of the joke")
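    punchline: str = Field(description="The punchline of the joke")  # assumed companion field, added for a complete schema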
get_ipython().run_line_magic('pip', 'install --upgrade --quiet playwright beautifulsoup4')
get_ipython().system(' playwright install')
from langchain_community.document_loaders import AsyncChromiumLoader
urls = ["https://www.wsj.com"]
loader = AsyncChromiumLoader(urls)
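# Hedged usage sketch (assumption): load the pages headlessly, then convert the
# raw HTML to plain text with Html2TextTransformer.
docs = loader.load()
from langchain_community.document_transformers import Html2TextTransformer
html2text = Html2TextTransformer()
docs_transformed = html2text.transform_documents(docs)
docs_transformed[0].page_content[0:500]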
import os
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import ForefrontAI
from getpass import getpass
FOREFRONTAI_API_KEY = getpass()
os.environ["FOREFRONTAI_API_KEY"] = FOREFRONTAI_API_KEY
llm = ForefrontAI(endpoint_url="YOUR ENDPOINT URL HERE")
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm_chain = LLMChain(prompt=prompt, llm=llm)
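# Hedged usage sketch: run the chain on a sample question (question text is illustrative).
question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
llm_chain.run(question)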
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-community')
import os
os.environ["YDC_API_KEY"] = ""
os.environ["OPENAI_API_KEY"] = ""
from langchain_community.utilities.you import YouSearchAPIWrapper
utility = YouSearchAPIWrapper(num_web_results=1)
utility
import json
response = utility.raw_results(query="What is the weather in NY")
hits = response["hits"]
print(len(hits))
print(json.dumps(hits, indent=2))
response = utility.results(query="What is the weather in NY")
print(len(response))
print(response)
from langchain_community.retrievers.you import YouRetriever
retriever = YouRetriever(num_web_results=1)
retriever
response = retriever.invoke("What is the weather in NY")
print(len(response))
print(response)
get_ipython().system('pip install --upgrade --quiet langchain-openai')
from langchain_community.retrievers.you import YouRetriever
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
runnable = RunnablePassthrough
retriever = YouRetriever(num_web_results=1)
model = ChatOpenAI(model="gpt-3.5-turbo-16k")
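# Hedged sketch (assumption): a minimal RAG chain over the You.com retriever.
# The prompt wording and the sample query are illustrative.
prompt = ChatPromptTemplate.from_template(
    """Answer the question based only on the context provided.

Context: {context}

Question: {question}"""
)
output_parser = StrOutputParser()
chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | model
    | output_parser
)
chain.invoke("what is the weather in NY today")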
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-alloydb-pg langchain-google-vertexai')
from google.colab import auth
auth.authenticate_user()
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
get_ipython().system('gcloud services enable alloydb.googleapis.com')
REGION = "us-central1" # @param {type: "string"}
CLUSTER = "my-cluster" # @param {type: "string"}
INSTANCE = "my-primary" # @param {type: "string"}
DATABASE = "my-database" # @param {type: "string"}
TABLE_NAME = "vector_store" # @param {type: "string"}
from langchain_google_alloydb_pg import AlloyDBEngine
engine = await AlloyDBEngine.afrom_instance(
project_id=PROJECT_ID,
region=REGION,
cluster=CLUSTER,
instance=INSTANCE,
database=DATABASE,
)
await engine.ainit_vectorstore_table(
table_name=TABLE_NAME,
vector_size=768, # Vector size for VertexAI model(textembedding-gecko@latest)
)
get_ipython().system('gcloud services enable aiplatform.googleapis.com')
from langchain_google_vertexai import VertexAIEmbeddings
embedding = VertexAIEmbeddings(
model_name="textembedding-gecko@latest", project=PROJECT_ID
)
from langchain_google_alloydb_pg import AlloyDBVectorStore
store = await AlloyDBVectorStore.create(
engine=engine,
table_name=TABLE_NAME,
embedding_service=embedding,
)
import uuid
all_texts = ["Apples and oranges", "Cars and airplanes", "Pineapple", "Train", "Banana"]
metadatas = [{"len": len(t)} for t in all_texts]
ids = [str(uuid.uuid4()) for _ in all_texts]
await store.aadd_texts(all_texts, metadatas=metadatas, ids=ids)
await store.adelete([ids[1]])
query = "I'd like a fruit."
docs = await store.asimilarity_search(query)
print(docs)
query_vector = embedding.embed_query(query)
docs = await store.asimilarity_search_by_vector(query_vector, k=2)
print(docs)
from langchain_google_alloydb_pg.indexes import IVFFlatIndex
index = IVFFlatIndex()
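# Hedged sketch (assumption): apply the IVFFlat index to the store to speed up
# similarity search; aapply_vector_index is the async apply method in this package.
await store.aapply_vector_index(index)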
from langchain.chains import HypotheticalDocumentEmbedder, LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI, OpenAIEmbeddings
base_embeddings = OpenAIEmbeddings()
llm = OpenAI()
embeddings = HypotheticalDocumentEmbedder.from_llm(llm, base_embeddings, "web_search")
result = embeddings.embed_query("Where is the Taj Mahal?")
multi_llm = OpenAI(n=4, best_of=4)
embeddings = HypotheticalDocumentEmbedder.from_llm(
multi_llm, base_embeddings, "web_search"
)
result = embeddings.embed_query("Where is the Taj Mahal?")
prompt_template = """Please answer the user's question about the most recent state of the union address
Question: {question}
Answer:"""
prompt = PromptTemplate(input_variables=["question"], template=prompt_template)
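# Hedged sketch (assumption): wire the custom prompt into HyDE directly via an
# LLMChain instead of the "web_search" preset used above.
llm_chain = LLMChain(llm=llm, prompt=prompt)
embeddings = HypotheticalDocumentEmbedder(
    llm_chain=llm_chain, base_embeddings=base_embeddings
)
result = embeddings.embed_query(
    "What did the president say about Ketanji Brown Jackson"
)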
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
tools = load_tools(["google-serper"], llm=llm)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is the weather in Pomfret?")
tools = load_tools(["searchapi"], llm=llm)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is the weather in Pomfret?")
tools = load_tools(["serpapi"], llm=llm)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is the weather in Pomfret?")
tools = load_tools(["google-search"], llm=llm)
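# Same agent pattern as the searchapi and serpapi examples above, applied to google-search.
agent = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is the weather in Pomfret?")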
from langchain.evaluation import load_evaluator
evaluator = load_evaluator("criteria", criteria="conciseness")
from langchain.evaluation import EvaluatorType
evaluator = load_evaluator(EvaluatorType.CRITERIA, criteria="conciseness")
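# Hedged usage sketch: score a prediction against the conciseness criterion
# (the prediction and input strings are illustrative).
eval_result = evaluator.evaluate_strings(
    prediction="What's 2+2? That's an elementary question. The answer you're looking for is that two and two is four.",
    input="What's 2+2?",
)
print(eval_result)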
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-search-results')
import os
from langchain_community.tools.google_finance import GoogleFinanceQueryRun
from langchain_community.utilities.google_finance import GoogleFinanceAPIWrapper
os.environ["SERPAPI_API_KEY"] = ""
tool = GoogleFinanceQueryRun(api_wrapper=GoogleFinanceAPIWrapper())
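# Query the tool with a ticker or company name.
tool.run("Google")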
get_ipython().run_line_magic('pip', 'install --upgrade --quiet supabase')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
os.environ["SUPABASE_URL"] = getpass.getpass("Supabase URL:")
os.environ["SUPABASE_SERVICE_KEY"] = getpass.getpass("Supabase Service Key:")
from dotenv import load_dotenv
load_dotenv()
import os
from langchain_community.vectorstores import SupabaseVectorStore
from langchain_openai import OpenAIEmbeddings
from supabase.client import Client, create_client
supabase_url = os.environ.get("SUPABASE_URL")
supabase_key = os.environ.get("SUPABASE_SERVICE_KEY")
supabase: Client = create_client(supabase_url, supabase_key)
embeddings = OpenAIEmbeddings()
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
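# Hedged sketch (assumption): split the docs and upsert them into Supabase using
# the conventional "documents" table and "match_documents" query function.
docs = text_splitter.split_documents(documents)
vector_store = SupabaseVectorStore.from_documents(
    docs,
    embeddings,
    client=supabase,
    table_name="documents",
    query_name="match_documents",
)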
get_ipython().run_line_magic('pip', 'install --upgrade --quiet timescale-vector')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken')
import os
from dotenv import find_dotenv, load_dotenv
_ = load_dotenv(find_dotenv())
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
from typing import Tuple
from datetime import datetime, timedelta
from langchain.docstore.document import Document
from langchain_community.document_loaders import TextLoader
from langchain_community.document_loaders.json_loader import JSONLoader
from langchain_community.vectorstores.timescalevector import TimescaleVector
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../../extras/modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
SERVICE_URL = os.environ["TIMESCALE_SERVICE_URL"]
COLLECTION_NAME = "state_of_the_union_test"
db = TimescaleVector.from_documents(
embedding=embeddings,
documents=docs,
collection_name=COLLECTION_NAME,
service_url=SERVICE_URL,
)
query = "What did the president say about Ketanji Brown Jackson"
docs_with_score = db.similarity_search_with_score(query)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print(doc.page_content)
print("-" * 80)
retriever = db.as_retriever()
print(retriever)
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0.1, model="gpt-3.5-turbo-16k")
from langchain.chains import RetrievalQA
qa_stuff = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
verbose=True,
)
query = "What did the president say about Ketanji Brown Jackson?"
response = qa_stuff.run(query)
print(response)
from timescale_vector import client
def create_uuid(date_string: str):
if date_string is None:
return None
time_format = "%a %b %d %H:%M:%S %Y %z"
datetime_obj = datetime.strptime(date_string, time_format)
uuid = client.uuid_from_time(datetime_obj)
return str(uuid)
def split_name(input_string: str) -> Tuple[str, str]:
if input_string is None:
return None, None
start = input_string.find("<")
end = input_string.find(">")
name = input_string[:start].strip()
email = input_string[start + 1 : end].strip()
return name, email
def create_date(input_string: str) -> datetime:
if input_string is None:
return None
month_dict = {
"Jan": "01",
"Feb": "02",
"Mar": "03",
"Apr": "04",
"May": "05",
"Jun": "06",
"Jul": "07",
"Aug": "08",
"Sep": "09",
"Oct": "10",
"Nov": "11",
"Dec": "12",
}
components = input_string.split()
day = components[2]
month = month_dict[components[1]]
year = components[4]
time = components[3]
timezone_offset_minutes = int(components[5]) # Convert the offset to minutes
timezone_hours = timezone_offset_minutes // 60 # Calculate the hours
timezone_minutes = timezone_offset_minutes % 60 # Calculate the remaining minutes
timestamp_tz_str = (
f"{year}-{month}-{day} {time}+{timezone_hours:02}{timezone_minutes:02}"
)
return timestamp_tz_str
def extract_metadata(record: dict, metadata: dict) -> dict:
record_name, record_email = split_name(record["author"])
metadata["id"] = create_uuid(record["date"])
metadata["date"] = create_date(record["date"])
metadata["author_name"] = record_name
metadata["author_email"] = record_email
metadata["commit_hash"] = record["commit"]
return metadata
get_ipython().system('curl -O https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json')
FILE_PATH = "../../../../../ts_git_log.json"
loader = JSONLoader(
file_path=FILE_PATH,
jq_schema=".commit_history[]",
text_content=False,
metadata_func=extract_metadata,
)
documents = loader.load()
documents = [doc for doc in documents if doc.metadata["date"] is not None]
print(documents[0])
NUM_RECORDS = 500
documents = documents[:NUM_RECORDS]
text_splitter = CharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
)
docs = text_splitter.split_documents(documents)
COLLECTION_NAME = "timescale_commits"
embeddings = OpenAIEmbeddings()
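# Hedged sketch (assumption): load the commit docs into a time-partitioned
# collection, using the time-based UUIDs created in extract_metadata as ids.
db = TimescaleVector.from_documents(
    embedding=embeddings,
    ids=[doc.metadata["id"] for doc in docs],
    documents=docs,
    collection_name=COLLECTION_NAME,
    service_url=SERVICE_URL,
    time_partition_interval=timedelta(days=7),
)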
get_ipython().run_line_magic('pip', 'install --upgrade --quiet nlpcloud')
from langchain_community.embeddings import NLPCloudEmbeddings
import os
os.environ["NLPCLOUD_API_KEY"] = "xxx"
nlpcloud_embd = NLPCloudEmbeddings()
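# Standard Embeddings interface usage (sample text is illustrative).
text = "NLP Cloud production-ready multilingual embeddings."
query_result = nlpcloud_embd.embed_query(text)
doc_result = nlpcloud_embd.embed_documents([text])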
get_ipython().system('pip install langchain lark openai elasticsearch pandas')
import pandas as pd
details = (
pd.read_csv("~/Downloads/archive/Hotel_details.csv")
.drop_duplicates(subset="hotelid")
.set_index("hotelid")
)
attributes = pd.read_csv(
"~/Downloads/archive/Hotel_Room_attributes.csv", index_col="id"
)
price = pd.read_csv("~/Downloads/archive/hotels_RoomPrice.csv", index_col="id")
latest_price = price.drop_duplicates(subset="refid", keep="last")[
[
"hotelcode",
"roomtype",
"onsiterate",
"roomamenities",
"maxoccupancy",
"mealinclusiontype",
]
]
latest_price["ratedescription"] = attributes.loc[latest_price.index]["ratedescription"]
latest_price = latest_price.join(
details[["hotelname", "city", "country", "starrating"]], on="hotelcode"
)
latest_price = latest_price.rename({"ratedescription": "roomdescription"}, axis=1)
latest_price["mealsincluded"] = ~latest_price["mealinclusiontype"].isnull()
latest_price.pop("hotelcode")
latest_price.pop("mealinclusiontype")
latest_price = latest_price.reset_index(drop=True)
latest_price.head()
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-4")
res = model.predict(
"Below is a table with information about hotel rooms. "
"Return a JSON list with an entry for each column. Each entry should have "
'{"name": "column name", "description": "column description", "type": "column data type"}'
f"\n\n{latest_price.head()}\n\nJSON:\n"
)
import json
attribute_info = json.loads(res)
attribute_info
latest_price.nunique()[latest_price.nunique() < 40]
attribute_info[-2][
"description"
] += f". Valid values are {sorted(latest_price['starrating'].value_counts().index.tolist())}"
attribute_info[3][
"description"
] += f". Valid values are {sorted(latest_price['maxoccupancy'].value_counts().index.tolist())}"
attribute_info[-3][
"description"
] += f". Valid values are {sorted(latest_price['country'].value_counts().index.tolist())}"
attribute_info
from langchain.chains.query_constructor.base import (
get_query_constructor_prompt,
load_query_constructor_runnable,
)
doc_contents = "Detailed description of a hotel room"
prompt = get_query_constructor_prompt(doc_contents, attribute_info)
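# Hedged usage sketch (assumption): build the query-construction runnable and
# try it on a sample request; the query string is illustrative.
chain = load_query_constructor_runnable(
    ChatOpenAI(model="gpt-3.5-turbo", temperature=0), doc_contents, attribute_info
)
chain.invoke({"query": "I want a hotel in Southern Europe and my budget is 200 bucks."})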
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import chain
from langchain_openai import ChatOpenAI
prompt1 = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
prompt2 = ChatPromptTemplate.from_template("What is the subject of this joke: {joke}")
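# Hedged sketch (assumption): combine both prompts in a custom runnable via the
# @chain decorator imported above; the topic "bears" is illustrative.
@chain
def custom_chain(text):
    prompt_val1 = prompt1.invoke({"topic": text})
    output1 = ChatOpenAI().invoke(prompt_val1)
    parsed_output1 = StrOutputParser().invoke(output1)
    chain2 = prompt2 | ChatOpenAI() | StrOutputParser()
    return chain2.invoke({"joke": parsed_output1})

custom_chain.invoke("bears")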
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)')
get_ipython().system(' pip install "unstructured[all-docs]" pillow pydantic lxml pillow matplotlib chromadb tiktoken')
from langchain_text_splitters import CharacterTextSplitter
from unstructured.partition.pdf import partition_pdf
def extract_pdf_elements(path, fname):
"""
Extract images, tables, and chunk text from a PDF file.
path: File path, which is used to dump images (.jpg)
fname: File name
"""
return partition_pdf(
filename=path + fname,
extract_images_in_pdf=False,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
def categorize_elements(raw_pdf_elements):
"""
Categorize extracted elements from a PDF into tables and texts.
raw_pdf_elements: List of unstructured.documents.elements
"""
tables = []
texts = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
tables.append(str(element))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
texts.append(str(element))
return texts, tables
fpath = "/Users/rlm/Desktop/cj/"
fname = "cj.pdf"
raw_pdf_elements = extract_pdf_elements(fpath, fname)
texts, tables = categorize_elements(raw_pdf_elements)
text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
chunk_size=4000, chunk_overlap=0
)
joined_texts = " ".join(texts)
texts_4k_token = text_splitter.split_text(joined_texts)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
def generate_text_summaries(texts, tables, summarize_texts=False):
"""
Summarize text elements
texts: List of str
tables: List of str
summarize_texts: Bool to summarize texts
"""
prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \
These summaries will be embedded and used to retrieve the raw text or table elements. \
Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} """
    prompt = ChatPromptTemplate.from_template(prompt_text)
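    # Hedged continuation (assumption): batch-summarize the elements with a
    # simple chain and return the summaries; model choice is illustrative.
    model = ChatOpenAI(temperature=0, model="gpt-4")
    summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()

    text_summaries = []
    table_summaries = []

    if texts and summarize_texts:
        text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
    elif texts:
        text_summaries = texts

    if tables:
        table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})

    return text_summaries, table_summaries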
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryByteStore
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loaders = [
TextLoader("../../paul_graham_essay.txt"),
TextLoader("../../state_of_the_union.txt"),
]
docs = []
for loader in loaders:
docs.extend(loader.load())
text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000)
docs = text_splitter.split_documents(docs)
vectorstore = Chroma(
    collection_name="full_documents", embedding_function=OpenAIEmbeddings()
)
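# Hedged sketch (assumption): back the vectorstore with an in-memory byte store
# and build the multi-vector retriever keyed on "doc_id".
store = InMemoryByteStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
    vectorstore=vectorstore,
    byte_store=store,
    id_key=id_key,
)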
get_ipython().system(' pip install lancedb')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import LanceDB
from langchain.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
documents = CharacterTextSplitter().split_documents(documents)
embeddings = OpenAIEmbeddings()
from langchain.chains import GraphCypherQAChain
from langchain_community.graphs import Neo4jGraph
from langchain_openai import ChatOpenAI
graph = Neo4jGraph(
url="bolt://localhost:7687", username="neo4j", password="pleaseletmein"
)
graph.query(
"""
MERGE (m:Movie {name:"Top Gun"})
WITH m
UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor
MERGE (a:Actor {name:actor})
MERGE (a)-[:ACTED_IN]->(m)
"""
)
graph.refresh_schema()
print(graph.schema)
chain = GraphCypherQAChain.from_llm(
    ChatOpenAI(temperature=0), graph=graph, verbose=True
)
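# Ask a question that can be answered from the seeded graph.
chain.run("Who played in Top Gun?")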
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.fake import FakeEmbeddings
from langchain_community.vectorstores import Vectara
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
vectara = Vectara.from_documents(
    docs,
    embedding=FakeEmbeddings(size=768),
)
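# Hedged usage sketch: run a similarity search against the indexed speech.
query = "What did the president say about Ketanji Brown Jackson"
found_docs = vectara.similarity_search(query)
print(found_docs[0].page_content)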
get_ipython().system('pip install --upgrade langchain langchain-google-vertexai')
project: str = "PUT_YOUR_PROJECT_ID_HERE" # @param {type:"string"}
endpoint_id: str = "PUT_YOUR_ENDPOINT_ID_HERE" # @param {type:"string"}
location: str = "PUT_YOUR_ENDPOINT_LOCATION_HERE"  # @param {type:"string"}
from langchain_google_vertexai import (
GemmaChatVertexAIModelGarden,
GemmaVertexAIModelGarden,
)
llm = GemmaVertexAIModelGarden(
endpoint_id=endpoint_id,
project=project,
location=location,
)
output = llm.invoke("What is the meaning of life?")
print(output)
from langchain_core.messages import HumanMessage
llm = GemmaChatVertexAIModelGarden(
endpoint_id=endpoint_id,
project=project,
location=location,
)
message1 = HumanMessage(content="How much is 2+2?")
answer1 = llm.invoke([message1])
print(answer1)
message2 = HumanMessage(content="How much is 3+3?")
answer2 = llm.invoke([message1, answer1, message2])
print(answer2)
answer1 = llm.invoke([message1], parse_response=True)
print(answer1)
answer2 = llm.invoke([message1, answer1, message2], parse_response=True)
print(answer2)
get_ipython().system('mkdir -p ~/.kaggle && cp kaggle.json ~/.kaggle/kaggle.json')
get_ipython().system('pip install keras>=3 keras_nlp')
from langchain_google_vertexai import GemmaLocalKaggle
keras_backend: str = "jax" # @param {type:"string"}
model_name: str = "gemma_2b_en" # @param {type:"string"}
llm = GemmaLocalKaggle(model_name=model_name, keras_backend=keras_backend)
output = llm.invoke("What is the meaning of life?", max_tokens=30)
print(output)
from langchain_google_vertexai import GemmaChatLocalKaggle
keras_backend: str = "jax" # @param {type:"string"}
model_name: str = "gemma_2b_en" # @param {type:"string"}
llm = GemmaChatLocalKaggle(model_name=model_name, keras_backend=keras_backend)
from langchain_core.messages import HumanMessage
message1 = HumanMessage(content="Hi! Who are you?")
answer1 = llm.invoke([message1], max_tokens=30)
print(answer1)
message2 = HumanMessage(content="What can you help me with?")
answer2 = llm.invoke([message1, answer1, message2], max_tokens=60)
print(answer2)
answer1 = llm.invoke([message1], max_tokens=30, parse_response=True)
print(answer1)
answer2 = llm.invoke([message1, answer1, message2], max_tokens=60, parse_response=True)
print(answer2)
from langchain_google_vertexai import GemmaChatLocalHF, GemmaLocalHF
hf_access_token: str = "PUT_YOUR_TOKEN_HERE" # @param {type:"string"}
model_name: str = "google/gemma-2b" # @param {type:"string"}
llm = GemmaLocalHF(model_name="google/gemma-2b", hf_access_token=hf_access_token)
output = llm.invoke("What is the meaning of life?", max_tokens=50)
print(output)
llm = GemmaChatLocalHF(model_name=model_name, hf_access_token=hf_access_token)
from langchain_core.messages import HumanMessage
message1 = HumanMessage(content="Hi! Who are you?")
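# Same invoke pattern as the Kaggle-backed chat model above.
answer1 = llm.invoke([message1], max_tokens=60)
print(answer1)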
from langchain.prompts import PromptTemplate
prompt = (
PromptTemplate.from_template("Tell me a joke about {topic}")
+ ", make it funny"
+ "\n\nand in {language}"
)
prompt
prompt.format(topic="sports", language="spanish")
from langchain.chains import LLMChain
from langchain_openai import ChatOpenAI
model = ChatOpenAI()
chain = LLMChain(llm=model, prompt=prompt)
chain.run(topic="sports", language="spanish")
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
prompt = SystemMessage(content="You are a nice pirate")
new_prompt = (
    prompt + HumanMessage(content="hi") + AIMessage(content="what?") + "{input}"
)
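# Hedged usage sketch: format the composed chat prompt with a user input.
new_prompt.format_messages(input="i said hi")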
get_ipython().system(' nomic login')
get_ipython().system(' nomic login token')
get_ipython().system(' pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain')
import os
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGCHAIN_API_KEY"] = "api_key"
from langchain_community.document_loaders import WebBaseLoader
urls = [
"https://lilianweng.github.io/posts/2023-06-23-agent/",
"https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/",
"https://lilianweng.github.io/posts/2023-10-25-adv-attack-llm/",
]
docs = [WebBaseLoader(url).load() for url in urls]
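# Hedged continuation (assumption): flatten the per-URL document lists before splitting.
docs_list = [item for sublist in docs for item in sublist]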
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pymilvus')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Milvus
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
vector_db = Milvus.from_documents(
docs,
embeddings,
connection_args={"host": "127.0.0.1", "port": "19530"},
)
query = "What did the president say about Ketanji Brown Jackson"
docs = vector_db.similarity_search(query)
docs[0].page_content
vector_db = Milvus.from_documents(
docs,
embeddings,
collection_name="collection_1",
connection_args={"host": "127.0.0.1", "port": "19530"},
)
vector_db = Milvus(
embeddings,
connection_args={"host": "127.0.0.1", "port": "19530"},
collection_name="collection_1",
)
from langchain_core.documents import Document
docs = [
Document(page_content="i worked at kensho", metadata={"namespace": "harrison"}),
Document(page_content="i worked at facebook", metadata={"namespace": "ankush"}),
]
vectorstore = Milvus.from_documents(
docs,
embeddings,
connection_args={"host": "127.0.0.1", "port": "19530"},
drop_old=True,
partition_key_field="namespace", # Use the "namespace" field as the partition key
)
vectorstore.as_retriever(
search_kwargs={"expr": 'namespace == "ankush"'}
).get_relevant_documents("where did i work?")
vectorstore.as_retriever(
search_kwargs={"expr": 'namespace == "harrison"'}
).get_relevant_documents("where did i work?")
from langchain.docstore.document import Document
docs = [
Document(page_content="foo", metadata={"id": 1}),
| Document(page_content="bar", metadata={"id": 2}) | langchain.docstore.document.Document |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-community langchainhub gpt4all chromadb')
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
from langchain_community.embeddings import GPT4AllEmbeddings
from langchain_community.vectorstores import Chroma
vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())
question = "What are the approaches to Task Decomposition?"
docs = vectorstore.similarity_search(question)
len(docs)
docs[0]
get_ipython().run_line_magic('pip', 'install --upgrade --quiet llama-cpp-python')
get_ipython().system(' CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 /Users/rlm/miniforge3/envs/llama/bin/pip install -U llama-cpp-python --no-cache-dir')
from langchain_community.llms import LlamaCpp
n_gpu_layers = 1 # Metal set to 1 is enough.
n_batch = 512 # Should be between 1 and n_ctx, consider the amount of RAM of your Apple Silicon Chip.
llm = LlamaCpp(
model_path="/Users/rlm/Desktop/Code/llama.cpp/models/llama-2-13b-chat.ggufv3.q4_0.bin",
n_gpu_layers=n_gpu_layers,
n_batch=n_batch,
n_ctx=2048,
f16_kv=True, # MUST set to True, otherwise you will run into problem after a couple of calls
verbose=True,
)
llm.invoke("Simulate a rap battle between Stephen Colbert and John Oliver")
from langchain_community.llms import GPT4All
gpt4all = GPT4All(
model="/Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin",
max_tokens=2048,
)
from langchain_community.llms.llamafile import Llamafile
llamafile = Llamafile()
llamafile.invoke("Here is my grandmother's beloved recipe for spaghetti and meatballs:")
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
prompt = PromptTemplate.from_template(
"Summarize the main themes in these retrieved docs: {docs}"
)
def format_docs(docs):
return "\n\n".join(doc.page_content for doc in docs)
chain = {"docs": format_docs} | prompt | llm | StrOutputParser()
question = "What are the approaches to Task Decomposition?"
docs = vectorstore.similarity_search(question)
chain.invoke(docs)
from langchain import hub
rag_prompt = hub.pull("rlm/rag-prompt")
rag_prompt.messages
from langchain_core.runnables import RunnablePassthrough, RunnablePick
chain = (
RunnablePassthrough.assign(context=RunnablePick("context") | format_docs)
| rag_prompt
| llm
| StrOutputParser()
)
chain.invoke({"context": docs, "question": question})
rag_prompt_llama = hub.pull("rlm/rag-prompt-llama")
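# Hedged reuse (assumption): the Llama-specific prompt slots into the same RAG
# chain built above, with no other changes.
chain = (
    RunnablePassthrough.assign(context=RunnablePick("context") | format_docs)
    | rag_prompt_llama
    | llm
    | StrOutputParser()
)
chain.invoke({"context": docs, "question": question})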
from langchain.tools import ShellTool
shell_tool = ShellTool()
print(shell_tool.run({"commands": ["echo 'Hello World!'", "time"]}))
from langchain.agents import AgentType, initialize_agent
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0)
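# Hedged sketch (assumption): escape the curly braces in the tool's args before
# handing it to a chat agent, then run a shell-backed task.
shell_tool.description = shell_tool.description + f"args {shell_tool.args}".replace(
    "{", "{{"
).replace("}", "}}")
agent = initialize_agent(
    [shell_tool], llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("Download the langchain.com webpage and grep for all urls. Return only a sorted list of them.")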
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.chains import OpenAIModerationChain
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import OpenAI
moderate = OpenAIModerationChain()
model = OpenAI()
prompt = ChatPromptTemplate.from_messages([("system", "repeat after me: {input}")])
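# Hedged sketch (assumption): append the moderation chain after the model so
# flagged outputs are intercepted; the sample input is illustrative.
chain = prompt | model
moderated_chain = chain | moderate
moderated_chain.invoke({"input": "you are stupid"})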
get_ipython().system('poetry run pip install dgml-utils==0.3.0 --upgrade --quiet')
import os
from langchain_community.document_loaders import DocugamiLoader
DOCUGAMI_API_KEY = os.environ.get("DOCUGAMI_API_KEY")
docset_id = "26xpy3aes7xp"
document_ids = ["d7jqdzcj50sj", "cgd1eacfkchw"]
loader = DocugamiLoader(docset_id=docset_id, document_ids=document_ids)
chunks = loader.load()
len(chunks)
loader.min_text_length = 64
loader.include_xml_tags = True
chunks = loader.load()
for chunk in chunks[:5]:
print(chunk)
get_ipython().system('poetry run pip install --upgrade langchain-openai tiktoken chromadb hnswlib')
loader = DocugamiLoader(docset_id="zo954yqy53wp")
chunks = loader.load()
for chunk in chunks:
stripped_metadata = chunk.metadata.copy()
for key in chunk.metadata:
if key not in ["name", "xpath", "id", "structure"]:
del stripped_metadata[key]
chunk.metadata = stripped_metadata
print(len(chunks))
from langchain.chains import RetrievalQA
from langchain_community.vectorstores.chroma import Chroma
from langchain_openai import OpenAI, OpenAIEmbeddings
embedding = OpenAIEmbeddings()
vectordb = Chroma.from_documents(documents=chunks, embedding=embedding)
retriever = vectordb.as_retriever()
qa_chain = RetrievalQA.from_chain_type(
llm=OpenAI(), chain_type="stuff", retriever=retriever, return_source_documents=True
)
qa_chain("What can tenants do with signage on their properties?")
chain_response = qa_chain("What is rentable area for the property owned by DHA Group?")
chain_response["result"] # correct answer should be 13,500 sq ft
chain_response["source_documents"]
loader = DocugamiLoader(docset_id="zo954yqy53wp")
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
from langchain_community.vectorstores import Chroma
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
db = Chroma.from_documents(docs, embedding_function)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].page_content)
db2 = Chroma.from_documents(docs, embedding_function, persist_directory="./chroma_db")
docs = db2.similarity_search(query)
db3 = Chroma(persist_directory="./chroma_db", embedding_function=embedding_function)
docs = db3.similarity_search(query)
print(docs[0].page_content)
import chromadb
persistent_client = chromadb.PersistentClient()
collection = persistent_client.get_or_create_collection("collection_name")
collection.add(ids=["1", "2", "3"], documents=["a", "b", "c"])
langchain_chroma = Chroma(
client=persistent_client,
collection_name="collection_name",
embedding_function=embedding_function,
)
print("There are", langchain_chroma._collection.count(), "in the collection")
import uuid
import chromadb
from chromadb.config import Settings
client = chromadb.HttpClient(settings=Settings(allow_reset=True))
client.reset() # resets the database
collection = client.create_collection("my_collection")
for doc in docs:
collection.add(
ids=[str(uuid.uuid1())], metadatas=doc.metadata, documents=doc.page_content
)
db4 = Chroma(
client=client,
collection_name="my_collection",
embedding_function=embedding_function,
)
query = "What did the president say about Ketanji Brown Jackson"
docs = db4.similarity_search(query)
print(docs[0].page_content)
ids = [str(i) for i in range(1, len(docs) + 1)]
example_db = Chroma.from_documents(docs, embedding_function, ids=ids)
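# Hedged usage sketch (assumption): mutate metadata, update a stored document
# in place, then delete one by id via the underlying collection.
docs[0].metadata = {
    "source": "../../modules/state_of_the_union.txt",
    "new_value": "hello world",
}
example_db.update_document(ids[0], docs[0])
print(example_db._collection.get(ids=[ids[0]]))
example_db._collection.delete(ids=[ids[-1]])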
from langchain_community.llms import Baseten
mistral = Baseten(model="MODEL_ID", deployment="production")
mistral("What is the Mistral wind?")
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
template = """Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
{history}
Human: {human_input}
Assistant:"""
prompt = PromptTemplate(input_variables=["history", "human_input"], template=template)
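# Hedged sketch (assumption): wire the prompt into a conversational chain with
# a two-turn memory window, reusing the Baseten-hosted model from above.
chatgpt_chain = LLMChain(
    llm=mistral,
    prompt=prompt,
    verbose=True,
    memory=ConversationBufferWindowMemory(k=2),
)
output = chatgpt_chain.predict(human_input="What is the Mistral wind?")
print(output)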
get_ipython().run_line_magic('pip', "install --upgrade --quiet langchain-openai 'deeplake[enterprise]' tiktoken")
from langchain_community.vectorstores import DeepLake
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
activeloop_token = getpass.getpass("activeloop token:")
embeddings = OpenAIEmbeddings()
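# Hedged sketch (assumption): split a local text file and load it into a local
# Deep Lake store; the file path mirrors earlier examples and is assumed.
from langchain_community.document_loaders import TextLoader

loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
docs = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(documents)
db = DeepLake(dataset_path="./my_deeplake/", embedding=embeddings, overwrite=True)
db.add_documents(docs)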