import os

import streamlit as st
from dotenv import load_dotenv
from langchain.chains import LLMChain
from langchain.llms import HuggingFaceHub
from langchain.prompts import ChatPromptTemplate
# Load environment variables from a local .env file
load_dotenv()

# Enable LangSmith tracing (optional). Only switch it on when an API key is
# actually present; assigning None to os.environ raises a TypeError.
if os.getenv("LANGCHAIN_API_KEY"):
    os.environ["LANGCHAIN_TRACING_V2"] = "true"
# Initialize the LLaMA 2 chat model through the Hugging Face Hub inference
# API. ChatOpenAI cannot serve a Hugging Face repo id against the OpenAI API,
# so the HuggingFaceHub wrapper is used instead; it reads
# HUGGINGFACEHUB_API_TOKEN from the environment.
llm = HuggingFaceHub(
    repo_id="meta-llama/Llama-2-7b-chat-hf",
    model_kwargs={"temperature": 0.7, "max_new_tokens": 512},
)
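# Alternative sketch, assuming LLaMA 2 is served behind an OpenAI-compatible
# endpoint (the URL below is hypothetical): the original ChatOpenAI setup
# works if it is pointed at that server instead of the default OpenAI API.
#
#   from langchain.chat_models import ChatOpenAI
#   llm = ChatOpenAI(
#       model_name="meta-llama/Llama-2-7b-chat-hf",
#       temperature=0.7,
#       max_tokens=512,
#       openai_api_base="http://localhost:8000/v1",
#       openai_api_key=os.getenv("OPENAI_API_KEY"),
#   )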
# Define the prompt template
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        ("user", "Question: {question}"),
    ]
)
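# Illustrative note: HuggingFaceHub is a plain-text LLM, so LangChain flattens
# this chat template into a single prompt string before sending it, roughly:
#   System: You are a helpful assistant.
#   Human: Question: {question}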
# Create the LLM Chain
chain = LLMChain(llm=llm, prompt=prompt, output_key="response")
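# Note: because the chain has a single output key, chain.run returns the
# response text directly rather than a {"response": ...} dict.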
# Streamlit App Interface
st.title('LangChain Demo with LLaMA 2 on Hugging Face')
# User input
input_text = st.text_input("Enter your question:")
# Display the response
if input_text:
    try:
        response = chain.run({"question": input_text})
        st.write(response)
    except Exception as e:
        st.error(f"Error: {e}")