import os

import streamlit as st
from dotenv import load_dotenv
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate

# Load environment variables from a local .env file
load_dotenv()

# Enable LangChain tracing (optional). Guard the assignment: os.environ values
# must be strings, so setting a missing key would raise a TypeError.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
langchain_api_key = os.getenv("LANGCHAIN_API_KEY")
if langchain_api_key:
    os.environ["LANGCHAIN_API_KEY"] = langchain_api_key
# Initialize the chat model. Caveat: ChatOpenAI speaks the OpenAI API, so a
# Hugging Face model id such as "meta-llama/Llama-2-7b-chat-hf" only resolves
# if openai_api_base points at an OpenAI-compatible server hosting that model;
# against api.openai.com, substitute an OpenAI model name.
llm = ChatOpenAI(
    model_name="meta-llama/Llama-2-7b-chat-hf",
    temperature=0.7,
    max_tokens=512,
    openai_api_key=os.getenv("OPENAI_API_KEY"),
)
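
# Alternative sketch (not in the original app): to genuinely query Llama 2 via
# the Hugging Face Inference API, LangChain's classic HuggingFaceHub wrapper
# can stand in for ChatOpenAI. It needs HUGGINGFACEHUB_API_TOKEN in the
# environment and access to the gated meta-llama repo. The USE_HF_HUB flag is
# hypothetical, added here only so both code paths can coexist in one file.
# Note that HuggingFaceHub is a plain text-completion LLM, so LLMChain will
# flatten the chat template below into a single string prompt.
if os.getenv("USE_HF_HUB") == "1":
    from langchain.llms import HuggingFaceHub

    llm = HuggingFaceHub(
        repo_id="meta-llama/Llama-2-7b-chat-hf",
        model_kwargs={"temperature": 0.7, "max_new_tokens": 512},
    )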
# Define the prompt template
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        ("user", "Question: {question}"),
    ]
)
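
# Illustration (not in the original): with question="What is LangChain?",
# prompt.format_messages(question="What is LangChain?") produces
#   [SystemMessage(content="You are a helpful assistant."),
#    HumanMessage(content="Question: What is LangChain?")]
# which is the message list the chat model receives through the chain.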
# Create the LLM chain
chain = LLMChain(llm=llm, prompt=prompt, output_key="response")
# Streamlit app interface
st.title("LangChain Demo with LLaMA 2 on Hugging Face")

# User input
input_text = st.text_input("Enter your question:")

# Display the response
if input_text:
    try:
        response = chain.run({"question": input_text})
        st.write(response)
    except Exception as e:
        st.error(f"Error: {e}")
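
# To run the app locally (assuming this script is saved as app.py):
#   streamlit run app.py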