from flask import Flask, render_template, request, jsonify
from transformers import pipeline, LlamaTokenizer, LlamaForCausalLM
import fitz  # PyMuPDF

# Load the LLaMA model and tokenizer
model_name = "NousResearch/Llama-2-7b-chat-hf"  # Replace with the specific LLaMA model you want to use
tokenizer = LlamaTokenizer.from_pretrained(model_name)
model = LlamaForCausalLM.from_pretrained(model_name)

# Initialize the text-generation pipeline
llm_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
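
# Note: loading a 7B-parameter model in full fp32 precision needs roughly
# 28 GB of memory. If a GPU is available, a lighter-weight load is possible
# (illustrative sketch; requires `import torch` and the accelerate package):
#   model = LlamaForCausalLM.from_pretrained(
#       model_name, torch_dtype=torch.float16, device_map="auto"
#   )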

# Function to extract the text content of every page in a PDF
def extract_text_from_pdf(pdf_path):
    document = fitz.open(pdf_path)
    text = ""
    for page_num in range(document.page_count):
        page = document.load_page(page_num)
        text += page.get_text()
    document.close()
    return text
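
# Quick manual sanity check of the extraction (hypothetical usage; output
# depends entirely on the PDF's contents):
#   print(extract_text_from_pdf("Landon_Hotel.pdf")[:500])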

# Load the hotel information from the PDF to use as prompt context
pdf_path = "Landon_Hotel.pdf"
prompt = extract_text_from_pdf(pdf_path)
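
# Note: Llama-2 has a 4096-token context window, so a long PDF can overflow
# the prompt. One illustrative guard (3500 is an assumed budget that leaves
# room for the template and the generated answer):
#   ids = tokenizer(prompt, truncation=True, max_length=3500)["input_ids"]
#   prompt = tokenizer.decode(ids, skip_special_tokens=True)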

hotel_assistant_template = prompt + """
You are the hotel manager of Landon Hotel, named "Mr. Landon".
Your expertise is exclusively in providing information and advice about anything related to Landon Hotel.
This includes any general Landon Hotel-related queries.
You do not provide information outside of this scope.
If a question is not about Landon Hotel, respond with: "I can't assist you with that, sorry!"

Question: {question}
Answer:
"""

def query_llm(question):
    # Insert the question into the template; str.replace avoids str.format()
    # errors if the extracted PDF text contains literal curly braces
    final_prompt = hotel_assistant_template.replace("{question}", question)
    # Generate a response with the LLaMA pipeline; max_new_tokens bounds the
    # answer length without counting the long prompt toward the limit, and
    # return_full_text=False drops the echoed prompt from the output
    response = llm_pipeline(
        final_prompt, max_new_tokens=150, do_sample=True, return_full_text=False
    )[0]["generated_text"]
    return response.strip()
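
# Example call (answers vary between runs because do_sample=True):
#   print(query_llm("What time is check-in at Landon Hotel?"))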

app = Flask(__name__)

# Route paths are assumed here; adjust them to match the front-end in index.html
@app.route("/")
def index():
    return render_template("index.html")

@app.route("/chatbot", methods=["POST"])
def chatbot():
    data = request.get_json()
    question = data["question"]
    response = query_llm(question)
    return jsonify({"response": response})
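
# Example request against the assumed /chatbot route, once the app is running
# on the default Flask development address:
#   curl -X POST http://127.0.0.1:5000/chatbot \
#        -H "Content-Type: application/json" \
#        -d '{"question": "Does Landon Hotel allow pets?"}'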

if __name__ == "__main__":
    app.run(debug=True)