import gradio as gr
from sentence_transformers import SentenceTransformer, util
import openai
import os

os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Initialize paths and model identifiers for easy configuration and maintenance
filename = "output_topic_details.txt"  # Path to the file storing restaurant-specific details
retrieval_model_name = 'output/sentence-transformer-finetuned/'
openai.api_key = os.environ["OPENAI_API_KEY"]

system_message = "You are a restaurant finder chatbot that specializes in providing sustainable restaurants in the Seattle area that accommodate user-input dietary restrictions and cuisine preferences. The user will input a message in the format 'Could you give me a (cuisine) restaurant with (dietary restriction) options that is (budget) budget?', and you will check the Details About Restaurants dataset to find ONE restaurant that meets the user's criteria and output the restaurant's name and link."

# Initial system message to set the behavior of the assistant
messages = [{"role": "system", "content": system_message}]

# Attempt to load the necessary models and provide feedback on success or failure
try:
    retrieval_model = SentenceTransformer(retrieval_model_name)
    print("Models loaded successfully.")
except Exception as e:
    print(f"Failed to load models: {e}")


def load_and_preprocess_text(filename):
    """
    Load and preprocess text from a file, removing empty lines and stripping whitespace.
    """
    try:
        with open(filename, 'r', encoding='utf-8') as file:
            segments = [line.strip() for line in file if line.strip()]
        print("Text loaded and preprocessed successfully.")
        return segments
    except Exception as e:
        print(f"Failed to load or preprocess text: {e}")
        return []


segments = load_and_preprocess_text(filename)


def find_relevant_segment(user_query, segments):
    """
    Find the most relevant text segment for a user's query using cosine similarity
    among sentence embeddings. The best match is chosen based on the content of the query.
    """
    try:
        # Lowercase the query for better matching
        lower_query = user_query.lower()
        # Encode the query and the segments
        query_embedding = retrieval_model.encode(lower_query)
        segment_embeddings = retrieval_model.encode(segments)
        # Compute cosine similarities between the query and the segments
        similarities = util.pytorch_cos_sim(query_embedding, segment_embeddings)[0]
        # Find the index of the most similar segment
        best_idx = similarities.argmax()
        # Return the most relevant segment
        return segments[best_idx]
    except Exception as e:
        print(f"Error in finding relevant segment: {e}")
        return ""


def generate_response(user_query, relevant_segment):
    """
    Generate a response emphasizing the bot's capability in providing restaurant recommendations.
    """
    try:
        # Combine the user's query with the retrieved restaurant details so the model can pick a match
        user_message = f"User query: {user_query}\n\nHere are the restaurant details matching your criteria: {relevant_segment}"
        # Append user's message to messages list
        messages.append({"role": "user", "content": user_message})
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages,
            max_tokens=150,
            temperature=0.2,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0
        )
        # Extract the response text
        output_text = response['choices'][0]['message']['content'].strip()
        # Append assistant's message to messages list for context
        messages.append({"role": "assistant", "content": output_text})
        return output_text
    except Exception as e:
        print(f"Error in generating response: {e}")
        return f"Error in generating response: {e}"


def query_model(question):
    """
    Process a question, find relevant information, and generate a response.
""" if question == "": return "Give me your preferences..." relevant_segment = find_relevant_segment(question, segments) if not relevant_segment: return "Could not find specific information. Please refine your question." response = generate_response(question, relevant_segment) return response # Define the welcome message and specific topics the chatbot can provide information about welcome_message = """ # Welcome to Ethical Eats Explorer! ## Your AI-driven assistant for restaurant recs in Seattle. Created by Saranya, Cindy, and Liana of the 2024 Kode With Klossy Seattle Camp. """ topics = """ ### Please give me your restaurant preferences: - Dietary Restrictions - Cuisine Preferences (optional) - Budget Preferences (Low: $0 - $20, Moderate: $20 - $30, High: $30+ - per person) """ # Setup the Gradio Blocks interface with custom layout components with gr.Blocks(theme='JohnSmith9982/small_and_pretty') as demo: gr.Markdown(welcome_message) # Display the formatted welcome message with gr.Row(): with gr.Column(): gr.Markdown(topics) # Show the topics on the left side with gr.Row(): with gr.Column(): question = gr.Textbox(label="Your question", placeholder="Give me your information...") answer = gr.Textbox(label="Explorer's Response", placeholder="Explorer will respond here...", interactive=False, lines=10) submit_button = gr.Button("Submit") submit_button.click(fn=query_model, inputs=question, outputs=answer) # Launch the Gradio app to allow user interaction demo.launch(share=True)