# app.py — Gradio chatbot for a Hugging Face Space
# (Llama-3-8B-Instruct text-generation with a predefined ranking context)
import os

import gradio as gr
import torch
import transformers

# Retrieve the Hugging Face API token from the environment. On Spaces this
# is injected from the repo secrets; it is required for gated models such
# as Meta-Llama-3.
hf_token = os.getenv("HF_TOKEN")
if not hf_token:
    raise ValueError("Hugging Face token not found. Please add it to the secrets in Hugging Face Spaces.")

# Load the chatbot model once at import time.
model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},  # bf16 halves memory vs fp32
    device_map="auto",  # place layers on GPU(s)/CPU automatically
    token=hf_token,  # fix: `use_auth_token` is deprecated; `token` is the supported kwarg
)
# Predefined institution scores across four metrics.
example_data = [
    {"Institution": "A", "TLR": 70, "GO": 85, "OI": 90, "PR": 75},
    {"Institution": "B", "TLR": 80, "GO": 88, "OI": 85, "PR": 90},
    {"Institution": "C", "TLR": 65, "GO": 80, "OI": 70, "PR": 60},
]


def _total_score(record):
    """Sum of one institution's four metric scores."""
    return record["TLR"] + record["GO"] + record["OI"] + record["PR"]


# Render the rankings (highest total first) as a readable bullet list.
_ranked = sorted(example_data, key=_total_score, reverse=True)
predefined_context = "Here are the institution rankings based on scores:\n" + "".join(
    f"- {rec['Institution']} (Total Score: {_total_score(rec)})\n" for rec in _ranked
)

# System prompt that supplies the ranking context to the model.
system_prompt = f"""You are an intelligent assistant. Here is some contextual information:
{predefined_context}
When a user asks about rankings, respond with this information. If the user asks general questions, respond appropriately.
"""
# Chatbot function
def chatbot_response(user_message):
    """Generate an assistant reply to *user_message*.

    The module-level ``system_prompt`` (institution rankings) is prepended
    so the model can answer ranking questions from context.

    Returns the model's completion as a stripped string.
    """
    # Combine system prompt with the user's message.
    full_prompt = f"{system_prompt}\nUser: {user_message}\nAssistant:"
    outputs = pipeline(
        full_prompt,
        max_new_tokens=150,  # cap reply length
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
    )
    generated = outputs[0]["generated_text"]
    # Fix: text-generation pipelines echo the prompt at the start of
    # "generated_text" by default, which would expose the whole system
    # prompt to the user. Strip the prompt prefix and return only the reply.
    if generated.startswith(full_prompt):
        generated = generated[len(full_prompt):]
    return generated.strip()
# Gradio interface
def build_gradio_ui():
    """Assemble and return the Gradio Blocks UI (does not launch it)."""
    with gr.Blocks() as ui:
        gr.Markdown("## Intelligent Chatbot with Predefined Context and AI Responses")
        gr.Markdown("Ask about institution rankings or any general query!")
        with gr.Row():
            message_box = gr.Textbox(label="Your Message", placeholder="Type your message here...")
            reply_box = gr.Textbox(label="Chatbot Response", interactive=False)
        send_button = gr.Button("Send")
        # Route the typed message through the model on click.
        send_button.click(chatbot_response, inputs=[message_box], outputs=[reply_box])
    return ui
# Launch the Gradio app with a public link
# Built at import time so hosting platforms (e.g. HF Spaces) can find `demo`.
demo = build_gradio_ui()
if __name__ == "__main__":
    demo.launch(share=True)  # Enable public link