import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
# Load the model and tokenizer
model_name = "IEEEVITPune-AI-Team/ChatbotAlpha0.7"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Build the text-generation pipeline once so it is not re-created on every request
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=64)

# Define the function that generates a response for a given prompt
def generate_response(prompt):
    # Wrap the prompt in the instruction/response template the model was fine-tuned on
    instruction = f"### Instruction:\n{prompt}\n\n### Response:\n"
    result = pipe(instruction)
    # Strip the echoed instruction so only the model's reply is returned
    generated_text = result[0]['generated_text'][len(instruction):].strip()
    return generated_text
# Create a Gradio interface
input_text = gr.Textbox(lines=3, label="Enter your prompt")
output_text = gr.Textbox(label="Response")
gr.Interface(fn=generate_response, inputs=input_text, outputs=output_text, title="Chatbot", description="What is IEEE?").launch()
ad("models/IEEEVITPune-AI-Team/ChatbotAlpha0.7").launch()