|
import gradio as gr |
|
import transformers |
|
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM |
|
|
|
|
|
# Hugging Face Hub repo id of the instruction-tuned causal language model.
model_name = "IEEEVITPune-AI-Team/ChatbotAlpha0.7"

# Download (or load from the local cache) the model weights once at startup.
model = AutoModelForCausalLM.from_pretrained(model_name)

# Matching tokenizer for the same checkpoint.
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
|
|
|
|
def generate_response(prompt):
    """Generate a chatbot reply for ``prompt``.

    Wraps the user text in the Alpaca-style instruction template the model
    was presumably fine-tuned on (TODO confirm against the model card),
    runs text generation, and strips the echoed prompt from the output.

    Args:
        prompt: The user's question or instruction as plain text.

    Returns:
        The generated response text, with the instruction prefix removed
        and surrounding whitespace stripped.
    """
    instruction = f"### Instruction:\n{prompt}\n\n### Response:\n"

    # Build the generation pipeline once and cache it on the function object.
    # The original code re-created the pipeline on every call, which is
    # wasteful — the model and tokenizer are already loaded at module level.
    if not hasattr(generate_response, "_pipe"):
        generate_response._pipe = pipeline(
            task="text-generation",
            model=model,
            tokenizer=tokenizer,
            max_length=64,  # NOTE: max_length counts prompt + new tokens
        )

    result = generate_response._pipe(instruction)

    # The pipeline echoes the full prompt; slice it off to keep only the reply.
    generated_text = result[0]['generated_text'][len(instruction):].strip()

    return generated_text
|
|
|
|
|
# Gradio 3+ removed the `gr.inputs` / `gr.outputs` namespaces; components
# are now constructed directly and passed to gr.Interface.
input_text = gr.Textbox(lines=3, label="Enter your prompt")

output_text = gr.Textbox(label="Response")


# Wire the generation function to a simple web UI and start the server.
gr.Interface(
    fn=generate_response,
    inputs=input_text,
    outputs=output_text,
    title="Chatbot",
    description="What is IEEE?.",
).launch()

# NOTE(review): removed the trailing `ad("models/...").launch()` line —
# `ad` was an undefined name (likely a mangled leftover of `gr.load(...)`)
# and raised a NameError; the Interface above is the app's entry point.