import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration

# Load the fine-tuned checkpoint and its tokenizer from the local directory.
model_path = './fine-tuned-t5-efficient-tiny'
tokenizer = T5Tokenizer.from_pretrained(model_path)
model = T5ForConditionalGeneration.from_pretrained(model_path)

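# Optional sketch, not in the original script: move the model to a GPU when
# one is available and switch to eval mode so dropout is disabled at
# inference time. Harmless on CPU-only machines.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
model.eval()
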
def generate_response(input_text, model, tokenizer, max_length=256):
    # Tokenize the prompt; pad/truncate to a fixed length of max_length tokens.
    inputs = tokenizer(input_text, return_tensors='pt', truncation=True,
                       padding='max_length', max_length=max_length)
    # Keep the input tensors on the same device as the model.
    inputs = {k: v.to(model.device) for k, v in inputs.items()}

    # Nucleus (top-p) sampling with a single beam. early_stopping is dropped
    # because it only applies to beam search; no_grad avoids building the
    # autograd graph during inference.
    with torch.no_grad():
        outputs = model.generate(
            input_ids=inputs['input_ids'],
            attention_mask=inputs['attention_mask'],
            max_length=max_length,
            num_beams=1,
            do_sample=True,
            temperature=1.0,
            top_p=0.9,
        )

    # Decode the generated token ids back into text.
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response

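# A hedged alternative, not part of the original script: swapping the sampling
# call above for beam search gives more repeatable answers at some latency
# cost. Sketch only; num_beams=4 is an illustrative value.
def generate_response_beam(input_text, model, tokenizer, max_length=256, num_beams=4):
    inputs = tokenizer(input_text, return_tensors='pt', truncation=True,
                       max_length=max_length)
    inputs = {k: v.to(model.device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_length=max_length,
            num_beams=num_beams,
            early_stopping=True,  # stop once enough finished beams exist
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
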
def chat_with_model():
    print("Chatbot is ready! Type 'exit' to end the conversation.")
    while True:
        user_input = input("You: ")
        if user_input.lower() == 'exit':
            print("Goodbye!")
            break
        response = generate_response(user_input, model, tokenizer)
        print(f"Chatbot: {response}")


if __name__ == '__main__':
    chat_with_model()