|
import functools

import gradio as gr
import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration
|
|
|
@functools.lru_cache(maxsize=1)
def _load_summarizer():
    """Load the fine-tuned T5 model and tokenizer exactly once.

    The original code reloaded the checkpoint from the Hub on every call,
    which is slow (seconds per request) and wasteful; caching with
    ``lru_cache(maxsize=1)`` makes repeat calls effectively free.

    Returns:
        tuple: ``(model, tokenizer)`` — the CPU-resident
        ``T5ForConditionalGeneration`` and its ``T5Tokenizer``.
    """
    model_name = 'kurry/t5_small_finetuned'
    model = T5ForConditionalGeneration.from_pretrained(model_name).to('cpu')
    tokenizer = T5Tokenizer.from_pretrained(model_name)
    return model, tokenizer


def generate_text(input_text):
    """Summarize *input_text* with the cached fine-tuned T5-small model.

    Args:
        input_text (str): Raw text to summarize. It is prefixed with the
            T5 task token ``"summarize: "`` before encoding.

    Returns:
        str: The decoded summary, with special tokens stripped.
    """
    model, tokenizer = _load_summarizer()
    # truncation=True caps the input at the model's max length so very
    # long texts don't raise instead of summarizing.
    inputs = tokenizer.encode("summarize: " + input_text, return_tensors="pt", truncation=True).to('cpu')
    # Inference only — disabling autograd avoids building a gradient
    # graph and reduces memory use; the generated tokens are identical.
    with torch.no_grad():
        outputs = model.generate(inputs)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
|
|
|
# Wire the summarizer into a minimal web UI: one free-text input box,
# one text output box. `launch()` starts the local Gradio server.
iface = gr.Interface(
    fn=generate_text,
    inputs='text',
    outputs='text',
)
iface.launch()
|
|