import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the T5-based paraphraser and cache the weights locally under ./Models.
tokenizer = AutoTokenizer.from_pretrained('humarin/chatgpt_paraphraser_on_T5_base', cache_dir='./Models')
model = AutoModelForSeq2SeqLM.from_pretrained('humarin/chatgpt_paraphraser_on_T5_base', cache_dir='./Models')


def paraphrase(
    text,
    num_beams=5,
    num_beam_groups=5,
    num_return_sequences=5,
    repetition_penalty=10.0,
    diversity_penalty=3.0,
    no_repeat_ngram_size=2,
    temperature=0.7,
    max_length=128,
):
    """Return a list of paraphrases of `text` using diverse beam search.

    Note: `num_beams` must be divisible by `num_beam_groups`, and
    `num_return_sequences` must not exceed `num_beams`.
    """
    # T5 expects a task prefix; this checkpoint was trained with 'paraphrase:'.
    input_ids = tokenizer(
        f'paraphrase: {text}',
        return_tensors='pt',
        padding='longest',
        max_length=max_length,
        truncation=True,
    ).input_ids

    outputs = model.generate(
        input_ids,
        temperature=temperature,  # only affects sampling; ignored by pure beam search
        repetition_penalty=repetition_penalty,
        num_return_sequences=num_return_sequences,
        no_repeat_ngram_size=no_repeat_ngram_size,
        num_beams=num_beams,
        num_beam_groups=num_beam_groups,
        max_length=max_length,
        diversity_penalty=diversity_penalty,
    )

    return tokenizer.batch_decode(outputs, skip_special_tokens=True)


def fn(
    text,
    num_beams=5,
    num_beam_groups=5,
    num_return_sequences=5,
    repetition_penalty=10.0,
    diversity_penalty=3.0,
    no_repeat_ngram_size=2,
    temperature=0.7,
    max_length=128,
):
    # Gradio's 'text' output expects a single string, so join the candidates.
    return '\n'.join(paraphrase(text, num_beams, num_beam_groups,
                                num_return_sequences, repetition_penalty,
                                diversity_penalty, no_repeat_ngram_size,
                                temperature, max_length))


demo = gr.Interface(
    fn=fn,
    inputs=[
        gr.Textbox(lines=3, placeholder='Enter Text To Paraphrase', label='Text'),
        gr.Slider(minimum=1, maximum=25, step=1, value=5, label='num_beams'),
        gr.Slider(minimum=1, maximum=25, step=1, value=5, label='num_beam_groups'),
        gr.Slider(minimum=1, maximum=20, step=1, value=5, label='num_return_sequences'),
        gr.Slider(minimum=0.6, maximum=20.1, step=0.5, value=10.1, label='repetition_penalty'),
        gr.Slider(minimum=0.6, maximum=20.1, step=0.5, value=3.1, label='diversity_penalty'),
        gr.Slider(minimum=1, maximum=10, step=1, value=2, label='no_repeat_ngram_size'),
        gr.Slider(minimum=0.1, maximum=2.0, step=0.1, value=0.7, label='temperature'),
        gr.Slider(minimum=32, maximum=512, step=1, value=128, label='max_length'),
    ],
    outputs=['text'],
)

demo.launch()
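
# ---------------------------------------------------------------------------
# Minimal usage sketch (an assumption, not part of the original app): to
# smoke-test the model from a REPL without launching the web UI, skip
# demo.launch() and call `paraphrase` directly. The input sentence and the
# parameter values below are illustrative; they are chosen to satisfy the
# diverse-beam-search constraints (num_beams divisible by num_beam_groups,
# num_return_sequences <= num_beams).
#
#     candidates = paraphrase(
#         'The quick brown fox jumps over the lazy dog.',
#         num_beams=10,
#         num_beam_groups=5,
#         num_return_sequences=5,
#     )
#     for i, candidate in enumerate(candidates, start=1):
#         print(f'{i}. {candidate}')
# ---------------------------------------------------------------------------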