# gen_1 / app.py
# (Hugging Face Space by shuheyhey — "Update app.py", commit b1744ca)
# # Model preparation (one-time environment setup):
# python -m pip install transformers accelerate bitsandbytes
# python -m pip install sentencepiece
import torch  # required: torch.float16 is referenced below
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, set_seed

# float16 is optional (the model also loads without it) but halves memory use.
model = AutoModelForCausalLM.from_pretrained(
    "line-corporation/japanese-large-lm-3.6b",
    torch_dtype=torch.float16,
)
# use_fast=False must be kept: it works without it, but the fast tokenizer
# differs from the one used during this model's training, degrading quality.
tokenizer = AutoTokenizer.from_pretrained(
    "line-corporation/japanese-large-lm-3.6b",
    use_fast=False,
)
# device=0 pins the pipeline to the first CUDA GPU — assumes one is available
# in the deployment environment; TODO confirm.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0)
set_seed(101)  # fixed seed so the demo's sampling is reproducible
# demo
import gradio as gr
def gen(prompt):
    """Generate a short Japanese continuation of *prompt*.

    Called by the Gradio interface below (positionally, so the parameter
    rename from the builtin-shadowing name ``input`` is safe for it).

    Returns:
        str: the generated text (prompt included), extracted from the
        pipeline output so the Gradio text box shows clean text instead
        of the repr of a list of dicts.
    """
    outputs = generator(
        prompt,
        max_length=30,  # total length in tokens, prompt included
        do_sample=True,  # sample rather than greedy-decode
        pad_token_id=tokenizer.pad_token_id,
        num_return_sequences=1,
    )
    # The text-generation pipeline returns [{"generated_text": ...}].
    return outputs[0]["generated_text"]
# Wire the generator into a minimal web UI: one text input, one text output.
demo = gr.Interface(fn=gen, inputs="text", outputs="text")

if __name__ == "__main__":
    # Guard the launch so importing this module doesn't start a server.
    # share=True requests a public tunnel link (ignored on HF Spaces).
    demo.launch(share=True)