import spaces


@spaces.GPU
def get_unsloth():
    # Import unsloth inside a @spaces.GPU-decorated function so the
    # CUDA-dependent import runs on the ZeroGPU worker process.
    from unsloth import FastLanguageModel
    return FastLanguageModel


FastLanguageModel = get_unsloth()


class InferencePipeline:
    def __init__(self, conf, api_key):
        self.conf = conf
        self.token = api_key
        self.model, self.tokenizer = self.get_model()

    def get_model(self):
        # Load the (optionally 4-bit quantized) model and tokenizer
        # described by the config.
        model, tokenizer = FastLanguageModel.from_pretrained(
            model_name=self.conf["model"]["model_name"],
            max_seq_length=self.conf["model"]["max_seq_length"],
            dtype=self.conf["model"]["dtype"],
            load_in_4bit=self.conf["model"]["load_in_4bit"],
            token=self.token,
        )
        FastLanguageModel.for_inference(model)  # Enable native 2x faster inference
        return model, tokenizer

    def infer(self, prompt):
        inputs = self.tokenizer([prompt], return_tensors="pt").to("cuda")
        outputs = self.model.generate(
            **inputs,
            max_new_tokens=self.conf["model"]["max_new_tokens"],
            use_cache=True,
        )
        return self.tokenizer.batch_decode(outputs)
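

# Minimal usage sketch (added for illustration): the config keys mirror
# what InferencePipeline reads above; the model id, token, and parameter
# values are hypothetical placeholders, not from the original file.
if __name__ == "__main__":
    conf = {
        "model": {
            "model_name": "unsloth/llama-3-8b-bnb-4bit",  # hypothetical model id
            "max_seq_length": 2048,
            "dtype": None,            # None lets unsloth auto-detect bf16/fp16
            "load_in_4bit": True,
            "max_new_tokens": 256,
        }
    }
    pipeline = InferencePipeline(conf, api_key="hf_...")  # placeholder HF token
    print(pipeline.infer("Write a haiku about GPUs.")[0])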