Spaces:
Sleeping
Sleeping
File size: 490 Bytes
bfadc34 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 |
import os, torch, accelerate
from langchain.llms import HuggingFacePipeline
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline, AutoModelForCausalLM
# Build a local LangChain LLM backed by google/flan-t5-large running
# through a transformers text2text-generation pipeline.
model_id = 'google/flan-t5-large'
tokenizer = AutoTokenizer.from_pretrained(model_id)
# 8-bit quantized load (needs bitsandbytes).
# NOTE(review): recent transformers versions also require
# device_map="auto" (via accelerate, imported above) when
# load_in_8bit=True — confirm against the installed version.
model = AutoModelForSeq2SeqLM.from_pretrained(model_id, load_in_8bit=True)
pipe = pipeline(
    "text2text-generation",
    model=model,
    tokenizer=tokenizer,
    max_length=512,  # cap on generated sequence length (tokens)
)
# BUG FIX: the keyword argument is `pipeline` (lowercase). The original
# `Pipeline=pipe` raises a pydantic ValidationError because
# HuggingFacePipeline has no field named `Pipeline`.
local_llm = HuggingFacePipeline(pipeline=pipe)
|