Update README.md
README.md (changed)
```diff
@@ -38,7 +38,8 @@ import torch
 from PIL import Image
 import requests
 
-
+# TODO support fast tokenizer here
+processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-34b-hf", use_fast=False)
 
 model = LlavaNextForConditionalGeneration.from_pretrained("llava-hf/llava-v1.6-34b-hf", torch_dtype=torch.float16, low_cpu_mem_usage=True)
 model.to("cuda:0")
```
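For context, here is a minimal sketch of the patched README snippet used end to end: the slow processor (`use_fast=False`) and the fp16 model are loaded exactly as in the diff above, then run on a single image. The example image URL and the ChatML-style prompt are illustrative assumptions, not part of this commit.

```python
from transformers import LlavaNextProcessor, LlavaNextForConditionalGeneration
import torch
from PIL import Image
import requests

# TODO support fast tokenizer here
processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-34b-hf", use_fast=False)

model = LlavaNextForConditionalGeneration.from_pretrained(
    "llava-hf/llava-v1.6-34b-hf", torch_dtype=torch.float16, low_cpu_mem_usage=True
)
model.to("cuda:0")

# Example image and prompt (assumed for illustration, not from this commit).
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompt = (
    "<|im_start|>system\nAnswer the questions.<|im_end|>"
    "<|im_start|>user\n<image>\nWhat is shown in this image?<|im_end|>"
    "<|im_start|>assistant\n"
)

# Preprocess, generate, and decode.
inputs = processor(images=image, text=prompt, return_tensors="pt").to("cuda:0")
output = model.generate(**inputs, max_new_tokens=100)
print(processor.decode(output[0], skip_special_tokens=True))
```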