RaushanTurganbay committed
Commit dd2d34b
1 Parent(s): 1b04246
Files changed (1):
  1. README.md +3 -2
README.md CHANGED
@@ -40,12 +40,13 @@ Below we used [`"llava-hf/llava-interleave-qwen-0.5b-hf"`](https://huggingface.c
 
 
 ```python
-from transformers import pipeline
+from transformers import pipeline, AutoProcessor
 from PIL import Image
 import requests
 
 model_id = "llava-hf/llava-interleave-qwen-0.5b-hf"
 pipe = pipeline("image-to-text", model=model_id)
+processor = AutoProcessor.from_pretrained(model_id)
 
 url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg"
 image = Image.open(requests.get(url, stream=True).raw)
@@ -88,7 +89,7 @@ model = LlavaForConditionalGeneration.from_pretrained(
 
 processor = AutoProcessor.from_pretrained(model_id)
 
-# Define a chat histiry and use `apply_chat_template` to get correctly formatted prompt
+# Define a chat history and use `apply_chat_template` to get correctly formatted prompt
 # Each value in "content" has to be a list of dicts with types ("text", "image")
 conversation = [
     {
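The `processor` added to the pipeline snippet is meant to build a chat-formatted prompt via `apply_chat_template`, as the comment in the second hunk describes. A minimal sketch of how the two pieces could fit together, assuming a generic question and generation settings that are not taken from this commit:

```python
# Sketch only: the question text and max_new_tokens are illustrative assumptions.
conversation = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "What is shown in this diagram?"},
        ],
    },
]

# `apply_chat_template` wraps the chat history in the prompt format the model expects.
prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)

# The image-to-text pipeline accepts the formatted prompt alongside the image.
outputs = pipe(image, prompt=prompt, generate_kwargs={"max_new_tokens": 200})
print(outputs[0]["generated_text"])
```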