Update README.md
README.md CHANGED
@@ -171,46 +171,10 @@ generation_config = dict(
 )
 
 # single-round single-image conversation
-question = "请详细描述图片" # Please describe the picture in detail
+question = "Describe this image in detail" # Please describe the picture in detail
 response = model.chat(tokenizer, pixel_values, question, generation_config)
 print(question, response)
 
-# multi-round single-image conversation
-question = "请详细描述图片" # Please describe the picture in detail
-response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True)
-print(question, response)
-
-question = "请根据图片写一首诗" # Please write a poem according to the picture
-response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=history, return_history=True)
-print(question, response)
-
-# multi-round multi-image conversation
-pixel_values1 = load_image('./examples/image1.jpg', max_num=6).to(torch.bfloat16).cuda()
-pixel_values2 = load_image('./examples/image2.jpg', max_num=6).to(torch.bfloat16).cuda()
-pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
-
-question = "详细描述这两张图片" # Describe the two pictures in detail
-response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True)
-print(question, response)
-
-question = "这两张图片的相同点和区别分别是什么" # What are the similarities and differences between these two pictures
-response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=history, return_history=True)
-print(question, response)
-
-# batch inference (single image per sample)
-pixel_values1 = load_image('./examples/image1.jpg', max_num=6).to(torch.bfloat16).cuda()
-pixel_values2 = load_image('./examples/image2.jpg', max_num=6).to(torch.bfloat16).cuda()
-image_counts = [pixel_values1.size(0), pixel_values2.size(0)]
-pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0)
-
-questions = ["Describe the image in detail."] * len(image_counts)
-responses = model.batch_chat(tokenizer, pixel_values,
-                             image_counts=image_counts,
-                             questions=questions,
-                             generation_config=generation_config)
-for question, response in zip(questions, responses):
-    print(question)
-    print(response)
 ```
 
 ## Citation
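Note: the surviving single-round example in the hunk above depends on objects defined earlier in the README (`model`, `tokenizer`, `generation_config`, `pixel_values`, and the `load_image` helper). The sketch below is a minimal, hedged reconstruction of that setup; the checkpoint id, the `AutoModel`/`trust_remote_code` loading pattern, and the `generation_config` values are illustrative assumptions, not taken from this diff.

```python
# Minimal sketch of the setup assumed by the snippet above.
# Assumptions not taken from this diff: the checkpoint id, the
# AutoModel/trust_remote_code loading pattern, and the generation_config
# values. load_image is the README's own image-preprocessing helper defined
# earlier in the document (its max_num argument caps the number of tiles).
import torch
from transformers import AutoModel, AutoTokenizer

path = 'your-org/your-multimodal-chat-model'  # placeholder checkpoint id
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
).eval().cuda()
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)

generation_config = dict(max_new_tokens=1024, do_sample=False)  # illustrative values
pixel_values = load_image('./examples/image1.jpg', max_num=6).to(torch.bfloat16).cuda()

# single-round single-image conversation, as in the hunk above
question = "Describe this image in detail"
response = model.chat(tokenizer, pixel_values, question, generation_config)
print(question, response)
```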