Fuxiao committed on
Commit ffd81a9
1 Parent(s): b9384fe

Update README.md

Files changed (1)
  1. README.md +58 -0
README.md CHANGED
@@ -61,6 +61,64 @@ https://github.com/NVlabs/Eagle/issues
 
 **Output Format:** String
 
+ ## Inference:
+ ```
+ import os
+ import torch
+ import numpy as np
+ from eagle import conversation as conversation_lib
+ from eagle.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+ from eagle.conversation import conv_templates, SeparatorStyle
+ from eagle.model.builder import load_pretrained_model
+ from eagle.utils import disable_torch_init
+ from eagle.mm_utils import tokenizer_image_token, get_model_name_from_path, process_images, KeywordsStoppingCriteria
+ from PIL import Image
+ import argparse
+ from transformers import TextIteratorStreamer
+ from threading import Thread
+
+ model_path = "NVEagle/Eagle-X5-13B-Chat"
+ conv_mode = "vicuna_v1"
+ image_path = "assets/georgia-tech.jpeg"
+ input_prompt = "Describe this image."
+
+ # Load the tokenizer, model, and image processor from the checkpoint.
+ model_name = get_model_name_from_path(model_path)
+ tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, None, model_name, False, False)
+
+ # Prepend the image token(s) to the text prompt.
+ if model.config.mm_use_im_start_end:
+     input_prompt = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + input_prompt
+ else:
+     input_prompt = DEFAULT_IMAGE_TOKEN + '\n' + input_prompt
+
+ # Build the full conversation prompt from the chosen template.
+ conv = conv_templates[conv_mode].copy()
+ conv.append_message(conv.roles[0], input_prompt)
+ conv.append_message(conv.roles[1], None)
+ prompt = conv.get_prompt()
+
+ # Preprocess the image and tokenize the prompt.
+ image = Image.open(image_path).convert('RGB')
+ image_tensor = process_images([image], image_processor, model.config)[0]
+ input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt')
+
+ input_ids = input_ids.to(device='cuda', non_blocking=True)
+ image_tensor = image_tensor.to(dtype=torch.float16, device='cuda', non_blocking=True)
+
+ # Generate and decode the response.
+ with torch.inference_mode():
+     output_ids = model.generate(
+         input_ids.unsqueeze(0),
+         images=image_tensor.unsqueeze(0),
+         image_sizes=[image.size],
+         do_sample=True,
+         temperature=0.2,
+         top_p=0.5,
+         num_beams=1,
+         max_new_tokens=256,
+         use_cache=True)
+
+ outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
+ print(f"Image:{image_path} \nPrompt:{input_prompt} \nOutput:{outputs}")
+ ```
+
+
 **[Preferred/Supported] Operating System(s):** <br>
 Linux
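
Note: the added snippet imports `TextIteratorStreamer` and `Thread` but never uses them. If streaming output is wanted, a minimal sketch along the following lines should work, assuming Eagle's `model.generate` forwards the standard Hugging Face `streamer` keyword argument (not verified against this checkpoint); `tokenizer`, `model`, `input_ids`, `image_tensor`, and `image` are the objects already created above.

```
from threading import Thread
from transformers import TextIteratorStreamer

# Stream decoded text as it is produced instead of waiting for the full output.
# Assumption: Eagle's generate() accepts the standard Hugging Face `streamer` kwarg.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
generation_kwargs = dict(
    inputs=input_ids.unsqueeze(0),
    images=image_tensor.unsqueeze(0),
    image_sizes=[image.size],
    do_sample=True,
    temperature=0.2,
    top_p=0.5,
    max_new_tokens=256,
    use_cache=True,
    streamer=streamer,
)

# Run generation in a background thread so the main thread can consume the stream.
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()
for chunk in streamer:
    print(chunk, end="", flush=True)
thread.join()
```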