ha1772007 committed on
Commit
f19ba3d
1 Parent(s): 46bb42e

Upload 3 files

Files changed (3)
  1. app.py +211 -0
  2. packages.txt +1 -0
  3. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,211 @@
+ import gradio as gr
+ import torch
+ import onnxruntime as ort
+ from PIL import Image
+ import requests
+ import numpy as np
+ from transformers import AutoTokenizer, AutoProcessor
+ import os
+
+ # Download the quantized ONNX model components
+ os.system('wget https://huggingface.co/llava-hf/llava-interleave-qwen-0.5b-hf/resolve/main/onnx/decoder_model_merged_q4f16.onnx')
+ os.system('wget https://huggingface.co/llava-hf/llava-interleave-qwen-0.5b-hf/resolve/main/onnx/embed_tokens_q4f16.onnx')
+ os.system('wget https://huggingface.co/llava-hf/llava-interleave-qwen-0.5b-hf/resolve/main/onnx/vision_encoder_q4f16.onnx')
+
+ # Load the tokenizer and processor
+ tokenizer = AutoTokenizer.from_pretrained("llava-hf/llava-interleave-qwen-0.5b-hf")
+ processor = AutoProcessor.from_pretrained("llava-hf/llava-interleave-qwen-0.5b-hf")
+
+ # Create ONNX Runtime sessions for the three exported graphs
+ vision_encoder_session = ort.InferenceSession("vision_encoder_q4f16.onnx")
+ decoder_session = ort.InferenceSession("decoder_model_merged_q4f16.onnx")
+ embed_tokens_session = ort.InferenceSession("embed_tokens_q4f16.onnx")
+
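+ # Splices the vision features into the token embedding sequence (a NumPy port
+ # of the merging logic in transformers' LLaVA implementation): every <image>
+ # placeholder token is expanded into `num_image_patches` positions that are
+ # filled with the image features.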
+ def merge_input_ids_with_image_features(image_features, inputs_embeds, input_ids, attention_mask, pad_token_id, special_image_token_id):
+     num_images, num_image_patches, embed_dim = image_features.shape
+     batch_size, sequence_length = input_ids.shape
+     left_padding = not np.sum(input_ids[:, -1] == pad_token_id)
+     # 1. Create a mask to know where special image tokens are
+     special_image_token_mask = input_ids == special_image_token_id
+     num_special_image_tokens = np.sum(special_image_token_mask, axis=-1)
+     # Compute the maximum embed dimension
+     max_embed_dim = (num_special_image_tokens.max() * (num_image_patches - 1)) + sequence_length
+     batch_indices, non_image_indices = np.where(input_ids != special_image_token_id)
+
+     # 2. Compute the positions where text should be written.
+     # Calculate new positions for text tokens in the merged image-text sequence.
+     # `special_image_token_mask` identifies image tokens; each image token will be
+     # expanded into `num_image_patches - 1` additional positions for its features.
+     # `np.cumsum` computes how each image token shifts subsequent text token positions.
+     # The final `- 1` adjusts for zero-based indexing, as `cumsum` inherently increases indices by one.
+     new_token_positions = np.cumsum((special_image_token_mask * (num_image_patches - 1) + 1), -1) - 1
+     nb_image_pad = max_embed_dim - 1 - new_token_positions[:, -1]
+     if left_padding:
+         new_token_positions += nb_image_pad[:, None]  # offset for left padding
+     text_to_overwrite = new_token_positions[batch_indices, non_image_indices]
+
+     # 3. Create the full embedding, already padded to the maximum position
+     final_embedding = np.zeros((batch_size, max_embed_dim, embed_dim), dtype=np.float32)
+     final_attention_mask = np.zeros((batch_size, max_embed_dim), dtype=np.int64)
+
+     # 4. Fill the embeddings based on the mask. If we have ["hey", "<image>", "how", "are"]
+     # we need to index copy on [0, 577, 578, 579] for the text and [1:576] for the image features
+     final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[batch_indices, non_image_indices]
+     final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[batch_indices, non_image_indices]
+     # 5. Fill the embeddings corresponding to the images. Anything that is not `text_to_overwrite` needs filling (#29835)
+     image_to_overwrite = np.full((batch_size, max_embed_dim), True)
+     image_to_overwrite[batch_indices, text_to_overwrite] = False
+     image_to_overwrite &= image_to_overwrite.cumsum(-1) - 1 >= nb_image_pad[:, None]
+
+     final_embedding[image_to_overwrite] = image_features.reshape(-1, embed_dim)
+     final_attention_mask = np.logical_or(final_attention_mask, image_to_overwrite).astype(final_attention_mask.dtype)
+     position_ids = final_attention_mask.cumsum(axis=-1) - 1
+     position_ids = np.where(final_attention_mask == 0, 1, position_ids)
+
+     # 6. Mask out the embeddings at padding positions, as we later use the past_key_values to determine the non-attended tokens.
+     batch_indices, pad_indices = np.where(input_ids == pad_token_id)
+     indices_to_mask = new_token_positions[batch_indices, pad_indices]
+     final_embedding[batch_indices, indices_to_mask] = 0
+
+     return final_embedding, final_attention_mask, position_ids
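+
+ # Worked example (hypothetical sizes): one <image> token in a 10-token prompt
+ # with num_image_patches = 729 gives max_embed_dim = 1*(729 - 1) + 10 = 738
+ # merged positions: 9 text slots plus 729 image-feature slots.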
+
+ def describe_image(image):
+     if image.mode != 'RGB':
+         image = image.convert('RGB')
+     conversation = [
+         {
+             "role": "system",
+             "content": "You are a helpful assistant who describes images."
+         },
+         {
+             "role": "user",
+             "content": [
+                 {"type": "text", "text": "Describe this image in about 200 words and explain each and every element in full detail"},
+                 {"type": "image"},
+             ],
+         },
+     ]
+
+     # Apply the chat template to build the prompt
+     prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
+
+     # Preprocess the image and text
+     inputs = processor(images=image, text=prompt, return_tensors="np")
+
+     # Encode the image with the ONNX vision encoder
+     vision_input_name = vision_encoder_session.get_inputs()[0].name
+     vision_output_name = vision_encoder_session.get_outputs()[0].name
+     vision_features = vision_encoder_session.run([vision_output_name], {vision_input_name: inputs["pixel_values"]})[0]
+
+     # Tokens for the prompt
+     input_ids, attention_mask = inputs["input_ids"], inputs["attention_mask"]
+
+     # Prepare decoder inputs: dimensions of the 0.5B decoder
+     sequence_length = input_ids.shape[1]
+     batch_size = 1
+     num_layers = 24
+     head_dim = 64
+     num_heads = 16
+     pad_token_id = tokenizer.pad_token_id
+     past_sequence_length = 0  # Set to 0 for the initial pass
+     special_image_token_id = 151646  # token ID of the <image> placeholder
+
+     # Position IDs
+     position_ids = np.arange(sequence_length, dtype=np.int64).reshape(1, -1)
+
+     # Past Key Values
+     past_key_values = {
+         f"past_key_values.{i}.key": np.zeros((batch_size, num_heads, past_sequence_length, head_dim), dtype=np.float32)
+         for i in range(num_layers)
+     }
+     past_key_values.update({
+         f"past_key_values.{i}.value": np.zeros((batch_size, num_heads, past_sequence_length, head_dim), dtype=np.float32)
+         for i in range(num_layers)
+     })
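+
+     # The merged decoder graph expects past_key_values.{i}.key/value inputs
+     # even on the first pass, hence the zero-length tensors above.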
+
+     # Embed the prompt tokens
+     embed_input_name = embed_tokens_session.get_inputs()[0].name
+     embed_output_name = embed_tokens_session.get_outputs()[0].name
+     token_embeddings = embed_tokens_session.run([embed_output_name], {embed_input_name: input_ids})[0]
+
+     # Combine token embeddings and vision features
+     combined_embeddings, attention_mask, position_ids = merge_input_ids_with_image_features(vision_features, token_embeddings, input_ids, attention_mask, pad_token_id, special_image_token_id)
+     combined_len = combined_embeddings.shape[1]
+
+     # Combine all decoder inputs
+     decoder_inputs = {
+         "attention_mask": attention_mask,
+         "position_ids": position_ids,
+         "inputs_embeds": combined_embeddings,
+         **past_key_values
+     }
+
+     # Print input shapes
+     for name, value in decoder_inputs.items():
+         print(f"{name} shape: {value.shape} dtype {value.dtype}")
+
+     # Run the decoder for the initial (prefill) pass
+     decoder_input_names = [inp.name for inp in decoder_session.get_inputs()]
+     names = [out.name for out in decoder_session.get_outputs()]
+     outputs = decoder_session.run(names, {name: decoder_inputs[name] for name in decoder_input_names if name in decoder_inputs})
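+
+     # outputs[0] holds the logits; outputs[1:] are the "present" key/value
+     # tensors that seed the KV cache for the generation loop below.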
+
+     # Autoregressive generation loop: greedy (argmax) decoding
+     generated_tokens = []
+     eos_token_id = tokenizer.eos_token_id
+     max_new_tokens = 2048
+
+     for i in range(max_new_tokens):
+         logits = outputs[0]
+         logits_next_token = logits[:, -1]
+         token_id = np.argmax(logits_next_token)
+
+         if token_id == eos_token_id:
+             break
+
+         generated_tokens.append(token_id)
+
+         # Prepare inputs for the next generation step
+         new_input_embeds = embed_tokens_session.run([embed_output_name], {embed_input_name: np.array([[token_id]])})[0]
+
+         # Reuse the "present" outputs as the next step's past key values
+         past_key_values = {name.replace("present", "past_key_values"): value for name, value in zip(names[1:], outputs[1:])}
+
+         attention_mask = np.ones((1, combined_len + i + 1), dtype=np.int64)
+         position_ids = np.arange(combined_len + i + 1, dtype=np.int64).reshape(1, -1)[:, -1:]
+
+         decoder_inputs = {
+             "attention_mask": attention_mask,
+             "position_ids": position_ids,
+             "inputs_embeds": new_input_embeds,
+             **past_key_values
+         }
+
+         outputs = decoder_session.run(names, {name: decoder_inputs[name] for name in decoder_input_names if name in decoder_inputs})
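+
+     # Generation stops at EOS or after max_new_tokens new tokens.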
+
+     # Convert to a list of plain integers
+     token_ids = [int(token) for token in generated_tokens]
+     print(f"Generated token IDs: {token_ids}")
+
+     # Decode tokens one by one (useful for debugging)
+     decoded_tokens = [tokenizer.decode([token]) for token in token_ids]
+     print(f"Decoded tokens: {decoded_tokens}")
+
+     # Full decoded output
+     decoded_output = tokenizer.decode(token_ids, skip_special_tokens=True)
+     return decoded_output
+
+ # Create the Gradio interface
+ interface = gr.Interface(
+     fn=describe_image,
+     inputs=gr.Image(type="pil"),
+     outputs=gr.Textbox(lines=5, placeholder="Description will appear here"),
+     title="Image Description Generator",
+     description="Upload an image to get a detailed description."
+ )
+
+ # Launch with a public share link and verbose errors
+ interface.launch(share=True, show_error=True, debug=True)
packages.txt ADDED
@@ -0,0 +1 @@
+ wget
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ onnxruntime
+ onnx
+ gradio
+ Pillow
+ torch
+ transformers
+ numpy