aiqtech committed on
Commit f6a98e4 · verified · 1 Parent(s): e077d4f

Update app.py

Files changed (1):
  1. app.py +226 -101
app.py CHANGED
@@ -1,121 +1,246 @@
 
 import os
-from transformers import MllamaForConditionalGeneration, AutoProcessor, TextIteratorStreamer
-from PIL import Image
-import requests
 import torch
-from threading import Thread
 import gradio as gr
-from gradio import FileData
-import time
-import spaces
 
-# Get the Hugging Face token from an environment variable
-hf_token = os.getenv("HF_TOKEN")
-if not hf_token:
-    raise ValueError("HF_TOKEN environment variable not found")
 
-ckpt = "meta-llama/Llama-3.2-11B-Vision-Instruct"
-# Load the model and processor with the token
-model = MllamaForConditionalGeneration.from_pretrained(
-    ckpt,
     torch_dtype=torch.bfloat16,
-    token=hf_token  # pass the token
-).to("cuda")
-
-processor = AutoProcessor.from_pretrained(
-    ckpt,
-    token=hf_token  # pass the token
-)
-
-
-@spaces.GPU
-def bot_streaming(message, history, max_new_tokens=250):
-
-    txt = message["text"]
-    ext_buffer = f"{txt}"
-
-    messages = []
-    images = []
-
-    for i, msg in enumerate(history):
-        if isinstance(msg[0], tuple):
-            messages.append({"role": "user", "content": [{"type": "text", "text": history[i+1][0]}, {"type": "image"}]})
-            messages.append({"role": "assistant", "content": [{"type": "text", "text": history[i+1][1]}]})
-            images.append(Image.open(msg[0][0]).convert("RGB"))
-        elif isinstance(history[i-1], tuple) and isinstance(msg[0], str):
-            # messages are already handled
-            pass
-        elif isinstance(history[i-1][0], str) and isinstance(msg[0], str):  # text-only turn
-            messages.append({"role": "user", "content": [{"type": "text", "text": msg[0]}]})
-            messages.append({"role": "assistant", "content": [{"type": "text", "text": msg[1]}]})
-
-    # add the current message
-    if len(message["files"]) == 1:
-        if isinstance(message["files"][0], str):  # examples
-            image = Image.open(message["files"][0]).convert("RGB")
-        else:  # regular input
-            image = Image.open(message["files"][0]["path"]).convert("RGB")
-        images.append(image)
-        messages.append({"role": "user", "content": [{"type": "text", "text": txt}, {"type": "image"}]})
-    else:
-        messages.append({"role": "user", "content": [{"type": "text", "text": txt}]})
-
-    texts = processor.apply_chat_template(messages, add_generation_prompt=True)
-
-    if images == []:
-        inputs = processor(text=texts, return_tensors="pt").to("cuda")
-    else:
-        inputs = processor(text=texts, images=images, return_tensors="pt").to("cuda")
-    streamer = TextIteratorStreamer(processor, skip_special_tokens=True, skip_prompt=True)
-
-    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=max_new_tokens)
-    generated_text = ""
-
-    thread = Thread(target=model.generate, kwargs=generation_kwargs)
-    thread.start()
     buffer = ""
-
     for new_text in streamer:
         buffer += new_text
-        generated_text_without_prompt = buffer
-        time.sleep(0.01)
         yield buffer
 
 css = """
 footer {
     visibility: hidden;
 }
 """
 
-demo = gr.ChatInterface(theme="Yntec/HaleyCH_Theme_Orange", css=css, fn=bot_streaming, title="Multimodal Llama",
-    examples=[
-        [{"text": "Which era does this piece belong to? Give details about the era.", "files": ["./examples/rococo.jpg"]}, 200],
-        [{"text": "Where do the droughts happen according to this diagram?", "files": ["./examples/weather_events.png"]}, 250],
-        [{"text": "What happens when you take out white cat from this chain?", "files": ["./examples/ai2d_test.jpg"]}, 250],
-        [{"text": "How long does it take from invoice date to due date? Be short and concise.", "files": ["./examples/invoice.png"]}, 250],
-        [{"text": "Where to find this monument? Can you give me other recommendations around the area?", "files": ["./examples/wat_arun.jpg"]}, 250],
-    ],
-    textbox=gr.MultimodalTextbox(),
-    additional_inputs=[gr.Slider(
-        minimum=10,
-        maximum=500,
-        value=250,
-        step=10,
-        label="Maximum number of new tokens to generate",
-    )],
-    cache_examples=False,
-    description="Try Multimodal Llama by Meta with transformers in this demo. Upload an image and start chatting about it, or simply try one of the examples below. To learn more about Llama Vision, visit [our blog post](https://huggingface.co/blog/llama32).",
-    stop_btn="Stop Generation",
-    fill_height=True,
-    multimodal=True)
-
-demo.launch(debug=True)
+import spaces
 import os
+import time
 import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, BitsAndBytesConfig, AutoProcessor
 import gradio as gr
+from threading import Thread
+from PIL import Image
+import subprocess
+
+# Install flash-attn if it is not already available
+subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
+
+# Model and tokenizer for the chatbot tab
+MODEL_ID1 = "microsoft/Phi-3.5-mini-instruct"
+MODEL_LIST1 = ["microsoft/Phi-3.5-mini-instruct"]
+HF_TOKEN = os.environ.get("HF_TOKEN", None)
+
+device = "cuda" if torch.cuda.is_available() else "cpu"  # GPU if available; a GPU is effectively required
 
+# 4-bit NF4 quantization config for the chat model
+quantization_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_compute_dtype=torch.bfloat16,
+    bnb_4bit_use_double_quant=True,
+    bnb_4bit_quant_type="nf4")
 
+tokenizer = AutoTokenizer.from_pretrained(MODEL_ID1)
+model = AutoModelForCausalLM.from_pretrained(
+    MODEL_ID1,
     torch_dtype=torch.bfloat16,
+    device_map="auto",
+    quantization_config=quantization_config)
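
After this block, a quick sanity check can confirm that the 4-bit load succeeded; the snippet below is a minimal illustrative sketch (not part of the commit), assuming a CUDA environment with bitsandbytes available:

# Illustrative sanity check (assumed environment: CUDA GPU with bitsandbytes installed).
print(f"Chat model footprint: {model.get_memory_footprint() / 1e9:.2f} GB")  # a few GB in 4-bit
print(tokenizer.apply_chat_template(
    [{"role": "user", "content": "ping"}],
    tokenize=False,
    add_generation_prompt=True,
))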
 
+# Chatbot tab function
+@spaces.GPU()
+def stream_chat(
+    message: str,
+    history: list,
+    system_prompt: str,
+    temperature: float = 0.8,
+    max_new_tokens: int = 1024,
+    top_p: float = 1.0,
+    top_k: int = 20,
+    penalty: float = 1.2,
+):
+    print(f'message: {message}')
+    print(f'history: {history}')
+
+    # Rebuild the full conversation: system prompt, prior turns, then the new message
+    conversation = [
+        {"role": "system", "content": system_prompt}
+    ]
+    for prompt, answer in history:
+        conversation.extend([
+            {"role": "user", "content": prompt},
+            {"role": "assistant", "content": answer},
+        ])
+
+    conversation.append({"role": "user", "content": message})
+
+    input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt").to(model.device)
+
+    streamer = TextIteratorStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
+
+    generate_kwargs = dict(
+        input_ids=input_ids,
+        max_new_tokens=max_new_tokens,
+        do_sample=False if temperature == 0 else True,
+        top_p=top_p,
+        top_k=top_k,
+        temperature=temperature,
+        repetition_penalty=penalty,
+        eos_token_id=tokenizer.eos_token_id,  # use the Phi-3.5 tokenizer's EOS id
+        streamer=streamer,
+    )
+
+    # Generate on a background thread so tokens can be streamed as they arrive
+    with torch.no_grad():
+        thread = Thread(target=model.generate, kwargs=generate_kwargs)
+        thread.start()
 
     buffer = ""
     for new_text in streamer:
         buffer += new_text
         yield buffer
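
Since stream_chat is an ordinary Python generator, it can also be exercised outside the Gradio UI; a minimal sketch, assuming the [user, assistant] history pairs that gr.ChatInterface supplies:

# Illustrative driver for stream_chat; the history format is assumed to be [user, assistant] pairs.
history = [["Hi!", "Hello! How can I help you today?"]]
for partial in stream_chat("Summarize our conversation so far.", history, "You are a helpful assistant"):
    print(partial)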
 
+# Vision model setup
+models = {
+    "microsoft/Phi-3.5-vision-instruct": AutoModelForCausalLM.from_pretrained("microsoft/Phi-3.5-vision-instruct", trust_remote_code=True, torch_dtype="auto", _attn_implementation="flash_attention_2").cuda().eval()
+}
+
+processors = {
+    "microsoft/Phi-3.5-vision-instruct": AutoProcessor.from_pretrained("microsoft/Phi-3.5-vision-instruct", trust_remote_code=True)
+}
+
+user_prompt = '\n'
+assistant_prompt = '\n'
+prompt_suffix = "\n"
+
+# Vision model tab function
+@spaces.GPU()
+def stream_vision(image, text_input=None, model_id="microsoft/Phi-3.5-vision-instruct"):
+    model = models[model_id]
+    processor = processors[model_id]
+
+    # Prepare the image list and the corresponding image tag
+    images = [Image.fromarray(image).convert("RGB")]
+    placeholder = "<|image_1|>\n"  # image tag expected by the Phi-3.5-vision chat template
+
+    # Build the prompt from the image tag and the user's question
+    if text_input:
+        prompt_content = placeholder + text_input
+    else:
+        prompt_content = placeholder
+
+    messages = [
+        {"role": "user", "content": prompt_content},
+    ]
+
+    # Apply the chat template to the messages
+    prompt = processor.tokenizer.apply_chat_template(
+        messages,
+        tokenize=False,
+        add_generation_prompt=True
+    )
+
+    # Tokenize the prompt together with the image
+    inputs = processor(prompt, images, return_tensors="pt").to("cuda:0")
+
+    # Generation parameters (greedy decoding)
+    generation_args = {
+        "max_new_tokens": 1000,
+        "temperature": 0.0,
+        "do_sample": False,
+    }
+
+    # Generate the response
+    generate_ids = model.generate(
+        **inputs,
+        eos_token_id=processor.tokenizer.eos_token_id,
+        **generation_args
+    )
+
+    # Remove the prompt tokens from the generated sequence
+    generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
+
+    # Decode the generated output
+    response = processor.batch_decode(
+        generate_ids,
+        skip_special_tokens=True,
+        clean_up_tokenization_spaces=False
+    )[0]
+
+    return response
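
stream_vision expects the numpy array that gr.Image hands to its callback by default; a minimal sketch of a direct call, with a hypothetical local image path:

# Illustrative call to stream_vision; "example.jpg" is a hypothetical path.
import numpy as np
img = np.array(Image.open("example.jpg").convert("RGB"))
print(stream_vision(img, "Describe this image in one sentence."))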
+
 css = """
 footer {
     visibility: hidden;
 }
 """
 
+# Gradio app with two tabs
+with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
+
+    with gr.Tab("Chatbot"):
+        chatbot = gr.Chatbot(height=600)
+        gr.ChatInterface(
+            fn=stream_chat,
+            chatbot=chatbot,
+            fill_height=True,
+            additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
+            additional_inputs=[
+                gr.Textbox(
+                    value="You are a helpful assistant",
+                    label="System Prompt",
+                    render=False,
+                ),
+                gr.Slider(
+                    minimum=0,
+                    maximum=1,
+                    step=0.1,
+                    value=0.8,
+                    label="Temperature",
+                    render=False,
+                ),
+                gr.Slider(
+                    minimum=128,
+                    maximum=8192,
+                    step=1,
+                    value=1024,
+                    label="Max new tokens",
+                    render=False,
+                ),
+                gr.Slider(
+                    minimum=0.0,
+                    maximum=1.0,
+                    step=0.1,
+                    value=1.0,
+                    label="top_p",
+                    render=False,
+                ),
+                gr.Slider(
+                    minimum=1,
+                    maximum=20,
+                    step=1,
+                    value=20,
+                    label="top_k",
+                    render=False,
+                ),
+                gr.Slider(
+                    minimum=0.0,
+                    maximum=2.0,
+                    step=0.1,
+                    value=1.2,
+                    label="Repetition penalty",
+                    render=False,
+                ),
+            ],
+            examples=[
+                ["How to make a self-driving car?"],
+                ["Give me a creative idea to establish a startup"],
+                ["How can I improve my programming skills?"],
+                ["Show me a code snippet of a website's sticky header in CSS and JavaScript."],
+            ],
+            cache_examples=False,
+        )
+    with gr.Tab("Vision"):
+        with gr.Row():
+            input_img = gr.Image(label="Input Picture")
+        with gr.Row():
+            model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="microsoft/Phi-3.5-vision-instruct")
+        with gr.Row():
+            text_input = gr.Textbox(label="Question")
+        with gr.Row():
+            submit_btn = gr.Button(value="Submit")
+        with gr.Row():
+            output_text = gr.Textbox(label="Output Text")
+
+        submit_btn.click(stream_vision, [input_img, text_input, model_selector], [output_text])
+
+# Launch the combined app
+demo.launch(debug=True)