Spaces: Running on Zero

update demo code #10
by YifeiXin - opened

app.py CHANGED
@@ -22,8 +22,6 @@ title_markdown = ("""
         <h5 style="margin: 0;">If this demo please you, please give us a star ⭐ on Github or ❤️ on this space.</h5>
     </div>
 </div>
-
-
 <div align="center">
     <div style="display:flex; gap: 0.25rem; margin-top: 10px;" align="center">
         <a href="https://github.com/DAMO-NLP-SG/VideoLLaMA2"><img src='https://img.shields.io/badge/Github-VideoLLaMA2-9C276A'></a>
@@ -97,7 +95,7 @@ class Chat:
 
 
 @spaces.GPU(duration=120)
-def generate(video, audio, message, chatbot, va_tag, textbox_in, temperature, top_p, max_output_tokens, dtype=torch.float16):
+def generate(video, av, audio, message, chatbot, va_tag, textbox_in, temperature, top_p, max_output_tokens, dtype=torch.float16):
     data = []
     image = None
 
@@ -113,6 +111,14 @@ def generate(video, audio, message, chatbot, va_tag, textbox_in, temperature, top_p, max_output_tokens, dtype=torch.float16):
         else:
             video_audio = video_audio.to(handler.model.device, dtype=dtype)
         data.append((video_audio, '<video>'))
+    elif av is not None:
+        video_audio = processor['video'](av, va=va_tag=="Audio Vision")
+        if va_tag=="Audio Vision":
+            for k,v in video_audio.items():
+                video_audio[k] = v.to(handler.model.device, dtype=dtype)
+        else:
+            video_audio = video_audio.to(handler.model.device, dtype=dtype)
+        data.append((video_audio, '<video>'))
     elif audio is not None:
         data.append((processor['audio'](audio).to(handler.model.device, dtype=dtype), '<audio>'))
     elif image is None and video is None:
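The new `elif av is not None:` branch mirrors the existing video path: when `va_tag == "Audio Vision"`, `processor['video']` presumably returns a dict of tensors (visual frames plus audio features) that must be moved to the model device entry by entry; otherwise it returns a single tensor. A minimal sketch of that two-shape handling, using a hypothetical stand-in processor (the real VideoLLaMA2 processor is not reproduced here):

```python
import torch

# Hypothetical stand-in for processor['video']: assumed to return a dict of
# tensors in audio-visual mode and a single tensor otherwise (inferred from
# the branch above, not taken from the actual VideoLLaMA2 code).
def fake_video_processor(path, va=False):
    frames = torch.randn(8, 3, 336, 336)  # dummy video frames
    if va:
        return {"video": frames, "audio": torch.randn(1, 16000)}
    return frames

device, dtype = "cpu", torch.float16

video_audio = fake_video_processor("clip.mp4", va=True)
if isinstance(video_audio, dict):
    # dict case: move every tensor in the bundle to the target device/dtype
    for k, v in video_audio.items():
        video_audio[k] = v.to(device, dtype=dtype)
else:
    # tensor case: a single .to() call suffices
    video_audio = video_audio.to(device, dtype=dtype)
```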
@@ -128,7 +134,7 @@ def generate(video, audio, message, chatbot, va_tag, textbox_in, temperature, top_p, max_output_tokens, dtype=torch.float16):
     show_images = ""
     if image is not None:
         show_images += f'<img src="./file={image}" style="display: inline-block;width: 250px;max-height: 400px;">'
-    if video is not None:
+    if video is not None or av is not None:
         show_images += f'<video controls playsinline width="500" style="display: inline-block;" src="./file={video}"></video>'
     if audio is not None:
         show_images += f'<audio controls style="display: inline-block;" src="./file={audio}"></audio>'
@@ -142,6 +148,7 @@ def generate(video, audio, message, chatbot, va_tag, textbox_in, temperature, top_p, max_output_tokens, dtype=torch.float16):
     else:
         previous_image = re.findall(r'<img src="./file=(.+?)"', chatbot[0][0])
         previous_video = re.findall(r'<video controls playsinline width="500" style="display: inline-block;" src="./file=(.+?)"', chatbot[0][0])
+        previous_av = re.findall(r'<video controls playsinline width="500" style="display: inline-block;" src="./file=(.+?)"', chatbot[0][0])
         previous_audio = re.findall(r'<audio controls style="display: inline-block;" src="./file=(.+?)"', chatbot[0][0])
         if len(previous_image) > 0:
             previous_image = previous_image[0]
@@ -155,6 +162,12 @@ def generate(video, audio, message, chatbot, va_tag, textbox_in, temperature, top_p, max_output_tokens, dtype=torch.float16):
             if video is not None and os.path.basename(previous_video) != os.path.basename(video):
                 message.clear()
                 one_turn_chat[0] += "\n" + show_images
+        elif len(previous_av) > 0:
+            previous_av = previous_av[0]
+            # 2.2 new video append or pure text input will start a new conversation
+            if av is not None and os.path.basename(previous_av) != os.path.basename(av):
+                message.clear()
+                one_turn_chat[0] += "\n" + show_images
         elif len(previous_audio) > 0:
             previous_audio = previous_audio[0]
             # 2.3 new audio append or pure text input will start a new conversation
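This branch reuses the demo's existing trick for detecting a media switch: the previously shown file is recovered from the rendered chat HTML with `re.findall`, and a basename mismatch against the new upload resets the conversation. Note that `previous_av` is extracted with the same `<video ...>` pattern as `previous_video`. A self-contained illustration with dummy values (not real app state):

```python
import os
import re

# The demo embeds uploaded media into the chat history as HTML, then
# recovers the path with the same regex used in the diff above.
chat_html = '<video controls playsinline width="500" style="display: inline-block;" src="./file=/tmp/00000368.mp4"></video>'
previous_av = re.findall(r'<video controls playsinline width="500" style="display: inline-block;" src="./file=(.+?)"', chat_html)

new_upload = "/tmp/00003491.mp4"
if previous_av and os.path.basename(previous_av[0]) != os.path.basename(new_upload):
    print("new file detected -> reset conversation")
```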
@@ -183,7 +196,7 @@ def generate(video, audio, message, chatbot, va_tag, textbox_in, temperature, top_p, max_output_tokens, dtype=torch.float16):
     one_turn_chat[1] = text_en_out
     chatbot.append(one_turn_chat)
 
-    return gr.update(value=video, interactive=True), gr.update(value=audio, interactive=True), message, chatbot
+    return gr.update(value=video, interactive=True), gr.update(value=av, interactive=True), gr.update(value=audio, interactive=True), message, chatbot
 
 
 def regenerate(message, chatbot):
@@ -228,6 +241,7 @@ with gr.Blocks(title='VideoLLaMA 2 🔥🚀🔥', theme=theme, css=block_css) as demo:
     with gr.Row():
         with gr.Column(scale=3):
             video = gr.Video(label="Input Video")
+            av = gr.Video(label="Input Video_Audio")
             audio = gr.Audio(label="Input Audio", type="filepath")
 
             with gr.Accordion("Parameters", open=True) as parameter_row:
@@ -305,14 +319,14 @@ with gr.Blocks(title='VideoLLaMA 2 🔥🚀🔥', theme=theme, css=block_css) as demo:
             examples=[
                 [
                     f"{cur_dir}/examples/00000368.mp4",
-                    "
+                    "Who plays the instrument louder?",
                 ],
                 [
                     f"{cur_dir}/examples/00003491.mp4",
                     "Where is the loudest instrument?",
                 ],
             ],
-            inputs=[
+            inputs=[av, textbox],
         )
         with gr.Column():
             # audio
@@ -320,11 +334,11 @@ with gr.Blocks(title='VideoLLaMA 2 🔥🚀🔥', theme=theme, css=block_css) as demo:
             examples=[
                 [
                     f"{cur_dir}/examples/bird-twitter-car.wav",
-                    "Please describe the audio
+                    "Please describe the audio:",
                 ],
                 [
                     f"{cur_dir}/examples/door.of.bar.raining2.wav",
-                    "Please describe the audio
+                    "Please describe the audio:",
                 ],
             ],
             inputs=[audio, textbox],
@@ -335,20 +349,22 @@ with gr.Blocks(title='VideoLLaMA 2 🔥🚀🔥', theme=theme, css=block_css) as demo:
 
     submit_btn.click(
         generate,
-        [video, audio, message, chatbot, va_tag, textbox, temperature, top_p, max_output_tokens],
-        [video, audio, message, chatbot])
+        [video, av, audio, message, chatbot, va_tag, textbox, temperature, top_p, max_output_tokens],
+        [video, av, audio, message, chatbot])
 
     regenerate_btn.click(
         regenerate,
         [message, chatbot],
         [message, chatbot]).then(
         generate,
-        [video, audio, message, chatbot, va_tag, textbox, temperature, top_p, max_output_tokens],
-        [video, audio, message, chatbot])
+        [video, av, audio, message, chatbot, va_tag, textbox, temperature, top_p, max_output_tokens],
+        [video, av, audio, message, chatbot])
 
     clear_btn.click(
         clear_history,
         [message, chatbot],
-        [video, audio, message, chatbot, textbox])
+        [video, av, audio, message, chatbot, textbox])
 
     demo.launch(share=False)
+
+
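The wiring change is mechanical: each `.click` inputs/outputs list gains the new `av` component, and `generate`'s return tuple grows in lockstep with the outputs list. A stripped-down Gradio sketch of that contract, reusing the component names from the diff (the handler body is a placeholder, and the omitted controls such as `va_tag` and the sampling parameters are left out for brevity):

```python
import gradio as gr

# Placeholder handler: the return tuple must line up 1:1 with the
# outputs list passed to .click() below (same ordering as the diff).
def generate(video, av, audio, message, chatbot, textbox_in):
    chatbot = (chatbot or []) + [[textbox_in, "stub reply"]]
    return (gr.update(value=video, interactive=True),
            gr.update(value=av, interactive=True),
            gr.update(value=audio, interactive=True),
            message, chatbot)

with gr.Blocks() as demo:
    message = gr.State([])                    # conversation state
    video = gr.Video(label="Input Video")
    av = gr.Video(label="Input Video_Audio")  # new audio-visual input
    audio = gr.Audio(label="Input Audio", type="filepath")
    textbox = gr.Textbox()
    chatbot = gr.Chatbot()
    submit_btn = gr.Button("Send")

    submit_btn.click(
        generate,
        [video, av, audio, message, chatbot, textbox],
        [video, av, audio, message, chatbot])

demo.launch(share=False)
```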