ClownRat committed on
Commit 6c205ce • 1 Parent(s): 7b6b89a

Update demo.

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. .gitattributes +5 -0
  2. VideoLLaMA2/.gitignore +58 -0
  3. VideoLLaMA2/LICENSE +201 -0
  4. VideoLLaMA2/README.md +329 -0
  5. VideoLLaMA2/assets/cat_and_chicken.mp4 +3 -0
  6. VideoLLaMA2/assets/logo.png +3 -0
  7. VideoLLaMA2/assets/pipeline.png +3 -0
  8. VideoLLaMA2/assets/sora.mp4 +3 -0
  9. VideoLLaMA2/assets/sora.png +3 -0
  10. VideoLLaMA2/pyproject.toml +41 -0
  11. VideoLLaMA2/requirements.txt +39 -0
  12. VideoLLaMA2/scripts/custom/finetune.sh +74 -0
  13. VideoLLaMA2/scripts/custom/finetune_lora.sh +75 -0
  14. VideoLLaMA2/scripts/custom/finetune_qlora.sh +75 -0
  15. VideoLLaMA2/scripts/eval/eval_video_cap_msvc.sh +67 -0
  16. VideoLLaMA2/scripts/eval/eval_video_mcqa_egoschema.sh +41 -0
  17. VideoLLaMA2/scripts/eval/eval_video_mcqa_mvbench.sh +46 -0
  18. VideoLLaMA2/scripts/eval/eval_video_mcqa_perception_test_mcqa.sh +45 -0
  19. VideoLLaMA2/scripts/eval/eval_video_mcqa_videomme.sh +84 -0
  20. VideoLLaMA2/scripts/eval/eval_video_oqa_vcgpt_1_correctness.sh +58 -0
  21. VideoLLaMA2/scripts/eval/eval_video_oqa_vcgpt_2_detail.sh +58 -0
  22. VideoLLaMA2/scripts/eval/eval_video_oqa_vcgpt_3_context.sh +58 -0
  23. VideoLLaMA2/scripts/eval/eval_video_oqa_vcgpt_4_temporal.sh +54 -0
  24. VideoLLaMA2/scripts/eval/eval_video_oqa_vcgpt_5_consistency.sh +54 -0
  25. VideoLLaMA2/scripts/eval/eval_video_oqa_vcgpt_activitynet.sh +54 -0
  26. VideoLLaMA2/scripts/eval/eval_video_oqa_vcgpt_msvd.sh +54 -0
  27. VideoLLaMA2/scripts/siglip/finetune_gemma2.sh +75 -0
  28. VideoLLaMA2/scripts/siglip/finetune_mistral.sh +75 -0
  29. VideoLLaMA2/scripts/siglip/finetune_phi3.sh +75 -0
  30. VideoLLaMA2/scripts/siglip/finetune_qwen2.sh +75 -0
  31. VideoLLaMA2/scripts/siglip/pretrain_gemma2.sh +75 -0
  32. VideoLLaMA2/scripts/siglip/pretrain_mistral.sh +75 -0
  33. VideoLLaMA2/scripts/siglip/pretrain_phi3.sh +75 -0
  34. VideoLLaMA2/scripts/siglip/pretrain_qwen2.sh +75 -0
  35. VideoLLaMA2/scripts/vllava/finetune.sh +74 -0
  36. VideoLLaMA2/scripts/vllava/finetune_qwen2.sh +74 -0
  37. VideoLLaMA2/scripts/vllava/pretrain.sh +74 -0
  38. VideoLLaMA2/scripts/vllava/pretrain_qwen2.sh +74 -0
  39. VideoLLaMA2/videollama2/__init__.py +109 -0
  40. {videollama2 → VideoLLaMA2/videollama2}/constants.py +17 -23
  41. {videollama2 → VideoLLaMA2/videollama2}/conversation.py +126 -103
  42. VideoLLaMA2/videollama2/eval/eval_video_cap_msvc_correctness.py +259 -0
  43. VideoLLaMA2/videollama2/eval/eval_video_cap_msvc_detailedness.py +257 -0
  44. videollama2/eval/eval_video_qa_mvbench.py → VideoLLaMA2/videollama2/eval/eval_video_mcqa_mvbench.py +0 -0
  45. VideoLLaMA2/videollama2/eval/eval_video_mcqa_videomme.py +277 -0
  46. videollama2/eval/eval_video_qa_gpt.py → VideoLLaMA2/videollama2/eval/eval_video_oqa_activitynet.py +0 -0
  47. videollama2/eval/eval_benchmark_1_correctness.py → VideoLLaMA2/videollama2/eval/eval_video_oqa_vcgpt_1_correctness.py +0 -0
  48. videollama2/eval/eval_benchmark_2_detailed_orientation.py → VideoLLaMA2/videollama2/eval/eval_video_oqa_vcgpt_2_detailed_orientation.py +0 -0
  49. videollama2/eval/eval_benchmark_3_context.py → VideoLLaMA2/videollama2/eval/eval_video_oqa_vcgpt_3_context.py +0 -0
  50. videollama2/eval/eval_benchmark_4_temporal.py → VideoLLaMA2/videollama2/eval/eval_video_oqa_vcgpt_4_temporal.py +0 -0
.gitattributes CHANGED
@@ -35,3 +35,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.png filter=lfs diff=lfs merge=lfs -text
+ VideoLLaMA2/assets/cat_and_chicken.mp4 filter=lfs diff=lfs merge=lfs -text
+ VideoLLaMA2/assets/logo.png filter=lfs diff=lfs merge=lfs -text
+ VideoLLaMA2/assets/pipeline.png filter=lfs diff=lfs merge=lfs -text
+ VideoLLaMA2/assets/sora.mp4 filter=lfs diff=lfs merge=lfs -text
+ VideoLLaMA2/assets/sora.png filter=lfs diff=lfs merge=lfs -text
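These five rules pin the new demo assets to Git LFS. For context, a hedged sketch of how such entries are typically produced with the `git lfs` CLI (assuming git-lfs is installed; these commands are illustrative, not part of this commit):

```bash
# Each `git lfs track` call appends a matching
# "filter=lfs diff=lfs merge=lfs -text" rule to .gitattributes.
git lfs install
git lfs track "VideoLLaMA2/assets/cat_and_chicken.mp4"
git lfs track "VideoLLaMA2/assets/logo.png"
git lfs track "VideoLLaMA2/assets/pipeline.png"
git lfs track "VideoLLaMA2/assets/sora.mp4"
git lfs track "VideoLLaMA2/assets/sora.png"
git add .gitattributes
```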
VideoLLaMA2/.gitignore ADDED
@@ -0,0 +1,58 @@
+ # Python
+ __pycache__
+ *.pyc
+ *.egg-info
+ dist
+
+ # Log
+ *.log
+ *.log.*
+ *.json
+ *.jsonl
+ log_dir*/
+ temp*/
+
+ # Data
+ !**/alpaca-data-conversation.json
+
+ # Editor
+ .idea
+ *.swp
+
+ # Other
+ .DS_Store
+ 3rd_parties
+
+ # jupyter
+ .ipynb_checkpoints
+ *.ipynb
+
+ # DevContainer
+ !.devcontainer/*
+
+ # Demo
+ serve_images/
+ temp/
+
+ # data folder
+ data/
+ dataset/
+ datasets/
+
+ # training folder
+ wandb
+ ckpts*
+ output
+ output/
+ checkpoints
+ checkpoints/
+ work_dirs*/
+
+ # evaluation folder
+ /eval
+ /eval*
+
+ # pretrained weights
+ pretrained/
+ publish_models/
+ public_models/
VideoLLaMA2/LICENSE ADDED
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
VideoLLaMA2/README.md ADDED
@@ -0,0 +1,329 @@
+ <p align="center">
+ <img src="https://github.com/DAMO-NLP-SG/VideoLLaMA2/blob/e7bc34e0e9a96d77947a75b54399d9f96ccf209d/assets/logo.png" width="150" style="margin-bottom: 0.2;"/>
+ </p>
+
+ <h3 align="center"><a href="https://arxiv.org/abs/2406.07476" style="color:#9C276A">
+ VideoLLaMA 2: Advancing Spatial-Temporal Modeling and Audio Understanding in Video-LLMs</a></h3>
+ <h5 align="center"> If our project helps you, please give us a star ⭐ on GitHub to support us. 🙏🙏 </h5>
+
+ <h5 align="center">
+
+ [![hf_space](https://img.shields.io/badge/🤗-Demo-9C276A.svg)](https://huggingface.co/spaces/lixin4ever/VideoLLaMA2)
+ [![hf_checkpoint](https://img.shields.io/badge/🤗-Checkpoints-9C276A.svg)](https://huggingface.co/collections/DAMO-NLP-SG/videollama-2-6669b6b6f0493188305c87ed)
+ [![hf_data](https://img.shields.io/badge/🤗-MSVC-9C276A.svg)](https://huggingface.co/datasets/DAMO-NLP-SG/Multi-Source-Video-Captioning)
+ [![arXiv](https://img.shields.io/badge/Arxiv-2406.07476-AD1C18.svg?logo=arXiv)](https://arxiv.org/abs/2406.07476) <br>
+ [![License](https://img.shields.io/badge/License-Apache%202.0-yellow)](https://github.com/DAMO-NLP-SG/VideoLLaMA2/blob/main/LICENSE)
+ [![Hits](https://hits.seeyoufarm.com/api/count/incr/badge.svg?url=https%3A%2F%2Fgithub.com%2FDAMO-NLP-SG%2FVideoLLaMA2&count_bg=%2379C83D&title_bg=%23555555&icon=&icon_color=%23E7E7E7&title=Visitor&edge_flat=false)](https://hits.seeyoufarm.com)
+ [![GitHub issues](https://img.shields.io/github/issues/DAMO-NLP-SG/VideoLLaMA2?color=critical&label=Issues)](https://github.com/DAMO-NLP-SG/VideoLLaMA2/issues?q=is%3Aopen+is%3Aissue)
+ [![GitHub closed issues](https://img.shields.io/github/issues-closed/DAMO-NLP-SG/VideoLLaMA2?color=success&label=Issues)](https://github.com/DAMO-NLP-SG/VideoLLaMA2/issues?q=is%3Aissue+is%3Aclosed) <br>
+
+ </h5>
+
+ <details open><summary>💡 Some other multimodal-LLM projects from our team may interest you ✨. </summary><p>
+ <!-- may -->
+
+ > [**Video-LLaMA: An Instruction-tuned Audio-Visual Language Model for Video Understanding**](https://github.com/DAMO-NLP-SG/Video-LLaMA) <br>
+ > Hang Zhang, Xin Li, Lidong Bing <br>
+ [![github](https://img.shields.io/badge/-Github-black?logo=github)](https://github.com/DAMO-NLP-SG/Video-LLaMA) [![github](https://img.shields.io/github/stars/DAMO-NLP-SG/Video-LLaMA.svg?style=social)](https://github.com/DAMO-NLP-SG/Video-LLaMA) [![arXiv](https://img.shields.io/badge/Arxiv-2306.02858-b31b1b.svg?logo=arXiv)](https://arxiv.org/abs/2306.02858) <br>
+
+ > [**VCD: Mitigating Object Hallucinations in Large Vision-Language Models through Visual Contrastive Decoding**](https://arxiv.org/abs/2311.16922) <br>
+ > Sicong Leng, Hang Zhang, Guanzheng Chen, Xin Li, Shijian Lu, Chunyan Miao, Lidong Bing <br>
+ [![github](https://img.shields.io/badge/-Github-black?logo=github)](https://github.com/DAMO-NLP-SG/VCD) [![github](https://img.shields.io/github/stars/DAMO-NLP-SG/VCD.svg?style=social)](https://github.com/DAMO-NLP-SG/VCD) [![arXiv](https://img.shields.io/badge/Arxiv-2311.16922-b31b1b.svg?logo=arXiv)](https://arxiv.org/abs/2311.16922) <br>
+
+ </p></details>
+
+ <div align="center"><video src="https://github.com/DAMO-NLP-SG/VideoLLaMA2/assets/18526640/e0e7951c-f392-42ed-afad-b2c7984d3e38" width="800"></video></div>
+
+
+ ## 📰 News
+ * **[2024.07.30]** Release checkpoints of [VideoLLaMA2-8x7B-Base](https://huggingface.co/DAMO-NLP-SG/VideoLLaMA2-8x7B-Base) and [VideoLLaMA2-8x7B](https://huggingface.co/DAMO-NLP-SG/VideoLLaMA2-8x7B).
+ * **[2024.06.25]** 🔥🔥 As of Jun 25, our [VideoLLaMA2-7B-16F](https://huggingface.co/DAMO-NLP-SG/VideoLLaMA2-7B-16F) is the **Top-1** ~7B-sized VideoLLM on the [MLVU Leaderboard](https://github.com/JUNJIE99/MLVU?tab=readme-ov-file#trophy-mini-leaderboard).
+ * **[2024.06.18]** 🔥🔥 As of Jun 18, our [VideoLLaMA2-7B-16F](https://huggingface.co/DAMO-NLP-SG/VideoLLaMA2-7B-16F) is the **Top-1** ~7B-sized VideoLLM on the [VideoMME Leaderboard](https://video-mme.github.io/home_page.html#leaderboard).
+ * **[2024.06.17]** 👋👋 Update technical report with the latest results and the missing references. If you have works closely related to VideoLLaMA 2 but not mentioned in the paper, feel free to let us know.
+ * **[2024.06.14]** 🔥🔥 [Online Demo](https://huggingface.co/spaces/lixin4ever/VideoLLaMA2) is available.
+ * **[2024.06.03]** Release training, evaluation, and serving codes of VideoLLaMA 2.
+
+
+ <img src="https://github.com/DAMO-NLP-SG/VideoLLaMA2/assets/18526640/b9faf24f-bdd2-4728-9385-acea17ea086d" width="800" />
+
+ ## 🛠️ Requirements and Installation
+ Basic Dependencies:
+ * Python >= 3.8
+ * PyTorch >= 2.2.0
+ * CUDA Version >= 11.8
+ * transformers >= 4.41.2 (for mistral tokenizer)
+ * tokenizers >= 0.19.1 (for mistral tokenizer)
+
+ **[Online Mode]** Install required packages (better for development):
+ ```bash
+ git clone https://github.com/DAMO-NLP-SG/VideoLLaMA2
+ cd VideoLLaMA2
+ pip install -r requirements.txt
+ pip install flash-attn==2.5.8 --no-build-isolation
+ ```
+
+ **[Offline Mode]** Install VideoLLaMA2 as a Python package (better for direct use):
+ ```bash
+ git clone https://github.com/DAMO-NLP-SG/VideoLLaMA2
+ cd VideoLLaMA2
+ pip install --upgrade pip  # enable PEP 660 support
+ pip install -e .
+ pip install flash-attn==2.5.8 --no-build-isolation
+ ```
+
+ ## 🚀 Main Results
+
+ ### Multi-Choice Video QA & Video Captioning
+ <p><img src="https://github.com/DAMO-NLP-SG/VideoLLaMA2/assets/18526640/9cc4a5ae-d850-4eef-bd51-83688b94698e" width="800"/></p>
+
+
+ ### Open-Ended Video QA
+ <p><img src="https://github.com/DAMO-NLP-SG/VideoLLaMA2/assets/18526640/2ed7aa53-db56-4829-8375-85aefbc5120a" width="800"/></p>
+
+ ## :earth_americas: Model Zoo
+ | Model Name | Model Type | Visual Encoder | Language Decoder | # Training Frames |
+ |:----------------|:------------:|:----------------|:------------------|:----------------:|
+ | [VideoLLaMA2-7B-Base](https://huggingface.co/DAMO-NLP-SG/VideoLLaMA2-7B-Base) | Base | [clip-vit-large-patch14-336](https://huggingface.co/openai/clip-vit-large-patch14-336) | [Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) | 8 |
+ | [VideoLLaMA2-7B](https://huggingface.co/DAMO-NLP-SG/VideoLLaMA2-7B) | Chat | [clip-vit-large-patch14-336](https://huggingface.co/openai/clip-vit-large-patch14-336) | [Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) | 8 |
+ | [VideoLLaMA2-7B-16F-Base](https://huggingface.co/DAMO-NLP-SG/VideoLLaMA2-7B-16F-Base) | Base | [clip-vit-large-patch14-336](https://huggingface.co/openai/clip-vit-large-patch14-336) | [Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) | 16 |
+ | [VideoLLaMA2-7B-16F](https://huggingface.co/DAMO-NLP-SG/VideoLLaMA2-7B-16F) | Chat | [clip-vit-large-patch14-336](https://huggingface.co/openai/clip-vit-large-patch14-336) | [Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) | 16 |
+ | [VideoLLaMA2-8x7B-Base](https://huggingface.co/DAMO-NLP-SG/VideoLLaMA2-8x7B-Base) | Base | [clip-vit-large-patch14-336](https://huggingface.co/openai/clip-vit-large-patch14-336) | [Mixtral-8x7B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) | 8 |
+ | [VideoLLaMA2-8x7B](https://huggingface.co/DAMO-NLP-SG/VideoLLaMA2-8x7B) | Chat | [clip-vit-large-patch14-336](https://huggingface.co/openai/clip-vit-large-patch14-336) | [Mixtral-8x7B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) | 8 |
+
+
+ ## [🤗 Demo](https://huggingface.co/spaces/lixin4ever/VideoLLaMA2)
+
+ It is highly recommended to try our [online demo](https://huggingface.co/spaces/lixin4ever/VideoLLaMA2) first.
+
+ To run a video-based LLM web demo on your device, first make sure the necessary model checkpoints are prepared, then follow the steps below to launch it.
+
+ ### Single-model Version
+
+ * Launch a gradio app directly ([VideoLLaMA2-7B](https://huggingface.co/DAMO-NLP-SG/VideoLLaMA2-7B) is used by default):
+ ```bash
+ python videollama2/serve/gradio_web_server_adhoc.py
+ ```
+
+ ### Multi-model Version
+
+ 1. Launch a global controller
+ ```bash
+ cd /path/to/VideoLLaMA2
+ python -m videollama2.serve.controller --host 0.0.0.0 --port 10000
+ ```
+
+ 2. Launch a gradio webserver
+ ```bash
+ python -m videollama2.serve.gradio_web_server --controller http://localhost:10000 --model-list-mode reload
+ ```
+
+ 3. Launch one or multiple model workers
+ ```bash
+ # export HF_ENDPOINT=https://hf-mirror.com # If you cannot access Hugging Face, uncomment this line.
+ python -m videollama2.serve.model_worker --host 0.0.0.0 --controller http://localhost:10000 --port 40000 --worker http://localhost:40000 --model-path /PATH/TO/MODEL1
+ python -m videollama2.serve.model_worker --host 0.0.0.0 --controller http://localhost:10000 --port 40001 --worker http://localhost:40001 --model-path /PATH/TO/MODEL2
+ python -m videollama2.serve.model_worker --host 0.0.0.0 --controller http://localhost:10000 --port 40002 --worker http://localhost:40002 --model-path /PATH/TO/MODEL3
+ ...
+ ```
+
+
+ ## 🏗️ Training & Evaluation
+
+ ### Quick Start
+
+ To facilitate further development on top of our codebase, we provide a quick-start guide on how to train a customized [VideoLLaMA2](https://github.com/DAMO-NLP-SG/VideoLLaMA2) on the [VideoLLaVA](https://github.com/PKU-YuanGroup/Video-LLaVA) dataset and evaluate the trained model on mainstream video-LLM benchmarks.
+
+ 1. Training Data Structure:
+ ```bash
+ VideoLLaMA2
+ ├── datasets
+ │ ├── videollava_pt
+ | | ├── llava_image/ # Available at: https://pan.baidu.com/s/17GYcE69FcJjjUM0e4Gad2w?pwd=9ga3 or https://drive.google.com/drive/folders/1QmFj2FcMAoWNCUyiUtdcW0-IOhLbOBcf?usp=drive_link
+ | | ├── valley/ # Available at: https://pan.baidu.com/s/1jluOimE7mmihEBfnpwwCew?pwd=jyjz or https://drive.google.com/drive/folders/1QmFj2FcMAoWNCUyiUtdcW0-IOhLbOBcf?usp=drive_link
+ | | └── valley_llavaimage.json # Available at: https://drive.google.com/file/d/1zGRyVSUMoczGq6cjQFmT0prH67bu2wXD/view, including 703K video-text and 558K image-text pairs
+ │ ├── videollava_sft
+ | | ├── llava_image_tune/ # Available at: https://pan.baidu.com/s/1l-jT6t_DlN5DTklwArsqGw?pwd=o6ko
+ | | ├── videochatgpt_tune/ # Available at: https://pan.baidu.com/s/10hJ_U7wVmYTUo75YHc_n8g?pwd=g1hf
+ | | └── videochatgpt_llavaimage_tune.json # Available at: https://drive.google.com/file/d/1zGRyVSUMoczGq6cjQFmT0prH67bu2wXD/view, including 100K video-centric, 625K image-centric and 40K text-only conversations
+ ```
+ 2. Command:
+ ```bash
+ # VideoLLaMA2-vllava pretraining
+ bash scripts/vllava/pretrain.sh
+ # VideoLLaMA2-vllava finetuning
+ bash scripts/vllava/finetune.sh
+ ```
+ 3. Evaluation Data Structure:
+ ```bash
+ VideoLLaMA2
+ ├── eval
+ │ ├── egoschema # Official website: https://github.com/egoschema/EgoSchema
+ | | ├── good_clips_git/ # Available at: https://drive.google.com/drive/folders/1SS0VVz8rML1e5gWq7D7VtP1oxE2UtmhQ
+ | | └── questions.json # Available at: https://github.com/egoschema/EgoSchema/blob/main/questions.json
+ │ ├── mvbench # Official website: https://huggingface.co/datasets/OpenGVLab/MVBench
+ | | ├── video/
+ | | | ├── clevrer/
+ | | | └── ...
+ | | └── json/
+ | | | ├── action_antonym.json
+ | | | └── ...
+ │ ├── perception_test_mcqa # Official website: https://github.com/google-deepmind/perception_test
+ | | ├── videos/ # Available at: https://storage.googleapis.com/dm-perception-test/zip_data/test_videos.zip
+ | | └── mc_question_test.json # Download from https://storage.googleapis.com/dm-perception-test/zip_data/mc_question_test_annotations.zip
+ │ ├── videomme # Official website: https://video-mme.github.io/home_page.html#leaderboard
+ | | ├── test-00000-of-00001.parquet
+ | | ├── videos/
+ | | └── subtitles/
+ │ ├── Activitynet_Zero_Shot_QA # Official website: https://github.com/MILVLG/activitynet-qa
+ | | ├── all_test/ # Available at: https://mbzuaiac-my.sharepoint.com/:u:/g/personal/hanoona_bangalath_mbzuai_ac_ae/EatOpE7j68tLm2XAd0u6b8ABGGdVAwLMN6rqlDGM_DwhVA?e=90WIuW
+ | | ├── test_q.json # Available at: https://github.com/MILVLG/activitynet-qa/tree/master/dataset
+ | | └── test_a.json # Available at: https://github.com/MILVLG/activitynet-qa/tree/master/dataset
+ │ ├── MSVD_Zero_Shot_QA # Official website: https://github.com/xudejing/video-question-answering
+ | | ├── videos/
+ | | ├── test_q.json
+ | | └── test_a.json
+ │ ├── videochatgpt_gen # Official website: https://github.com/mbzuai-oryx/Video-ChatGPT/tree/main/quantitative_evaluation
+ | | ├── Test_Videos/ # Available at: https://mbzuaiac-my.sharepoint.com/:u:/g/personal/hanoona_bangalath_mbzuai_ac_ae/EatOpE7j68tLm2XAd0u6b8ABGGdVAwLMN6rqlDGM_DwhVA?e=90WIuW
+ | | ├── Test_Human_Annotated_Captions/ # Available at: https://mbzuaiac-my.sharepoint.com/personal/hanoona_bangalath_mbzuai_ac_ae/_layouts/15/onedrive.aspx?id=%2Fpersonal%2Fhanoona%5Fbangalath%5Fmbzuai%5Fac%5Fae%2FDocuments%2FVideo%2DChatGPT%2FData%5FCode%5FModel%5FRelease%2FQuantitative%5FEvaluation%2Fbenchamarking%2FTest%5FHuman%5FAnnotated%5FCaptions%2Ezip&parent=%2Fpersonal%2Fhanoona%5Fbangalath%5Fmbzuai%5Fac%5Fae%2FDocuments%2FVideo%2DChatGPT%2FData%5FCode%5FModel%5FRelease%2FQuantitative%5FEvaluation%2Fbenchamarking&ga=1
+ | | ├── generic_qa.json # These three json files available at: https://mbzuaiac-my.sharepoint.com/personal/hanoona_bangalath_mbzuai_ac_ae/_layouts/15/onedrive.aspx?id=%2Fpersonal%2Fhanoona%5Fbangalath%5Fmbzuai%5Fac%5Fae%2FDocuments%2FVideo%2DChatGPT%2FData%5FCode%5FModel%5FRelease%2FQuantitative%5FEvaluation%2Fbenchamarking%2FBenchmarking%5FQA&ga=1
+ | | ├── temporal_qa.json
+ | | └── consistency_qa.json
+ ```
+ 4. Command:
+ ```bash
+ # mvbench evaluation
+ CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/eval/eval_video_mcqa_mvbench.sh
+ # activitynet-qa evaluation (need to set azure openai key/endpoint/deployname)
+ CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash scripts/eval/eval_video_oqa_vcgpt_activitynet.sh
+ ```
+
+ ### Data Format
+
+ If you want to train a video-LLM on your own data, follow the procedures below to prepare the video/image SFT data:
+
+ 1. Suppose your data structure is like:
+ ```bash
+ VideoLLaMA2
+ ├── datasets
+ │ ├── custom_sft
+ │ | ├── images
+ │ | ├── videos
+ | | └── custom.json
+ ```
+ 2. Then re-organize the annotated video/image SFT data according to the following format:
+ ```json
+ [
+ {
+ "id": 0,
+ "video": "images/xxx.jpg",
+ "conversations": [
+ {
+ "from": "human",
+ "value": "<image>\nWhat are the colors of the bus in the image?"
+ },
+ {
+ "from": "gpt",
+ "value": "The bus in the image is white and red."
+ },
+ ...
+ ]
+ },
+ {
+ "id": 1,
+ "video": "videos/xxx.mp4",
+ "conversations": [
+ {
+ "from": "human",
+ "value": "<video>\nWhat are the main activities that take place in the video?"
+ },
+ {
+ "from": "gpt",
+ "value": "The main activities that take place in the video are the preparation of camera equipment by a man, a group of men riding a helicopter, and a man sailing a boat through the water."
+ },
+ ...
+ ]
+ },
+ ...
+ ]
+ ```
+ 3. Modify `scripts/custom/finetune.sh`:
+ ```bash
+ ...
+ --data_path datasets/custom_sft/custom.json
+ --data_folder datasets/custom_sft/
+ --pretrain_mm_mlp_adapter CONNECTOR_DOWNLOAD_PATH (e.g., DAMO-NLP-SG/VideoLLaMA2-7B-Base)
+ ...
+ ```
+
+ ## 🤖 Inference
+
+ Video/Image Inference:
+ ```python
+ import sys
+ sys.path.append('./')
+ from videollama2 import model_init, mm_infer
+ from videollama2.utils import disable_torch_init
+
+
+ def inference():
+     disable_torch_init()
+
+     # Video Inference
+     modal = 'video'
+     modal_path = 'assets/cat_and_chicken.mp4'
+     instruct = 'What animals are in the video, what are they doing, and how does the video feel?'
+     # Reply:
+     # The video features a kitten and a baby chick playing together. The kitten is seen laying on the floor while the baby chick hops around. The two animals interact playfully with each other, and the video has a cute and heartwarming feel to it.
+
+     # Image Inference
+     modal = 'image'
+     modal_path = 'assets/sora.png'
+     instruct = 'What is the woman wearing, what is she doing, and how does the image feel?'
+     # Reply:
+     # The woman in the image is wearing a black coat and sunglasses, and she is walking down a rain-soaked city street. The image feels vibrant and lively, with the bright city lights reflecting off the wet pavement, creating a visually appealing atmosphere. The woman's presence adds a sense of style and confidence to the scene, as she navigates the bustling urban environment.
+
+     model_path = 'DAMO-NLP-SG/VideoLLaMA2-7B'
+     # Base model inference (only need to replace model_path)
+     # model_path = 'DAMO-NLP-SG/VideoLLaMA2-7B-Base'
+     model, processor, tokenizer = model_init(model_path)
+     output = mm_infer(processor[modal](modal_path), instruct, model=model, tokenizer=tokenizer, do_sample=False, modal=modal)
+
+     print(output)
+
+ if __name__ == "__main__":
+     inference()
+ ```
+
+ ## 📑 Citation
+
+ If you find VideoLLaMA useful for your research and applications, please cite using this BibTeX:
+ ```bibtex
+ @article{damonlpsg2024videollama2,
+   title={VideoLLaMA 2: Advancing Spatial-Temporal Modeling and Audio Understanding in Video-LLMs},
+   author={Cheng, Zesen and Leng, Sicong and Zhang, Hang and Xin, Yifei and Li, Xin and Chen, Guanzheng and Zhu, Yongxin and Zhang, Wenqi and Luo, Ziyang and Zhao, Deli and Bing, Lidong},
+   journal={arXiv preprint arXiv:2406.07476},
+   year={2024},
+   url = {https://arxiv.org/abs/2406.07476}
+ }
+
+ @article{damonlpsg2023videollama,
+   title = {Video-LLaMA: An Instruction-tuned Audio-Visual Language Model for Video Understanding},
+   author = {Zhang, Hang and Li, Xin and Bing, Lidong},
+   journal = {arXiv preprint arXiv:2306.02858},
+   year = {2023},
+   url = {https://arxiv.org/abs/2306.02858}
+ }
+ ```
+
+ ## 👍 Acknowledgement
+ The codebase of VideoLLaMA 2 is adapted from [**LLaVA 1.5**](https://github.com/haotian-liu/LLaVA) and [**FastChat**](https://github.com/lm-sys/FastChat). We are also grateful to the following projects that VideoLLaMA 2 builds on:
+ * [**LLaMA 2**](https://github.com/meta-llama/llama), [**Mistral-7B**](https://mistral.ai/news/announcing-mistral-7b/), [**OpenAI CLIP**](https://openai.com/index/clip/), [**Honeybee**](https://github.com/kakaobrain/honeybee).
+ * [**Video-ChatGPT**](https://github.com/mbzuai-oryx/Video-ChatGPT), [**Video-LLaVA**](https://github.com/PKU-YuanGroup/Video-LLaVA).
+ * [**WebVid**](https://github.com/m-bain/webvid), [**Panda-70M**](https://github.com/snap-research/Panda-70M), [**LanguageBind**](https://github.com/PKU-YuanGroup/LanguageBind), [**InternVid**](https://github.com/OpenGVLab/InternVideo/tree/main/Data/InternVid).
+ * [**VideoChat2**](https://github.com/OpenGVLab/Ask-Anything/tree/main/video_chat2), [**Valley**](https://github.com/RupertLuo/Valley), [**VTimeLLM**](https://github.com/huangb23/VTimeLLM), [**ShareGPT4V**](https://sharegpt4v.github.io/).
+
+
+ ## 🔒 License
+
+ This project is released under the Apache 2.0 license as found in the LICENSE file.
+ The service is a research preview intended for **non-commercial use ONLY**, subject to the model Licenses of LLaMA and Mistral, Terms of Use of the data generated by OpenAI, and Privacy Practices of ShareGPT. Please get in touch with us if you find any potential violations.
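A note on the Data Format section above: the `custom.json` schema is straightforward to generate programmatically. A minimal sketch (the file layout follows the README's example; entry contents are illustrative, not part of this commit):

```python
import json

# Entries follow the schema from the README's "Data Format" section.
# "video" holds a path relative to --data_folder; per the README example,
# images also use the "video" key, with an <image> placeholder in the first
# human turn, while videos use <video>.
entries = [
    {
        "id": 0,
        "video": "images/xxx.jpg",
        "conversations": [
            {"from": "human", "value": "<image>\nWhat are the colors of the bus in the image?"},
            {"from": "gpt", "value": "The bus in the image is white and red."},
        ],
    },
    {
        "id": 1,
        "video": "videos/xxx.mp4",
        "conversations": [
            {"from": "human", "value": "<video>\nWhat are the main activities that take place in the video?"},
            {"from": "gpt", "value": "The main activities are the preparation of camera equipment by a man, a group of men riding a helicopter, and a man sailing a boat through the water."},
        ],
    },
]

# Write the merged annotation file expected by --data_path.
with open("datasets/custom_sft/custom.json", "w") as f:
    json.dump(entries, f, indent=2)
```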
VideoLLaMA2/assets/cat_and_chicken.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f24723064ee27ea8fc7a30b4542601ed03a42952c0d20fe918213cf876bfec4
+ size 18956323
VideoLLaMA2/assets/logo.png ADDED

Git LFS Details
  • SHA256: cd9a3a969f931fb23ed371de960ddc589136a937df901b2b08e2750fabf6dd8e
  • Pointer size: 131 Bytes
  • Size of remote file: 504 kB
VideoLLaMA2/assets/pipeline.png ADDED

Git LFS Details
  • SHA256: eeab6d9f13787337b40399427e419506f49b55a4fb4fe40ba91e25618d03eeb0
  • Pointer size: 132 Bytes
  • Size of remote file: 4.38 MB
VideoLLaMA2/assets/sora.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24e5f0ea3353f23225d00efcdf136fa6dc346301fc34082790e2152c80fa0490
+ size 14978533
VideoLLaMA2/assets/sora.png ADDED

Git LFS Details
  • SHA256: 6b69de5c87b429c7b1a87de6f9cb3f5ec6aec5f58ab6ab7c0f727a5d0ec259a5
  • Pointer size: 132 Bytes
  • Size of remote file: 1.05 MB
VideoLLaMA2/pyproject.toml ADDED
@@ -0,0 +1,41 @@
+ [build-system]
+ requires = ["setuptools>=61.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "videollama2"
+ version = "1.0"
+ description = "Release of VideoLLaMA2"
+ readme = "README.md"
+ requires-python = ">=3.8"
+ classifiers = [
+ "Programming Language :: Python :: 3",
+ "License :: OSI Approved :: Apache Software License",
+ ]
+ dependencies = [
+ "torch==2.2.0", "torchvision==0.17.0",
+ "transformers==4.42.3", "tokenizers==0.19.1",
+ "deepspeed==0.13.1", "accelerate==0.26.1",
+ "peft==0.4.0", "timm==1.0.3", "numpy==1.24.4",
+ "decord==0.6.0", "imageio==2.34.0", "imageio-ffmpeg==0.4.9",
+ "moviepy==1.0.3", "scenedetect==0.6.3",
+ "opencv-python==4.6.0.66", "pysubs2",
+ "scikit-learn==1.2.2", "huggingface_hub==0.23.4", "sentencepiece==0.1.99",
+ "shortuuid", "einops==0.6.1", "einops-exts==0.0.4",
+ "bitsandbytes==0.43.0", "pydantic>=2.0", "markdown2[all]",
+ "gradio==3.50.0", "gradio_client==0.6.1", "httpx==0.24.1",
+ "requests", "openai", "uvicorn", "fastapi", "tensorboard", "wandb", "tabulate"
+ ]
+
+ [project.optional-dependencies]
+ train = ["ninja"]
+
+ [project.urls]
+ "Homepage" = "https://github.com/DAMO-NLP-SG/VideoLLaMA2"
+ "Bug Tracker" = "https://github.com/DAMO-NLP-SG/VideoLLaMA2/issues"
+
+ [tool.setuptools.packages.find]
+ exclude = ["assets*", "benchmark*", "docs", "dist*", "playground*", "scripts*", "tests*"]
+
+ [tool.wheel]
+ exclude = ["assets*", "benchmark*", "docs", "dist*", "playground*", "scripts*", "tests*"]
VideoLLaMA2/requirements.txt ADDED
@@ -0,0 +1,39 @@
+ --extra-index-url https://download.pytorch.org/whl/cu118
+ # basic dependencies
+ torch==2.2.0
+ torchvision==0.17.0
+ transformers==4.42.3
+ tokenizers==0.19.1
+ deepspeed==0.13.1
+ accelerate==0.26.1
+ peft==0.4.0
+ timm==1.0.3
+ numpy==1.24.4
+ # data processing
+ decord==0.6.0
+ imageio==2.34.0
+ imageio-ffmpeg==0.4.9
+ moviepy==1.0.3
+ scenedetect==0.6.3
+ opencv-python==4.6.0.66
+ pysubs2
+ # misc
+ scikit-learn==1.2.2
+ huggingface_hub==0.23.4
+ sentencepiece==0.1.99
+ shortuuid
+ einops==0.6.1
+ einops-exts==0.0.4
+ bitsandbytes==0.43.0
+ pydantic>=2.0
+ markdown2[all]
+ gradio==3.50.0
+ gradio_client==0.6.1
+ httpx==0.24.1
+ requests
+ openai
+ uvicorn
+ fastapi
+ tensorboard
+ wandb
+ tabulate
VideoLLaMA2/scripts/custom/finetune.sh ADDED
@@ -0,0 +1,74 @@
+ #!/bin/bash
+
+ # Environment Variables
+ ARG_WORLD_SIZE=${1:-1}
+ ARG_NPROC_PER_NODE=${2:-8}
+ ARG_MASTER_ADDR="127.0.0.1"
+ ARG_MASTER_PORT=16666
+ ARG_RANK=0
+
+ # Fall back to the defaults above when the launcher does not set these.
+ if [ -z "$WORLD_SIZE" ] || [ -z "$NPROC_PER_NODE" ]; then
+ WORLD_SIZE=$ARG_WORLD_SIZE
+ NPROC_PER_NODE=$ARG_NPROC_PER_NODE
+ fi
+ if [ -z "$MASTER_ADDR" ] || [ -z "$MASTER_PORT" ] || [ -z "$RANK" ]; then
+ MASTER_ADDR=$ARG_MASTER_ADDR
+ MASTER_PORT=$ARG_MASTER_PORT
+ RANK=$ARG_RANK
+ fi
+
+ echo "WORLD_SIZE: $WORLD_SIZE"
+ echo "NPROC_PER_NODE: $NPROC_PER_NODE"
+
+ # Training Arguments
+ GLOBAL_BATCH_SIZE=128
+ LOCAL_BATCH_SIZE=4
+ GRADIENT_ACCUMULATION_STEPS=$((GLOBAL_BATCH_SIZE / (WORLD_SIZE * NPROC_PER_NODE * LOCAL_BATCH_SIZE)))
+
+ # Log Arguments
+ export TRANSFORMERS_OFFLINE=1
+ export WANDB_PROJECT=videollama2
+ RUN_NAME=downstream_sft_settings
+ DATA_DIR=datasets
+ OUTP_DIR=work_dirs
+
+ torchrun --nnodes $WORLD_SIZE \
+ --nproc_per_node $NPROC_PER_NODE \
+ --master_addr=$MASTER_ADDR \
+ --master_port=$MASTER_PORT \
+ --node_rank $RANK \
+ videollama2/train_flash_attn.py \
+ --deepspeed scripts/zero3.json \
+ --model_type videollama2 \
+ --model_path mistralai/Mistral-7B-Instruct-v0.2 \
+ --vision_tower openai/clip-vit-large-patch14-336 \
+ --mm_projector_type stc_connector \
+ --pretrain_mm_mlp_adapter DAMO-NLP-SG/VideoLLaMA2-7B-Base/mm_projector.bin \
+ --data_path ${DATA_DIR}/videollava_sft/videochatgpt_llavaimage_tune.json \
+ --data_folder ${DATA_DIR}/videollava_sft/ \
+ --mm_vision_select_layer -2 \
+ --image_aspect_ratio pad \
+ --num_frames 8 \
+ --bf16 True \
+ --tf32 True \
+ --fp16 False \
+ --output_dir ${OUTP_DIR}/${WANDB_PROJECT}/finetune_${RUN_NAME} \
+ --num_train_epochs 1 \
+ --per_device_train_batch_size $LOCAL_BATCH_SIZE \
+ --per_device_eval_batch_size 4 \
+ --gradient_accumulation_steps $GRADIENT_ACCUMULATION_STEPS \
+ --evaluation_strategy "no" \
+ --save_strategy "steps" \
+ --save_steps 500 \
+ --save_total_limit 99 \
+ --learning_rate 2e-5 \
+ --weight_decay 0. \
+ --warmup_ratio 0.03 \
+ --lr_scheduler_type "cosine" \
+ --logging_steps 1 \
+ --model_max_length 2048 \
+ --gradient_checkpointing True \
+ --dataloader_num_workers 4 \
+ --report_to tensorboard \
+ --run_name $RUN_NAME
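The `GRADIENT_ACCUMULATION_STEPS` arithmetic in this script keeps the effective batch size pinned at `GLOBAL_BATCH_SIZE` whatever the GPU topology. A quick sanity check with the script's defaults (1 node, 8 GPUs per node, local batch 4):

```bash
# effective batch = WORLD_SIZE * NPROC_PER_NODE * LOCAL_BATCH_SIZE * grad_accum
# 128 / (1 * 8 * 4) = 4 accumulation steps, so 1 * 8 * 4 * 4 = 128.
echo $((128 / (1 * 8 * 4)))   # prints 4
```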
VideoLLaMA2/scripts/custom/finetune_lora.sh ADDED
@@ -0,0 +1,75 @@
+ #!/bin/bash
+
+ # Environment Variables
+ ARG_WORLD_SIZE=${1:-1}
+ ARG_NPROC_PER_NODE=${2:-8}
+ ARG_MASTER_ADDR="127.0.0.1"
+ ARG_MASTER_PORT=16666
+ ARG_RANK=0
+
+ # Fall back to the defaults above when the launcher does not set these.
+ if [ -z "$WORLD_SIZE" ] || [ -z "$NPROC_PER_NODE" ]; then
+ WORLD_SIZE=$ARG_WORLD_SIZE
+ NPROC_PER_NODE=$ARG_NPROC_PER_NODE
+ fi
+ if [ -z "$MASTER_ADDR" ] || [ -z "$MASTER_PORT" ] || [ -z "$RANK" ]; then
+ MASTER_ADDR=$ARG_MASTER_ADDR
+ MASTER_PORT=$ARG_MASTER_PORT
+ RANK=$ARG_RANK
+ fi
+
+ echo "WORLD_SIZE: $WORLD_SIZE"
+ echo "NPROC_PER_NODE: $NPROC_PER_NODE"
+
+ # Training Arguments
+ GLOBAL_BATCH_SIZE=128
+ LOCAL_BATCH_SIZE=4
+ GRADIENT_ACCUMULATION_STEPS=$((GLOBAL_BATCH_SIZE / (WORLD_SIZE * NPROC_PER_NODE * LOCAL_BATCH_SIZE)))
+
+ # Log Arguments
+ export TRANSFORMERS_OFFLINE=1
+ export WANDB_PROJECT=videollama2
+ RUN_NAME=downstream_sft_settings_lora
+ DATA_DIR=datasets
+ OUTP_DIR=work_dirs
+
+ torchrun --nnodes $WORLD_SIZE \
+ --nproc_per_node $NPROC_PER_NODE \
+ --master_addr=$MASTER_ADDR \
+ --master_port=$MASTER_PORT \
+ --node_rank $RANK \
+ videollama2/train_flash_attn.py \
+ --lora_enable True --lora_r 128 --lora_alpha 256 --mm_projector_lr 2e-5 \
+ --deepspeed scripts/zero3.json \
+ --model_type videollama2 \
+ --model_path mistralai/Mistral-7B-Instruct-v0.2 \
+ --vision_tower openai/clip-vit-large-patch14-336 \
+ --mm_projector_type stc_connector \
+ --pretrain_mm_mlp_adapter DAMO-NLP-SG/VideoLLaMA2-7B-Base/mm_projector.bin \
+ --data_path ${DATA_DIR}/videollava_sft/videochatgpt_llavaimage_tune.json \
+ --data_folder ${DATA_DIR}/videollava_sft/ \
+ --mm_vision_select_layer -2 \
+ --image_aspect_ratio pad \
+ --num_frames 8 \
+ --bf16 True \
+ --tf32 True \
+ --fp16 False \
+ --output_dir ${OUTP_DIR}/${WANDB_PROJECT}/finetune_${RUN_NAME} \
+ --num_train_epochs 1 \
+ --per_device_train_batch_size $LOCAL_BATCH_SIZE \
+ --per_device_eval_batch_size 4 \
+ --gradient_accumulation_steps $GRADIENT_ACCUMULATION_STEPS \
+ --evaluation_strategy "no" \
+ --save_strategy "steps" \
+ --save_steps 500 \
+ --save_total_limit 99 \
+ --learning_rate 2e-5 \
+ --weight_decay 0. \
+ --warmup_ratio 0.03 \
+ --lr_scheduler_type "cosine" \
+ --logging_steps 1 \
+ --model_max_length 2048 \
+ --gradient_checkpointing True \
+ --dataloader_num_workers 4 \
+ --report_to tensorboard \
+ --run_name $RUN_NAME
VideoLLaMA2/scripts/custom/finetune_qlora.sh ADDED
@@ -0,0 +1,75 @@
+ #!/bin/bash
+
+ # Environment Variables
+ ARG_WORLD_SIZE=${1:-1}
+ ARG_NPROC_PER_NODE=${2:-8}
+ ARG_MASTER_ADDR="127.0.0.1"
+ ARG_MASTER_PORT=16666
+ ARG_RANK=0
+
+ # Fall back to the defaults above when the launcher does not set these.
+ if [ -z "$WORLD_SIZE" ] || [ -z "$NPROC_PER_NODE" ]; then
+ WORLD_SIZE=$ARG_WORLD_SIZE
+ NPROC_PER_NODE=$ARG_NPROC_PER_NODE
+ fi
+ if [ -z "$MASTER_ADDR" ] || [ -z "$MASTER_PORT" ] || [ -z "$RANK" ]; then
+ MASTER_ADDR=$ARG_MASTER_ADDR
+ MASTER_PORT=$ARG_MASTER_PORT
+ RANK=$ARG_RANK
+ fi
+
+ echo "WORLD_SIZE: $WORLD_SIZE"
+ echo "NPROC_PER_NODE: $NPROC_PER_NODE"
+
+ # Training Arguments
+ GLOBAL_BATCH_SIZE=128
+ LOCAL_BATCH_SIZE=4
+ GRADIENT_ACCUMULATION_STEPS=$((GLOBAL_BATCH_SIZE / (WORLD_SIZE * NPROC_PER_NODE * LOCAL_BATCH_SIZE)))
+
+ # Log Arguments
+ export TRANSFORMERS_OFFLINE=1
+ export WANDB_PROJECT=videollama2
+ RUN_NAME=downstream_sft_settings_qlora
+ DATA_DIR=datasets
+ OUTP_DIR=work_dirs
+
+ torchrun --nnodes $WORLD_SIZE \
+ --nproc_per_node $NPROC_PER_NODE \
+ --master_addr=$MASTER_ADDR \
+ --master_port=$MASTER_PORT \
+ --node_rank $RANK \
+ videollama2/train_flash_attn.py \
+ --lora_enable True --lora_r 128 --lora_alpha 256 --mm_projector_lr 2e-5 --bits 4 \
+ --deepspeed scripts/zero2.json \
+ --model_type videollama2 \
+ --model_path mistralai/Mistral-7B-Instruct-v0.2 \
+ --vision_tower openai/clip-vit-large-patch14-336 \
+ --mm_projector_type stc_connector \
+ --pretrain_mm_mlp_adapter DAMO-NLP-SG/VideoLLaMA2-7B-Base/mm_projector.bin \
+ --data_path ${DATA_DIR}/videollava_sft/videochatgpt_llavaimage_tune.json \
+ --data_folder ${DATA_DIR}/videollava_sft/ \
+ --mm_vision_select_layer -2 \
+ --image_aspect_ratio pad \
+ --num_frames 8 \
+ --bf16 True \
+ --tf32 True \
+ --fp16 False \
+ --output_dir ${OUTP_DIR}/${WANDB_PROJECT}/finetune_${RUN_NAME} \
+ --num_train_epochs 1 \
+ --per_device_train_batch_size $LOCAL_BATCH_SIZE \
+ --per_device_eval_batch_size 4 \
+ --gradient_accumulation_steps $GRADIENT_ACCUMULATION_STEPS \
+ --evaluation_strategy "no" \
+ --save_strategy "steps" \
+ --save_steps 500 \
+ --save_total_limit 99 \
+ --learning_rate 2e-5 \
+ --weight_decay 0. \
+ --warmup_ratio 0.03 \
+ --lr_scheduler_type "cosine" \
+ --logging_steps 1 \
+ --model_max_length 2048 \
+ --gradient_checkpointing True \
+ --dataloader_num_workers 4 \
+ --report_to tensorboard \
+ --run_name $RUN_NAME
VideoLLaMA2/scripts/eval/eval_video_cap_msvc.sh ADDED
@@ -0,0 +1,67 @@
+ set -x
+
+ EVAL_DATA_DIR=eval
+ OUTPUT_DIR=eval_output
+ CKPT=DAMO-NLP-SG/VideoLLaMA2-7B
+ CKPT_NAME=$(echo $CKPT | rev | cut -d'/' -f1 | rev)
+
+ gpu_list="${CUDA_VISIBLE_DEVICES:-0}"
+ IFS=',' read -ra GPULIST <<< "$gpu_list"
+
+ # divide data according to the number of GPUs per task
+ GPUS_PER_TASK=1
+ CHUNKS=$((${#GPULIST[@]}/$GPUS_PER_TASK))
+
+ output_file=${OUTPUT_DIR}/MSVC/answers/${CKPT_NAME}/merge.json
+
+ # If the merged file is missing or empty, remove stale per-chunk outputs.
+ if [ ! -f "$output_file" ] || [ $(cat "$output_file" | wc -l) -eq 0 ]; then
+ rm -f ${OUTPUT_DIR}/MSVC/answers/${CKPT_NAME}/*.json
+ fi
+
+ if [ ! -f "$output_file" ]; then
+ for IDX in $(seq 0 $((CHUNKS-1))); do
+ # select the GPUs for the task
+ gpu_devices=$(IFS=,; echo "${GPULIST[*]:$(($IDX*$GPUS_PER_TASK)):$GPUS_PER_TASK}")
+ TRANSFORMERS_OFFLINE=1 CUDA_VISIBLE_DEVICES=${gpu_devices} python3 videollama2/eval/inference_video_cap_msvc.py \
+ --model-path ${CKPT} \
+ --video-folder ${EVAL_DATA_DIR}/MSVC \
+ --question-file ${EVAL_DATA_DIR}/MSVC/msvc.json \
+ --output-file ${OUTPUT_DIR}/MSVC/answers/${CKPT_NAME}/${CHUNKS}_${IDX}.json \
+ --num-chunks $CHUNKS \
+ --chunk-idx $IDX &
+ done
+
+ wait
+
+ # Clear out the output file if it exists.
+ > "$output_file"
+
+ # Loop through the indices and concatenate each file.
+ for IDX in $(seq 0 $((CHUNKS-1))); do
+ cat ${OUTPUT_DIR}/MSVC/answers/${CKPT_NAME}/${CHUNKS}_${IDX}.json >> "$output_file"
+ done
+ fi
+
+
+ AZURE_API_KEY=""
+ AZURE_API_ENDPOINT=""
+ AZURE_API_DEPLOYNAME=""
+
+ python3 videollama2/eval/eval_video_cap_msvc_correctness.py \
+ --pred-path $output_file \
+ --output-dir ${OUTPUT_DIR}/MSVC/answers/${CKPT_NAME}/correctness_gpt \
+ --output-json ${OUTPUT_DIR}/MSVC/answers/${CKPT_NAME}/correctness_results.json \
+ --api-key $AZURE_API_KEY \
+ --api-endpoint $AZURE_API_ENDPOINT \
+ --api-deployname $AZURE_API_DEPLOYNAME \
+ --num-tasks 4
+
+ python3 videollama2/eval/eval_video_cap_msvc_detailedness.py \
+ --pred-path $output_file \
+ --output-dir ${OUTPUT_DIR}/MSVC/answers/${CKPT_NAME}/detailedness_gpt \
+ --output-json ${OUTPUT_DIR}/MSVC/answers/${CKPT_NAME}/detailedness_results.json \
+ --api-key $AZURE_API_KEY \
+ --api-endpoint $AZURE_API_ENDPOINT \
+ --api-deployname $AZURE_API_DEPLOYNAME \
+ --num-tasks 4
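These eval scripts all share the same GPU-to-chunk mapping: `GPULIST` is sliced into groups of `GPUS_PER_TASK`, and each group runs one inference chunk in the background. A small worked example of the slicing (values illustrative; the scripts above default to `GPUS_PER_TASK=1`):

```bash
# With CUDA_VISIBLE_DEVICES=0,1,2,3 and 2 GPUs per task -> CHUNKS=2.
gpu_list="0,1,2,3"
IFS=',' read -ra GPULIST <<< "$gpu_list"
GPUS_PER_TASK=2
CHUNKS=$((${#GPULIST[@]} / GPUS_PER_TASK))
for IDX in $(seq 0 $((CHUNKS-1))); do
    # Take GPUS_PER_TASK entries starting at IDX*GPUS_PER_TASK, re-joined with commas.
    gpu_devices=$(IFS=,; echo "${GPULIST[*]:$((IDX*GPUS_PER_TASK)):GPUS_PER_TASK}")
    echo "chunk $IDX -> GPUs $gpu_devices"   # chunk 0 -> 0,1 ; chunk 1 -> 2,3
done
```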
VideoLLaMA2/scripts/eval/eval_video_mcqa_egoschema.sh ADDED
@@ -0,0 +1,41 @@
+ set -x
+
+ EVAL_DATA_DIR=eval
+ OUTPUT_DIR=eval_output
+ CKPT=DAMO-NLP-SG/VideoLLaMA2-7B
+ CKPT_NAME=$(echo $CKPT | rev | cut -d'/' -f1 | rev)
+
+ gpu_list="${CUDA_VISIBLE_DEVICES:-0}"
+ IFS=',' read -ra GPULIST <<< "$gpu_list"
+
+ # divide data according to the number of GPUs per task
+ GPUS_PER_TASK=1
+ CHUNKS=$((${#GPULIST[@]}/$GPUS_PER_TASK))
+
+ output_file=${OUTPUT_DIR}/egoschema/answers/${CKPT_NAME}/merge.csv
+
+ if [ ! -f "$output_file" ]; then
+ for IDX in $(seq 0 $((CHUNKS-1))); do
+ # select the GPUs for the task
+ gpu_devices=$(IFS=,; echo "${GPULIST[*]:$(($IDX*$GPUS_PER_TASK)):$GPUS_PER_TASK}")
+ TRANSFORMERS_OFFLINE=1 CUDA_VISIBLE_DEVICES=${gpu_devices} python3 videollama2/eval/inference_video_mcqa_egoschema.py \
+ --model-path ${CKPT} \
+ --video-folder ${EVAL_DATA_DIR}/egoschema/good_clips_git \
+ --question-file ${EVAL_DATA_DIR}/egoschema/questions.json \
+ --answer-file ${OUTPUT_DIR}/egoschema/answers/${CKPT_NAME}/${CHUNKS}_${IDX}.csv \
+ --num-chunks $CHUNKS \
+ --chunk-idx $IDX &
+ done
+
+ wait
+
+ # Clear out the output file if it exists.
+ > "$output_file"
+
+ echo 'q_uid, answer' >> "$output_file"
+
+ # Loop through the indices and concatenate each file.
+ for IDX in $(seq 0 $((CHUNKS-1))); do
+ cat ${OUTPUT_DIR}/egoschema/answers/${CKPT_NAME}/${CHUNKS}_${IDX}.csv >> "$output_file"
+ done
+ fi
VideoLLaMA2/scripts/eval/eval_video_mcqa_mvbench.sh ADDED
@@ -0,0 +1,46 @@
+ set -x
+
+ EVAL_DATA_DIR=eval
+ OUTPUT_DIR=eval_output
+ CKPT=DAMO-NLP-SG/VideoLLaMA2-7B
+ CKPT_NAME=$(echo $CKPT | rev | cut -d'/' -f1 | rev)
+
+ gpu_list="${CUDA_VISIBLE_DEVICES:-0}"
+ IFS=',' read -ra GPULIST <<< "$gpu_list"
+
+ # divide data according to the number of GPUs per task
+ GPUS_PER_TASK=1
+ CHUNKS=$((${#GPULIST[@]}/$GPUS_PER_TASK))
+
+ output_file=${OUTPUT_DIR}/mvbench/answers/${CKPT_NAME}/merge.json
+
+ # If the merged file is missing or empty, remove stale per-chunk outputs.
+ if [ ! -f "$output_file" ] || [ $(cat "$output_file" | wc -l) -eq 0 ]; then
+ rm -f ${OUTPUT_DIR}/mvbench/answers/${CKPT_NAME}/*.json
+ fi
+
+ if [ ! -f "$output_file" ]; then
+ for IDX in $(seq 0 $((CHUNKS-1))); do
+ gpu_devices=$(IFS=,; echo "${GPULIST[*]:$(($IDX*$GPUS_PER_TASK)):$GPUS_PER_TASK}")
+ TRANSFORMERS_OFFLINE=1 CUDA_VISIBLE_DEVICES=${gpu_devices} python3 videollama2/eval/inference_video_mcqa_mvbench.py \
+ --model-path ${CKPT} \
+ --video-folder ${EVAL_DATA_DIR}/mvbench/video \
+ --question-file ${EVAL_DATA_DIR}/mvbench/json \
+ --answer-file ${OUTPUT_DIR}/mvbench/answers/${CKPT_NAME}/${CHUNKS}_${IDX}.json \
+ --num-chunks $CHUNKS \
+ --chunk-idx $IDX &
+ done
+
+ wait
+
+ # Clear out the output file if it exists.
+ > "$output_file"
+
+ # Loop through the indices and concatenate each file.
+ for IDX in $(seq 0 $((CHUNKS-1))); do
+ cat ${OUTPUT_DIR}/mvbench/answers/${CKPT_NAME}/${CHUNKS}_${IDX}.json >> "$output_file"
+ done
+ fi
+
+ python3 videollama2/eval/eval_video_mcqa_mvbench.py \
+ --pred_path ${output_file}
VideoLLaMA2/scripts/eval/eval_video_mcqa_perception_test_mcqa.sh ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ set -x
+
+ EVAL_DATA_DIR=eval
+ OUTPUT_DIR=eval_output
+ CKPT=DAMO-NLP-SG/VideoLLaMA2-7B
+ CKPT_NAME=$(echo $CKPT | rev | cut -d'/' -f1 | rev)
+
+ gpu_list="${CUDA_VISIBLE_DEVICES:-0}"
+ IFS=',' read -ra GPULIST <<< "$gpu_list"
+
+ # split the data into as many chunks as there are parallel tasks (GPUs / GPUs per task)
+ GPUS_PER_TASK=1
+ CHUNKS=$((${#GPULIST[@]}/$GPUS_PER_TASK))
+
+ output_file=${OUTPUT_DIR}/perception_test_mcqa/answers/${CKPT_NAME}/merge.json
+
+ if [ ! -f "$output_file" ]; then
+     for IDX in $(seq 0 $((CHUNKS-1))); do
+         # select the GPUs for this task
+         gpu_devices=$(IFS=,; echo "${GPULIST[*]:$(($IDX*$GPUS_PER_TASK)):$GPUS_PER_TASK}")
+         TRANSFORMERS_OFFLINE=1 CUDA_VISIBLE_DEVICES=${gpu_devices} python3 videollama2/eval/inference_video_mcqa_perception_test_mcqa.py \
+             --model-path ${CKPT} \
+             --video-folder ${EVAL_DATA_DIR}/perception_test_mcqa/videos \
+             --question-file ${EVAL_DATA_DIR}/perception_test_mcqa/mc_question_test.json \
+             --answer-file ${OUTPUT_DIR}/perception_test_mcqa/answers/${CKPT_NAME}/${CHUNKS}_${IDX}.json \
+             --num-chunks $CHUNKS \
+             --chunk-idx $IDX &
+     done
+
+     wait
+
+     # Clear out the output file if it exists.
+     > "$output_file"
+
+     echo "{" >> "$output_file"
+
+     # Loop through the indices and concatenate each file.
+     for IDX in $(seq 0 $((CHUNKS-1))); do
+         cat ${OUTPUT_DIR}/perception_test_mcqa/answers/${CKPT_NAME}/${CHUNKS}_${IDX}.json >> "$output_file"
+     done
+
+     # strip the trailing comma of the last fragment so the merged file is valid JSON
+     sed -i '$s/.$//' "$output_file"
+
+     echo "}" >> "$output_file"
+ fi
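Note on the JSON merge above: each worker writes `"key": value,` fragments, so the script wraps the concatenation in `{`/`}` (Video-MME below uses `[`/`]`), and `sed -i '$s/.$//'` deletes the last character of the last line, i.e. the trailing comma that would otherwise make the merged file invalid JSON.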
VideoLLaMA2/scripts/eval/eval_video_mcqa_videomme.sh ADDED
@@ -0,0 +1,84 @@
+ set -x
+
+ EVAL_DATA_DIR=eval
+ OUTPUT_DIR=eval_output
+ CKPT=DAMO-NLP-SG/VideoLLaMA2-7B-16F
+ CKPT_NAME=$(echo $CKPT | rev | cut -d'/' -f1 | rev)
+
+ gpu_list="${CUDA_VISIBLE_DEVICES:-0}"
+ IFS=',' read -ra GPULIST <<< "$gpu_list"
+
+ # split the data into as many chunks as there are parallel tasks (GPUs / GPUs per task)
+ GPUS_PER_TASK=1
+ CHUNKS=$((${#GPULIST[@]}/$GPUS_PER_TASK))
+
+ output_file=${OUTPUT_DIR}/videomme/answers/${CKPT_NAME}/merge.json
+ output_sub_file=${OUTPUT_DIR}/videomme/answers/${CKPT_NAME}/merge_sub.json
+
+ # if the merged file is missing or empty, clear out any stale partial results
+ if [ ! -f "$output_file" ] || [ $(cat "$output_file" | wc -l) -eq 0 ]; then
+     rm -f ${OUTPUT_DIR}/videomme/answers/${CKPT_NAME}/*.json
+ fi
+
+ if [ ! -f "$output_file" ]; then
+     for IDX in $(seq 0 $((CHUNKS-1))); do
+         # select the GPUs for this task
+         gpu_devices=$(IFS=,; echo "${GPULIST[*]:$(($IDX*$GPUS_PER_TASK)):$GPUS_PER_TASK}")
+         TRANSFORMERS_OFFLINE=1 CUDA_VISIBLE_DEVICES=${gpu_devices} python3 videollama2/eval/inference_video_mcqa_videomme.py \
+             --model-path ${CKPT} \
+             --video-folder ${EVAL_DATA_DIR}/videomme/videos \
+             --subtitle-folder ${EVAL_DATA_DIR}/videomme/subtitles \
+             --question-file ${EVAL_DATA_DIR}/videomme/test-00000-of-00001.parquet \
+             --answer-file ${OUTPUT_DIR}/videomme/answers/${CKPT_NAME}/${CHUNKS}_${IDX}.json \
+             --num-chunks $CHUNKS \
+             --chunk-idx $IDX &
+     done
+
+     wait
+
+     # Clear out the output file if it exists.
+     > "$output_file"
+
+     echo "[" >> "$output_file"
+
+     # Loop through the indices and concatenate each file.
+     for IDX in $(seq 0 $((CHUNKS-1))); do
+         cat ${OUTPUT_DIR}/videomme/answers/${CKPT_NAME}/${CHUNKS}_${IDX}.json >> "$output_file"
+     done
+
+     # strip the trailing comma of the last fragment so the merged file is valid JSON
+     sed -i '$s/.$//' "$output_file"
+
+     echo "]" >> "$output_file"
+
+     # Clear out the subtitle output file if it exists.
+     > "$output_sub_file"
+
+     echo "[" >> "$output_sub_file"
+
+     # Loop through the indices and concatenate each file.
+     for IDX in $(seq 0 $((CHUNKS-1))); do
+         cat ${OUTPUT_DIR}/videomme/answers/${CKPT_NAME}/${CHUNKS}_${IDX}_sub.json >> "$output_sub_file"
+     done
+
+     sed -i '$s/.$//' "$output_sub_file"
+
+     echo "]" >> "$output_sub_file"
+ fi
+
+ # scores without subtitles
+ python videollama2/eval/eval_video_mcqa_videomme.py \
+     --results_file $output_file \
+     --video_duration_type "short,medium,long" \
+     --return_categories_accuracy \
+     --return_sub_categories_accuracy \
+     --return_task_types_accuracy \
+     --skip_missing
+
+ # scores with subtitles
+ python videollama2/eval/eval_video_mcqa_videomme.py \
+     --results_file $output_sub_file \
+     --video_duration_type "short,medium,long" \
+     --return_categories_accuracy \
+     --return_sub_categories_accuracy \
+     --return_task_types_accuracy \
+     --skip_missing
VideoLLaMA2/scripts/eval/eval_video_oqa_vcgpt_1_correctness.sh ADDED
@@ -0,0 +1,58 @@
+ set -x
+
+ EVAL_DATA_DIR=eval
+ OUTPUT_DIR=eval_output
+ CKPT=DAMO-NLP-SG/VideoLLaMA2-7B
+ CKPT_NAME=$(echo $CKPT | rev | cut -d'/' -f1 | rev)
+
+ gpu_list="${CUDA_VISIBLE_DEVICES:-0}"
+ IFS=',' read -ra GPULIST <<< "$gpu_list"
+
+ # split the data into as many chunks as there are parallel tasks (GPUs / GPUs per task)
+ GPUS_PER_TASK=1
+ CHUNKS=$((${#GPULIST[@]}/$GPUS_PER_TASK))
+
+ output_file=${OUTPUT_DIR}/videochatgpt_gen/answers/correctness/${CKPT_NAME}/merge.json
+
+ if [ ! -f "$output_file" ]; then
+     for IDX in $(seq 0 $((CHUNKS-1))); do
+         # select the GPUs for this task
+         gpu_devices=$(IFS=,; echo "${GPULIST[*]:$(($IDX*$GPUS_PER_TASK)):$GPUS_PER_TASK}")
+         TRANSFORMERS_OFFLINE=1 CUDA_VISIBLE_DEVICES=${gpu_devices} python3 videollama2/eval/inference_video_oqa_vcgpt_general.py \
+             --model-path ${CKPT} \
+             --video-folder ${EVAL_DATA_DIR}/videochatgpt_gen/Test_Videos \
+             --question-file ${EVAL_DATA_DIR}/videochatgpt_gen/generic_qa.json \
+             --answer-file ${OUTPUT_DIR}/videochatgpt_gen/answers/correctness/${CKPT_NAME}/${CHUNKS}_${IDX}.json \
+             --num-chunks $CHUNKS \
+             --chunk-idx $IDX &
+     done
+
+     wait
+
+     # Clear out the output file if it exists.
+     > "$output_file"
+
+     # Loop through the indices and concatenate each file.
+     for IDX in $(seq 0 $((CHUNKS-1))); do
+         cat ${OUTPUT_DIR}/videochatgpt_gen/answers/correctness/${CKPT_NAME}/${CHUNKS}_${IDX}.json >> "$output_file"
+     done
+
+     # correctness, detail and context are scored on the same generic answers, so reuse the merge
+     mkdir -p ${OUTPUT_DIR}/videochatgpt_gen/answers/detail/${CKPT_NAME}
+     mkdir -p ${OUTPUT_DIR}/videochatgpt_gen/answers/context/${CKPT_NAME}
+     cp ${output_file} ${OUTPUT_DIR}/videochatgpt_gen/answers/detail/${CKPT_NAME}/merge.json
+     cp ${output_file} ${OUTPUT_DIR}/videochatgpt_gen/answers/context/${CKPT_NAME}/merge.json
+ fi
+
+
+ AZURE_API_KEY=your_key
+ AZURE_API_ENDPOINT=your_endpoint
+ AZURE_API_DEPLOYNAME=your_deployname
+
+ python3 videollama2/eval/eval_video_oqa_vcgpt_1_correctness.py \
+     --pred-path ${output_file} \
+     --output-dir ${OUTPUT_DIR}/videochatgpt_gen/answers/correctness/${CKPT_NAME}/gpt \
+     --output-json ${OUTPUT_DIR}/videochatgpt_gen/answers/correctness/${CKPT_NAME}/results.json \
+     --api-key $AZURE_API_KEY \
+     --api-endpoint $AZURE_API_ENDPOINT \
+     --api-deployname $AZURE_API_DEPLOYNAME \
+     --num-tasks 4
VideoLLaMA2/scripts/eval/eval_video_oqa_vcgpt_2_detail.sh ADDED
@@ -0,0 +1,58 @@
+ set -x
+
+ EVAL_DATA_DIR=eval
+ OUTPUT_DIR=eval_output
+ CKPT=DAMO-NLP-SG/VideoLLaMA2-7B
+ CKPT_NAME=$(echo $CKPT | rev | cut -d'/' -f1 | rev)
+
+ gpu_list="${CUDA_VISIBLE_DEVICES:-0}"
+ IFS=',' read -ra GPULIST <<< "$gpu_list"
+
+ # split the data into as many chunks as there are parallel tasks (GPUs / GPUs per task)
+ GPUS_PER_TASK=1
+ CHUNKS=$((${#GPULIST[@]}/$GPUS_PER_TASK))
+
+ output_file=${OUTPUT_DIR}/videochatgpt_gen/answers/detail/${CKPT_NAME}/merge.json
+
+ if [ ! -f "$output_file" ]; then
+     for IDX in $(seq 0 $((CHUNKS-1))); do
+         # select the GPUs for this task
+         gpu_devices=$(IFS=,; echo "${GPULIST[*]:$(($IDX*$GPUS_PER_TASK)):$GPUS_PER_TASK}")
+         TRANSFORMERS_OFFLINE=1 CUDA_VISIBLE_DEVICES=${gpu_devices} python3 videollama2/eval/inference_video_oqa_vcgpt_general.py \
+             --model-path ${CKPT} \
+             --video-folder ${EVAL_DATA_DIR}/videochatgpt_gen/Test_Videos \
+             --question-file ${EVAL_DATA_DIR}/videochatgpt_gen/generic_qa.json \
+             --answer-file ${OUTPUT_DIR}/videochatgpt_gen/answers/detail/${CKPT_NAME}/${CHUNKS}_${IDX}.json \
+             --num-chunks $CHUNKS \
+             --chunk-idx $IDX &
+     done
+
+     wait
+
+     # Clear out the output file if it exists.
+     > "$output_file"
+
+     # Loop through the indices and concatenate each file.
+     for IDX in $(seq 0 $((CHUNKS-1))); do
+         cat ${OUTPUT_DIR}/videochatgpt_gen/answers/detail/${CKPT_NAME}/${CHUNKS}_${IDX}.json >> "$output_file"
+     done
+
+     # correctness, detail and context are scored on the same generic answers, so reuse the merge
+     mkdir -p ${OUTPUT_DIR}/videochatgpt_gen/answers/correctness/${CKPT_NAME}
+     mkdir -p ${OUTPUT_DIR}/videochatgpt_gen/answers/context/${CKPT_NAME}
+     cp ${output_file} ${OUTPUT_DIR}/videochatgpt_gen/answers/correctness/${CKPT_NAME}/merge.json
+     cp ${output_file} ${OUTPUT_DIR}/videochatgpt_gen/answers/context/${CKPT_NAME}/merge.json
+ fi
+
+
+ AZURE_API_KEY=your_key
+ AZURE_API_ENDPOINT=your_endpoint
+ AZURE_API_DEPLOYNAME=your_deployname
+
+ python3 videollama2/eval/eval_video_oqa_vcgpt_2_detailed_orientation.py \
+     --pred-path ${output_file} \
+     --output-dir ${OUTPUT_DIR}/videochatgpt_gen/answers/detail/${CKPT_NAME}/gpt \
+     --output-json ${OUTPUT_DIR}/videochatgpt_gen/answers/detail/${CKPT_NAME}/results.json \
+     --api-key $AZURE_API_KEY \
+     --api-endpoint $AZURE_API_ENDPOINT \
+     --api-deployname $AZURE_API_DEPLOYNAME \
+     --num-tasks 4
VideoLLaMA2/scripts/eval/eval_video_oqa_vcgpt_3_context.sh ADDED
@@ -0,0 +1,58 @@
+ set -x
+
+ EVAL_DATA_DIR=eval
+ OUTPUT_DIR=eval_output
+ CKPT=DAMO-NLP-SG/VideoLLaMA2-7B
+ CKPT_NAME=$(echo $CKPT | rev | cut -d'/' -f1 | rev)
+
+ gpu_list="${CUDA_VISIBLE_DEVICES:-0}"
+ IFS=',' read -ra GPULIST <<< "$gpu_list"
+
+ # split the data into as many chunks as there are parallel tasks (GPUs / GPUs per task)
+ GPUS_PER_TASK=1
+ CHUNKS=$((${#GPULIST[@]}/$GPUS_PER_TASK))
+
+ output_file=${OUTPUT_DIR}/videochatgpt_gen/answers/context/${CKPT_NAME}/merge.json
+
+ if [ ! -f "$output_file" ]; then
+     for IDX in $(seq 0 $((CHUNKS-1))); do
+         # select the GPUs for this task
+         gpu_devices=$(IFS=,; echo "${GPULIST[*]:$(($IDX*$GPUS_PER_TASK)):$GPUS_PER_TASK}")
+         TRANSFORMERS_OFFLINE=1 CUDA_VISIBLE_DEVICES=${gpu_devices} python3 videollama2/eval/inference_video_oqa_vcgpt_general.py \
+             --model-path ${CKPT} \
+             --video-folder ${EVAL_DATA_DIR}/videochatgpt_gen/Test_Videos \
+             --question-file ${EVAL_DATA_DIR}/videochatgpt_gen/generic_qa.json \
+             --answer-file ${OUTPUT_DIR}/videochatgpt_gen/answers/context/${CKPT_NAME}/${CHUNKS}_${IDX}.json \
+             --num-chunks $CHUNKS \
+             --chunk-idx $IDX &
+     done
+
+     wait
+
+     # Clear out the output file if it exists.
+     > "$output_file"
+
+     # Loop through the indices and concatenate each file.
+     for IDX in $(seq 0 $((CHUNKS-1))); do
+         cat ${OUTPUT_DIR}/videochatgpt_gen/answers/context/${CKPT_NAME}/${CHUNKS}_${IDX}.json >> "$output_file"
+     done
+
+     # correctness, detail and context are scored on the same generic answers, so reuse the merge
+     mkdir -p ${OUTPUT_DIR}/videochatgpt_gen/answers/correctness/${CKPT_NAME}
+     mkdir -p ${OUTPUT_DIR}/videochatgpt_gen/answers/detail/${CKPT_NAME}
+     cp ${output_file} ${OUTPUT_DIR}/videochatgpt_gen/answers/correctness/${CKPT_NAME}/merge.json
+     cp ${output_file} ${OUTPUT_DIR}/videochatgpt_gen/answers/detail/${CKPT_NAME}/merge.json
+ fi
+
+
+ AZURE_API_KEY=your_key
+ AZURE_API_ENDPOINT=your_endpoint
+ AZURE_API_DEPLOYNAME=your_deployname
+
+ python3 videollama2/eval/eval_video_oqa_vcgpt_3_context.py \
+     --pred-path ${output_file} \
+     --output-dir ${OUTPUT_DIR}/videochatgpt_gen/answers/context/${CKPT_NAME}/gpt \
+     --output-json ${OUTPUT_DIR}/videochatgpt_gen/answers/context/${CKPT_NAME}/results.json \
+     --api-key $AZURE_API_KEY \
+     --api-endpoint $AZURE_API_ENDPOINT \
+     --api-deployname $AZURE_API_DEPLOYNAME \
+     --num-tasks 4
VideoLLaMA2/scripts/eval/eval_video_oqa_vcgpt_4_temporal.sh ADDED
@@ -0,0 +1,54 @@
+ set -x
+
+ EVAL_DATA_DIR=eval
+ OUTPUT_DIR=eval_output
+ CKPT=DAMO-NLP-SG/VideoLLaMA2-7B
+ CKPT_NAME=$(echo $CKPT | rev | cut -d'/' -f1 | rev)
+
+ gpu_list="${CUDA_VISIBLE_DEVICES:-0}"
+ IFS=',' read -ra GPULIST <<< "$gpu_list"
+
+ # split the data into as many chunks as there are parallel tasks (GPUs / GPUs per task)
+ GPUS_PER_TASK=1
+ CHUNKS=$((${#GPULIST[@]}/$GPUS_PER_TASK))
+
+ output_file=${OUTPUT_DIR}/videochatgpt_gen/answers/temporal/${CKPT_NAME}/merge.json
+
+ # run inference only if the merged output file does not exist yet
+ if [ ! -f "$output_file" ]; then
+     for IDX in $(seq 0 $((CHUNKS-1))); do
+         # select the GPUs for this task
+         gpu_devices=$(IFS=,; echo "${GPULIST[*]:$(($IDX*$GPUS_PER_TASK)):$GPUS_PER_TASK}")
+         TRANSFORMERS_OFFLINE=1 CUDA_VISIBLE_DEVICES=${gpu_devices} python3 videollama2/eval/inference_video_oqa_vcgpt_general.py \
+             --model-path ${CKPT} \
+             --video-folder ${EVAL_DATA_DIR}/videochatgpt_gen/Test_Videos \
+             --question-file ${EVAL_DATA_DIR}/videochatgpt_gen/temporal_qa.json \
+             --answer-file ${OUTPUT_DIR}/videochatgpt_gen/answers/temporal/${CKPT_NAME}/${CHUNKS}_${IDX}.json \
+             --num-chunks $CHUNKS \
+             --chunk-idx $IDX &
+     done
+
+     wait
+
+     # Clear out the output file if it exists.
+     > "$output_file"
+
+     # Loop through the indices and concatenate each file.
+     for IDX in $(seq 0 $((CHUNKS-1))); do
+         cat ${OUTPUT_DIR}/videochatgpt_gen/answers/temporal/${CKPT_NAME}/${CHUNKS}_${IDX}.json >> "$output_file"
+     done
+ fi
+
+
+ AZURE_API_KEY=your_key
+ AZURE_API_ENDPOINT=your_endpoint
+ AZURE_API_DEPLOYNAME=your_deployname
+
+ python3 videollama2/eval/eval_video_oqa_vcgpt_4_temporal.py \
+     --pred-path ${output_file} \
+     --output-dir ${OUTPUT_DIR}/videochatgpt_gen/answers/temporal/${CKPT_NAME}/gpt \
+     --output-json ${OUTPUT_DIR}/videochatgpt_gen/answers/temporal/${CKPT_NAME}/results.json \
+     --api-key $AZURE_API_KEY \
+     --api-endpoint $AZURE_API_ENDPOINT \
+     --api-deployname $AZURE_API_DEPLOYNAME \
+     --num-tasks 4
VideoLLaMA2/scripts/eval/eval_video_oqa_vcgpt_5_consistency.sh ADDED
@@ -0,0 +1,54 @@
+ set -x
+
+ EVAL_DATA_DIR=eval
+ OUTPUT_DIR=eval_output
+ CKPT=DAMO-NLP-SG/VideoLLaMA2-7B
+ CKPT_NAME=$(echo $CKPT | rev | cut -d'/' -f1 | rev)
+
+ gpu_list="${CUDA_VISIBLE_DEVICES:-0}"
+ IFS=',' read -ra GPULIST <<< "$gpu_list"
+
+ # split the data into as many chunks as there are parallel tasks (GPUs / GPUs per task)
+ GPUS_PER_TASK=1
+ CHUNKS=$((${#GPULIST[@]}/$GPUS_PER_TASK))
+
+ output_file=${OUTPUT_DIR}/videochatgpt_gen/answers/consistency/${CKPT_NAME}/merge.json
+
+ # run inference only if the merged output file does not exist yet
+ if [ ! -f "$output_file" ]; then
+     for IDX in $(seq 0 $((CHUNKS-1))); do
+         # select the GPUs for this task
+         gpu_devices=$(IFS=,; echo "${GPULIST[*]:$(($IDX*$GPUS_PER_TASK)):$GPUS_PER_TASK}")
+         TRANSFORMERS_OFFLINE=1 CUDA_VISIBLE_DEVICES=${gpu_devices} python3 videollama2/eval/inference_video_oqa_vcgpt_consistency.py \
+             --model-path ${CKPT} \
+             --video-folder ${EVAL_DATA_DIR}/videochatgpt_gen/Test_Videos \
+             --question-file ${EVAL_DATA_DIR}/videochatgpt_gen/consistency_qa.json \
+             --answer-file ${OUTPUT_DIR}/videochatgpt_gen/answers/consistency/${CKPT_NAME}/${CHUNKS}_${IDX}.json \
+             --num-chunks $CHUNKS \
+             --chunk-idx $IDX &
+     done
+
+     wait
+
+     # Clear out the output file if it exists.
+     > "$output_file"
+
+     # Loop through the indices and concatenate each file.
+     for IDX in $(seq 0 $((CHUNKS-1))); do
+         cat ${OUTPUT_DIR}/videochatgpt_gen/answers/consistency/${CKPT_NAME}/${CHUNKS}_${IDX}.json >> "$output_file"
+     done
+ fi
+
+
+ AZURE_API_KEY=your_key
+ AZURE_API_ENDPOINT=your_endpoint
+ AZURE_API_DEPLOYNAME=your_deployname
+
+ python3 videollama2/eval/eval_video_oqa_vcgpt_5_consistency.py \
+     --pred-path ${output_file} \
+     --output-dir ${OUTPUT_DIR}/videochatgpt_gen/answers/consistency/${CKPT_NAME}/gpt \
+     --output-json ${OUTPUT_DIR}/videochatgpt_gen/answers/consistency/${CKPT_NAME}/results.json \
+     --api-key $AZURE_API_KEY \
+     --api-endpoint $AZURE_API_ENDPOINT \
+     --api-deployname $AZURE_API_DEPLOYNAME \
+     --num-tasks 4
VideoLLaMA2/scripts/eval/eval_video_oqa_vcgpt_activitynet.sh ADDED
@@ -0,0 +1,54 @@
+ set -x
+
+ EVAL_DATA_DIR=eval
+ OUTPUT_DIR=eval_output
+ CKPT=DAMO-NLP-SG/VideoLLaMA2-7B
+ CKPT_NAME=$(echo $CKPT | rev | cut -d'/' -f1 | rev)
+
+ gpu_list="${CUDA_VISIBLE_DEVICES:-0}"
+ IFS=',' read -ra GPULIST <<< "$gpu_list"
+
+ # split the data into as many chunks as there are parallel tasks (GPUs / GPUs per task)
+ GPUS_PER_TASK=1
+ CHUNKS=$((${#GPULIST[@]}/$GPUS_PER_TASK))
+
+ output_file=${OUTPUT_DIR}/Activitynet_Zero_Shot_QA/answers/${CKPT_NAME}/merge.json
+
+ if [ ! -f "$output_file" ]; then
+     for IDX in $(seq 0 $((CHUNKS-1))); do
+         # select the GPUs for this task
+         gpu_devices=$(IFS=,; echo "${GPULIST[*]:$(($IDX*$GPUS_PER_TASK)):$GPUS_PER_TASK}")
+         TRANSFORMERS_OFFLINE=1 CUDA_VISIBLE_DEVICES=${gpu_devices} python3 videollama2/eval/inference_video_oqa_activitynet.py \
+             --model-path ${CKPT} \
+             --video-folder ${EVAL_DATA_DIR}/Activitynet_Zero_Shot_QA/all_test \
+             --question-file ${EVAL_DATA_DIR}/Activitynet_Zero_Shot_QA/test_q.json \
+             --answer-file ${EVAL_DATA_DIR}/Activitynet_Zero_Shot_QA/test_a.json \
+             --output-file ${OUTPUT_DIR}/Activitynet_Zero_Shot_QA/answers/${CKPT_NAME}/${CHUNKS}_${IDX}.json \
+             --num-chunks $CHUNKS \
+             --chunk-idx $IDX &
+     done
+
+     wait
+
+     # Clear out the output file if it exists.
+     > "$output_file"
+
+     # Loop through the indices and concatenate each file.
+     for IDX in $(seq 0 $((CHUNKS-1))); do
+         cat ${OUTPUT_DIR}/Activitynet_Zero_Shot_QA/answers/${CKPT_NAME}/${CHUNKS}_${IDX}.json >> "$output_file"
+     done
+ fi
+
+
+ AZURE_API_KEY=your_key
+ AZURE_API_ENDPOINT=your_endpoint
+ AZURE_API_DEPLOYNAME=your_deployname
+
+ python3 videollama2/eval/eval_video_oqa_activitynet.py \
+     --pred-path ${output_file} \
+     --output-dir ${OUTPUT_DIR}/Activitynet_Zero_Shot_QA/answers/${CKPT_NAME}/gpt \
+     --output-json ${OUTPUT_DIR}/Activitynet_Zero_Shot_QA/answers/${CKPT_NAME}/results.json \
+     --api-key $AZURE_API_KEY \
+     --api-endpoint $AZURE_API_ENDPOINT \
+     --api-deployname $AZURE_API_DEPLOYNAME \
+     --num-tasks 4
VideoLLaMA2/scripts/eval/eval_video_oqa_vcgpt_msvd.sh ADDED
@@ -0,0 +1,54 @@
+ set -x
+
+ EVAL_DATA_DIR=eval
+ OUTPUT_DIR=eval_output
+ CKPT=DAMO-NLP-SG/VideoLLaMA2-7B
+ CKPT_NAME=$(echo $CKPT | rev | cut -d'/' -f1 | rev)
+
+ gpu_list="${CUDA_VISIBLE_DEVICES:-0}"
+ IFS=',' read -ra GPULIST <<< "$gpu_list"
+
+ # split the data into as many chunks as there are parallel tasks (GPUs / GPUs per task)
+ GPUS_PER_TASK=1
+ CHUNKS=$((${#GPULIST[@]}/$GPUS_PER_TASK))
+
+ output_file=${OUTPUT_DIR}/MSVD_Zero_Shot_QA/answers/${CKPT_NAME}/merge.json
+
+ if [ ! -f "$output_file" ]; then
+     for IDX in $(seq 0 $((CHUNKS-1))); do
+         # select the GPUs for this task
+         gpu_devices=$(IFS=,; echo "${GPULIST[*]:$(($IDX*$GPUS_PER_TASK)):$GPUS_PER_TASK}")
+         TRANSFORMERS_OFFLINE=1 CUDA_VISIBLE_DEVICES=${gpu_devices} python3 videollama2/eval/inference_video_oqa_activitynet.py \
+             --model-path ${CKPT} \
+             --video-folder ${EVAL_DATA_DIR}/MSVD_Zero_Shot_QA/videos \
+             --question-file ${EVAL_DATA_DIR}/MSVD_Zero_Shot_QA/test_q.json \
+             --answer-file ${EVAL_DATA_DIR}/MSVD_Zero_Shot_QA/test_a.json \
+             --output-file ${OUTPUT_DIR}/MSVD_Zero_Shot_QA/answers/${CKPT_NAME}/${CHUNKS}_${IDX}.json \
+             --num-chunks $CHUNKS \
+             --chunk-idx $IDX &
+     done
+
+     wait
+
+     # Clear out the output file if it exists.
+     > "$output_file"
+
+     # Loop through the indices and concatenate each file.
+     for IDX in $(seq 0 $((CHUNKS-1))); do
+         cat ${OUTPUT_DIR}/MSVD_Zero_Shot_QA/answers/${CKPT_NAME}/${CHUNKS}_${IDX}.json >> "$output_file"
+     done
+ fi
+
+
+ AZURE_API_KEY=your_key
+ AZURE_API_ENDPOINT=your_endpoint
+ AZURE_API_DEPLOYNAME=your_deployname
+
+ python3 videollama2/eval/eval_video_oqa_activitynet.py \
+     --pred-path ${output_file} \
+     --output-dir ${OUTPUT_DIR}/MSVD_Zero_Shot_QA/answers/${CKPT_NAME}/gpt \
+     --output-json ${OUTPUT_DIR}/MSVD_Zero_Shot_QA/answers/${CKPT_NAME}/results.json \
+     --api-key $AZURE_API_KEY \
+     --api-endpoint $AZURE_API_ENDPOINT \
+     --api-deployname $AZURE_API_DEPLOYNAME \
+     --num-tasks 4
VideoLLaMA2/scripts/siglip/finetune_gemma2.sh ADDED
@@ -0,0 +1,75 @@
+ #!/bin/bash
+
+ # Environment Variables
+ ARG_WORLD_SIZE=${1:-1}
+ ARG_NPROC_PER_NODE=${2:-8}
+ ARG_MASTER_ADDR="127.0.0.1"
+ ARG_MASTER_PORT=16667
+ ARG_RANK=0
+
+ # Fall back to the defaults above when the launcher does not set these
+ if [ -z "$WORLD_SIZE" ] || [ -z "$NPROC_PER_NODE" ]; then
+     WORLD_SIZE=$ARG_WORLD_SIZE
+     NPROC_PER_NODE=$ARG_NPROC_PER_NODE
+ fi
+ if [ -z "$MASTER_ADDR" ] || [ -z "$MASTER_PORT" ] || [ -z "$RANK" ]; then
+     MASTER_ADDR=$ARG_MASTER_ADDR
+     MASTER_PORT=$ARG_MASTER_PORT
+     RANK=$ARG_RANK
+ fi
+
+ echo "WORLD_SIZE: $WORLD_SIZE"
+ echo "NPROC_PER_NODE: $NPROC_PER_NODE"
+
+ # Training Arguments
+ GLOBAL_BATCH_SIZE=128
+ LOCAL_BATCH_SIZE=4
+ GRADIENT_ACCUMULATION_STEPS=$((GLOBAL_BATCH_SIZE / (WORLD_SIZE * NPROC_PER_NODE * LOCAL_BATCH_SIZE)))
+ echo "GRADIENT_ACCUMULATION_STEPS: $GRADIENT_ACCUMULATION_STEPS"
+
+ # Log Arguments
+ export TRANSFORMERS_OFFLINE=1
+ export WANDB_PROJECT=videollama2gemma2_siglip
+ RUN_NAME=vllava_settings
+ DATA_DIR=datasets
+ OUTP_DIR=work_dirs
+
+ torchrun --nnodes $WORLD_SIZE \
+     --nproc_per_node $NPROC_PER_NODE \
+     --master_addr=$MASTER_ADDR \
+     --master_port=$MASTER_PORT \
+     --node_rank $RANK \
+     videollama2/train_flash_attn.py \
+     --deepspeed scripts/zero3.json \
+     --model_type videollama2_gemma2 \
+     --model_path google/gemma-2-2b-it \
+     --vision_tower google/siglip-so400m-patch14-384 \
+     --mm_projector_type stc_connector_v35 \
+     --pretrain_mm_mlp_adapter ${OUTP_DIR}/${WANDB_PROJECT}/pretrain_${RUN_NAME}/mm_projector.bin \
+     --data_path ${DATA_DIR}/videollava_sft/videochatgpt_llavaimage_tune.json \
+     --data_folder ${DATA_DIR}/videollava_sft/ \
+     --mm_vision_select_layer -2 \
+     --image_aspect_ratio pad \
+     --num_frames 8 \
+     --bf16 True \
+     --tf32 True \
+     --fp16 False \
+     --output_dir ${OUTP_DIR}/${WANDB_PROJECT}/finetune_${RUN_NAME} \
+     --num_train_epochs 3 \
+     --per_device_train_batch_size $LOCAL_BATCH_SIZE \
+     --per_device_eval_batch_size 4 \
+     --gradient_accumulation_steps $GRADIENT_ACCUMULATION_STEPS \
+     --evaluation_strategy "no" \
+     --save_strategy "steps" \
+     --save_steps 200 \
+     --save_total_limit 99 \
+     --learning_rate 2e-5 \
+     --weight_decay 0. \
+     --warmup_ratio 0.03 \
+     --lr_scheduler_type "cosine" \
+     --logging_steps 1 \
+     --model_max_length 2048 \
+     --gradient_checkpointing True \
+     --dataloader_num_workers 4 \
+     --report_to tensorboard \
+     --run_name finetune_$RUN_NAME
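The gradient-accumulation arithmetic in these launch scripts keeps the effective global batch size fixed regardless of topology. With the defaults here (GLOBAL_BATCH_SIZE=128, one node, 8 processes, per-device batch 4), it resolves to 128 / (1 × 8 × 4) = 4 accumulation steps; a quick sanity check:

```python
GLOBAL_BATCH_SIZE = 128
WORLD_SIZE, NPROC_PER_NODE, LOCAL_BATCH_SIZE = 1, 8, 4

steps = GLOBAL_BATCH_SIZE // (WORLD_SIZE * NPROC_PER_NODE * LOCAL_BATCH_SIZE)
# each optimizer step then consumes steps * world * nproc * local = 128 samples
assert steps * WORLD_SIZE * NPROC_PER_NODE * LOCAL_BATCH_SIZE == GLOBAL_BATCH_SIZE
print(steps)  # 4
```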
VideoLLaMA2/scripts/siglip/finetune_mistral.sh ADDED
@@ -0,0 +1,75 @@
+ #!/bin/bash
+
+ # Environment Variables
+ ARG_WORLD_SIZE=${1:-1}
+ ARG_NPROC_PER_NODE=${2:-8}
+ ARG_MASTER_ADDR="127.0.0.1"
+ ARG_MASTER_PORT=16667
+ ARG_RANK=0
+
+ # Fall back to the defaults above when the launcher does not set these
+ if [ -z "$WORLD_SIZE" ] || [ -z "$NPROC_PER_NODE" ]; then
+     WORLD_SIZE=$ARG_WORLD_SIZE
+     NPROC_PER_NODE=$ARG_NPROC_PER_NODE
+ fi
+ if [ -z "$MASTER_ADDR" ] || [ -z "$MASTER_PORT" ] || [ -z "$RANK" ]; then
+     MASTER_ADDR=$ARG_MASTER_ADDR
+     MASTER_PORT=$ARG_MASTER_PORT
+     RANK=$ARG_RANK
+ fi
+
+ echo "WORLD_SIZE: $WORLD_SIZE"
+ echo "NPROC_PER_NODE: $NPROC_PER_NODE"
+
+ # Training Arguments
+ GLOBAL_BATCH_SIZE=128
+ LOCAL_BATCH_SIZE=4
+ GRADIENT_ACCUMULATION_STEPS=$((GLOBAL_BATCH_SIZE / (WORLD_SIZE * NPROC_PER_NODE * LOCAL_BATCH_SIZE)))
+ echo "GRADIENT_ACCUMULATION_STEPS: $GRADIENT_ACCUMULATION_STEPS"
+
+ # Log Arguments
+ export TRANSFORMERS_OFFLINE=1
+ export WANDB_PROJECT=videollama2mistral_siglip
+ RUN_NAME=vllava_settings
+ DATA_DIR=datasets
+ OUTP_DIR=work_dirs
+
+ torchrun --nnodes $WORLD_SIZE \
+     --nproc_per_node $NPROC_PER_NODE \
+     --master_addr=$MASTER_ADDR \
+     --master_port=$MASTER_PORT \
+     --node_rank $RANK \
+     videollama2/train_flash_attn.py \
+     --deepspeed scripts/zero3.json \
+     --model_type videollama2 \
+     --model_path mistralai/Mistral-7B-Instruct-v0.2 \
+     --vision_tower google/siglip-so400m-patch14-384 \
+     --mm_projector_type stc_connector_v35 \
+     --pretrain_mm_mlp_adapter ${OUTP_DIR}/${WANDB_PROJECT}/pretrain_${RUN_NAME}/mm_projector.bin \
+     --data_path ${DATA_DIR}/videollava_sft/videochatgpt_llavaimage_tune.json \
+     --data_folder ${DATA_DIR}/videollava_sft/ \
+     --mm_vision_select_layer -2 \
+     --image_aspect_ratio pad \
+     --num_frames 8 \
+     --bf16 True \
+     --tf32 True \
+     --fp16 False \
+     --output_dir ${OUTP_DIR}/${WANDB_PROJECT}/finetune_${RUN_NAME} \
+     --num_train_epochs 3 \
+     --per_device_train_batch_size $LOCAL_BATCH_SIZE \
+     --per_device_eval_batch_size 4 \
+     --gradient_accumulation_steps $GRADIENT_ACCUMULATION_STEPS \
+     --evaluation_strategy "no" \
+     --save_strategy "steps" \
+     --save_steps 200 \
+     --save_total_limit 99 \
+     --learning_rate 2e-5 \
+     --weight_decay 0. \
+     --warmup_ratio 0.03 \
+     --lr_scheduler_type "cosine" \
+     --logging_steps 1 \
+     --model_max_length 2048 \
+     --gradient_checkpointing True \
+     --dataloader_num_workers 4 \
+     --report_to wandb \
+     --run_name finetune_$RUN_NAME
VideoLLaMA2/scripts/siglip/finetune_phi3.sh ADDED
@@ -0,0 +1,75 @@
+ #!/bin/bash
+
+ # Environment Variables
+ ARG_WORLD_SIZE=${1:-1}
+ ARG_NPROC_PER_NODE=${2:-8}
+ ARG_MASTER_ADDR="127.0.0.1"
+ ARG_MASTER_PORT=16667
+ ARG_RANK=0
+
+ # Fall back to the defaults above when the launcher does not set these
+ if [ -z "$WORLD_SIZE" ] || [ -z "$NPROC_PER_NODE" ]; then
+     WORLD_SIZE=$ARG_WORLD_SIZE
+     NPROC_PER_NODE=$ARG_NPROC_PER_NODE
+ fi
+ if [ -z "$MASTER_ADDR" ] || [ -z "$MASTER_PORT" ] || [ -z "$RANK" ]; then
+     MASTER_ADDR=$ARG_MASTER_ADDR
+     MASTER_PORT=$ARG_MASTER_PORT
+     RANK=$ARG_RANK
+ fi
+
+ echo "WORLD_SIZE: $WORLD_SIZE"
+ echo "NPROC_PER_NODE: $NPROC_PER_NODE"
+
+ # Training Arguments
+ GLOBAL_BATCH_SIZE=128
+ LOCAL_BATCH_SIZE=4
+ GRADIENT_ACCUMULATION_STEPS=$((GLOBAL_BATCH_SIZE / (WORLD_SIZE * NPROC_PER_NODE * LOCAL_BATCH_SIZE)))
+ echo "GRADIENT_ACCUMULATION_STEPS: $GRADIENT_ACCUMULATION_STEPS"
+
+ # Log Arguments
+ export TRANSFORMERS_OFFLINE=1
+ export WANDB_PROJECT=videollama2phi3_siglip
+ RUN_NAME=vllava_settings
+ DATA_DIR=datasets
+ OUTP_DIR=work_dirs
+
+ torchrun --nnodes $WORLD_SIZE \
+     --nproc_per_node $NPROC_PER_NODE \
+     --master_addr=$MASTER_ADDR \
+     --master_port=$MASTER_PORT \
+     --node_rank $RANK \
+     videollama2/train_flash_attn.py \
+     --deepspeed scripts/zero3.json \
+     --model_type videollama2_phi3 \
+     --model_path microsoft/Phi-3-mini-4k-instruct \
+     --vision_tower google/siglip-so400m-patch14-384 \
+     --mm_projector_type stc_connector_v35 \
+     --pretrain_mm_mlp_adapter ${OUTP_DIR}/${WANDB_PROJECT}/pretrain_${RUN_NAME}/mm_projector.bin \
+     --data_path ${DATA_DIR}/videollava_sft/videochatgpt_llavaimage_tune.json \
+     --data_folder ${DATA_DIR}/videollava_sft/ \
+     --mm_vision_select_layer -2 \
+     --image_aspect_ratio pad \
+     --num_frames 8 \
+     --bf16 True \
+     --tf32 True \
+     --fp16 False \
+     --output_dir ${OUTP_DIR}/${WANDB_PROJECT}/finetune_${RUN_NAME} \
+     --num_train_epochs 3 \
+     --per_device_train_batch_size $LOCAL_BATCH_SIZE \
+     --per_device_eval_batch_size 4 \
+     --gradient_accumulation_steps $GRADIENT_ACCUMULATION_STEPS \
+     --evaluation_strategy "no" \
+     --save_strategy "steps" \
+     --save_steps 200 \
+     --save_total_limit 99 \
+     --learning_rate 2e-5 \
+     --weight_decay 0. \
+     --warmup_ratio 0.03 \
+     --lr_scheduler_type "cosine" \
+     --logging_steps 1 \
+     --model_max_length 2048 \
+     --gradient_checkpointing True \
+     --dataloader_num_workers 4 \
+     --report_to tensorboard \
+     --run_name finetune_$RUN_NAME
VideoLLaMA2/scripts/siglip/finetune_qwen2.sh ADDED
@@ -0,0 +1,75 @@
+ #!/bin/bash
+
+ # Environment Variables
+ ARG_WORLD_SIZE=${1:-1}
+ ARG_NPROC_PER_NODE=${2:-8}
+ ARG_MASTER_ADDR="127.0.0.1"
+ ARG_MASTER_PORT=16666
+ ARG_RANK=0
+
+ # Fall back to the defaults above when the launcher does not set these
+ if [ -z "$WORLD_SIZE" ] || [ -z "$NPROC_PER_NODE" ]; then
+     WORLD_SIZE=$ARG_WORLD_SIZE
+     NPROC_PER_NODE=$ARG_NPROC_PER_NODE
+ fi
+ if [ -z "$MASTER_ADDR" ] || [ -z "$MASTER_PORT" ] || [ -z "$RANK" ]; then
+     MASTER_ADDR=$ARG_MASTER_ADDR
+     MASTER_PORT=$ARG_MASTER_PORT
+     RANK=$ARG_RANK
+ fi
+
+ echo "WORLD_SIZE: $WORLD_SIZE"
+ echo "NPROC_PER_NODE: $NPROC_PER_NODE"
+
+ # Training Arguments
+ GLOBAL_BATCH_SIZE=128
+ LOCAL_BATCH_SIZE=4
+ GRADIENT_ACCUMULATION_STEPS=$((GLOBAL_BATCH_SIZE / (WORLD_SIZE * NPROC_PER_NODE * LOCAL_BATCH_SIZE)))
+ echo "GRADIENT_ACCUMULATION_STEPS: $GRADIENT_ACCUMULATION_STEPS"
+
+ # Log Arguments
+ export TRANSFORMERS_OFFLINE=1
+ export WANDB_PROJECT=videollama2qwen2_siglip
+ RUN_NAME=vllava_settings
+ DATA_DIR=datasets
+ OUTP_DIR=work_dirs
+
+ torchrun --nnodes $WORLD_SIZE \
+     --nproc_per_node $NPROC_PER_NODE \
+     --master_addr=$MASTER_ADDR \
+     --master_port=$MASTER_PORT \
+     --node_rank $RANK \
+     videollama2/train_flash_attn.py \
+     --deepspeed scripts/zero3.json \
+     --model_type videollama2_qwen2 \
+     --model_path Qwen/Qwen2-7B-Instruct \
+     --vision_tower google/siglip-so400m-patch14-384 \
+     --mm_projector_type stc_connector_v35 \
+     --pretrain_mm_mlp_adapter ${OUTP_DIR}/${WANDB_PROJECT}/pretrain_${RUN_NAME}/mm_projector.bin \
+     --data_path ${DATA_DIR}/videollava_sft/videochatgpt_llavaimage_tune.json \
+     --data_folder ${DATA_DIR}/videollava_sft/ \
+     --mm_vision_select_layer -2 \
+     --image_aspect_ratio pad \
+     --num_frames 8 \
+     --bf16 True \
+     --tf32 True \
+     --fp16 False \
+     --output_dir ${OUTP_DIR}/${WANDB_PROJECT}/finetune_${RUN_NAME} \
+     --num_train_epochs 1 \
+     --per_device_train_batch_size $LOCAL_BATCH_SIZE \
+     --per_device_eval_batch_size 4 \
+     --gradient_accumulation_steps $GRADIENT_ACCUMULATION_STEPS \
+     --evaluation_strategy "no" \
+     --save_strategy "steps" \
+     --save_steps 500 \
+     --save_total_limit 99 \
+     --learning_rate 2e-5 \
+     --weight_decay 0. \
+     --warmup_ratio 0.03 \
+     --lr_scheduler_type "cosine" \
+     --logging_steps 1 \
+     --model_max_length 2048 \
+     --gradient_checkpointing True \
+     --dataloader_num_workers 4 \
+     --report_to tensorboard \
+     --run_name $RUN_NAME
VideoLLaMA2/scripts/siglip/pretrain_gemma2.sh ADDED
@@ -0,0 +1,75 @@
+ #!/bin/bash
+
+ # Environment Variables
+ ARG_WORLD_SIZE=${1:-1}
+ ARG_NPROC_PER_NODE=${2:-8}
+ ARG_MASTER_ADDR="127.0.0.1"
+ ARG_MASTER_PORT=16666
+ ARG_RANK=0
+
+ # Fall back to the defaults above when the launcher does not set these
+ if [ -z "$WORLD_SIZE" ] || [ -z "$NPROC_PER_NODE" ]; then
+     WORLD_SIZE=$ARG_WORLD_SIZE
+     NPROC_PER_NODE=$ARG_NPROC_PER_NODE
+ fi
+ if [ -z "$MASTER_ADDR" ] || [ -z "$MASTER_PORT" ] || [ -z "$RANK" ]; then
+     MASTER_ADDR=$ARG_MASTER_ADDR
+     MASTER_PORT=$ARG_MASTER_PORT
+     RANK=$ARG_RANK
+ fi
+
+ echo "WORLD_SIZE: $WORLD_SIZE"
+ echo "NPROC_PER_NODE: $NPROC_PER_NODE"
+
+ # Training Arguments
+ GLOBAL_BATCH_SIZE=256
+ LOCAL_BATCH_SIZE=4
+ GRADIENT_ACCUMULATION_STEPS=$((GLOBAL_BATCH_SIZE / (WORLD_SIZE * NPROC_PER_NODE * LOCAL_BATCH_SIZE)))
+ echo "GRADIENT_ACCUMULATION_STEPS: $GRADIENT_ACCUMULATION_STEPS"
+
+ # Log Arguments
+ export TRANSFORMERS_OFFLINE=1
+ export WANDB_PROJECT=videollama2gemma2_siglip
+ RUN_NAME=vllava_settings
+ DATA_DIR=datasets
+ OUTP_DIR=work_dirs
+
+ torchrun --nnodes $WORLD_SIZE \
+     --nproc_per_node $NPROC_PER_NODE \
+     --master_addr=$MASTER_ADDR \
+     --master_port=$MASTER_PORT \
+     --node_rank $RANK \
+     videollama2/train_flash_attn.py \
+     --deepspeed scripts/zero3.json \
+     --model_type videollama2_gemma2 \
+     --model_path google/gemma-2-2b-it \
+     --vision_tower google/siglip-so400m-patch14-384 \
+     --mm_projector_type stc_connector_v35 \
+     --tune_mm_mlp_adapter True \
+     --data_path ${DATA_DIR}/videollava_pt/valley_llavaimage.json \
+     --data_folder ${DATA_DIR}/videollava_pt/ \
+     --mm_vision_select_layer -2 \
+     --num_frames 8 \
+     --bf16 True \
+     --tf32 True \
+     --fp16 False \
+     --output_dir ${OUTP_DIR}/${WANDB_PROJECT}/pretrain_${RUN_NAME} \
+     --num_train_epochs 1 \
+     --per_device_train_batch_size $LOCAL_BATCH_SIZE \
+     --per_device_eval_batch_size 4 \
+     --gradient_accumulation_steps $GRADIENT_ACCUMULATION_STEPS \
+     --evaluation_strategy "no" \
+     --save_strategy "steps" \
+     --save_steps 500 \
+     --save_total_limit 99 \
+     --learning_rate 1e-3 \
+     --weight_decay 0. \
+     --warmup_ratio 0.03 \
+     --lr_scheduler_type "cosine" \
+     --logging_steps 1 \
+     --model_max_length 2048 \
+     --gradient_checkpointing True \
+     --dataloader_num_workers 4 \
+     --lazy_preprocess True \
+     --report_to tensorboard \
+     --run_name pretrain_$RUN_NAME
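As the flags above make explicit, the two stages differ mainly in what is trained: pretraining updates only the projector (`--tune_mm_mlp_adapter True`, lr 1e-3, on the `valley_llavaimage` mix), while the finetune scripts load that projector through `--pretrain_mm_mlp_adapter .../mm_projector.bin` and tune the full model at lr 2e-5 on the SFT mix.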
VideoLLaMA2/scripts/siglip/pretrain_mistral.sh ADDED
@@ -0,0 +1,75 @@
+ #!/bin/bash
+
+ # Environment Variables
+ ARG_WORLD_SIZE=${1:-1}
+ ARG_NPROC_PER_NODE=${2:-8}
+ ARG_MASTER_ADDR="127.0.0.1"
+ ARG_MASTER_PORT=16666
+ ARG_RANK=0
+
+ # Fall back to the defaults above when the launcher does not set these
+ if [ -z "$WORLD_SIZE" ] || [ -z "$NPROC_PER_NODE" ]; then
+     WORLD_SIZE=$ARG_WORLD_SIZE
+     NPROC_PER_NODE=$ARG_NPROC_PER_NODE
+ fi
+ if [ -z "$MASTER_ADDR" ] || [ -z "$MASTER_PORT" ] || [ -z "$RANK" ]; then
+     MASTER_ADDR=$ARG_MASTER_ADDR
+     MASTER_PORT=$ARG_MASTER_PORT
+     RANK=$ARG_RANK
+ fi
+
+ echo "WORLD_SIZE: $WORLD_SIZE"
+ echo "NPROC_PER_NODE: $NPROC_PER_NODE"
+
+ # Training Arguments
+ GLOBAL_BATCH_SIZE=256
+ LOCAL_BATCH_SIZE=8
+ GRADIENT_ACCUMULATION_STEPS=$((GLOBAL_BATCH_SIZE / (WORLD_SIZE * NPROC_PER_NODE * LOCAL_BATCH_SIZE)))
+ echo "GRADIENT_ACCUMULATION_STEPS: $GRADIENT_ACCUMULATION_STEPS"
+
+ # Log Arguments
+ export TRANSFORMERS_OFFLINE=1
+ export WANDB_PROJECT=videollama2mistral_siglip
+ RUN_NAME=vllava_settings
+ DATA_DIR=datasets
+ OUTP_DIR=work_dirs
+
+ torchrun --nnodes $WORLD_SIZE \
+     --nproc_per_node $NPROC_PER_NODE \
+     --master_addr=$MASTER_ADDR \
+     --master_port=$MASTER_PORT \
+     --node_rank $RANK \
+     videollama2/train_flash_attn.py \
+     --deepspeed scripts/zero3.json \
+     --model_type videollama2 \
+     --model_path mistralai/Mistral-7B-Instruct-v0.2 \
+     --vision_tower google/siglip-so400m-patch14-384 \
+     --mm_projector_type stc_connector_v35 \
+     --tune_mm_mlp_adapter True \
+     --data_path ${DATA_DIR}/videollava_pt/valley_llavaimage.json \
+     --data_folder ${DATA_DIR}/videollava_pt/ \
+     --mm_vision_select_layer -2 \
+     --num_frames 8 \
+     --bf16 True \
+     --tf32 True \
+     --fp16 False \
+     --output_dir ${OUTP_DIR}/${WANDB_PROJECT}/pretrain_${RUN_NAME} \
+     --num_train_epochs 1 \
+     --per_device_train_batch_size $LOCAL_BATCH_SIZE \
+     --per_device_eval_batch_size 4 \
+     --gradient_accumulation_steps $GRADIENT_ACCUMULATION_STEPS \
+     --evaluation_strategy "no" \
+     --save_strategy "steps" \
+     --save_steps 500 \
+     --save_total_limit 99 \
+     --learning_rate 1e-3 \
+     --weight_decay 0. \
+     --warmup_ratio 0.03 \
+     --lr_scheduler_type "cosine" \
+     --logging_steps 1 \
+     --model_max_length 2048 \
+     --gradient_checkpointing True \
+     --dataloader_num_workers 16 \
+     --lazy_preprocess True \
+     --report_to tensorboard \
+     --run_name pretrain_$RUN_NAME
VideoLLaMA2/scripts/siglip/pretrain_phi3.sh ADDED
@@ -0,0 +1,75 @@
+ #!/bin/bash
+
+ # Environment Variables
+ ARG_WORLD_SIZE=${1:-1}
+ ARG_NPROC_PER_NODE=${2:-8}
+ ARG_MASTER_ADDR="127.0.0.1"
+ ARG_MASTER_PORT=16666
+ ARG_RANK=0
+
+ # Fall back to the defaults above when the launcher does not set these
+ if [ -z "$WORLD_SIZE" ] || [ -z "$NPROC_PER_NODE" ]; then
+     WORLD_SIZE=$ARG_WORLD_SIZE
+     NPROC_PER_NODE=$ARG_NPROC_PER_NODE
+ fi
+ if [ -z "$MASTER_ADDR" ] || [ -z "$MASTER_PORT" ] || [ -z "$RANK" ]; then
+     MASTER_ADDR=$ARG_MASTER_ADDR
+     MASTER_PORT=$ARG_MASTER_PORT
+     RANK=$ARG_RANK
+ fi
+
+ echo "WORLD_SIZE: $WORLD_SIZE"
+ echo "NPROC_PER_NODE: $NPROC_PER_NODE"
+
+ # Training Arguments
+ GLOBAL_BATCH_SIZE=256
+ LOCAL_BATCH_SIZE=8
+ GRADIENT_ACCUMULATION_STEPS=$((GLOBAL_BATCH_SIZE / (WORLD_SIZE * NPROC_PER_NODE * LOCAL_BATCH_SIZE)))
+ echo "GRADIENT_ACCUMULATION_STEPS: $GRADIENT_ACCUMULATION_STEPS"
+
+ # Log Arguments
+ export TRANSFORMERS_OFFLINE=1
+ export WANDB_PROJECT=videollama2phi3_siglip
+ RUN_NAME=vllava_settings
+ DATA_DIR=datasets
+ OUTP_DIR=work_dirs
+
+ torchrun --nnodes $WORLD_SIZE \
+     --nproc_per_node $NPROC_PER_NODE \
+     --master_addr=$MASTER_ADDR \
+     --master_port=$MASTER_PORT \
+     --node_rank $RANK \
+     videollama2/train_flash_attn.py \
+     --deepspeed scripts/zero3.json \
+     --model_type videollama2_phi3 \
+     --model_path microsoft/Phi-3-mini-4k-instruct \
+     --vision_tower google/siglip-so400m-patch14-384 \
+     --mm_projector_type stc_connector_v35 \
+     --tune_mm_mlp_adapter True \
+     --data_path ${DATA_DIR}/videollava_pt/valley_llavaimage.json \
+     --data_folder ${DATA_DIR}/videollava_pt/ \
+     --mm_vision_select_layer -2 \
+     --num_frames 8 \
+     --bf16 True \
+     --tf32 True \
+     --fp16 False \
+     --output_dir ${OUTP_DIR}/${WANDB_PROJECT}/pretrain_${RUN_NAME} \
+     --num_train_epochs 1 \
+     --per_device_train_batch_size $LOCAL_BATCH_SIZE \
+     --per_device_eval_batch_size 4 \
+     --gradient_accumulation_steps $GRADIENT_ACCUMULATION_STEPS \
+     --evaluation_strategy "no" \
+     --save_strategy "steps" \
+     --save_steps 500 \
+     --save_total_limit 99 \
+     --learning_rate 1e-3 \
+     --weight_decay 0. \
+     --warmup_ratio 0.03 \
+     --lr_scheduler_type "cosine" \
+     --logging_steps 1 \
+     --model_max_length 2048 \
+     --gradient_checkpointing True \
+     --dataloader_num_workers 4 \
+     --lazy_preprocess True \
+     --report_to tensorboard \
+     --run_name pretrain_$RUN_NAME
VideoLLaMA2/scripts/siglip/pretrain_qwen2.sh ADDED
@@ -0,0 +1,75 @@
+ #!/bin/bash
+
+ # Environment Variables
+ ARG_WORLD_SIZE=${1:-1}
+ ARG_NPROC_PER_NODE=${2:-8}
+ ARG_MASTER_ADDR="127.0.0.1"
+ ARG_MASTER_PORT=16666
+ ARG_RANK=0
+
+ # Fall back to the defaults above when the launcher does not set these
+ if [ -z "$WORLD_SIZE" ] || [ -z "$NPROC_PER_NODE" ]; then
+     WORLD_SIZE=$ARG_WORLD_SIZE
+     NPROC_PER_NODE=$ARG_NPROC_PER_NODE
+ fi
+ if [ -z "$MASTER_ADDR" ] || [ -z "$MASTER_PORT" ] || [ -z "$RANK" ]; then
+     MASTER_ADDR=$ARG_MASTER_ADDR
+     MASTER_PORT=$ARG_MASTER_PORT
+     RANK=$ARG_RANK
+ fi
+
+ echo "WORLD_SIZE: $WORLD_SIZE"
+ echo "NPROC_PER_NODE: $NPROC_PER_NODE"
+
+ # Training Arguments
+ GLOBAL_BATCH_SIZE=256
+ LOCAL_BATCH_SIZE=8
+ GRADIENT_ACCUMULATION_STEPS=$((GLOBAL_BATCH_SIZE / (WORLD_SIZE * NPROC_PER_NODE * LOCAL_BATCH_SIZE)))
+ echo "GRADIENT_ACCUMULATION_STEPS: $GRADIENT_ACCUMULATION_STEPS"
+
+ # Log Arguments
+ export TRANSFORMERS_OFFLINE=1
+ export WANDB_PROJECT=videollama2qwen2_siglip
+ RUN_NAME=vllava_settings
+ DATA_DIR=datasets
+ OUTP_DIR=work_dirs
+
+ torchrun --nnodes $WORLD_SIZE \
+     --nproc_per_node $NPROC_PER_NODE \
+     --master_addr=$MASTER_ADDR \
+     --master_port=$MASTER_PORT \
+     --node_rank $RANK \
+     videollama2/train_flash_attn.py \
+     --deepspeed scripts/zero3.json \
+     --model_type videollama2_qwen2 \
+     --model_path Qwen/Qwen2-7B-Instruct \
+     --vision_tower google/siglip-so400m-patch14-384 \
+     --mm_projector_type stc_connector_v35 \
+     --tune_mm_mlp_adapter True \
+     --data_path ${DATA_DIR}/videollava_pt/valley_llavaimage.json \
+     --data_folder ${DATA_DIR}/videollava_pt/ \
+     --mm_vision_select_layer -2 \
+     --num_frames 8 \
+     --bf16 True \
+     --tf32 True \
+     --fp16 False \
+     --output_dir ${OUTP_DIR}/${WANDB_PROJECT}/pretrain_${RUN_NAME} \
+     --num_train_epochs 1 \
+     --per_device_train_batch_size $LOCAL_BATCH_SIZE \
+     --per_device_eval_batch_size 4 \
+     --gradient_accumulation_steps $GRADIENT_ACCUMULATION_STEPS \
+     --evaluation_strategy "no" \
+     --save_strategy "steps" \
+     --save_steps 500 \
+     --save_total_limit 99 \
+     --learning_rate 1e-3 \
+     --weight_decay 0. \
+     --warmup_ratio 0.03 \
+     --lr_scheduler_type "cosine" \
+     --logging_steps 1 \
+     --model_max_length 2048 \
+     --gradient_checkpointing True \
+     --dataloader_num_workers 4 \
+     --lazy_preprocess True \
+     --report_to tensorboard \
+     --run_name $RUN_NAME
VideoLLaMA2/scripts/vllava/finetune.sh ADDED
@@ -0,0 +1,74 @@
+ #!/bin/bash
+
+ # Environment Variables
+ ARG_WORLD_SIZE=${1:-1}
+ ARG_NPROC_PER_NODE=${2:-8}
+ ARG_MASTER_ADDR="127.0.0.1"
+ ARG_MASTER_PORT=16666
+ ARG_RANK=0
+
+ # Fall back to the defaults above when the launcher does not set these
+ if [ -z "$WORLD_SIZE" ] || [ -z "$NPROC_PER_NODE" ]; then
+     WORLD_SIZE=$ARG_WORLD_SIZE
+     NPROC_PER_NODE=$ARG_NPROC_PER_NODE
+ fi
+ if [ -z "$MASTER_ADDR" ] || [ -z "$MASTER_PORT" ] || [ -z "$RANK" ]; then
+     MASTER_ADDR=$ARG_MASTER_ADDR
+     MASTER_PORT=$ARG_MASTER_PORT
+     RANK=$ARG_RANK
+ fi
+
+ echo "WORLD_SIZE: $WORLD_SIZE"
+ echo "NPROC_PER_NODE: $NPROC_PER_NODE"
+
+ # Training Arguments
+ GLOBAL_BATCH_SIZE=128
+ LOCAL_BATCH_SIZE=4
+ GRADIENT_ACCUMULATION_STEPS=$((GLOBAL_BATCH_SIZE / (WORLD_SIZE * NPROC_PER_NODE * LOCAL_BATCH_SIZE)))
+
+ # Log Arguments
+ export TRANSFORMERS_OFFLINE=1
+ export WANDB_PROJECT=videollama2
+ RUN_NAME=vllava_settings
+ DATA_DIR=datasets
+ OUTP_DIR=work_dirs
+
+ torchrun --nnodes $WORLD_SIZE \
+     --nproc_per_node $NPROC_PER_NODE \
+     --master_addr=$MASTER_ADDR \
+     --master_port=$MASTER_PORT \
+     --node_rank $RANK \
+     videollama2/train_flash_attn.py \
+     --deepspeed scripts/zero3.json \
+     --model_type videollama2 \
+     --model_path mistralai/Mistral-7B-Instruct-v0.2 \
+     --vision_tower openai/clip-vit-large-patch14-336 \
+     --mm_projector_type stc_connector \
+     --pretrain_mm_mlp_adapter ${OUTP_DIR}/${WANDB_PROJECT}/pretrain_${RUN_NAME}/mm_projector.bin \
+     --data_path ${DATA_DIR}/videollava_sft/videochatgpt_llavaimage_tune.json \
+     --data_folder ${DATA_DIR}/videollava_sft/ \
+     --mm_vision_select_layer -2 \
+     --image_aspect_ratio pad \
+     --num_frames 8 \
+     --bf16 True \
+     --tf32 True \
+     --fp16 False \
+     --output_dir ${OUTP_DIR}/${WANDB_PROJECT}/finetune_${RUN_NAME} \
+     --num_train_epochs 1 \
+     --per_device_train_batch_size $LOCAL_BATCH_SIZE \
+     --per_device_eval_batch_size 4 \
+     --gradient_accumulation_steps $GRADIENT_ACCUMULATION_STEPS \
+     --evaluation_strategy "no" \
+     --save_strategy "steps" \
+     --save_steps 500 \
+     --save_total_limit 99 \
+     --learning_rate 2e-5 \
+     --weight_decay 0. \
+     --warmup_ratio 0.03 \
+     --lr_scheduler_type "cosine" \
+     --logging_steps 1 \
+     --model_max_length 2048 \
+     --gradient_checkpointing True \
+     --dataloader_num_workers 4 \
+     --report_to tensorboard \
+     --run_name $RUN_NAME
VideoLLaMA2/scripts/vllava/finetune_qwen2.sh ADDED
@@ -0,0 +1,74 @@
+ #!/bin/bash
+
+ # Environment Variables
+ ARG_WORLD_SIZE=${1:-1}
+ ARG_NPROC_PER_NODE=${2:-8}
+ ARG_MASTER_ADDR="127.0.0.1"
+ ARG_MASTER_PORT=16666
+ ARG_RANK=0
+
+ # Fall back to the defaults above when the launcher does not set these
+ if [ -z "$WORLD_SIZE" ] || [ -z "$NPROC_PER_NODE" ]; then
+     WORLD_SIZE=$ARG_WORLD_SIZE
+     NPROC_PER_NODE=$ARG_NPROC_PER_NODE
+ fi
+ if [ -z "$MASTER_ADDR" ] || [ -z "$MASTER_PORT" ] || [ -z "$RANK" ]; then
+     MASTER_ADDR=$ARG_MASTER_ADDR
+     MASTER_PORT=$ARG_MASTER_PORT
+     RANK=$ARG_RANK
+ fi
+
+ echo "WORLD_SIZE: $WORLD_SIZE"
+ echo "NPROC_PER_NODE: $NPROC_PER_NODE"
+
+ # Training Arguments
+ GLOBAL_BATCH_SIZE=128
+ LOCAL_BATCH_SIZE=4
+ GRADIENT_ACCUMULATION_STEPS=$((GLOBAL_BATCH_SIZE / (WORLD_SIZE * NPROC_PER_NODE * LOCAL_BATCH_SIZE)))
+
+ # Log Arguments
+ export TRANSFORMERS_OFFLINE=1
+ export WANDB_PROJECT=videollama2qwen2
+ RUN_NAME=vllava_settings
+ DATA_DIR=datasets
+ OUTP_DIR=work_dirs
+
+ torchrun --nnodes $WORLD_SIZE \
+     --nproc_per_node $NPROC_PER_NODE \
+     --master_addr=$MASTER_ADDR \
+     --master_port=$MASTER_PORT \
+     --node_rank $RANK \
+     videollama2/train_flash_attn.py \
+     --deepspeed scripts/zero3.json \
+     --model_type videollama2_qwen2 \
+     --model_path Qwen/Qwen2-7B-Instruct \
+     --vision_tower openai/clip-vit-large-patch14-336 \
+     --mm_projector_type stc_connector \
+     --pretrain_mm_mlp_adapter ${OUTP_DIR}/${WANDB_PROJECT}/pretrain_${RUN_NAME}/mm_projector.bin \
+     --data_path ${DATA_DIR}/videollava_sft/videochatgpt_llavaimage_tune.json \
+     --data_folder ${DATA_DIR}/videollava_sft/ \
+     --mm_vision_select_layer -2 \
+     --image_aspect_ratio pad \
+     --num_frames 8 \
+     --bf16 True \
+     --tf32 True \
+     --fp16 False \
+     --output_dir ${OUTP_DIR}/${WANDB_PROJECT}/finetune_${RUN_NAME} \
+     --num_train_epochs 1 \
+     --per_device_train_batch_size $LOCAL_BATCH_SIZE \
+     --per_device_eval_batch_size 4 \
+     --gradient_accumulation_steps $GRADIENT_ACCUMULATION_STEPS \
+     --evaluation_strategy "no" \
+     --save_strategy "steps" \
+     --save_steps 500 \
+     --save_total_limit 99 \
+     --learning_rate 2e-5 \
+     --weight_decay 0. \
+     --warmup_ratio 0.03 \
+     --lr_scheduler_type "cosine" \
+     --logging_steps 1 \
+     --model_max_length 2048 \
+     --gradient_checkpointing True \
+     --dataloader_num_workers 4 \
+     --report_to tensorboard \
+     --run_name $RUN_NAME
VideoLLaMA2/scripts/vllava/pretrain.sh ADDED
@@ -0,0 +1,74 @@
+ #!/bin/bash
+
+ # Environment Variables
+ ARG_WORLD_SIZE=${1:-1}
+ ARG_NPROC_PER_NODE=${2:-8}
+ ARG_MASTER_ADDR="127.0.0.1"
+ ARG_MASTER_PORT=16666
+ ARG_RANK=0
+
+ # Fall back to the defaults above when the launcher does not set these
+ if [ -z "$WORLD_SIZE" ] || [ -z "$NPROC_PER_NODE" ]; then
+     WORLD_SIZE=$ARG_WORLD_SIZE
+     NPROC_PER_NODE=$ARG_NPROC_PER_NODE
+ fi
+ if [ -z "$MASTER_ADDR" ] || [ -z "$MASTER_PORT" ] || [ -z "$RANK" ]; then
+     MASTER_ADDR=$ARG_MASTER_ADDR
+     MASTER_PORT=$ARG_MASTER_PORT
+     RANK=$ARG_RANK
+ fi
+
+ echo "WORLD_SIZE: $WORLD_SIZE"
+ echo "NPROC_PER_NODE: $NPROC_PER_NODE"
+
+ # Training Arguments
+ GLOBAL_BATCH_SIZE=256
+ LOCAL_BATCH_SIZE=8
+ GRADIENT_ACCUMULATION_STEPS=$((GLOBAL_BATCH_SIZE / (WORLD_SIZE * NPROC_PER_NODE * LOCAL_BATCH_SIZE)))
+
+ # Log Arguments
+ export TRANSFORMERS_OFFLINE=1
+ export WANDB_PROJECT=videollama2
+ RUN_NAME=vllava_settings
+ DATA_DIR=datasets
+ OUTP_DIR=work_dirs
+
+ torchrun --nnodes $WORLD_SIZE \
+     --nproc_per_node $NPROC_PER_NODE \
+     --master_addr=$MASTER_ADDR \
+     --master_port=$MASTER_PORT \
+     --node_rank $RANK \
+     videollama2/train_flash_attn.py \
+     --deepspeed scripts/zero3.json \
+     --model_type videollama2 \
+     --model_path mistralai/Mistral-7B-Instruct-v0.2 \
+     --vision_tower openai/clip-vit-large-patch14-336 \
+     --mm_projector_type stc_connector \
+     --tune_mm_mlp_adapter True \
+     --data_path ${DATA_DIR}/videollava_pt/valley_llavaimage.json \
+     --data_folder ${DATA_DIR}/videollava_pt/ \
+     --mm_vision_select_layer -2 \
+     --num_frames 8 \
+     --bf16 True \
+     --tf32 True \
+     --fp16 False \
+     --output_dir ${OUTP_DIR}/${WANDB_PROJECT}/pretrain_${RUN_NAME} \
+     --num_train_epochs 1 \
+     --per_device_train_batch_size $LOCAL_BATCH_SIZE \
+     --per_device_eval_batch_size 4 \
+     --gradient_accumulation_steps $GRADIENT_ACCUMULATION_STEPS \
+     --evaluation_strategy "no" \
+     --save_strategy "steps" \
+     --save_steps 500 \
+     --save_total_limit 99 \
+     --learning_rate 1e-3 \
+     --weight_decay 0. \
+     --warmup_ratio 0.03 \
+     --lr_scheduler_type "cosine" \
+     --logging_steps 1 \
+     --model_max_length 2048 \
+     --gradient_checkpointing True \
+     --dataloader_num_workers 4 \
+     --lazy_preprocess True \
+     --report_to tensorboard \
+     --run_name $RUN_NAME
VideoLLaMA2/scripts/vllava/pretrain_qwen2.sh ADDED
@@ -0,0 +1,74 @@
+ #!/bin/bash
+
+ # Environment Variables
+ ARG_WORLD_SIZE=${1:-1}
+ ARG_NPROC_PER_NODE=${2:-8}
+ ARG_MASTER_ADDR="127.0.0.1"
+ ARG_MASTER_PORT=16666
+ ARG_RANK=0
+
+ # Fall back to the defaults above when the launcher does not set these
+ if [ -z "$WORLD_SIZE" ] || [ -z "$NPROC_PER_NODE" ]; then
+     WORLD_SIZE=$ARG_WORLD_SIZE
+     NPROC_PER_NODE=$ARG_NPROC_PER_NODE
+ fi
+ if [ -z "$MASTER_ADDR" ] || [ -z "$MASTER_PORT" ] || [ -z "$RANK" ]; then
+     MASTER_ADDR=$ARG_MASTER_ADDR
+     MASTER_PORT=$ARG_MASTER_PORT
+     RANK=$ARG_RANK
+ fi
+
+ echo "WORLD_SIZE: $WORLD_SIZE"
+ echo "NPROC_PER_NODE: $NPROC_PER_NODE"
+
+ # Training Arguments
+ GLOBAL_BATCH_SIZE=256
+ LOCAL_BATCH_SIZE=8
+ GRADIENT_ACCUMULATION_STEPS=$((GLOBAL_BATCH_SIZE / (WORLD_SIZE * NPROC_PER_NODE * LOCAL_BATCH_SIZE)))
+
+ # Log Arguments
+ export TRANSFORMERS_OFFLINE=1
+ export WANDB_PROJECT=videollama2qwen2
+ RUN_NAME=vllava_settings
+ DATA_DIR=datasets
+ OUTP_DIR=work_dirs
+
+ torchrun --nnodes $WORLD_SIZE \
+     --nproc_per_node $NPROC_PER_NODE \
+     --master_addr=$MASTER_ADDR \
+     --master_port=$MASTER_PORT \
+     --node_rank $RANK \
+     videollama2/train_flash_attn.py \
+     --deepspeed scripts/zero3.json \
+     --model_type videollama2_qwen2 \
+     --model_path Qwen/Qwen2-7B-Instruct \
+     --vision_tower openai/clip-vit-large-patch14-336 \
+     --mm_projector_type stc_connector \
+     --tune_mm_mlp_adapter True \
+     --data_path ${DATA_DIR}/videollava_pt/valley_llavaimage.json \
+     --data_folder ${DATA_DIR}/videollava_pt/ \
+     --mm_vision_select_layer -2 \
+     --num_frames 8 \
+     --bf16 True \
+     --tf32 True \
+     --fp16 False \
+     --output_dir ${OUTP_DIR}/${WANDB_PROJECT}/pretrain_${RUN_NAME} \
+     --num_train_epochs 1 \
+     --per_device_train_batch_size $LOCAL_BATCH_SIZE \
+     --per_device_eval_batch_size 4 \
+     --gradient_accumulation_steps $GRADIENT_ACCUMULATION_STEPS \
+     --evaluation_strategy "no" \
+     --save_strategy "steps" \
+     --save_steps 500 \
+     --save_total_limit 99 \
+     --learning_rate 1e-3 \
+     --weight_decay 0. \
+     --warmup_ratio 0.03 \
+     --lr_scheduler_type "cosine" \
+     --logging_steps 1 \
+     --model_max_length 2048 \
+     --gradient_checkpointing True \
+     --dataloader_num_workers 4 \
+     --lazy_preprocess True \
+     --report_to tensorboard \
+     --run_name $RUN_NAME
VideoLLaMA2/videollama2/__init__.py ADDED
@@ -0,0 +1,109 @@
+import os
+import copy
+import warnings
+import shutil
+from functools import partial
+
+import torch
+
+from .model import load_pretrained_model
+from .mm_utils import process_image, process_video, tokenizer_multimodal_token, get_model_name_from_path, KeywordsStoppingCriteria
+from .constants import NUM_FRAMES, DEFAULT_IMAGE_TOKEN, DEFAULT_VIDEO_TOKEN, MODAL_INDEX_MAP
+
+
+def model_init(model_path=None, **kwargs):
+    model_path = "DAMO-NLP-SG/VideoLLaMA2-7B" if model_path is None else model_path
+    model_name = get_model_name_from_path(model_path)
+    tokenizer, model, processor, context_len = load_pretrained_model(model_path, None, model_name, **kwargs)
+
+    if tokenizer.pad_token is None and tokenizer.unk_token is not None:
+        tokenizer.pad_token = tokenizer.unk_token
+
+    num_frames = model.config.num_frames if hasattr(model.config, "num_frames") else NUM_FRAMES
+
+    processor = {
+        'image': partial(process_image, processor=processor, aspect_ratio=None),
+        'video': partial(process_video, processor=processor, aspect_ratio=None, num_frames=num_frames),
+    }
+
+    return model, processor, tokenizer
+
+
+def mm_infer(image_or_video, instruct, model, tokenizer, modal='video', **kwargs):
+    """Inference API of VideoLLaMA2 for video understanding.
+
+    Args:
+        model: VideoLLaMA2 model.
+        image_or_video (torch.Tensor): image tensor (1, C, H, W) / video tensor (T, C, H, W).
+        instruct (str): text instruction for understanding video.
+        tokenizer: tokenizer.
+        do_sample (bool): whether to sample.
+        modal (str): inference modality.
+    Returns:
+        str: response of the model.
+    """
+
+    # 1. text preprocess (tag process & generate prompt).
+    if modal == 'image':
+        modal_token = DEFAULT_IMAGE_TOKEN
+    elif modal == 'video':
+        modal_token = DEFAULT_VIDEO_TOKEN
+    else:
+        raise ValueError(f"Unsupported modal: {modal}")
+
+    if isinstance(instruct, str):
+        message = [{'role': 'user', 'content': modal_token + '\n' + instruct}]
+    elif isinstance(instruct, list):
+        message = copy.deepcopy(instruct)
+        message[0]['content'] = modal_token + '\n' + message[0]['content']
+    else:
+        raise ValueError(f"Unsupported type of instruct: {type(instruct)}")
+
+    if model.config.model_type in ['videollama2', 'videollama2_mistral', 'videollama2_mixtral']:
+        system_message = [
+            {'role': 'system', 'content': (
+                """<<SYS>>\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature."""
+                """\n"""
+                """If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\n<</SYS>>""")
+            }
+        ]
+    else:
+        system_message = []
+
+    message = system_message + message
+    prompt = tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=True)
+
+    input_ids = tokenizer_multimodal_token(prompt, tokenizer, modal_token, return_tensors='pt').unsqueeze(0).long().cuda()
+    attention_masks = input_ids.ne(tokenizer.pad_token_id).long().cuda()
+
+    # 2. vision preprocess (load & transform image or video).
+    tensor = image_or_video.half().cuda()
+
+    tensor = [(tensor, modal_token)]
+
+    # 3. generate response according to visual signals and prompts.
+    keywords = [tokenizer.eos_token]
+    stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
+
+    do_sample = kwargs.get('do_sample', False)
+    temperature = kwargs.get('temperature', 0.2 if do_sample else 0.0)
+    top_p = kwargs.get('top_p', 0.9)
+    max_new_tokens = kwargs.get('max_new_tokens', 1024)
+
+    with torch.inference_mode():
+        output_ids = model.generate(
+            input_ids,
+            attention_mask=attention_masks,
+            images=tensor,
+            do_sample=do_sample,
+            temperature=temperature,
+            max_new_tokens=max_new_tokens,
+            top_p=top_p,
+            use_cache=True,
+            stopping_criteria=[stopping_criteria],
+            pad_token_id=tokenizer.eos_token_id,
+        )
+
+    outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
+
+    return outputs
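Together, `model_init` and `mm_infer` form the package's public inference entry points. A minimal usage sketch (not part of the commit; it assumes a CUDA GPU, and assumes the first positional argument of the bound `process_video` partial is the media path — the `mm_utils` side is not shown in this diff). The clip path points at the `assets/cat_and_chicken.mp4` file bundled with this repo:

```python
from videollama2 import model_init, mm_infer

# Load the default DAMO-NLP-SG/VideoLLaMA2-7B checkpoint together with its
# tokenizer and the modality-keyed preprocessors built in model_init().
model, processor, tokenizer = model_init()

# The 'video' processor samples num_frames frames and returns a (T, C, H, W) tensor.
video_tensor = processor['video']('VideoLLaMA2/assets/cat_and_chicken.mp4')

# Greedy decoding by default; pass do_sample=True / temperature=... to sample.
answer = mm_infer(video_tensor, 'What animals appear in this video?',
                  model=model, tokenizer=tokenizer, modal='video')
print(answer)
```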
{videollama2 → VideoLLaMA2/videollama2}/constants.py RENAMED
@@ -1,38 +1,32 @@
 CONTROLLER_HEART_BEAT_EXPIRATION = 30
 WORKER_HEART_BEAT_INTERVAL = 15
 
-LOGDIR = "./log_dir"
-
-NUM_FRAMES = 8
-MAX_FRAMES = 32
-NUM_FRAMES_PER_SECOND = 1
-Grids = [(2, 2), (1, 2), (1, 3), (1, 4), (2, 1), (3, 1), (4, 1)]
+LOGDIR = "."
 
 # Model Constants
 IGNORE_INDEX = -100
-IMAGE_TOKEN_INDEX = -200
-DEFAULT_IMAGE_TOKEN = "<image>"
-DEFAULT_VIDEO_TOKEN = "<video>"
-DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
-DEFAULT_IM_START_TOKEN = "<im_start>"
-DEFAULT_IM_END_TOKEN = "<im_end>"
-IMAGE_PLACEHOLDER = "<image-placeholder>"
-
 
+# Image arguments
+IMAGE_TOKEN_INDEX = -200
 DEFAULT_IMAGE_TOKEN = "<image>"
 DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
 DEFAULT_IM_START_TOKEN = "<im_start>"
 DEFAULT_IM_END_TOKEN = "<im_end>"
 IMAGE_PLACEHOLDER = "<image-placeholder>"
 
+# Video arguments
+VIDEO_TOKEN_INDEX = -201
+DEFAULT_VIDEO_TOKEN = "<video>"
+NUM_FRAMES = 8
+MAX_FRAMES = 32
+NUM_FRAMES_PER_SECOND = 1
 
-MMODAL_TOKEN_INDEX = {"IMAGE": -200, "VIDEO": -201, "AUDIO": -202}
-MMODAL_INDEX_TOKEN = {v: k for k, v in MMODAL_TOKEN_INDEX.items()}
-MMODAL_START_TOKEN_INDEX = {"IMAGE": "<im_start>", "VIDEO": "<vid_start>", "AUDIO": "<ad_start>"}
-MMODAL_END_TOKEN_INDEX = {"IMAGE": "<im_end>", "VIDEO": "<vid_end>", "AUDIO": "<ad_end>"}
+# Audio arguments
+AUDIO_TOKEN_INDEX = -202
+DEFAULT_AUDIO_TOKEN = "<audio>"
 
-DEFAULT_MMODAL_TOKEN = {"IMAGE": "<image>", "VIDEO": "<video>", "AUDIO": "<audio>"}
-DEFAULT_MMODAL_PATCH_TOKEN = {"IMAGE": "<im_patch>", "VIDEO": "<vid_patch>", "AUDIO": "<ad_patch>"}
-DEFAULT_MMODAL_START_TOKEN = {"IMAGE": "<Image>", "VIDEO": "<Video>", "AUDIO": "<ad_start>"}
-DEFAULT_MMODAL_END_TOKEN = {"IMAGE": "<\Image>", "VIDEO": "<\Video>", "AUDIO": "<\Audio>"}
+MODAL_INDEX_MAP = {
+    "<image>": -200,
+    "<video>": -201,
+    "<audio>": -202,
+}
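The rewrite collapses the old `MMODAL_*` tables into a single `MODAL_INDEX_MAP` keyed by the literal tag, so the placeholder id for any modality can be looked up directly from the token that appears in a prompt. A tiny illustration of the intended lookup (not code from the commit):

```python
from videollama2.constants import MODAL_INDEX_MAP, DEFAULT_VIDEO_TOKEN

# Placeholder ids are negative so they can never collide with a real vocab id.
assert MODAL_INDEX_MAP[DEFAULT_VIDEO_TOKEN] == -201
assert all(idx < 0 for idx in MODAL_INDEX_MAP.values())
```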
{videollama2 → VideoLLaMA2/videollama2}/conversation.py RENAMED
@@ -12,10 +12,9 @@ class SeparatorStyle(Enum):
     """Different separator style."""
     SINGLE = auto()
     TWO = auto()
-    MPT = auto()
     PLAIN = auto()
-    LLAMA_2 = auto()
-
+    LLAMA2 = auto()
+    QWEN = auto()
 
 @dataclasses.dataclass
 class Conversation:
@@ -65,16 +64,7 @@ class Conversation:
                     ret += role + ": " + message + seps[i % 2]
                 else:
                     ret += role + ":"
-        elif self.sep_style == SeparatorStyle.MPT:
-            ret = self.system + self.sep
-            for role, message in messages:
-                if message:
-                    if type(message) is tuple:
-                        message, _, _ = message
-                    ret += role + message + self.sep
-                else:
-                    ret += role
-        elif self.sep_style == SeparatorStyle.LLAMA_2:
+        elif self.sep_style == SeparatorStyle.LLAMA2:
             wrap_sys = lambda msg: f"<<SYS>>\n{msg}\n<</SYS>>\n\n"
             wrap_inst = lambda msg: f"[INST] {msg} [/INST]"
             ret = ""
@@ -95,6 +85,23 @@ class Conversation:
                 else:
                     ret += ""
             ret = ret.lstrip(self.sep)
+        elif self.sep_style == SeparatorStyle.QWEN:
+            ret = ""
+            # 1. Add system prompt
+            ret += self.system + self.sep + "\n"
+            # 2. Iterate message
+            for i, (role, message) in enumerate(messages):
+                if i == 0:
+                    assert message, "first message should not be none"
+                    assert role == self.roles[0], "first message should come from user"
+                if message:
+                    if type(message) is tuple:
+                        message, _, _ = message
+                    # 2.1 Add role and message
+                    ret += role + message + self.sep + "\n"
+                else:
+                    # 2.2 Add generation prompt
+                    ret += role
         elif self.sep_style == SeparatorStyle.PLAIN:
             seps = [self.sep, self.sep2]
             ret = self.system
@@ -102,9 +109,9 @@ class Conversation:
                 if message:
                     if type(message) is tuple:
                         message, _, _ = message
-                    ret += message + seps[i % 2]
+                    ret += role + message + seps[i % 2]
                 else:
-                    ret += ""
+                    ret += role
         else:
             raise ValueError(f"Invalid style: {self.sep_style}")
 
@@ -113,7 +120,6 @@ class Conversation:
     def append_message(self, role, message):
         self.messages.append([role, message])
 
-
     def process_image(self, image, image_process_mode, return_pil=False, image_format='PNG', max_len=800, min_len=400):
         if image_process_mode == "Pad":
             def expand2square(pil_img, background_color=(122, 116, 104)):
@@ -308,17 +314,7 @@ class Conversation:
             "sep2": self.sep2,
         }
 
-conv_mistral_instruct = Conversation(
-    system="A chat between a curious user and an artificial intelligence assistant. "
-    "The assistant gives helpful, detailed, and polite answers to the user's questions.",
-    roles=("USER", "ASSISTANT"),
-    version="llama_v2",
-    messages=(),
-    offset=0,
-    sep_style=SeparatorStyle.LLAMA_2,
-    sep="",
-    sep2="</s>",
-)
+
 conv_vicuna_v0 = Conversation(
     system="A chat between a curious human and an artificial intelligence assistant. "
     "The assistant gives helpful, detailed, and polite answers to the human's questions.",
@@ -350,92 +346,43 @@ conv_vicuna_v0 = Conversation(
     sep="###",
 )
 
-conv_vicuna_v1 = Conversation(
-    system="A chat between a curious user and an artificial intelligence assistant. "
-    "The assistant gives helpful, detailed, and polite answers to the user's questions.",
-    roles=("USER", "ASSISTANT"),
-    version="v1",
-    messages=(),
-    offset=0,
-    sep_style=SeparatorStyle.TWO,
-    sep=" ",
-    sep2="</s>",
-)
-
-conv_llama_2 = Conversation(
-    system="""You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
-
-If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
-    roles=("USER", "ASSISTANT"),
-    version="llama_v2",
-    messages=(),
-    offset=0,
-    sep_style=SeparatorStyle.LLAMA_2,
-    sep="<s>",
-    sep2="</s>",
-)
-
-conv_llava_llama_2 = Conversation(
-    system="You are a helpful language and vision assistant. "
-    "You are able to understand the visual content that the user provides, "
-    "and assist the user with a variety of tasks using natural language.",
-    roles=("USER", "ASSISTANT"),
-    version="llama_v2",
-    messages=(),
-    offset=0,
-    sep_style=SeparatorStyle.LLAMA_2,
-    sep="<s>",
-    sep2="</s>",
-)
-
-conv_mpt = Conversation(
-    system="""<|im_start|>system
-A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.""",
-    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
-    version="mpt",
-    messages=(),
-    offset=0,
-    sep_style=SeparatorStyle.MPT,
-    sep="<|im_end|>",
-)
-
 conv_llava_plain = Conversation(
     system="",
     roles=("", ""),
-    messages=(
-    ),
+    messages=(),
     offset=0,
     sep_style=SeparatorStyle.PLAIN,
-    sep="\n",
+    sep="",
+    sep2="\n"
 )
 
-conv_llava_v0 = Conversation(
-    system="A chat between a curious human and an artificial intelligence assistant. "
-    "The assistant gives helpful, detailed, and polite answers to the human's questions.",
+conv_llava_v0_mmtag = Conversation(
+    system="A chat between a curious user and an artificial intelligence assistant. "
+    "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
+    "The visual content will be provided with the following format: <Image>visual content</Image>.",
     roles=("Human", "Assistant"),
     messages=(
     ),
     offset=0,
     sep_style=SeparatorStyle.SINGLE,
     sep="###",
+    version="v0_mmtag",
 )
 
-conv_llava_v0_mmtag = Conversation(
-    system="A chat between a curious user and an artificial intelligence assistant. "
-    "The assistant is able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language."
-    "The visual content will be provided with the following format: <Image>visual content</Image>.",
+conv_llava_v0 = Conversation(
+    system="A chat between a curious human and an artificial intelligence assistant. "
+    "The assistant gives helpful, detailed, and polite answers to the human's questions.",
     roles=("Human", "Assistant"),
     messages=(
     ),
     offset=0,
     sep_style=SeparatorStyle.SINGLE,
     sep="###",
-    version="v0_mmtag",
 )
 
-conv_llava_v1 = Conversation(
-    system="A chat between a curious human and an artificial intelligence assistant. "
-    "The assistant gives helpful, detailed, and polite answers to the human's questions.",
+conv_vicuna_v1 = Conversation(
+    system="A chat between a curious user and an artificial intelligence assistant. "
+    "The assistant gives helpful, detailed, and polite answers to the user's questions.",
     roles=("USER", "ASSISTANT"),
     version="v1",
     messages=(),
@@ -458,25 +405,101 @@ conv_llava_v1_mmtag = Conversation(
     version="v1_mmtag",
 )
 
-default_conversation = conv_vicuna_v1
+conv_llava_v1 = Conversation(
+    system="A chat between a curious human and an artificial intelligence assistant. "
+    "The assistant gives helpful, detailed, and polite answers to the human's questions.",
+    roles=("USER", "ASSISTANT"),
+    version="v1",
+    messages=(),
+    offset=0,
+    sep_style=SeparatorStyle.TWO,
+    sep=" ",
+    sep2="</s>",
+)
+
+conv_llava_llama2 = Conversation(
+    system="You are a helpful language and vision assistant. "
+    "You are able to understand the visual content that the user provides, "
+    "and assist the user with a variety of tasks using natural language.",
+    roles=("USER", "ASSISTANT"),
+    version="llama2",
+    messages=(),
+    offset=0,
+    sep_style=SeparatorStyle.LLAMA2,
+    sep="<s>",
+    sep2="</s>",
+)
+
+conv_llama2 = Conversation(
+    system="""You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
+
+If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
+    roles=("USER", "ASSISTANT"),
+    version="llama2",
+    messages=(),
+    offset=0,
+    sep_style=SeparatorStyle.LLAMA2,
+    sep="<s>",
+    sep2="</s>",
+)
+
+conv_mistral = Conversation(
+    system="A chat between a curious user and an artificial intelligence assistant. "
+    "The assistant gives helpful, detailed, and polite answers to the user's questions.",
+    roles=("USER", "ASSISTANT"),
+    version="llama2",
+    messages=(),
+    offset=0,
+    sep_style=SeparatorStyle.LLAMA2,
+    sep="",
+    sep2="</s>",
+)
+
+conv_qwen = Conversation(
+    system="<|im_start|>system\nYou are a helpful assistant.",
+    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
+    messages=(),
+    offset=0,
+    sep_style=SeparatorStyle.QWEN,
+    sep="<|im_end|>",
+    version="qwen",
+)
+
+conv_qwen_plain = Conversation(
+    system="",
+    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
+    messages=(),
+    offset=0,
+    sep_style=SeparatorStyle.PLAIN,
+    sep="<|im_end|>",
+    sep2="<|im_end|>",
+    version="qwen_plain",
+)
+
+default_conversation = conv_mistral
 conv_templates = {
     "default": conv_vicuna_v0,
-    "v0": conv_vicuna_v0,
-    "v1": conv_vicuna_v1,
-    "vicuna_v1": conv_vicuna_v1,
-    "llama_2": conv_llama_2,
-
+    # pretrain template
     "plain": conv_llava_plain,
+    # llava v0
+    "v0": conv_vicuna_v0,
     "v0_plain": conv_llava_plain,
-    "llava_v0": conv_llava_v0,
     "v0_mmtag": conv_llava_v0_mmtag,
-    "llava_v1": conv_llava_v1,
+    "llava_v0": conv_llava_v0,
+    # llava v1
+    "v1": conv_vicuna_v1,
     "v1_mmtag": conv_llava_v1_mmtag,
-    "llava_llama_2": conv_llava_llama_2,
-
-    "video_llama_beta": conv_llava_llama_2,
-    "mistral_instruct": conv_mistral_instruct,
-    "mpt": conv_mpt,
+    "llava_v1": conv_llava_v1,
+    "vicuna_v1": conv_vicuna_v1,
+    # llava v1.5
+    "llava_llama2": conv_llava_llama2,
+    # llama2
+    "llama2": conv_llama2,
+    # mistral
+    "mistral": conv_mistral,
+    # qwen
+    "qwen": conv_qwen,
+    "qwen_plain": conv_qwen_plain,
}
 
 
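The net effect of the rewrite is that the MPT/LLaMA-2-era templates give way to `llama2`, `mistral`, and the new Qwen pair, with `conv_mistral` as the default. A sketch of how the new QWEN separator style assembles a prompt (assuming the LLaVA-style `copy()` helper that this `Conversation` class carries over, which is not shown in the hunks above):

```python
from videollama2.conversation import conv_templates

conv = conv_templates['qwen'].copy()
conv.append_message(conv.roles[0], 'What is the video about?')
conv.append_message(conv.roles[1], None)  # None appends the bare assistant role as the generation prompt

print(conv.get_prompt())
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# What is the video about?<|im_end|>
# <|im_start|>assistant
```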
VideoLLaMA2/videollama2/eval/eval_video_cap_msvc_correctness.py ADDED
@@ -0,0 +1,259 @@
+import re
+import os
+import ast
+import time
+import json
+import argparse
+from tqdm import tqdm
+from multiprocessing.pool import Pool
+
+import openai
+from openai import AzureOpenAI
+
+
+def init():
+    client = AzureOpenAI(
+        azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
+        api_key=os.getenv("AZURE_OPENAI_KEY"),
+        api_version="2024-02-15-preview"
+    )
+
+    return client
+
+
+def interaction(client, message_text):
+    completion = client.chat.completions.create(
+        model=os.getenv("AZURE_OPENAI_DEPLOYNAME"),
+        messages=message_text,
+        temperature=0.7,
+        max_tokens=800,
+        top_p=0.95,
+        frequency_penalty=0,
+        presence_penalty=0,
+        stop=None
+    )
+
+    return completion
+
+
+def annotate(prediction_set, caption_files, output_dir):
+    """
+    Evaluates question and answer pairs using the Azure OpenAI judge model.
+    Returns a score for correctness.
+    """
+
+    for file in tqdm(caption_files):
+        key = file[:-5]  # Strip file extension
+        qa_set = prediction_set[key]
+        question = qa_set['q']
+        answer = str(qa_set['a'])
+        pred = qa_set['pred']
+        try:
+            message = [
+                {
+                    "role": "system",
+                    "content":
+                        "You are an intelligent chatbot designed for evaluating the factual accuracy of generative outputs for video-based question-answer pairs. "
+                        "Your task is to compare the predicted answer with these correct answers and determine if they are factually consistent. Here's how you can accomplish the task:"
+                        "------"
+                        "##INSTRUCTIONS: "
+                        "- Focus on the factual consistency between the predicted answer and the correct answer. The predicted answer should not contain any misinterpretations or misinformation.\n"
+                        "- The predicted answer must be factually accurate and align with the video content.\n"
+                        "- Consider synonyms or paraphrases as valid matches.\n"
+                        "- Evaluate the factual accuracy of the prediction compared to the answer."
+                },
+                {
+                    "role": "user",
+                    "content":
+                        "Please evaluate the following video-based question-answer pair:\n\n"
+                        f"Question: {question}\n"
+                        f"Correct Answers: {answer}\n"
+                        f"Predicted Answer: {pred}\n\n"
+                        "Provide your evaluation only as a factual accuracy score where the factual accuracy score is an integer value between 0 and 5, with 5 indicating the highest level of factual consistency. "
+                        "Please generate the response in the form of a Python dictionary string with keys 'score', where its value is the factual accuracy score in INTEGER, not STRING."
+                        "DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. "
+                        "For example, your response should look like this: {'score': 4}."
+                }
+            ]
+            completion = interaction(client, message)
+            # Convert response to a Python dictionary.
+            response_message = completion.choices[0].message.content
+            response_dict = ast.literal_eval(response_message)
+            result_qa_pair = [response_dict, qa_set]
+            # Save the scored question-answer pair to a json file.
+            with open(f"{output_dir}/{key}.json", "w") as f:
+                json.dump(result_qa_pair, f)
+
+        except Exception as e:
+            print(f"Error processing file '{key}': {e}")
+
+        time.sleep(1)
+
+
+def longest_repeating_substring(s):
+    n = len(s)
+    dp = [[0] * (n+1) for _ in range(n+1)]
+    res = ""
+    res_length = 0
+
+    index = 0
+    for i in range(1, n+1):
+        for j in range(i+1, n+1):
+            if (dp[i-1][j-1] > 0 and dp[i-1][j-1] < (j-i)) or s[i-1] == s[j-1]:
+                dp[i][j] = dp[i-1][j-1] + 1
+                if dp[i][j] > res_length:
+                    res_length = dp[i][j]
+                    index = max(i, index)
+            else:
+                dp[i][j] = 0
+
+    if res_length > 0:
+        for i in range(index-res_length+1, index+1):
+            res = res + s[i-1]
+
+    return res
+
+
+def main(args):
+    if args.num_chunks > 1:
+        pred_contents = []
+        for _idx in range(args.num_chunks):
+            file = os.path.join(args.pred_path, f"{args.num_chunks}_{_idx}.json")
+            pred_contents += [json.loads(line) for line in open(file)]
+    else:
+        pred_contents = [json.loads(line) for line in open(args.pred_path)]
+
+    # Dictionary to store the count of occurrences for each video_id
+    video_id_counts = {}
+    new_pred_contents = []
+
+    # Iterate through each sample in pred_contents
+    for sample in pred_contents:
+        video_id = sample["video_name"]
+        if video_id in video_id_counts:
+            video_id_counts[video_id] += 1
+        else:
+            video_id_counts[video_id] = 0
+
+        # Create a new sample with the modified key
+        new_sample = sample
+        new_sample["video_name"] = f"{video_id.split('/')[-1].split('.')[0]}_{video_id_counts[video_id]}"
+        new_pred_contents.append(new_sample)
+
+    # Generating list of id's and corresponding files
+    id_list = [x["video_name"] for x in new_pred_contents]
+    caption_files = [f"{id}.json" for id in id_list]
+
+    output_dir = args.output_dir
+    # Generate output directory if not exists.
+    if not os.path.exists(output_dir):
+        os.makedirs(output_dir)
+
+    # Preparing dictionary of question-answer sets
+    prediction_set = {}
+    for sample in new_pred_contents:
+        id = sample["video_name"]
+        question = sample["question"]
+        answer = sample["answer"]
+        pred = sample["pred"]
+        qa_set = {"q": question, "a": answer, "pred": pred}
+        prediction_set[id] = qa_set
+
+    num_tasks = args.num_tasks
+
+    # While loop to ensure that all captions are processed.
+    while True:
+        try:
+            # Files that have already been processed.
+            completed_files = os.listdir(output_dir)
+            print(f"completed_files: {len(completed_files)}")
+
+            # Files that have not been processed yet.
+            incomplete_files = [f for f in caption_files if f not in completed_files]
+            print(f"incomplete_files: {len(incomplete_files)}")
+
+            # Break the loop when there are no incomplete files
+            if len(incomplete_files) == 0:
+                break
+            if len(incomplete_files) <= num_tasks:
+                num_tasks = 1
+
+            # Split tasks into parts.
+            part_len = len(incomplete_files) // num_tasks
+            all_parts = [incomplete_files[i : i + part_len] for i in range(0, len(incomplete_files), part_len)]
+            task_args = [(prediction_set, part, args.output_dir) for part in all_parts]
+            print("Generate", len(all_parts), "subprocess.")
+
+            # Process the first part; remaining files are picked up on later
+            # passes of the while loop. (Parallel variant kept for reference.)
+            # with Pool() as pool:
+            #     pool.starmap(annotate, task_args)
+            annotate(*task_args[0])
+
+        except Exception as e:
+            print(f"Error: {e}")
+
+    # Combine all the processed files into one
+    combined_contents = {}
+    json_path = args.output_json
+
+    # Iterate through json files
+    for file_name in os.listdir(output_dir):
+        if file_name.endswith(".json"):
+            file_path = os.path.join(output_dir, file_name)
+            with open(file_path, "r") as json_file:
+                try:
+                    content = json.load(json_file)
+                    combined_contents[file_name[:-5]] = content
+                except Exception as e:
+                    print(f"Error: {e}")
+
+    # Calculate average score
+    score_sum = 0
+    count = 0
+    for key, result in combined_contents.items():
+        count += 1
+        try:
+            for _ in result[0].keys():
+                score_match = result[0][_]
+                score = int(score_match)
+                score_sum += score
+                break
+        except Exception as e:
+            print(f"Error processing file '{key}': {e}")
+    average_score = score_sum / count
+    combined_contents["average_score"] = average_score
+    with open(json_path, "w") as json_file:
+        json.dump(combined_contents, json_file, indent=4)
+    print("Average score for correctness:", average_score)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="question-answer-generation-using-gpt-3")
+    parser.add_argument("--pred-path", required=True, help="The path to file containing prediction.")
+    parser.add_argument("--output-dir", required=True, help="The path to save annotation json files.")
+    parser.add_argument("--output-json", required=True, help="The path to save annotation final combined json file.")
+    parser.add_argument("--num-tasks", required=True, type=int, help="Number of splits.")
+    parser.add_argument("--num_chunks", default=1, type=int, help="Result splits")
+    parser.add_argument("--api-key", required=True, type=str, help="Azure OpenAI API key.")
+    parser.add_argument("--api-endpoint", required=True, type=str, help="Azure OpenAI API endpoint.")
+    parser.add_argument("--api-deployname", required=True, type=str, help="Azure OpenAI API deployname.")
+    args = parser.parse_args()
+
+    # Set the OpenAI API credentials.
+    os.environ["AZURE_OPENAI_KEY"] = args.api_key
+    os.environ["AZURE_OPENAI_ENDPOINT"] = args.api_endpoint
+    os.environ["AZURE_OPENAI_DEPLOYNAME"] = args.api_deployname
+
+    client = init()
+
+    main(args)
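Both MSVC judges depend on the model returning a bare Python dict literal, which `ast.literal_eval` parses without the code-execution risk of `eval`; anything else raises, the file is skipped, and it is retried on the next pass of the `while` loop. A minimal illustration of that contract (not code from the commit):

```python
import ast

reply = "{'score': 4}"             # what the judge model is instructed to return
parsed = ast.literal_eval(reply)   # safe: accepts only Python literals
assert parsed['score'] == 4
```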
VideoLLaMA2/videollama2/eval/eval_video_cap_msvc_detailedness.py ADDED
@@ -0,0 +1,257 @@
+import re
+import os
+import ast
+import time
+import json
+import argparse
+from tqdm import tqdm
+from multiprocessing.pool import Pool
+
+import openai
+from openai import AzureOpenAI
+
+
+def init():
+    client = AzureOpenAI(
+        azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
+        api_key=os.getenv("AZURE_OPENAI_KEY"),
+        api_version="2024-02-15-preview"
+    )
+
+    return client
+
+
+def interaction(client, message_text):
+    completion = client.chat.completions.create(
+        model=os.getenv("AZURE_OPENAI_DEPLOYNAME"),
+        messages=message_text,
+        temperature=0.7,
+        max_tokens=800,
+        top_p=0.95,
+        frequency_penalty=0,
+        presence_penalty=0,
+        stop=None
+    )
+
+    return completion
+
+
+def annotate(prediction_set, caption_files, output_dir):
+    """
+    Evaluates question and answer pairs using the Azure OpenAI judge model.
+    Returns a score for detail orientation.
+    """
+
+    for file in tqdm(caption_files):
+        key = file[:-5]  # Strip file extension
+        qa_set = prediction_set[key]
+        question = qa_set['q']
+        answer = str(qa_set['a'])
+        pred = qa_set['pred']
+        try:
+            message = [
+                {
+                    "role": "system",
+                    "content":
+                        "You are an intelligent chatbot designed for evaluating the detail orientation of generative outputs for video-based question-answer pairs. "
+                        "Your task is to compare the predicted answer with these correct answers and determine its level of detail, considering both completeness and specificity. Here's how you can accomplish the task:"
+                        "------"
+                        "##INSTRUCTIONS: "
+                        "- Check if the predicted answer covers all major points from the video. The response should not leave out any key aspects.\n"
+                        "- Evaluate whether the predicted answer includes specific details rather than just generic points. It should provide comprehensive information that is tied to specific elements of the video.\n"
+                        "- Consider synonyms or paraphrases as valid matches.\n"
+                        "- Provide a single evaluation score that reflects the level of detail orientation of the prediction, considering both completeness and specificity.",
+                },
+                {
+                    "role": "user",
+                    "content":
+                        "Please evaluate the following video-based question-answer pair:\n\n"
+                        f"Question: {question}\n"
+                        f"Correct Answers: {answer}\n"
+                        f"Predicted Answer: {pred}\n\n"
+                        "Provide your evaluation only as a detail orientation score where the detail orientation score is an integer value between 0 and 5, with 5 indicating the highest level of detail orientation. "
+                        "Please generate the response in the form of a Python dictionary string with keys 'score', where its value is the detail orientation score in INTEGER, not STRING."
+                        "DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. "
+                        "For example, your response should look like this: {'score': 4}.",
+                },
+            ]
+            completion = interaction(client, message)
+            # Convert response to a Python dictionary.
+            response_message = completion.choices[0].message.content
+            response_dict = ast.literal_eval(response_message)
+            result_qa_pair = [response_dict, qa_set]
+            # Save the scored question-answer pair to a json file.
+            with open(f"{output_dir}/{key}.json", "w") as f:
+                json.dump(result_qa_pair, f)
+
+        except Exception as e:
+            print(f"Error processing file '{key}': {e}")
+
+        time.sleep(1)
+
+
+def longest_repeating_substring(s):
+    n = len(s)
+    dp = [[0] * (n+1) for _ in range(n+1)]
+    res = ""
+    res_length = 0
+
+    index = 0
+    for i in range(1, n+1):
+        for j in range(i+1, n+1):
+            if (dp[i-1][j-1] > 0 and dp[i-1][j-1] < (j-i)) or s[i-1] == s[j-1]:
+                dp[i][j] = dp[i-1][j-1] + 1
+                if dp[i][j] > res_length:
+                    res_length = dp[i][j]
+                    index = max(i, index)
+            else:
+                dp[i][j] = 0
+
+    if res_length > 0:
+        for i in range(index-res_length+1, index+1):
+            res = res + s[i-1]
+
+    return res
+
+
+def main(args):
+    if args.num_chunks > 1:
+        pred_contents = []
+        for _idx in range(args.num_chunks):
+            file = os.path.join(args.pred_path, f"{args.num_chunks}_{_idx}.json")
+            pred_contents += [json.loads(line) for line in open(file)]
+    else:
+        pred_contents = [json.loads(line) for line in open(args.pred_path)]
+
+    # Dictionary to store the count of occurrences for each video_id
+    video_id_counts = {}
+    new_pred_contents = []
+
+    # Iterate through each sample in pred_contents
+    for sample in pred_contents:
+        video_id = sample["video_name"]
+        if video_id in video_id_counts:
+            video_id_counts[video_id] += 1
+        else:
+            video_id_counts[video_id] = 0
+
+        # Create a new sample with the modified key
+        new_sample = sample
+        new_sample["video_name"] = f"{video_id.split('/')[-1].split('.')[0]}_{video_id_counts[video_id]}"
+        new_pred_contents.append(new_sample)
+
+    # Generating list of id's and corresponding files
+    id_list = [x["video_name"] for x in new_pred_contents]
+    caption_files = [f"{id}.json" for id in id_list]
+
+    output_dir = args.output_dir
+    # Generate output directory if not exists.
+    if not os.path.exists(output_dir):
+        os.makedirs(output_dir)
+
+    # Preparing dictionary of question-answer sets
+    prediction_set = {}
+    for sample in new_pred_contents:
+        id = sample["video_name"]
+        question = sample["question"]
+        answer = sample["answer"]
+        pred = sample["pred"]
+        qa_set = {"q": question, "a": answer, "pred": pred}
+        prediction_set[id] = qa_set
+
+    num_tasks = args.num_tasks
+
+    # While loop to ensure that all captions are processed.
+    while True:
+        try:
+            # Files that have already been processed.
+            completed_files = os.listdir(output_dir)
+            print(f"completed_files: {len(completed_files)}")
+
+            # Files that have not been processed yet.
+            incomplete_files = [f for f in caption_files if f not in completed_files]
+            print(f"incomplete_files: {len(incomplete_files)}")
+
+            # Break the loop when there are no incomplete files
+            if len(incomplete_files) == 0:
+                break
+            if len(incomplete_files) <= num_tasks:
+                num_tasks = 1
+
+            # Split tasks into parts.
+            part_len = len(incomplete_files) // num_tasks
+            all_parts = [incomplete_files[i : i + part_len] for i in range(0, len(incomplete_files), part_len)]
+            task_args = [(prediction_set, part, args.output_dir) for part in all_parts]
+            print("Generate", len(all_parts), "subprocess.")
+
+            # Process the first part; remaining files are picked up on later
+            # passes of the while loop. (Parallel variant kept for reference.)
+            # with Pool() as pool:
+            #     pool.starmap(annotate, task_args)
+            annotate(*task_args[0])
+
+        except Exception as e:
+            print(f"Error: {e}")
+
+    # Combine all the processed files into one
+    combined_contents = {}
+    json_path = args.output_json
+
+    # Iterate through json files
+    for file_name in os.listdir(output_dir):
+        if file_name.endswith(".json"):
+            file_path = os.path.join(output_dir, file_name)
+            with open(file_path, "r") as json_file:
+                try:
+                    content = json.load(json_file)
+                    combined_contents[file_name[:-5]] = content
+                except Exception as e:
+                    print(f"Error: {e}")
+
+    # Calculate average score
+    score_sum = 0
+    count = 0
+    for key, result in combined_contents.items():
+        count += 1
+        try:
+            for _ in result[0].keys():
+                score_match = result[0][_]
+                score = int(score_match)
+                score_sum += score
+                break
+        except Exception as e:
+            print(f"Error processing file '{key}': {e}")
+    average_score = score_sum / count
+    combined_contents["average_score"] = average_score
+    with open(json_path, "w") as json_file:
+        json.dump(combined_contents, json_file, indent=4)
+    print("Average score for detailedness:", average_score)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="question-answer-generation-using-gpt-3")
+    parser.add_argument("--pred-path", required=True, help="The path to file containing prediction.")
+    parser.add_argument("--output-dir", required=True, help="The path to save annotation json files.")
+    parser.add_argument("--output-json", required=True, help="The path to save annotation final combined json file.")
+    parser.add_argument("--num-tasks", required=True, type=int, help="Number of splits.")
+    parser.add_argument("--num_chunks", default=1, type=int, help="Result splits")
+    parser.add_argument("--api-key", required=True, type=str, help="Azure OpenAI API key.")
+    parser.add_argument("--api-endpoint", required=True, type=str, help="Azure OpenAI API endpoint.")
+    parser.add_argument("--api-deployname", required=True, type=str, help="Azure OpenAI API deployname.")
+    args = parser.parse_args()
+
+    # Set the OpenAI API credentials.
+    os.environ["AZURE_OPENAI_KEY"] = args.api_key
+    os.environ["AZURE_OPENAI_ENDPOINT"] = args.api_endpoint
+    os.environ["AZURE_OPENAI_DEPLOYNAME"] = args.api_deployname
+
+    client = init()
+
+    main(args)
videollama2/eval/eval_video_qa_mvbench.py → VideoLLaMA2/videollama2/eval/eval_video_mcqa_mvbench.py RENAMED
File without changes
VideoLLaMA2/videollama2/eval/eval_video_mcqa_videomme.py ADDED
@@ -0,0 +1,277 @@
+import os
+import re
+import json
+import argparse
+from typing import List, Dict, Optional, Union
+
+CATEGORIES = [
+    "Knowledge",
+    "Film & Television",
+    "Sports Competition",
+    "Artistic Performance",
+    "Life Record",
+    "Multilingual"
+]
+
+SUB_CATEGORIES = [
+    "Humanity & History",
+    "Literature & Art",
+    "Biology & Medicine",
+    "Finance & Commerce",
+    "Astronomy",
+    "Geography",
+    "Law",
+    "Life Tip",
+    "Technology",
+    "Animation",
+    "Movie & TV Show",
+    "Documentary",
+    "News Report",
+    "Esports",
+    "Basketball",
+    "Football",
+    "Athletics",
+    "Other Sports",
+    "Stage Play",
+    "Magic Show",
+    "Variety Show",
+    "Acrobatics",
+    "Handicraft",
+    "Food",
+    "Fashion",
+    "Daily Life",
+    "Travel",
+    "Pet & Animal",
+    "Exercise",
+    "Multilingual"
+]
+
+TASK_CATEGORIES = [
+    "Temporal Perception",
+    "Spatial Perception",
+    "Attribute Perception",
+    "Action Recognition",
+    "Object Recognition",
+    "OCR Problems",
+    "Counting Problem",
+    "Temporal Reasoning",
+    "Spatial Reasoning",
+    "Action Reasoning",
+    "Object Reasoning",
+    "Information Synopsis",
+]
+
+
+def extract_characters_regex(s):
+    s = s.strip()
+    answer_prefixes = [
+        "The best answer is",
+        "The correct answer is",
+        "The answer is",
+        "The answer",
+        "The best option is",
+        "The correct option is",
+        "Best answer:",
+        "Best option:",
+    ]
+    for answer_prefix in answer_prefixes:
+        s = s.replace(answer_prefix, "")
+
+    if len(s.split()) > 10 and not re.search("[ABCD]", s):
+        return ""
+    matches = re.search(r'[ABCD]', s)
+    if matches is None:
+        return ""
+    return matches[0]
+
+
+def eval_your_results(
+        your_results_path: str,
+        video_types: Optional[Union[List[str], str]] = None,
+        skip_missing: Optional[bool] = True,
+        return_categories_accuracy: Optional[bool] = True,
+        return_sub_categories_accuracy: Optional[bool] = False,
+        return_task_types_accuracy: Optional[bool] = False,
+        gt_answer_key: Optional[str] = "answer",
+        your_answer_key: Optional[str] = "response"
+    ):
+    """
+    Evaluate your results against the ground truth.
+
+    Args:
+    - your_results_path (str): Path to your results file.
+    - video_types (Optional[Union[List[str], str]]): List of video types to evaluate.
+    - skip_missing (Optional[bool]): If True, missing files will be skipped. If False, an error will be raised if there are missing files.
+    - return_categories_accuracy (Optional[bool]): If True, the accuracy for each video category will be returned.
+    - return_sub_categories_accuracy (Optional[bool]): If True, the accuracy for each video sub category will be returned.
+    - return_task_types_accuracy (Optional[bool]): If True, the accuracy for each task category will be returned.
+    - gt_answer_key (Optional[str]): Key to access the ground truth answer in the results file.
+    - your_answer_key (Optional[str]): Key to access your answer in the results file.
+    """
+
+    # Load your results
+    with open(your_results_path, 'r') as f:
+        your_results = json.load(f)
+
+    if isinstance(video_types, str):
+        video_types = video_types.split(",")
+
+    q_type_dict = {}
+    v_type_dict = {}
+    v_sub_type_dict = {}
+
+    for video_type in video_types:
+
+        # Filter your results based on video types
+        your_results_video_type = [item for item in your_results if item["duration"] == video_type]
+
+        # Task categories
+        q_type_dict[video_type] = {}
+        for q_type in TASK_CATEGORIES:
+            q_type_dict[video_type][q_type] = {"correct": 0, "answered": 0}
+
+        # Video categories
+        v_type_dict[video_type] = {}
+        for v_type in CATEGORIES:
+            v_type_dict[video_type][v_type] = {"correct": 0, "answered": 0}
+
+        v_sub_type_dict[video_type] = {}
+        for v_sub_type in SUB_CATEGORIES:
+            v_sub_type_dict[video_type][v_sub_type] = {"correct": 0, "answered": 0}
+
+        if not skip_missing:
+            # Check if the number of files in your results and ground truth are the same
+            assert len(your_results_video_type) == 300, f"Number of files in {video_type} is not 300. Check if there are missing files."
+
+        for item in your_results_video_type:
+
+            if skip_missing and item["missing"]:
+                continue
+
+            # Get the video category, sub category and question category
+            video_category = item["domain"]
+            video_sub_category = item["sub_category"]
+
+            questions = item["questions"]
+
+            for question in questions:
+                q_type = question["task_type"]
+
+                # Get the ground truth and your response
+                gt_answer = question[gt_answer_key]
+                response = question[your_answer_key]
+
+                # Extract the answer from the response
+                extraction = extract_characters_regex(response)
+
+                if extraction != "":
+                    q_type_dict[video_type][q_type]["answered"] += 1
+                    q_type_dict[video_type][q_type]["correct"] += extraction == gt_answer
+
+                    v_type_dict[video_type][video_category]["answered"] += 1
+                    v_type_dict[video_type][video_category]["correct"] += extraction == gt_answer
+
+                    v_sub_type_dict[video_type][video_sub_category]["answered"] += 1
+                    v_sub_type_dict[video_type][video_sub_category]["correct"] += extraction == gt_answer
+
+    # Print the results for each video type
+    for video_type in video_types:
+
+        print("=====================================")
+        print(f"Evaluation on video Type: {video_type}")
+        print("=====================================")
+        if return_categories_accuracy:
+            print("-------------------------------------")
+            print("Video Domains")
+            print("-------------------------------------")
+            for v_type in v_type_dict[video_type]:
+                print(f"{v_type}: {100 * v_type_dict[video_type][v_type]['correct'] / v_type_dict[video_type][v_type]['answered'] if v_type_dict[video_type][v_type]['answered'] > 0 else 0 : .1f}%")
+        if return_sub_categories_accuracy:
+            print("-------------------------------------")
+            print("Video Sub Categories")
+            print("-------------------------------------")
+            for v_sub_type in v_sub_type_dict[video_type]:
+                print(f"{v_sub_type}: {100 * v_sub_type_dict[video_type][v_sub_type]['correct'] / v_sub_type_dict[video_type][v_sub_type]['answered'] if v_sub_type_dict[video_type][v_sub_type]['answered'] > 0 else 0 : .1f}%")
+        if return_task_types_accuracy:
+            print("-------------------------------------")
+            print("Task Categories")
+            print("-------------------------------------")
+            for q_type in q_type_dict[video_type]:
+                print(f"{q_type}: {100 * q_type_dict[video_type][q_type]['correct'] / q_type_dict[video_type][q_type]['answered'] if q_type_dict[video_type][q_type]['answered'] > 0 else 0 : .1f}%")
+
+        print("-------------------------------------")
+        print("Overall Performance")
+        print("-------------------------------------")
+        total_correct = sum([q_type_dict[video_type][q_type]["correct"] for q_type in TASK_CATEGORIES])
+        total_answered = sum([q_type_dict[video_type][q_type]["answered"] for q_type in TASK_CATEGORIES])
+        print(f"Overall: {100 * total_correct / total_answered if total_answered > 0 else 0 : .1f}%")
+
+        print("\n")
+
+    # Print the results for the entire dataset
+    print("=====================================")
+    print("Evaluation on the entire dataset")
+    print("=====================================")
+
+    if return_categories_accuracy:
+        print("-------------------------------------")
+        print("Video Categories")
+        print("-------------------------------------")
+        for v_type in CATEGORIES:
+            total_correct = sum([v_type_dict[video_type][v_type]["correct"] for video_type in video_types])
+            total_answered = sum([v_type_dict[video_type][v_type]["answered"] for video_type in video_types])
+            print(f"{v_type}: {100 * total_correct / total_answered if total_answered > 0 else 0 : .1f}%")
+
+    if return_sub_categories_accuracy:
+        print("-------------------------------------")
+        print("Video Sub Categories")
+        print("-------------------------------------")
+        for v_sub_type in SUB_CATEGORIES:
+            total_correct = sum([v_sub_type_dict[video_type][v_sub_type]["correct"] for video_type in video_types])
+            total_answered = sum([v_sub_type_dict[video_type][v_sub_type]["answered"] for video_type in video_types])
+            print(f"{v_sub_type}: {100 * total_correct / total_answered if total_answered > 0 else 0 : .1f}%")
+
+    if return_task_types_accuracy:
+        print("-------------------------------------")
+        print("Task Categories")
+        print("-------------------------------------")
+        for q_type in TASK_CATEGORIES:
+            total_correct = sum([q_type_dict[video_type][q_type]["correct"] for video_type in video_types])
+            total_answered = sum([q_type_dict[video_type][q_type]["answered"] for video_type in video_types])
+            print(f"{q_type}: {100 * total_correct / total_answered if total_answered > 0 else 0 : .1f}%")
+
+    print("-------------------------------------")
+    print("Overall Performance")
+    print("-------------------------------------")
+    total_correct = sum([sum([q_type_dict[video_type][q_type]["correct"] for q_type in TASK_CATEGORIES]) for video_type in video_types])
+    total_answered = sum([sum([q_type_dict[video_type][q_type]["answered"] for q_type in TASK_CATEGORIES]) for video_type in video_types])
+    print(f"Overall: {100 * total_correct / total_answered if total_answered > 0 else 0 : .1f}%")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--results_file", type=str, required=True)
+    parser.add_argument("--video_duration_type", type=str, required=True)
+    parser.add_argument("--return_categories_accuracy", action="store_true")
+    parser.add_argument("--return_sub_categories_accuracy", action="store_true")
+    parser.add_argument("--return_task_types_accuracy", action="store_true")
+    parser.add_argument("--skip_missing", action="store_true")
+
+    args = parser.parse_args()
+
+    eval_your_results(
+        args.results_file,
+        video_types=args.video_duration_type,
+        skip_missing=args.skip_missing,
+        return_categories_accuracy=args.return_categories_accuracy,
+        return_sub_categories_accuracy=args.return_sub_categories_accuracy,
+        return_task_types_accuracy=args.return_task_types_accuracy,
+    )
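Scoring a Video-MME run then reduces to a single call to `eval_your_results`; the sketch below assumes a merged results file at the hypothetical path `videomme_results.json`:

```python
from videollama2.eval.eval_video_mcqa_videomme import eval_your_results

eval_your_results(
    'videomme_results.json',          # hypothetical merged results file
    video_types='short,medium,long',  # comma-separated string is split internally
    skip_missing=True,                # tolerate videos the model never answered
    return_categories_accuracy=True,
)
```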
videollama2/eval/eval_video_qa_gpt.py → VideoLLaMA2/videollama2/eval/eval_video_oqa_activitynet.py RENAMED
File without changes
videollama2/eval/eval_benchmark_1_correctness.py → VideoLLaMA2/videollama2/eval/eval_video_oqa_vcgpt_1_correctness.py RENAMED
File without changes
videollama2/eval/eval_benchmark_2_detailed_orientation.py → VideoLLaMA2/videollama2/eval/eval_video_oqa_vcgpt_2_detailed_orientation.py RENAMED
File without changes
videollama2/eval/eval_benchmark_3_context.py → VideoLLaMA2/videollama2/eval/eval_video_oqa_vcgpt_3_context.py RENAMED
File without changes
videollama2/eval/eval_benchmark_4_temporal.py → VideoLLaMA2/videollama2/eval/eval_video_oqa_vcgpt_4_temporal.py RENAMED
File without changes