|
--- |
|
dataset_info: |
|
- config_name: ai2d |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 437329975.5701048 |
|
num_examples: 2445 |
|
download_size: 439151768 |
|
dataset_size: 437329975.5701048 |
|
- config_name: aokvqa |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 871997710.0 |
|
num_examples: 16539 |
|
download_size: 893265070 |
|
dataset_size: 871997710.0 |
|
- config_name: chartqa |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 784762327.9534782 |
|
num_examples: 18266 |
|
download_size: 803229473 |
|
dataset_size: 784762327.9534782 |
|
- config_name: finqa |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 135268568.0 |
|
num_examples: 5276 |
|
download_size: 123698250 |
|
dataset_size: 135268568.0 |
|
- config_name: geomverse |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 951640204.0 |
|
num_examples: 9303 |
|
download_size: 323746516 |
|
dataset_size: 951640204.0 |
|
- config_name: hitab |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 161130580.0 |
|
num_examples: 2500 |
|
download_size: 158295807 |
|
dataset_size: 161130580.0 |
|
- config_name: iam |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 1129180352.0 |
|
num_examples: 5663 |
|
download_size: 1128935602 |
|
dataset_size: 1129180352.0 |
|
- config_name: iconqa |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 264523321.3774117 |
|
num_examples: 27308 |
|
download_size: 326653170 |
|
dataset_size: 264523321.3774117 |
|
- config_name: infographic_vqa |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 291677986.0 |
|
num_examples: 2118 |
|
download_size: 292351760 |
|
dataset_size: 291677986.0 |
|
- config_name: intergps |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 25528774.867153686 |
|
num_examples: 1308 |
|
download_size: 25388245 |
|
dataset_size: 25528774.867153686 |
|
- config_name: multihiertt |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 1356766489.046 |
|
num_examples: 7619 |
|
download_size: 1360814135 |
|
dataset_size: 1356766489.046 |
|
- config_name: robut_sqa |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 679135952.0 |
|
num_examples: 8514 |
|
download_size: 678722272 |
|
dataset_size: 679135952.0 |
|
- config_name: screen2words |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 1670723783.0 |
|
num_examples: 15730 |
|
download_size: 1346254268 |
|
dataset_size: 1670723783.0 |
|
- config_name: spot_the_diff |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 1643123792.0 |
|
num_examples: 8566 |
|
download_size: 1526740548 |
|
dataset_size: 1643123792.0 |
|
- config_name: st_vqa |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 696265340.0 |
|
num_examples: 17247 |
|
download_size: 720462890 |
|
dataset_size: 696265340.0 |
|
- config_name: tabmwp |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 265348817.73984778 |
|
num_examples: 22723 |
|
download_size: 306648229 |
|
dataset_size: 265348817.73984778 |
|
- config_name: tat_qa |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 73213942.0 |
|
num_examples: 2199 |
|
download_size: 70862028 |
|
dataset_size: 73213942.0 |
|
- config_name: tqa |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 380346870.806369 |
|
num_examples: 1493 |
|
download_size: 378238311 |
|
dataset_size: 380346870.806369 |
|
- config_name: vistext |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 541250281.0 |
|
num_examples: 9969 |
|
download_size: 386023352 |
|
dataset_size: 541250281.0 |
|
- config_name: visualmrc |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 2941051627.2639995 |
|
num_examples: 3027 |
|
download_size: 2912911810 |
|
dataset_size: 2941051627.2639995 |
|
- config_name: vqarad |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 16561537.0 |
|
num_examples: 313 |
|
download_size: 16226241 |
|
dataset_size: 16561537.0 |
|
- config_name: vsr |
|
features: |
|
- name: images |
|
sequence: image |
|
- name: texts |
|
list: |
|
- name: user |
|
dtype: string |
|
- name: assistant |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 107489763.0 |
|
num_examples: 2157 |
|
download_size: 107576214 |
|
dataset_size: 107489763.0 |
|
configs: |
|
- config_name: ai2d |
|
data_files: |
|
- split: train |
|
path: ai2d/train-* |
|
- config_name: aokvqa |
|
data_files: |
|
- split: train |
|
path: aokvqa/train-* |
|
- config_name: chartqa |
|
data_files: |
|
- split: train |
|
path: chartqa/train-* |
|
- config_name: finqa |
|
data_files: |
|
- split: train |
|
path: finqa/train-* |
|
- config_name: geomverse |
|
data_files: |
|
- split: train |
|
path: geomverse/train-* |
|
- config_name: hitab |
|
data_files: |
|
- split: train |
|
path: hitab/train-* |
|
- config_name: iam |
|
data_files: |
|
- split: train |
|
path: iam/train-* |
|
- config_name: iconqa |
|
data_files: |
|
- split: train |
|
path: iconqa/train-* |
|
- config_name: infographic_vqa |
|
data_files: |
|
- split: train |
|
path: infographic_vqa/train-* |
|
- config_name: intergps |
|
data_files: |
|
- split: train |
|
path: intergps/train-* |
|
- config_name: multihiertt |
|
data_files: |
|
- split: train |
|
path: multihiertt/train-* |
|
- config_name: robut_sqa |
|
data_files: |
|
- split: train |
|
path: robut_sqa/train-* |
|
- config_name: screen2words |
|
data_files: |
|
- split: train |
|
path: screen2words/train-* |
|
- config_name: spot_the_diff |
|
data_files: |
|
- split: train |
|
path: spot_the_diff/train-* |
|
- config_name: st_vqa |
|
data_files: |
|
- split: train |
|
path: st_vqa/train-* |
|
- config_name: tabmwp |
|
data_files: |
|
- split: train |
|
path: tabmwp/train-* |
|
- config_name: tat_qa |
|
data_files: |
|
- split: train |
|
path: tat_qa/train-* |
|
- config_name: tqa |
|
data_files: |
|
- split: train |
|
path: tqa/train-* |
|
- config_name: vistext |
|
data_files: |
|
- split: train |
|
path: vistext/train-* |
|
- config_name: visualmrc |
|
data_files: |
|
- split: train |
|
path: visualmrc/train-* |
|
- config_name: vqarad |
|
data_files: |
|
- split: train |
|
path: vqarad/train-* |
|
- config_name: vsr |
|
data_files: |
|
- split: train |
|
path: vsr/train-* |
|
--- |
|
# Dataset Card for "the_cauldron" |
|
|
|
[More Information Needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)