"""The ELEVATER benchmark""" |
|
|
|
import json |
|
import os |
|
|
|
import datasets |
|
|
|
from zipfile import ZipFile |
|
from io import BytesIO |
|
from PIL import Image |
|
|
|
_VERSION = "1.0.0" |
|
_BASE_URL = "https://cvinthewildeus.blob.core.windows.net/datasets/" |
|
_FEW_SHOTS_FILE_PATH="subidx/id_label/data_train_#shot/" |
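# Usage sketch (hedged): the script file name "elevater.py" below is an
# illustrative assumption, and num_shots/random_seed are forwarded by
# `datasets.load_dataset` to ELEVATERConfig as config keyword arguments.
#
#   import datasets
#
#   # Full training/test splits of one dataset:
#   cifar10 = datasets.load_dataset("elevater.py", "cifar-10")
#
#   # 5-shot training subset (only 5/20/50 shots with seeds 0/1/2 exist):
#   cifar10_5shot = datasets.load_dataset(
#       "elevater.py", "cifar-10", num_shots=5, random_seed=0
#   )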

_ELEVATER_CITATION = """\
@article{li2022elevater,
  title={ELEVATER: A Benchmark and Toolkit for Evaluating Language-Augmented Visual Models},
  author={Li, Chunyuan and Liu, Haotian and Li, Liunian Harold and Zhang, Pengchuan and Aneja, Jyoti and Yang, Jianwei and Jin, Ping and Lee, Yong Jae and Hu, Houdong and Liu, Zicheng and Gao, Jianfeng},
  journal={Neural Information Processing Systems},
  year={2022}
}
Note that each ELEVATER dataset has its own citation. Please see the source to
get the correct citation for each contained dataset.
"""

_CIFAR_10_CITATION = """\
@article{krizhevsky2009learning,
  title={Learning multiple layers of features from tiny images},
  author={Krizhevsky, Alex and Hinton, Geoffrey and others},
  year={2009},
  publisher={Toronto, ON, Canada}
}"""

_VOC_2007_CLASSIFICATION_CITATION = """\
@misc{pascal-voc-2007,
  author = "Everingham, M. and Van~Gool, L. and Williams, C. K. I. and Winn, J. and Zisserman, A.",
  title = "The {PASCAL} {V}isual {O}bject {C}lasses {C}hallenge 2007 {(VOC2007)} {R}esults",
  howpublished = "http://www.pascal-network.org/challenges/VOC/voc2007/workshop/index.html"
}"""

_GTSRB_CITATION = """\
@inproceedings{Houben-IJCNN-2013,
  author = {Sebastian Houben and Johannes Stallkamp and Jan Salmen and Marc Schlipsing and Christian Igel},
  booktitle = {International Joint Conference on Neural Networks},
  title = {Detection of Traffic Signs in Real-World Images: The {G}erman {T}raffic {S}ign {D}etection {B}enchmark},
  number = {1288},
  year = {2013},
}"""

_COUNTRY211_CITATION = """\
@inproceedings{radford2021learning,
  title={Learning transferable visual models from natural language supervision},
  author={Radford, Alec and Kim, Jong Wook and Hallacy, Chris and Ramesh, Aditya and Goh, Gabriel and Agarwal, Sandhini and Sastry, Girish and Askell, Amanda and Mishkin, Pamela and Clark, Jack and others},
  booktitle={International Conference on Machine Learning},
  pages={8748--8763},
  year={2021},
  organization={PMLR}
}"""

_RENDERED_SST2_CITATION = """\
@inproceedings{radford2021learning,
  title={Learning transferable visual models from natural language supervision},
  author={Radford, Alec and Kim, Jong Wook and Hallacy, Chris and Ramesh, Aditya and Goh, Gabriel and Agarwal, Sandhini and Sastry, Girish and Askell, Amanda and Mishkin, Pamela and Clark, Jack and others},
  booktitle={International Conference on Machine Learning},
  pages={8748--8763},
  year={2021},
  organization={PMLR}
}"""

_KITTI_DISTANCE_CITATION = """\
@inproceedings{fritsch2013new,
  title={A new performance measure and evaluation benchmark for road detection algorithms},
  author={Fritsch, Jannik and Kuehnl, Tobias and Geiger, Andreas},
  booktitle={16th International IEEE Conference on Intelligent Transportation Systems (ITSC 2013)},
  pages={1693--1700},
  year={2013},
  organization={IEEE}
}"""

_EOROSAT_CLIP_CITATION = """\
@article{helber2019eurosat,
  title={Eurosat: A novel dataset and deep learning benchmark for land use and land cover classification},
  author={Helber, Patrick and Bischke, Benjamin and Dengel, Andreas and Borth, Damian},
  journal={IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing},
  volume={12},
  number={7},
  pages={2217--2226},
  year={2019},
  publisher={IEEE}
}"""

_RESISC45_CLIP_CITATION = """\
@article{cheng2017remote,
  title={Remote sensing image scene classification: Benchmark and state of the art},
  author={Cheng, Gong and Han, Junwei and Lu, Xiaoqiang},
  journal={Proceedings of the IEEE},
  volume={105},
  number={10},
  pages={1865--1883},
  year={2017},
  publisher={IEEE}
}"""

_CALTECH_101_CITATION = """\
@inproceedings{fei2004learning,
  title={Learning generative visual models from few training examples: An incremental bayesian approach tested on 101 object categories},
  author={Fei-Fei, Li and Fergus, Rob and Perona, Pietro},
  booktitle={2004 conference on computer vision and pattern recognition workshop},
  pages={178--178},
  year={2004},
  organization={IEEE}
}"""

_CIFAR_100_CITATION = """\
@article{krizhevsky2009learning,
  title={Learning multiple layers of features from tiny images},
  author={Krizhevsky, Alex and Hinton, Geoffrey and others},
  year={2009},
  publisher={Toronto, ON, Canada}
}"""

_DTD_CITATION = """\
@inproceedings{cimpoi2014describing,
  title={Describing textures in the wild},
  author={Cimpoi, Mircea and Maji, Subhransu and Kokkinos, Iasonas and Mohamed, Sammy and Vedaldi, Andrea},
  booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
  pages={3606--3613},
  year={2014}
}"""

_FGVC_AIRCRAFT_2013B_VARIANTS102_CITATION = """\
@article{maji2013fine,
  title={Fine-grained visual classification of aircraft},
  author={Maji, Subhransu and Rahtu, Esa and Kannala, Juho and Blaschko, Matthew and Vedaldi, Andrea},
  journal={arXiv preprint arXiv:1306.5151},
  year={2013}
}"""

_FOOD_101_CITATION = """\
@inproceedings{bossard2014food,
  title={Food-101--mining discriminative components with random forests},
  author={Bossard, Lukas and Guillaumin, Matthieu and Gool, Luc Van},
  booktitle={European conference on computer vision},
  pages={446--461},
  year={2014},
  organization={Springer}
}"""

_MNIST_CITATION = """\
@article{deng2012mnist,
  title={The mnist database of handwritten digit images for machine learning research [best of the web]},
  author={Deng, Li},
  journal={IEEE signal processing magazine},
  volume={29},
  number={6},
  pages={141--142},
  year={2012},
  publisher={IEEE}
}"""

_OXFORD_FLOWER_102_CITATION = """\
@inproceedings{nilsback2008automated,
  title={Automated flower classification over a large number of classes},
  author={Nilsback, Maria-Elena and Zisserman, Andrew},
  booktitle={2008 Sixth Indian Conference on Computer Vision, Graphics \& Image Processing},
  pages={722--729},
  year={2008},
  organization={IEEE}
}"""

_OXFORD_IIIT_PETS_CITATION = """\
@inproceedings{parkhi2012cats,
  title={Cats and dogs},
  author={Parkhi, Omkar M and Vedaldi, Andrea and Zisserman, Andrew and Jawahar, CV},
  booktitle={2012 IEEE conference on computer vision and pattern recognition},
  pages={3498--3505},
  year={2012},
  organization={IEEE}
}"""

_PATCH_CAMELYON_CITATION = """\
@inproceedings{veeling2018rotation,
  title={Rotation equivariant CNNs for digital pathology},
  author={Veeling, Bastiaan S and Linmans, Jasper and Winkens, Jim and Cohen, Taco and Welling, Max},
  booktitle={International Conference on Medical image computing and computer-assisted intervention},
  pages={210--218},
  year={2018},
  organization={Springer}
}"""

_STANFORD_CARS_CITATION = """\
@inproceedings{krause20133d,
  title={3d object representations for fine-grained categorization},
  author={Krause, Jonathan and Stark, Michael and Deng, Jia and Fei-Fei, Li},
  booktitle={Proceedings of the IEEE international conference on computer vision workshops},
  pages={554--561},
  year={2013}
}"""

_FER_2013_CITATION = """\
@misc{challenges-in-representation-learning-facial-expression-recognition-challenge,
  author = {Dumitru, Ian Goodfellow, Yoshua Bengio},
  title = {Challenges in Representation Learning: Facial Expression Recognition Challenge},
  publisher = {Kaggle},
  year = {2013},
  url = {https://kaggle.com/competitions/challenges-in-representation-learning-facial-expression-recognition-challenge}
}"""

_HATEFUL_MEMES_CITATION = """\
@article{kiela2020hateful,
  title={The hateful memes challenge: Detecting hate speech in multimodal memes},
  author={Kiela, Douwe and Firooz, Hamed and Mohan, Aravind and Goswami, Vedanuj and Singh, Amanpreet and Ringshia, Pratik and Testuggine, Davide},
  journal={Advances in Neural Information Processing Systems},
  volume={33},
  pages={2611--2624},
  year={2020}
}"""


class ELEVATERConfig(datasets.BuilderConfig):
    """BuilderConfig for ELEVATER."""

    def __init__(self, name, description, contact, version, type_, format_,
                 root_folder, labelmap, num_classes, train, val, test, few_shots_file_path,
                 citation, url, num_shots, random_seed, **kwargs):
        """BuilderConfig for ELEVATER.

        Args:
          name: `string`, name of the dataset config.
          description: `string`, description of the dataset.
          contact: `string`, contact for the dataset, if any.
          version: `string`, version of the dataset config.
          type_: `string`, task type, either "classification_multiclass" or
            "classification_multilabel".
          format_: `string`, index format; "coco" for JSON indices, None for
            plain-text indices.
          root_folder: `string`, dataset folder relative to the base URL.
          labelmap: `string`, file name of the label map, if any.
          num_classes: `int`, number of classes.
          train: `dict`, index path, zip files and image count of the train split.
          val: `dict`, the same for the validation split, or None.
          test: `dict`, the same for the test split.
          few_shots_file_path: `string`, path template of the few-shot index files.
          citation: `string`, citation for the dataset.
          url: `string`, base URL of the dataset files.
          num_shots: `int`, number of shots per class for few-shot training
            (-1 uses the full training set; 5, 20 and 50 are available).
          random_seed: `int`, random seed of the few-shot subset (0, 1 or 2;
            -1 when unused).
          **kwargs: keyword arguments forwarded to super.
        """
        super(ELEVATERConfig, self).__init__(**kwargs)
        self.name = name
        self.description = description
        self.contact = contact
        self.version = version
        self.type = type_
        self.format = format_
        self.root_folder = root_folder
        self.labelmap = labelmap
        self.num_classes = num_classes
        self.train = train
        self.val = val
        self.test = test
        self.few_shots_file_path = few_shots_file_path
        self.citation = citation
        self.url = url
        self.num_shots = num_shots
        self.random_seed = random_seed


class ELEVATER(datasets.GeneratorBasedBuilder):
    """Builder for the ELEVATER image classification benchmark datasets."""

    BUILDER_CONFIGS = [
        ELEVATERConfig(
            name="cifar-10",
            description="The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images.",
            contact="pinjin",
            version=_VERSION,
            type_="classification_multiclass",
            format_=None,
            root_folder="classification/cifar_10_20211007",
            labelmap="labels.txt",
            num_classes=10,
            train={
                "index_path": "train.txt",
                "files_for_local_usage": ["train.zip"],
                "num_images": 50000
            },
            val=None,
            test={
                "index_path": "test.txt",
                "files_for_local_usage": ["val.zip"],
                "num_images": 10000
            },
            citation=_CIFAR_10_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
        ELEVATERConfig(
            name="voc-2007-classification",
            description="Voc2007 classification dataset.",
            contact="pinjin",
            version=_VERSION,
            type_="classification_multilabel",
            format_=None,
            root_folder="classification/voc2007_20211007",
            train={
                "index_path": "train_ic.txt",
                "files_for_local_usage": ["train.zip"],
                "num_images": 2501
            },
            val={
                "index_path": "val_ic.txt",
                "files_for_local_usage": ["train.zip"],
                "num_images": 2510
            },
            test={
                "index_path": "test_ic.txt",
                "files_for_local_usage": ["test.zip"],
                "num_images": 4952
            },
            labelmap="labels.txt",
            num_classes=20,
            citation=_VOC_2007_CLASSIFICATION_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
        ELEVATERConfig(
            name="gtsrb",
            description="The German Traffic Sign Recognition Benchmark (GTSRB) is a multi-class image classification benchmark in the domain of advanced driver assistance systems and autonomous driving. It was first published at IJCNN 2011.",
            contact=None,
            version=_VERSION,
            type_="classification_multiclass",
            format_=None,
            root_folder="classification/gtsrb_20210923",
            train={
                "index_path": "train.txt",
                "files_for_local_usage": ["train.zip"],
                "num_images": 26640
            },
            val={
                "index_path": "val.txt",
                "files_for_local_usage": ["val.zip"],
                "num_images": 12569
            },
            test={
                "index_path": "test.txt",
                "files_for_local_usage": ["final_test.zip"],
                "num_images": 12630
            },
            labelmap="labelmap.txt",
            num_classes=43,
            citation=_GTSRB_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
        ELEVATERConfig(
            name="country211",
            description="Country211 is an internal OpenAI dataset designed to assess the geolocation capability of visual representations. It filters the YFCC100m dataset (Thomee et al., 2016) to find 211 countries (defined as having an ISO-3166 country code) that have at least 300 photos with GPS coordinates. OpenAI built a balanced dataset with 211 categories, by sampling 200 photos for training and 100 photos for testing, for each country.",
            contact=None,
            version=_VERSION,
            type_="classification_multiclass",
            format_=None,
            root_folder="classification/country211_20210924",
            train={
                "index_path": "train.txt",
                "files_for_local_usage": ["train.zip"],
                "num_images": 31650
            },
            val={
                "index_path": "valid.txt",
                "files_for_local_usage": ["valid.zip"],
                "num_images": 10550
            },
            test={
                "index_path": "test.txt",
                "files_for_local_usage": ["test.zip"],
                "num_images": 21100
            },
            labelmap="labels.txt",
            num_classes=211,
            citation=_COUNTRY211_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
        ELEVATERConfig(
            name="rendered-sst2",
            description="Dataset is from CLIP: The Rendered SST2 dataset is designed to measure the optical character recognition capability of visual representations. To do so, we used the sentences from the Stanford Sentiment Treebank dataset (Socher et al., 2013) and rendered them into images, with black texts on a white background, in a 448×448 resolution.",
            contact=None,
            version=_VERSION,
            type_="classification_multiclass",
            format_=None,
            root_folder="classification/rendered_sst2_20210924",
            train={
                "index_path": "train.txt",
                "files_for_local_usage": ["train.zip"],
                "num_images": 6920
            },
            val={
                "index_path": "valid.txt",
                "files_for_local_usage": ["valid.zip"],
                "num_images": 827
            },
            test={
                "index_path": "test.txt",
                "files_for_local_usage": ["test.zip"],
                "num_images": 1821
            },
            labelmap="labels.txt",
            num_classes=2,
            citation=_RENDERED_SST2_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
        ELEVATERConfig(
            name="kitti-distance",
            description="The kitti-distance dataset was taken from the VTAB benchmark, and the task was to predict how distant a vehicle is in the photo. More details: https://github.com/openai/CLIP/issues/86",
            contact=None,
            version=_VERSION,
            type_="classification_multiclass",
            format_="coco",
            root_folder="classification/kitti_distance_20210923",
            train={
                "index_path": "train_meta.json",
                "files_for_local_usage": ["train_images.zip"],
                "num_images": 6347
            },
            val={
                "index_path": "validation_meta.json",
                "files_for_local_usage": ["validation_images.zip"],
                "num_images": 423
            },
            test={
                "index_path": "test_meta.json",
                "files_for_local_usage": ["test_images.zip"],
                "num_images": 711
            },
            labelmap=None,
            num_classes=4,
            citation=_KITTI_DISTANCE_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
        ELEVATERConfig(
            name="eurosat_clip",
            description="Dataset sampled by CLIP from EuroSAT (the EuroSAT dataset is based on Sentinel-2 satellite images covering 13 spectral bands and consists of 10 classes with 27000 labeled and geo-referenced samples), see: https://github.com/openai/CLIP/issues/45",
            contact=None,
            version=_VERSION,
            type_="classification_multiclass",
            format_=None,
            root_folder="classification/eurosat_clip_20210930",
            train={
                "index_path": "train.txt",
                "files_for_local_usage": ["2750.zip"],
                "num_images": 5000
            },
            val={
                "index_path": "val.txt",
                "files_for_local_usage": ["2750.zip"],
                "num_images": 5000
            },
            test={
                "index_path": "test.txt",
                "files_for_local_usage": ["2750.zip"],
                "num_images": 5000
            },
            labelmap="labels.txt",
            num_classes=10,
            citation=_EOROSAT_CLIP_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
        ELEVATERConfig(
            name="resisc45_clip",
            description="Dataset sampled by CLIP, see: https://github.com/openai/CLIP/issues/45. RESISC45 dataset is a publicly available benchmark for Remote Sensing Image Scene Classification",
            contact=None,
            version=_VERSION,
            type_="classification_multiclass",
            format_="coco",
            root_folder="classification/resisc45_clip_20210924",
            train={
                "index_path": "train.json",
                "files_for_local_usage": ["images.zip"],
                "num_images": 3150
            },
            val={
                "index_path": "val.json",
                "files_for_local_usage": ["images.zip"],
                "num_images": 3150
            },
            test={
                "index_path": "test.json",
                "files_for_local_usage": ["images.zip"],
                "num_images": 25200
            },
            labelmap="labels.txt",
            num_classes=45,
            citation=_RESISC45_CLIP_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
        ELEVATERConfig(
            name="caltech-101",
            description="Pictures of objects belonging to 101 categories. About 40 to 800 images per category. Most categories have about 50 images. Collected in September 2003 by Fei-Fei Li, Marco Andreetto, and Marc'Aurelio Ranzato. The size of each image is roughly 300 x 200 pixels.",
            contact="pinjin",
            version=_VERSION,
            type_="classification_multiclass",
            format_=None,
            root_folder="classification/caltech_101_20211007",
            train={
                "index_path": "train.txt",
                "files_for_local_usage": ["train.zip"],
                "num_images": 3060
            },
            val=None,
            test={
                "index_path": "test.txt",
                "files_for_local_usage": ["test.zip"],
                "num_images": 6084
            },
            labelmap=None,
            num_classes=102,
            citation=_CALTECH_101_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
        ELEVATERConfig(
            name="cifar-100",
            description="This dataset is just like the CIFAR-10, except it has 100 classes containing 600 images each. There are 500 training images and 100 testing images per class. The 100 classes in the CIFAR-100 are grouped into 20 superclasses. Each image comes with a 'fine' label (the class to which it belongs) and a 'coarse' label (the superclass to which it belongs).",
            contact="pinjin",
            version=_VERSION,
            type_="classification_multiclass",
            format_=None,
            root_folder="classification/cifar100_20200721",
            train={
                "index_path": "train_images.txt",
                "files_for_local_usage": ["train_images.zip"],
                "num_images": 50000
            },
            val=None,
            test={
                "index_path": "test_images.txt",
                "files_for_local_usage": ["test_images.zip"],
                "num_images": 10000
            },
            labelmap="labels.txt",
            num_classes=100,
            citation=_CIFAR_100_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
        ELEVATERConfig(
            name="dtd",
            description="The Describable Textures Dataset (DTD) is an evolving collection of textural images in the wild, annotated with a series of human-centric attributes, inspired by the perceptual properties of textures. This data is made available to the computer vision community for research purposes.",
            contact="pinjin",
            version=_VERSION,
            type_="classification_multiclass",
            format_=None,
            root_folder="classification/dtd_20211007",
            train={
                "index_path": "train.txt",
                "files_for_local_usage": ["train.zip"],
                "num_images": 1880
            },
            val={
                "index_path": "val.txt",
                "files_for_local_usage": ["val.zip"],
                "num_images": 1880
            },
            test={
                "index_path": "test.txt",
                "files_for_local_usage": ["test.zip"],
                "num_images": 1880
            },
            labelmap="labels.txt",
            num_classes=47,
            citation=_DTD_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
        ELEVATERConfig(
            name="fgvc-aircraft-2013b-variants102",
            description="Fine-Grained Visual Classification of Aircraft (FGVC-Aircraft) is a benchmark dataset for the fine grained visual categorization of aircraft.",
            contact="pinjin",
            version=_VERSION,
            type_="classification_multiclass",
            format_=None,
            root_folder="classification/fgvc_aircraft_2013b_variants102_20211007",
            train={
                "index_path": "train.txt",
                "files_for_local_usage": ["train.zip"],
                "num_images": 3334
            },
            val={
                "index_path": "val.txt",
                "files_for_local_usage": ["val.zip"],
                "num_images": 3333
            },
            test={
                "index_path": "test.txt",
                "files_for_local_usage": ["test.zip"],
                "num_images": 3333
            },
            labelmap="labels.txt",
            num_classes=100,
            citation=_FGVC_AIRCRAFT_2013B_VARIANTS102_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
        ELEVATERConfig(
            name="food-101",
            description="This dataset consists of 101 food categories, with 101000 images. For each class, 250 manually reviewed test images are provided as well as 750 training images. On purpose, the training images were not cleaned, and thus still contain some amount of noise. This comes mostly in the form of intense colors and sometimes wrong labels. All images were rescaled to have a maximum side length of 512 pixels.",
            contact="pinjin",
            version=_VERSION,
            type_="classification_multiclass",
            format_=None,
            root_folder="classification/food_101_20211007",
            train={
                "index_path": "train.txt",
                "files_for_local_usage": ["train.zip"],
                "num_images": 75750
            },
            val=None,
            test={
                "index_path": "test.txt",
                "files_for_local_usage": ["val.zip"],
                "num_images": 25250
            },
            labelmap="labels.txt",
            num_classes=101,
            citation=_FOOD_101_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
        ELEVATERConfig(
            name="mnist",
            description="The MNIST database of handwritten digits, available from this page, has a training set of 60,000 examples, and a test set of 10,000 examples. It is a subset of a larger set available from NIST. The digits have been size-normalized and centered in a fixed-size image.",
            contact="pinjin",
            version=_VERSION,
            type_="classification_multiclass",
            format_=None,
            root_folder="classification/mnist_20211008",
            train={
                "index_path": "train.txt",
                "files_for_local_usage": ["train.zip"],
                "num_images": 60000
            },
            val=None,
            test={
                "index_path": "test.txt",
                "files_for_local_usage": ["val.zip"],
                "num_images": 10000
            },
            labelmap="labels.txt",
            num_classes=10,
            citation=_MNIST_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
        ELEVATERConfig(
            name="oxford-flower-102",
            description="A dataset consisting of 102 flower categories. The flowers chosen are those commonly occurring in the United Kingdom. Each class consists of between 40 and 258 images.",
            contact="pinjin",
            version=_VERSION,
            type_="classification_multiclass",
            format_=None,
            root_folder="classification/oxford_flower_102_20211007",
            train={
                "index_path": "train.txt",
                "files_for_local_usage": ["train.zip"],
                "num_images": 1020
            },
            val={
                "index_path": "val.txt",
                "files_for_local_usage": ["val.zip"],
                "num_images": 1020
            },
            test={
                "index_path": "test.txt",
                "files_for_local_usage": ["test.zip"],
                "num_images": 6149
            },
            labelmap="labels.txt",
            num_classes=102,
            citation=_OXFORD_FLOWER_102_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
        ELEVATERConfig(
            name="oxford-iiit-pets",
            description="A 37-category pet dataset with roughly 200 images for each class. The images have large variations in scale, pose and lighting.",
            contact="pinjin",
            version=_VERSION,
            type_="classification_multiclass",
            format_=None,
            root_folder="classification/oxford_iiit_pets_20211007",
            train={
                "index_path": "train.txt",
                "files_for_local_usage": ["train.zip"],
                "num_images": 3680
            },
            val=None,
            test={
                "index_path": "test.txt",
                "files_for_local_usage": ["val.zip"],
                "num_images": 3669
            },
            labelmap="labels.txt",
            num_classes=37,
            citation=_OXFORD_IIIT_PETS_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
        ELEVATERConfig(
            name="patch-camelyon",
            description="The PatchCamelyon benchmark is a new and challenging image classification dataset. It consists of 327,680 color images (96 x 96px) extracted from histopathologic scans of lymph node sections. Each image is annotated with a binary label indicating presence of metastatic tissue. PCam provides a new benchmark for machine learning models: bigger than CIFAR-10, smaller than ImageNet, trainable on a single GPU.",
            contact="pinjin",
            version=_VERSION,
            type_="classification_multiclass",
            format_=None,
            root_folder="classification/patch_camelyon_20211007",
            train={
                "index_path": "train.txt",
                "files_for_local_usage": ["train.zip"],
                "num_images": 262144
            },
            val={
                "index_path": "val.txt",
                "files_for_local_usage": ["validation.zip"],
                "num_images": 32768
            },
            test={
                "index_path": "test.txt",
                "files_for_local_usage": ["test.zip"],
                "num_images": 32768
            },
            labelmap="labels.txt",
            num_classes=2,
            citation=_PATCH_CAMELYON_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
        ELEVATERConfig(
            name="stanford-cars",
            description="The Cars dataset contains 16,185 images of 196 classes of cars. The data is split into 8,144 training images and 8,041 testing images, where each class has been split roughly in a 50-50 split. Classes are typically at the level of Make, Model, Year, e.g. 2012 Tesla Model S or 2012 BMW M3 coupe.",
            contact="pinjin",
            version=_VERSION,
            type_="classification_multiclass",
            format_=None,
            root_folder="classification/stanford_cars_20211007",
            train={
                "index_path": "train.txt",
                "files_for_local_usage": ["train.zip"],
                "num_images": 8144
            },
            val=None,
            test={
                "index_path": "test.txt",
                "files_for_local_usage": ["val.zip"],
                "num_images": 8041
            },
            labelmap="labels.txt",
            num_classes=196,
            citation=_STANFORD_CARS_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
        ELEVATERConfig(
            name="fer-2013",
            description="The data consists of 48x48 pixel grayscale images of faces. The task is to categorize each face based on the emotion shown in the facial expression into one of seven categories (0=Angry, 1=Disgust, 2=Fear, 3=Happy, 4=Sad, 5=Surprise, 6=Neutral).",
            contact="pinjin",
            version=_VERSION,
            type_="classification_multiclass",
            format_=None,
            root_folder="classification/fer_2013_20211008",
            train={
                "index_path": "train.txt",
                "files_for_local_usage": ["train.zip"],
                "num_images": 28709
            },
            val={
                "index_path": "val.txt",
                "files_for_local_usage": ["val.zip"],
                "num_images": 3589
            },
            test={
                "index_path": "test.txt",
                "files_for_local_usage": ["test.zip"],
                "num_images": 3589
            },
            labelmap="labels.txt",
            num_classes=7,
            citation=_FER_2013_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
        ELEVATERConfig(
            name="hateful-memes",
            description="At the massive scale of the internet, the task of detecting multimodal hate is both extremely important and particularly difficult. Relying on just text or just images to determine whether a meme is hateful is insufficient. By using certain types of images, text, or combinations, a meme can become a multimodal type of hate speech.",
            contact="pinjin",
            version=_VERSION,
            type_="classification_multiclass",
            format_="coco",
            root_folder="classification/hateful_memes_20211014",
            train={
                "index_path": "train_meta.json",
                "files_for_local_usage": ["img.zip"],
                "num_images": 8500
            },
            val=None,
            test={
                "index_path": "test_meta.json",
                "files_for_local_usage": ["img.zip"],
                "num_images": 500
            },
            labelmap="labels.txt",
            num_classes=2,
            citation=_HATEFUL_MEMES_CITATION,
            url=_BASE_URL,
            few_shots_file_path=_FEW_SHOTS_FILE_PATH,
            num_shots=-1,
            random_seed=-1,
        ),
    ]

    def _info(self):
        if self.config.name == "voc-2007-classification":
            features = datasets.Features(
                {
                    "image_file_path": datasets.Value("string"),
                    "image": datasets.Image(),
                    "labels": [datasets.Value("int32")]
                }
            )
        else:
            features = datasets.Features(
                {
                    "image_file_path": datasets.Value("string"),
                    "image": datasets.Image(),
                    "labels": datasets.Value("int32")
                }
            )
        return datasets.DatasetInfo(
            description=self.config.description,
            features=features,
            citation=self.config.citation + '\n' + _ELEVATER_CITATION,
        )

    def _split_generators(self, dl_manager):
        _URL = self.config.url + self.config.root_folder
        urls_to_download = {
            "train": {
                "images": os.path.join(_URL, self.config.train['files_for_local_usage'][0]),
                "index": os.path.join(_URL, self.config.train['index_path']),
            },
            "test": {
                "images": os.path.join(_URL, self.config.test['files_for_local_usage'][0]),
                "index": os.path.join(_URL, self.config.test['index_path']),
            }
        }
        if self.config.num_shots in [5, 20, 50]:
            assert self.config.random_seed in [0, 1, 2]
            few_shots_file_path_temp = _FEW_SHOTS_FILE_PATH.replace('#', str(self.config.num_shots))
            file_name = 'shot' + str(self.config.num_shots) + '_seed' + str(self.config.random_seed) + '.json'
            few_shot_path = os.path.join(_BASE_URL, few_shots_file_path_temp, self.config.name, file_name)
            urls_to_download["train"]["few_shot"] = few_shot_path
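            # For illustration (hypothetical values num_shots=5, random_seed=0 and
            # the "cifar-10" config), the few-shot index URL built above would be:
            #   https://cvinthewildeus.blob.core.windows.net/datasets/subidx/id_label/data_train_5shot/cifar-10/shot5_seed0.json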

        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        few_shot_train_file = downloaded_files["train"].get("few_shot")

        split_generators = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": downloaded_files["train"]["images"],
                    "index": downloaded_files["train"]["index"],
                    "few_shot": few_shot_train_file,
                    "split": datasets.Split.TRAIN,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images": downloaded_files["test"]["images"],
                    "index": downloaded_files["test"]["index"],
                    "few_shot": None,
                    "split": datasets.Split.TEST,
                },
            ),
        ]

        return split_generators

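    # Index formats read by _generate_examples (inferred from the parsing code
    # below; the sample file name is an illustrative assumption):
    #   * format_="coco" configs (kitti-distance, resisc45_clip, hateful-memes):
    #     a JSON index whose "images" entries hold a "file_name" such as
    #     "images.zip@folder/img_0001.jpg" and whose "annotations" entries hold
    #     the matching "category_id".
    #   * all other configs: a plain-text index with one
    #     "<zip-name>@<relative/path> <label>" line per image; multilabel
    #     datasets use a comma-separated label list.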
    def _generate_examples(self, images, index, few_shot, split):

        if few_shot is not None:
            # Relative image paths ("<zip-name>@<relative/path>" entries) that
            # belong to the requested few-shot subset.
            few_shot_images = set()
            with open(few_shot, encoding="utf-8") as f:
                data = json.load(f)
                for item in data:
                    few_shot_images.add(item['id'].split('@')[-1])

        if self.config.name in ["kitti-distance", "resisc45_clip", "hateful-memes"]:
            with open(index, encoding="utf-8") as f:
                data = json.load(f)
                for i in range(len(data['images'])):
                    label = data['annotations'][i]['category_id']
                    path_temp = data['images'][i]['file_name'].split('@')[1]
                    path = os.path.join(images, path_temp)

                    if few_shot is None or path_temp in few_shot_images:
                        yield i, {
                            "image_file_path": path,
                            "image": path,
                            "labels": label,
                        }

        else:
            with open(index, "r", encoding="utf-8") as f:
                lines = f.readlines()
                for i, line in enumerate(lines):
                    line_split = line.strip().split(" ")

                    if len(line_split) > 3:
                        # The image path itself contains spaces: everything but
                        # the last token is the path.
                        image_path_temp = " ".join(line_split[:-1])
                        path_temp = image_path_temp.split('@')[1]
                    else:
                        path_temp = line_split[0].split('@')[1]

                    path = os.path.join(images, path_temp)

                    if self.config.type == "classification_multilabel":
                        label = [int(x) for x in line_split[-1].split(',')]
                    else:
                        try:
                            label = int(line_split[-1])
                        except (IndexError, ValueError):
                            # Fallback for index lines without a parsable label;
                            # only expected for the eurosat_clip test split.
                            if self.config.name == "eurosat_clip" and split == 'test':
                                label = 9

                    if few_shot is None or path_temp in few_shot_images:
                        yield i, {
                            "image_file_path": path,
                            "image": path,
                            "labels": label,
                        }