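'''Hugging Face datasets loading script for the COCO 2017 keypoints dataset.'''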
import json
import datasets
from pathlib import Path
_HOMEPAGE = 'https://cocodataset.org/'
_LICENSE = 'Creative Commons Attribution 4.0 License'
_DESCRIPTION = 'COCO is a large-scale object detection, segmentation, and captioning dataset.'
_CITATION = '''\
@article{cocodataset,
author = {Tsung{-}Yi Lin and Michael Maire and Serge J. Belongie and Lubomir D. Bourdev and Ross B. Girshick and James Hays and Pietro Perona and Deva Ramanan and Piotr Doll{\'{a}}r and C. Lawrence Zitnick},
title = {Microsoft {COCO:} Common Objects in Context},
journal = {CoRR},
volume = {abs/1405.0312},
year = {2014},
url = {http://arxiv.org/abs/1405.0312},
archivePrefix = {arXiv},
eprint = {1405.0312},
timestamp = {Mon, 13 Aug 2018 16:48:13 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/LinMBHPRDZ14},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
'''
class COCOKeypointsConfig(datasets.BuilderConfig):
    '''BuilderConfig for the COCO 2017 keypoints dataset.'''
    def __init__(self, description, homepage, annotation_urls, **kwargs):
        super().__init__(version=datasets.Version('1.0.0', ''), **kwargs)
        self.description = description
        self.homepage = homepage
        # Images come from the official COCO mirrors; the keypoint
        # annotations are the JSON-Lines archives bundled with this repo.
        url = 'http://images.cocodataset.org/zips/'
        self.train_image_url = url + 'train2017.zip'
        self.val_image_url = url + 'val2017.zip'
        self.train_annotation_urls = annotation_urls['train']
        self.val_annotation_urls = annotation_urls['validation']
class COCOKeypoints(datasets.GeneratorBasedBuilder):
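    '''Dataset builder pairing COCO 2017 images with per-person bounding boxes and keypoints.'''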
BUILDER_CONFIGS = [
COCOKeypointsConfig(
description=_DESCRIPTION,
homepage=_HOMEPAGE,
annotation_urls={
'train': 'data/keypoints_train.zip',
'validation': 'data/keypoints_validation.zip'
},
)
]
    def _info(self):
        # Each example holds one image, N person bounding boxes, and the
        # matching N sets of 17 keypoints, each an (x, y, visibility) triple.
        features = datasets.Features({
            'image': datasets.Image(mode='RGB'),
            'bboxes': datasets.Sequence(
                feature=datasets.Sequence(
                    feature=datasets.Value(dtype='float32'),
                    length=4  # one box as four floats
                )
            ),
            'keypoints': datasets.Sequence(
                feature=datasets.Sequence(
                    feature=datasets.Sequence(
                        feature=datasets.Value(dtype='int32')
                    ),
                    length=17  # 17 keypoints per person
                )
            )
        })
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION
)
    def _split_generators(self, dl_manager):
        # download_and_extract caches each archive and returns its local
        # extraction directory.
        train_image_path = dl_manager.download_and_extract(self.config.train_image_url)
        validation_image_path = dl_manager.download_and_extract(self.config.val_image_url)
        train_annotation_paths = dl_manager.download_and_extract(self.config.train_annotation_urls)
        val_annotation_paths = dl_manager.download_and_extract(self.config.val_annotation_urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
'image_path': f'{train_image_path}/train2017',
'annotation_path': f'{train_annotation_paths}/keypoints_train.jsonl'
}
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
'image_path': f'{validation_image_path}/val2017',
'annotation_path': f'{val_annotation_paths}/keypoints_validation.jsonl'
}
)
]
    def _generate_examples(self, image_path, annotation_path):
        '''Yield (key, example) pairs from one JSON-Lines annotation file.'''
        image_path = Path(image_path)
        with open(annotation_path, 'r', encoding='utf-8') as f:
            # Each line describes one image: its file name plus the bounding
            # boxes and keypoints of every annotated person in that image.
            for idx, line in enumerate(f):
                obj = json.loads(line)
                yield idx, {
                    'image': str(image_path / obj['image']),
                    'bboxes': obj['bboxes'],
                    'keypoints': obj['keypoints']
                }
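
# A minimal usage sketch, assuming this script is saved as `coco_keypoints.py`
# next to the `data/` annotation zips it references. Note that loading
# downloads and extracts the official COCO 2017 image archives
# (train2017.zip alone is roughly 18 GB).
if __name__ == '__main__':
    dataset = datasets.load_dataset('coco_keypoints.py', split='validation')
    print(dataset[0]['bboxes'], dataset[0]['keypoints'])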