khagn committed on
Commit
1f987aa
1 Parent(s): b944585

Upload VALERIE22.py

Files changed (1)
  1. VALERIE22.py +231 -0
VALERIE22.py ADDED
@@ -0,0 +1,231 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VALERIE22 dataset"""

import os
import json
import glob

import datasets


_HOMEPAGE = "https://huggingface.co/datasets/Intel/VALERIE22"

_LICENSE = "Creative Commons — CC0 1.0 Universal"

_CITATION = """\
tba
"""

_DESCRIPTION = """\
The VALERIE22 dataset was generated with the VALERIE procedural tools pipeline, which provides a photorealistic sensor simulation rendered from automatically synthesized scenes. The dataset comes with a uniquely rich set of metadata, allowing the extraction of specific scene and semantic features (such as pixel-accurate occlusion rates, positions in the scene, and distance and angle to the camera). This enables a multitude of possible tests on the data, and we hope to stimulate research on understanding the performance of DNNs.
"""
_REPO = "https://huggingface.co/datasets/Intel/VALERIE22/resolve/main"

_SEQUENCES = {
    "train": [
        "intel_results_sequence_0050.zip",
        "intel_results_sequence_0052.zip",
        "intel_results_sequence_0057.zip",
        "intel_results_sequence_0058.zip",
        "intel_results_sequence_0059.zip",
        "intel_results_sequence_0060.zip",
        "intel_results_sequence_0062_part1.zip",
        "intel_results_sequence_0062_part2.zip",
    ],
    "validation": ["intel_results_sequence_0062_part1.zip", "intel_results_sequence_0062_part2.zip"],
    "test": ["intel_results_sequence_0062_part1.zip", "intel_results_sequence_0062_part2.zip"],
}

_URLS = {
    "train": [f"{_REPO}/data/{sequence}" for sequence in _SEQUENCES["train"]],
    "validation": [f"{_REPO}/data/{sequence}" for sequence in _SEQUENCES["validation"]],
    "test": [f"{_REPO}/data/{sequence}" for sequence in _SEQUENCES["test"]],
}
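
# For illustration, the first entry of _URLS["train"] resolves to:
#   https://huggingface.co/datasets/Intel/VALERIE22/resolve/main/data/intel_results_sequence_0050.zip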


class VALERIE22(datasets.GeneratorBasedBuilder):
    """VALERIE22 dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "image_distorted": datasets.Image(),
                    "persons_png": datasets.Sequence(
                        {
                            "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                            "bbox_vis": datasets.Sequence(datasets.Value("float32"), length=4),
                            "occlusion": datasets.Value("float32"),
                            "distance": datasets.Value("float32"),
                            "v_x": datasets.Value("float32"),
                            "v_y": datasets.Value("float32"),
                            "truncated": datasets.Value("bool"),
                            "total_pixels_object": datasets.Value("float32"),
                            "total_visible_pixels_object": datasets.Value("float32"),
                            "contrast_rgb_full": datasets.Value("float32"),
                            "contrast_edge": datasets.Value("float32"),
                            "contrast_rgb": datasets.Value("float32"),
                            "luminance": datasets.Value("float32"),
                            "perceived_lightness": datasets.Value("float32"),
                            "3dbbox": datasets.Sequence(datasets.Value("float32"), length=6),  # 3 x center, 3 x size
                        }
                    ),
                    "persons_png_distorted": datasets.Sequence(
                        {
                            "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                            "bbox_vis": datasets.Sequence(datasets.Value("float32"), length=4),
                            "occlusion": datasets.Value("float32"),
                            "distance": datasets.Value("float32"),
                            "v_x": datasets.Value("float32"),
                            "v_y": datasets.Value("float32"),
                            "truncated": datasets.Value("bool"),
                            "total_pixels_object": datasets.Value("float32"),
                            "total_visible_pixels_object": datasets.Value("float32"),
                            "contrast_rgb_full": datasets.Value("float32"),
                            "contrast_edge": datasets.Value("float32"),
                            "contrast_rgb": datasets.Value("float32"),
                            "luminance": datasets.Value("float32"),
                            "perceived_lightness": datasets.Value("float32"),
                            "3dbbox": datasets.Sequence(datasets.Value("float32"), length=6),  # 3 x center, 3 x size
                        }
                    ),
                    "semantic_group_segmentation": datasets.Image(),
                    "semantic_instance_segmentation": datasets.Image(),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split": "train",
                    "data_dirs": data_dir["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "split": "test",
                    "data_dirs": data_dir["test"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "split": "validation",
                    "data_dirs": data_dir["validation"],
                },
            ),
        ]

    def _generate_examples(self, split, data_dirs):
        # Resolve the extracted directory for every archive of this split.
        # Sequence 0062 ships in two parts: part1 holds the regular data,
        # while part2 extracts to a "*_b" directory containing the distorted
        # images and is handled separately below.
        sequence_dirs = []
        for data_dir, sequence in zip(data_dirs, _SEQUENCES[split]):
            sequence = sequence.replace(".zip", "")
            if "_part1" in sequence:
                sequence = sequence.replace("_part1", "")
            if "_part2" in sequence:
                sequence_0062_part2_dir = os.path.join(data_dir, sequence.replace("_part2", "_b"))
                continue
            sequence_dirs.append(os.path.join(data_dir, sequence))

        idx = 0
        for sequence_dir in sequence_dirs:
            for filename in glob.glob(os.path.join(sequence_dir, "sensor/camera/left/png", "*.png")):
                image_file_path = filename

                # The distorted images of sequence 0062 live in the part2 ("_b") directory.
                if "_0062" in sequence_dir:
                    image_distorted_file_path = os.path.join(sequence_0062_part2_dir, "sensor/camera/left/png_distorted/", os.path.basename(filename))
                else:
                    image_distorted_file_path = filename.replace("/png/", "/png_distorted/")

                # Derive the ground-truth annotation paths from the image path.
                persons_png_path = filename.replace("sensor/camera/left/png/", "ground-truth/2d-bounding-box_json/")
                persons_distorted_png_path = filename.replace("sensor/camera/left/png/", "ground-truth/2d-bounding-box_json_png_distorted/")
                semantic_group_segmentation_file_path = filename.replace("sensor/camera/left/png/", "ground-truth/semantic-group-segmentation_png/")
                semantic_instance_segmentation_file_path = filename.replace("sensor/camera/left/png/", "ground-truth/semantic-instance-segmentation_png/")

                # Skip frames for which any ground-truth file is missing.
                if not (
                    os.path.isfile(image_file_path)
                    and os.path.isfile(image_distorted_file_path)
                    and os.path.isfile(persons_png_path.replace(".png", ".json"))
                    and os.path.isfile(persons_distorted_png_path.replace(".png", ".json"))
                    and os.path.isfile(semantic_group_segmentation_file_path)
                    and os.path.isfile(semantic_instance_segmentation_file_path)
                ):
                    continue

                with open(persons_png_path.replace(".png", ".json"), "r") as json_file:
                    bb_person_json = json.load(json_file)

                with open(persons_distorted_png_path.replace(".png", ".json"), "r") as json_file:
                    bb_person_distorted_json = json.load(json_file)

                # The 3D bounding boxes are shared between the undistorted and
                # distorted annotations of a frame.
                threed_bb_person_path = filename.replace("sensor/camera/left/png/", "ground-truth/3d-bounding-box_json/")
                with open(threed_bb_person_path.replace(".png", ".json"), "r") as json_file:
                    threed_bb_person_json = json.load(json_file)

                persons_png = []
                persons_png_distorted = []
                for key in bb_person_json:
                    persons_png.append(
                        {
                            "bbox": [bb_person_json[key]["bb"]["c_x"], bb_person_json[key]["bb"]["c_y"], bb_person_json[key]["bb"]["w"], bb_person_json[key]["bb"]["h"]],
                            "bbox_vis": [bb_person_json[key]["bb_vis"]["c_x"], bb_person_json[key]["bb_vis"]["c_y"], bb_person_json[key]["bb_vis"]["w"], bb_person_json[key]["bb_vis"]["h"]],
                            "occlusion": bb_person_json[key]["occlusion"],
                            "distance": bb_person_json[key]["distance"],
                            "v_x": bb_person_json[key]["v_x"],
                            "v_y": bb_person_json[key]["v_y"],
                            "truncated": bb_person_json[key]["truncated"],
                            "total_pixels_object": bb_person_json[key]["total_pixels_object"],
                            "total_visible_pixels_object": bb_person_json[key]["total_visible_pixels_object"],
                            "contrast_rgb_full": bb_person_json[key]["contrast_rgb_full"],
                            "contrast_edge": bb_person_json[key]["contrast_edge"],
                            "contrast_rgb": bb_person_json[key]["contrast_rgb"],
                            "luminance": bb_person_json[key]["luminance"],
                            "perceived_lightness": bb_person_json[key]["perceived_lightness"],
                            # 3 x center, 3 x size
                            "3dbbox": [
                                threed_bb_person_json[key]["center"][0],
                                threed_bb_person_json[key]["center"][1],
                                threed_bb_person_json[key]["center"][2],
                                threed_bb_person_json[key]["size"][0],
                                threed_bb_person_json[key]["size"][1],
                                threed_bb_person_json[key]["size"][2],
                            ],
                        }
                    )

                    persons_png_distorted.append(
                        {
                            "bbox": [bb_person_distorted_json[key]["bb"]["c_x"], bb_person_distorted_json[key]["bb"]["c_y"], bb_person_distorted_json[key]["bb"]["w"], bb_person_distorted_json[key]["bb"]["h"]],
                            "bbox_vis": [bb_person_distorted_json[key]["bb_vis"]["c_x"], bb_person_distorted_json[key]["bb_vis"]["c_y"], bb_person_distorted_json[key]["bb_vis"]["w"], bb_person_distorted_json[key]["bb_vis"]["h"]],
                            "occlusion": bb_person_distorted_json[key]["occlusion"],
                            "distance": bb_person_distorted_json[key]["distance"],
                            "v_x": bb_person_distorted_json[key]["v_x"],
                            "v_y": bb_person_distorted_json[key]["v_y"],
                            "truncated": bb_person_distorted_json[key]["truncated"],
                            "total_pixels_object": bb_person_distorted_json[key]["total_pixels_object"],
                            "total_visible_pixels_object": bb_person_distorted_json[key]["total_visible_pixels_object"],
                            "contrast_rgb_full": bb_person_distorted_json[key]["contrast_rgb_full"],
                            "contrast_edge": bb_person_distorted_json[key]["contrast_edge"],
                            "contrast_rgb": bb_person_distorted_json[key]["contrast_rgb"],
                            "luminance": bb_person_distorted_json[key]["luminance"],
                            "perceived_lightness": bb_person_distorted_json[key]["perceived_lightness"],
                            # 3 x center, 3 x size
                            "3dbbox": [
                                threed_bb_person_json[key]["center"][0],
                                threed_bb_person_json[key]["center"][1],
                                threed_bb_person_json[key]["center"][2],
                                threed_bb_person_json[key]["size"][0],
                                threed_bb_person_json[key]["size"][1],
                                threed_bb_person_json[key]["size"][2],
                            ],
                        }
                    )

                yield idx, {
                    "image": image_file_path,
                    "image_distorted": image_distorted_file_path,
                    "persons_png": persons_png,
                    "persons_png_distorted": persons_png_distorted,
                    "semantic_group_segmentation": semantic_group_segmentation_file_path,
                    "semantic_instance_segmentation": semantic_instance_segmentation_file_path,
                }
                idx += 1
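
A minimal usage sketch for the loader defined above, assuming a version of the datasets library that still supports script-based datasets (e.g. 2.x, where newer releases require trust_remote_code=True). The column names follow the feature schema declared in _info.

from datasets import load_dataset

# Download the archives and build the validation split from the script above.
ds = load_dataset("Intel/VALERIE22", split="validation", trust_remote_code=True)

example = ds[0]
image = example["image"]                          # undistorted rendering (PIL image)
occlusions = example["persons_png"]["occlusion"]  # per-person occlusion rates

# One of the metadata-driven selections the description advertises: keep only
# frames containing at least one nearly unoccluded person. Restricting the
# filter to the annotation column avoids decoding the images.
visible = ds.filter(
    lambda persons: any(o < 0.1 for o in persons["occlusion"]),
    input_columns="persons_png",
)

Note that a datasets.Sequence of a dict is returned column-wise, so example["persons_png"] is a dict of lists (one list per field) rather than a list of per-person dicts.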