# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PP4AV dataset."""
import os
from glob import glob
from tqdm import tqdm
from pathlib import Path
from typing import List
import re
from collections import defaultdict
import datasets
_HOMEPAGE = "https://github.com/khaclinh/pp4av"
_LICENSE = "Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (CC BY-NC-ND 4.0)"
_CITATION = """\
@inproceedings{PP4AV2022,
title = {PP4AV: A benchmarking Dataset for Privacy-preserving Autonomous Driving},
author = {Linh Trinh and Phuong Pham and Hoang Trinh and Nguyen Bach and Dung Nguyen and Giang Nguyen and Huy Nguyen},
booktitle = {IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)},
year = {2023}
}
"""
_DESCRIPTION = """\
PP4AV is the first public dataset with faces and license plates annotated in driving scenarios.
PP4AV provides 3,447 annotated driving images covering both faces and license plates.
For the normal camera data, images were sampled from existing videos recorded by cameras mounted on moving vehicles driving around European cities.
The images in PP4AV were sampled from 6 European cities at various times of day, including nighttime.
For the fisheye camera data, 244 images were selected from the front, rear, left, and right fisheye cameras of the WoodScape dataset.
The PP4AV dataset can be used as a benchmark suite (evaluation dataset) for data anonymization models in autonomous driving.
"""
_REPO = "https://huggingface.co/datasets/khaclinh/pp4av/resolve/main/data"
_URLS = {
"test": f"{_REPO}/images.zip",
"annot": f"{_REPO}/soiling_annotations.zip",
}
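# Expected archive layout (an assumption inferred from the generator logic below):
#   images.zip/<config_name>/*.png               -- driving images for each camera/city config
#   soiling_annotations.zip/<config_name>/*.txt  -- one annotation file per image, one box per
#                                                    line in YOLO format: "<class> <cx> <cy> <w> <h>"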
IMG_EXT = ['png', 'jpeg', 'jpg']
_CITIES = ["zurich", "strasbourg", "stuttgart", "switzerland", "netherlands_day", "netherlands_night", "paris"]
class PP4AVConfig(datasets.BuilderConfig):
    """BuilderConfig for PP4AV."""

    def __init__(self, name, **kwargs):
        """BuilderConfig for PP4AV.

        Args:
            name: name of the configuration (the "fisheye" camera or a city).
            **kwargs: keyword arguments forwarded to super.
        """
        super(PP4AVConfig, self).__init__(version=datasets.Version("1.0.0", ""), name=name, **kwargs)
class PP4AV(datasets.GeneratorBasedBuilder):
    """PP4AV dataset."""

    BUILDER_CONFIGS = [
        PP4AVConfig("fisheye"),
    ]
    BUILDER_CONFIGS += [PP4AVConfig(city) for city in _CITIES]

    DEFAULT_CONFIG_NAME = "fisheye"
    VERSION = datasets.Version("1.0.0")
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    # Boxes are normalized [x_min, y_min, x_max, y_max] corners.
                    "faces": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=4)),
                    "plates": datasets.Sequence(datasets.Sequence(datasets.Value("float32"), length=4)),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "name": self.config.name,
                    "data_dir": data_dir["test"],
                    "annot_dir": data_dir["annot"],
                },
            ),
        ]
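    # Each generated example pairs one image with its normalized xyxy boxes (schematic):
    #   {"image": <path to .png>, "faces": [[x1, y1, x2, y2], ...], "plates": [[x1, y1, x2, y2], ...]}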
    def _generate_examples(self, name, data_dir, annot_dir):
        image_dir = os.path.join(data_dir, name)
        annotation_dir = os.path.join(annot_dir, name)
        idx = 0
        for i_file in glob(os.path.join(image_dir, "*.png")):
            plates = []
            faces = []
            # Each image has a matching .txt annotation file with one box per line:
            # "<class> <cx> <cy> <w> <h>" in normalized YOLO (center) format.
            img_relative_file = os.path.relpath(i_file, image_dir)
            gt_relative_path = img_relative_file.replace(".png", ".txt")
            gt_path = os.path.join(annotation_dir, gt_relative_path)
            annotation = defaultdict(list)
            with open(gt_path, "r", encoding="utf-8") as f:
                line = f.readline().strip()
                while line:
                    assert re.match(r"^\d( [\d\.]+){4,5}$", line), "Incorrect line: %s" % line
                    cls, cx, cy, w, h = line.split()[:5]
                    cls, cx, cy, w, h = int(cls), float(cx), float(cy), float(w), float(h)
                    # Convert from YOLO (center x, center y, width, height) to corner coordinates.
                    x1, y1, x2, y2 = cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2
                    annotation[cls].append([x1, y1, x2, y2])
                    line = f.readline().strip()
            for cls, bboxes in annotation.items():
                for x1, y1, x2, y2 in bboxes:
                    # Class 0 is a face; any other class is a license plate.
                    if cls == 0:
                        faces.append([x1, y1, x2, y2])
                    else:
                        plates.append([x1, y1, x2, y2])
            yield idx, {"image": i_file, "faces": faces, "plates": plates}
            idx += 1
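

# Minimal usage sketch (assumption: this script is served from the Hub dataset
# repo "khaclinh/pp4av", so `load_dataset` can resolve it by that ID). It loads
# the fisheye test split and prints the face boxes of the first example.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("khaclinh/pp4av", "fisheye", split="test")
    print(dataset[0]["faces"])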