|
import datasets |
|
import numpy as np |
|
import pandas as pd |
|
import PIL.Image |
|
import PIL.ImageOps |
|
|
|
# BibTeX entry surfaced on the Hugging Face dataset card.
_CITATION = """\

@InProceedings{huggingface:dataset,

title = {face_masks},

author = {TrainingDataPro},

year = {2023}

}

"""

# Human-readable summary shown in the generated DatasetInfo.
_DESCRIPTION = """\

Dataset includes 250 000 images, 4 types of mask worn on 28 000 unique faces.

All images were collected using the Toloka.ai crowdsourcing service and

validated by TrainingData.pro

"""

# Dataset repository name under the TrainingDataPro namespace.
_NAME = 'face_masks'

# Dataset card URL on the Hugging Face Hub.
_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"

# License identifier reported in DatasetInfo.
_LICENSE = "cc-by-nc-nd-4.0"

# Base URL from which the image archive and CSV annotations are downloaded.
_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"
|
|
|
|
|
def exif_transpose(img):
    """Rotate/flip ``img`` according to its EXIF orientation tag.

    Fallback used when the installed Pillow lacks
    ``PIL.ImageOps.exif_transpose`` (see ``load_image_file``). Returns the
    (possibly new) image; falsy inputs are returned unchanged.
    """
    if not img:
        return img

    exif_orientation_tag = 274  # standard EXIF "Orientation" tag id

    # Fix: the original called img._getexif() three times (twice in the
    # condition, once for the data). Read the EXIF dict exactly once.
    exif_data = img._getexif() if hasattr(img, "_getexif") else None

    if isinstance(exif_data, dict) and exif_orientation_tag in exif_data:
        orientation = exif_data[exif_orientation_tag]

        if orientation == 2:
            # Mirrored horizontally
            img = img.transpose(PIL.Image.FLIP_LEFT_RIGHT)
        elif orientation == 3:
            # Rotated 180 degrees
            img = img.rotate(180)
        elif orientation == 4:
            # Mirrored vertically
            img = img.rotate(180).transpose(PIL.Image.FLIP_LEFT_RIGHT)
        elif orientation == 5:
            # Mirrored along the top-left diagonal
            img = img.rotate(-90,
                             expand=True).transpose(PIL.Image.FLIP_LEFT_RIGHT)
        elif orientation == 6:
            # Rotated 90 degrees clockwise
            img = img.rotate(-90, expand=True)
        elif orientation == 7:
            # Mirrored along the top-right diagonal
            img = img.rotate(90,
                             expand=True).transpose(PIL.Image.FLIP_LEFT_RIGHT)
        elif orientation == 8:
            # Rotated 90 degrees counter-clockwise
            img = img.rotate(90, expand=True)
        # orientation == 1 (or any unknown value): already upright, no-op.

    return img
|
|
|
|
|
def load_image_file(file, mode='RGB'):
    """Open an image file, honor its EXIF orientation, and return it as a
    numpy array converted to ``mode`` (default ``'RGB'``).
    """
    image = PIL.Image.open(file)
    # Prefer Pillow's own EXIF handling; fall back to the local
    # implementation on older Pillow versions that lack it.
    transpose = getattr(PIL.ImageOps, 'exif_transpose', exif_transpose)
    image = transpose(image)
    return np.array(image.convert(mode))
|
|
|
|
|
class FaceMasks(datasets.GeneratorBasedBuilder):
    """Builder for the TrainingDataPro ``face_masks`` dataset: four photos
    (one per mask type) per worker, with age/country/sex metadata."""

    def _info(self):
        """Declare the example schema plus card metadata (homepage, license,
        citation)."""
        return datasets.DatasetInfo(description=_DESCRIPTION,
                                    features=datasets.Features({
                                        'photo_1': datasets.Image(),
                                        'photo_2': datasets.Image(),
                                        'photo_3': datasets.Image(),
                                        'photo_4': datasets.Image(),
                                        'worker_id': datasets.Value('string'),
                                        'age': datasets.Value('int8'),
                                        'country': datasets.Value('string'),
                                        'sex': datasets.Value('string')
                                    }),
                                    supervised_keys=None,
                                    homepage=_HOMEPAGE,
                                    citation=_CITATION,
                                    license=_LICENSE)

    def _split_generators(self, dl_manager):
        """Download the image archive and the CSV annotations; the dataset
        exposes a single TRAIN split."""
        images = dl_manager.download_and_extract(f"{_DATA}images.zip")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
        images = dl_manager.iter_files(images)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={
                                        "images": images,
                                        'annotations': annotations
                                    }),
        ]

    def _generate_examples(self, images, annotations):
        """Yield one example per worker, matching extracted image paths to
        CSV rows via the trailing ``dir/filename`` part of each path."""
        annotations_df = pd.read_csv(annotations, sep=',')
        # Fix: the original appended one row at a time with DataFrame.loc,
        # which is quadratic in the number of images; build the whole
        # Link -> Path table in a single constructor call instead.
        images_data = pd.DataFrame(
            [{
                'Link': '/'.join(image_path.split('/')[-2:]),
                'Path': image_path
            } for image_path in images],
            columns=['Link', 'Path'])

        annotations_df = pd.merge(annotations_df,
                                  images_data,
                                  how='left',
                                  on=['Link'])
        for idx, worker_id in enumerate(pd.unique(annotations_df['WorkerId'])):
            annotation: pd.DataFrame = annotations_df.loc[
                annotations_df['WorkerId'] == worker_id]
            annotation = annotation.sort_values(['Link'])
            # row[5] is the mask 'Type' (1-4) and row[7] the local file
            # 'Path' by position in itertuples -- this depends on the CSV
            # column order; TODO confirm against the published CSV header.
            data = {
                f'photo_{row[5]}': load_image_file(row[7])
                for row in annotation.itertuples()
            }

            # Worker metadata is constant across the group; read it once
            # from the Type == 1 row instead of filtering three times.
            meta = annotation.loc[annotation['Type'] == 1].iloc[0]

            data['worker_id'] = worker_id
            data['age'] = meta['Age']
            data['country'] = meta['Country']
            data['sex'] = meta['Sex']

            yield idx, data
|
|