yalta_ai_tabular_dataset / yalta_ai_tabular_dataset.py
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for reading 'Object Detection for Chess Pieces' dataset."""
import os
from glob import glob

import datasets
from PIL import Image
_CITATION = """\
@dataset{clerice_thibault_2022_6827706,
author = {Clérice, Thibault},
title = {YALTAi: Tabular Dataset},
month = jul,
year = 2022,
publisher = {Zenodo},
version = {1.0.0},
doi = {10.5281/zenodo.6827706},
url = {https://doi.org/10.5281/zenodo.6827706}
}
"""
_DESCRIPTION = """Yalt AI Tabular Dataset"""
_HOMEPAGE = "https://doi.org/10.5281/zenodo.6827706"
_LICENSE = "Creative Commons Attribution 4.0 International"
_URL = "https://zenodo.org/record/6827706/files/yaltai-table.zip?download=1"
_CATEGORIES = ["Header", "Col", "Marginal", "text"]
class YaltAiTabularDatasetConfig(datasets.BuilderConfig):
"""BuilderConfig for YaltAiTabularDataset."""
def __init__(self, name, **kwargs):
"""BuilderConfig for YaltAiTabularDataset."""
super(YaltAiTabularDatasetConfig, self).__init__(
version=datasets.Version("1.0.0"), name=name, description=None, **kwargs
)
class YaltAiTabularDataset(datasets.GeneratorBasedBuilder):
"""Object Detection for historic manuscripts"""
BUILDER_CONFIGS = [
YaltAiTabularDatasetConfig("YOLO"),
YaltAiTabularDatasetConfig("COCO"),
]
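    # Two configurations are exposed: "COCO" yields absolute top-left
    # (x, y, width, height) boxes plus COCO-style annotation fields, while
    # "YOLO" yields centre-based (x, y, width, height) boxes scaled to
    # absolute pixel values.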
def _info(self):
if self.config.name == "COCO":
features = datasets.Features(
{
"image_id": datasets.Value("int64"),
"image": datasets.Image(),
"width": datasets.Value("int32"),
"height": datasets.Value("int32"),
}
)
object_dict = {
"category_id": datasets.ClassLabel(names=_CATEGORIES),
"image_id": datasets.Value("string"),
"id": datasets.Value("int64"),
"area": datasets.Value("int64"),
"bbox": datasets.Sequence(datasets.Value("float32"), length=4),
"segmentation": [[datasets.Value("float32")]],
"iscrowd": datasets.Value("bool"),
}
features["objects"] = [object_dict]
if self.config.name == "YOLO":
features = datasets.Features(
{
"image": datasets.Image(),
"objects": datasets.Sequence(
{
"label": datasets.ClassLabel(names=_CATEGORIES),
"bbox": datasets.Sequence(
datasets.Value("int32"), length=4
),
}
),
}
)
return datasets.DatasetInfo(
features=features,
supervised_keys=None,
description=_DESCRIPTION,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
data_dir = dl_manager.download_and_extract(_URL)
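        # The archive extracts to a `yaltai-table/` directory with
        # train/val/test subfolders, each holding `images/` and `labels/`.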
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"data_dir": os.path.join(data_dir, "yaltai-table/", "train")
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"data_dir": os.path.join(data_dir, "yaltai-table/", "val")},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"data_dir": os.path.join(data_dir, "yaltai-table/", "test")
},
),
]
def _generate_examples(self, data_dir):
def create_annotation_from_yolo_format(
min_x,
min_y,
width,
height,
image_id,
category_id,
annotation_id,
segmentation=False,
):
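            # Build a COCO-style annotation dict from absolute top-left pixel
            # coordinates; when `segmentation` is True, a rectangular polygon
            # covering the box is added.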
bbox = (float(min_x), float(min_y), float(width), float(height))
area = width * height
max_x = min_x + width
max_y = min_y + height
if segmentation:
seg = [[min_x, min_y, max_x, min_y, max_x, max_y, min_x, max_y]]
else:
seg = []
return {
"id": annotation_id,
"image_id": image_id,
"bbox": bbox,
"area": area,
"iscrowd": 0,
"category_id": category_id,
"segmentation": seg,
}
image_dir = os.path.join(data_dir, "images")
label_dir = os.path.join(data_dir, "labels")
image_paths = sorted(glob(f"{image_dir}/*.jpg"))
label_paths = sorted(glob(f"{label_dir}/*.txt"))
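        # Each YOLO label file has one line per object:
        #   <class_index> <x_center> <y_center> <width> <height>
        # with coordinates normalised to [0, 1] relative to the image size.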
        if self.config.name == "COCO":
            # Running counter so that every annotation receives a unique COCO id.
            annotation_id = 0
            for idx, (image_path, label_path) in enumerate(
                zip(image_paths, label_paths)
            ):
                image_id = idx
                annotations = []
                image = Image.open(image_path)  # possibly convert to RGB?
w, h = image.size
with open(label_path, "r") as f:
lines = f.readlines()
                for line in lines:
                    line = line.strip().split()
                    category_id = int(line[0])
                    # YOLO stores normalised centre coordinates and sizes;
                    # convert them to absolute pixel values and then to the
                    # top-left corner expected by the COCO bbox convention.
                    x_center = float(line[1])
                    y_center = float(line[2])
                    width = float(line[3])
                    height = float(line[4])
                    float_x_center = w * x_center
                    float_y_center = h * y_center
                    float_width = w * width
                    float_height = h * height
                    min_x = int(float_x_center - float_width / 2)
                    min_y = int(float_y_center - float_height / 2)
                    width = int(float_width)
                    height = int(float_height)
                    annotation = create_annotation_from_yolo_format(
                        min_x,
                        min_y,
                        width,
                        height,
                        image_id,
                        category_id,
                        annotation_id,
                    )
                    annotations.append(annotation)
                    annotation_id += 1
example = {
"image_id": image_id,
"image": image,
"width": w,
"height": h,
"objects": annotations,
}
yield idx, example
if self.config.name == "YOLO":
for idx, (image_path, label_path) in enumerate(
zip(image_paths, label_paths)
):
im = Image.open(image_path)
width, height = im.size
with open(label_path, "r") as f:
lines = f.readlines()
objects = []
for line in lines:
line = line.strip().split()
bbox_class = int(line[0])
bbox_xcenter = int(float(line[1]) * width)
bbox_ycenter = int(float(line[2]) * height)
bbox_width = int(float(line[3]) * width)
bbox_height = int(float(line[4]) * height)
objects.append(
{
"label": bbox_class,
"bbox": [
bbox_xcenter,
bbox_ycenter,
bbox_width,
bbox_height,
],
}
)
yield idx, {
"image": image_path,
"objects": objects,
}
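# Usage sketch (illustrative only): this assumes the script file sits in the
# current working directory; `datasets.load_dataset` also accepts a Hub
# repository id if the loader is hosted there.
if __name__ == "__main__":
    # Either configuration can be requested by name as the second argument.
    coco = datasets.load_dataset("./yalta_ai_tabular_dataset.py", "COCO", split="train")
    yolo = datasets.load_dataset("./yalta_ai_tabular_dataset.py", "YOLO", split="validation")
    print(coco[0]["objects"][0]["bbox"])
    print(yolo[0]["objects"]["label"][:3])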