"""Monk Dataset"""
from typing import List
from functools import partial
import datasets
import pandas
VERSION = datasets.Version("1.0.0")
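# Column layout of the raw UCI files: each line starts with a space (parsed as an
# empty first column), followed by the class label, the six attributes, and an
# instance identifier.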
_ORIGINAL_FEATURE_NAMES = [
"empty",
"is_monk",
"head_shape",
"body_shape",
"is_smiling",
"holding",
"jacket_color",
"has_tie",
"ID"
]
_BASE_FEATURE_NAMES = [
"head_shape",
"body_shape",
"is_smiling",
"holding",
"jacket_color",
"has_tie",
"is_monk"
]
# Maps the raw integer codes to readable values.
_ENCODING_DICTS = {
"head_shape": {
1: "round",
2: "square",
3: "octagon",
},
"body_shape": {
1: "round",
2: "square",
3: "octagon",
},
"holding": {
1: "sword",
2: "baloon",
3: "flag",
},
"jacket_color": {
1: "red",
2: "yellow",
3: "green",
4: "blue"
},
"is_smiling": {
1: True,
0: False
},
"has_tie": {
1: True,
0: False
}
}
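# The class column ("is_monk") keeps its original 0/1 coding and is exposed as a ClassLabel.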
DESCRIPTION = "MONK's Problems: three artificial binary classification tasks over symbolic robot descriptions."
_HOMEPAGE = "https://archive-beta.ics.uci.edu/dataset/70/monk+s+problems"
_URLS = ("https://archive-beta.ics.uci.edu/dataset/70/monk+s+problems",)
_CITATION = """
@misc{misc_monk's_problems_70,
author = {Wnek,J.},
title = {{MONK's Problems}},
year = {1992},
howpublished = {UCI Machine Learning Repository},
note = {{DOI}: https://doi.org/10.24432/C5R30R}
}"""
# Per-configuration download URLs and feature schemas
urls_per_split = {
"monks1": {
"train": "https://huggingface.co/datasets/mstz/monks/raw/main/monks-1.train",
"test": "https://huggingface.co/datasets/mstz/monks/raw/main/monks-1.test"
},
"monks2": {
"train": "https://huggingface.co/datasets/mstz/monks/raw/main/monks-2.train",
"test": "https://huggingface.co/datasets/mstz/monks/raw/main/monks-2.test"
},
"monks3": {
"train": "https://huggingface.co/datasets/mstz/monks/raw/main/monks-3.train",
"test": "https://huggingface.co/datasets/mstz/monks/raw/main/monks-3.test"
}
}
# All three MONK's problems share the same feature schema.
_FEATURE_TYPES = {
    "head_shape": datasets.Value("string"),
    "body_shape": datasets.Value("string"),
    "is_smiling": datasets.Value("bool"),
    "holding": datasets.Value("string"),
    "jacket_color": datasets.Value("string"),
    "has_tie": datasets.Value("bool"),
    "is_monk": datasets.ClassLabel(num_classes=2)
}
features_types_per_config = {config: dict(_FEATURE_TYPES) for config in ("monks1", "monks2", "monks3")}
features_per_config = {k: datasets.Features(features_types_per_config[k]) for k in features_types_per_config}
class MonkConfig(datasets.BuilderConfig):
def __init__(self, **kwargs):
super(MonkConfig, self).__init__(version=VERSION, **kwargs)
self.features = features_per_config[kwargs["name"]]
class Monk(datasets.GeneratorBasedBuilder):
    # the three MONK's problems, exposed as separate configurations
    DEFAULT_CONFIG_NAME = "monks1"
BUILDER_CONFIGS = [
MonkConfig(name="monks1",
description="Monk 1 problem."),
MonkConfig(name="monks2",
description="Monk 2 problem."),
MonkConfig(name="monks3",
description="Monk 3 problem.")
]
def _info(self):
info = datasets.DatasetInfo(description=DESCRIPTION, citation=_CITATION, homepage=_HOMEPAGE,
features=features_per_config[self.config.name])
return info
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # Download only the files of the selected configuration; each configuration
        # provides both a train and a test file.
        downloads = dl_manager.download_and_extract(urls_per_split[self.config.name])

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloads["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloads["test"]}),
        ]
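    # The raw files are space-separated tables; every row becomes one example
    # after preprocessing.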
def _generate_examples(self, filepath: str):
data = pandas.read_csv(filepath, header=None, sep=" ")
data = self.preprocess(data, config=self.config.name)
for row_id, row in data.iterrows():
data_row = dict(row)
yield row_id, data_row
    def preprocess(self, data: pandas.DataFrame, config: str = "monks1") -> pandas.DataFrame:
        data.columns = _ORIGINAL_FEATURE_NAMES
        # Drop the instance identifier and the empty column produced by the leading space.
        data = data.drop(["ID", "empty"], axis="columns")
        data = data[_BASE_FEATURE_NAMES].copy()

        # Replace the raw integer codes with readable values.
        for feature in _ENCODING_DICTS:
            encoding_function = partial(self.encode, feature)
            data[feature] = data[feature].apply(encoding_function)

        return data[list(features_types_per_config[config].keys())]
    def encode(self, feature, value):
        # Translate a raw integer code into its readable counterpart.
        if feature in _ENCODING_DICTS:
            return _ENCODING_DICTS[feature][value]
        raise ValueError(f"Unknown feature: {feature}")
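# Example usage, as a sketch (assumes this script is hosted in the `mstz/monks`
# dataset repository on the Hugging Face Hub, as the URLs above suggest):
#
#     from datasets import load_dataset
#
#     monks1 = load_dataset("mstz/monks", "monks1")
#     print(monks1["train"][0])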