# wura/wura.py
import json
from dataclasses import dataclass
from string import Template
import datasets
from datasets.download.download_manager import DownloadManager

_CITATION = ""
_DESCRIPTION = """
Wura is large-scale pretraining data for 20 languages popularly spoken in Africa.
"""
_HOMEPAGE = "https://github.com/castorini/AfriTeVa-keji"
_LICENSE = "Apache License 2.0"
_DOCUMENT_DATASET_VERSION = "1.0.0"
_PASSAGE_DATASET_VERSION = "1.0.0"
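
# Languages covered by Wura, mapped to the ISO 639-3 codes used as config names
# and in the per-language data file names.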
_LANGUAGES = {
"Afrikaans": "afr",
"Amharic": "amh",
"Egyptian Arabic": "arz",
"English": "eng",
"French": "fra",
"Hausa": "hau",
"Igbo": "ibo",
"Gahuza": "kin",
"Malagasy": "mlg",
"Chichewa": "nya",
"Afaan Oromoo": "orm",
# "Nigerian Pidgin": "pcm",
"Portuguese": "por",
"Shona": "sna",
"Somali": "som",
"Sesotho": "sot",
"Swahili": "swa",
"Tigrinya": "tir",
"Xhosa": "xho",
"Yoruba": "yor",
"Zulu": "zul"
}
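
# Relative paths to the per-language data files shipped alongside this script:
# document-level data as JSON Lines, passage-level data as plain text.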
_DOCUMENT_DATASET_URL = Template("./documents-v1.0/${split}/${language}.jsonl")
_PASSAGE_DATASET_URL = Template("./passages-v1.0/${split}/${language}.txt")
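# Characters that are not allowed in Windows file paths.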
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


@dataclass
class WuraConfig(datasets.BuilderConfig):
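    """BuilderConfig for Wura; `level` selects "document" or "passage" data."""
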
level: str = "document"


class WuraDataset(datasets.GeneratorBasedBuilder):
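    """Loading script for the Wura pretraining corpus (document and passage level)."""
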
BUILDER_CONFIGS = [
WuraConfig(
name=language,
version=datasets.Version(_DOCUMENT_DATASET_VERSION),
description=f"Wura dataset for language: {language}\n{_DESCRIPTION}",
) for language in _LANGUAGES.values()
]
DEFAULT_CONFIG_NAME = "afr"

    def _info(self):
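        # The feature schema depends on the configured level: documents carry article
        # metadata (headline, category, url); passages are plain text.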
if self.config.level == "document":
features = ["id", "headline", "content", "category", "url"]
elif self.config.level == "passage":
features = ["id", "text"]
else:
raise ValueError("level can only be one of `document` or `passage`")
features = {feature: datasets.Value("string") for feature in features}
return datasets.DatasetInfo(
description=self.config.description,
features=datasets.Features(features),
homepage=_HOMEPAGE,
citation=_CITATION,
license=_LICENSE
)

    def _split_generators(self, dl_manager: DownloadManager):
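        # Build the train/eval file paths for the selected language and level.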
if self.config.level == "document":
data_files = {
split: _DOCUMENT_DATASET_URL.substitute(
split=split,
language=self.config.name,
) for split in ["train", "eval"]
}
elif self.config.level == "passage":
data_files = {
split: _PASSAGE_DATASET_URL.substitute(
split=split,
language=self.config.name,
) for split in ["train", "eval"]
}
else:
raise ValueError("level can only be one of `document` or `passage`")
language_files = dl_manager.download_and_extract(data_files)
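        # The repository's "eval" files back the VALIDATION split.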
splits = [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepath": language_files["train"]}
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"filepath": language_files["eval"]}
)
]
return splits

    def _generate_examples(self, filepath: str):
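        # Document-level files are JSON Lines (one article per line);
        # passage-level files are plain text (one passage per line).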
with open(filepath, encoding="utf-8") as f:
for idx, line in enumerate(f):
if self.config.level == "document":
data = json.loads(line)
data["id"] = idx
else:
data = {"id": idx, "text": line.strip()}
yield idx, data
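

# Minimal usage sketch (not part of the loading script). It assumes this file sits in
# the dataset repository next to the documents-v1.0/ and passages-v1.0/ directories;
# "path/to/wura" below is a placeholder for that repository path. Extra keyword
# arguments such as `level` are forwarded to WuraConfig by `datasets.load_dataset`.
#
#   from datasets import load_dataset
#
#   docs = load_dataset("path/to/wura", "yor")                       # document-level (default)
#   passages = load_dataset("path/to/wura", "yor", level="passage")  # passage-level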