from __future__ import annotations

import json
import random
from typing import Any, Generator

import datasets

_CITATION = """
@inproceedings{omi-2021-wikipedia,
    title = "Wikipediaを用いた日本語の固有表現抽出のデータセットの構築",
    author = "近江 崇宏",
    booktitle = "言語処理学会第27回年次大会",
    year = "2021",
    url = "https://anlp.jp/proceedings/annual_meeting/2021/pdf_dir/P2-7.pdf",
}
"""

_DESCRIPTION = "This is a dataset of Wikipedia articles with named entity labels created by Stockmark Inc."
_HOMEPAGE = "https://github.com/stockmarkteam/ner-wikipedia-dataset"
_LICENSE = "CC-BY-SA 3.0"
_URL = "https://raw.githubusercontent.com/stockmarkteam/ner-wikipedia-dataset/main/ner.json"


class NerWikipediaDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig controlling shuffling and the train/validation/test split ratios."""

    def __init__(
        self,
        name: str = "default",
        version: datasets.Version | str | None = datasets.Version("0.0.0"),
        data_dir: str | None = None,
        data_files: datasets.data_files.DataFilesDict | None = None,
        description: str | None = _DESCRIPTION,
        shuffle: bool = True,
        seed: int = 42,
        train_ratio: float = 0.8,
        validation_ratio: float = 0.1,
    ) -> None:
        super().__init__(
            name=name,
            version=version,
            data_dir=data_dir,
            data_files=data_files,
            description=description,
        )
        self.shuffle = shuffle
        self.seed = seed
        self.train_ratio = train_ratio
        self.validation_ratio = validation_ratio

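
# A minimal usage sketch for the config above: extra keyword arguments passed to
# datasets.load_dataset() are forwarded to the BuilderConfig, so the shuffling
# and split ratios can be tuned at load time (the script filename below is
# illustrative, not part of this repository):
#
#   ds = datasets.load_dataset(
#       "ner_wikipedia_dataset.py",
#       shuffle=True,
#       seed=42,
#       train_ratio=0.8,
#       validation_ratio=0.1,
#   )
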
class NerWikipediaDataset(datasets.GeneratorBasedBuilder):
    """Loader that downloads ner.json and splits it into train/validation/test."""

    BUILDER_CONFIG_CLASS = NerWikipediaDatasetConfig

    def _info(self) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "curid": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "entities": [
                        {
                            "name": datasets.Value("string"),
                            # Character span of the entity within `text`,
                            # stored as a pair of start/end offsets.
                            "span": datasets.Sequence(
                                datasets.Value("int64"), length=2
                            ),
                            "type": datasets.Value("string"),
                        }
                    ],
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

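
    # For reference, one example rendered with these features looks roughly like
    # this (values are illustrative, not drawn from the actual data):
    #
    #   {
    #       "curid": "123456",
    #       "text": "山田太郎は東京都出身。",
    #       "entities": [
    #           {"name": "山田太郎", "span": [0, 4], "type": "人名"},
    #           {"name": "東京都", "span": [5, 8], "type": "地名"},
    #       ],
    #   }
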
    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> list[datasets.SplitGenerator]:
        # download_and_extract returns the local path of the downloaded JSON file.
        file_path = str(dl_manager.download_and_extract(_URL))
        with open(file_path, "r", encoding="utf-8") as f:
            data = json.load(f)

        # Shuffle with a dedicated RNG instance so the global random state is
        # left untouched; the same seed still yields the same split.
        if self.config.shuffle:
            random.Random(self.config.seed).shuffle(data)

        # Carve off train and validation by ratio; the test split takes the rest.
        num_data = len(data)
        num_train_data = int(num_data * self.config.train_ratio)
        num_validation_data = int(num_data * self.config.validation_ratio)
        train_data = data[:num_train_data]
        validation_data = data[num_train_data : num_train_data + num_validation_data]
        test_data = data[num_train_data + num_validation_data :]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data": train_data},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data": validation_data},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data": test_data},
            ),
        ]

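
    # With the defaults (train_ratio=0.8, validation_ratio=0.1), a hypothetical
    # file of 1,000 records would split as int(1000 * 0.8) = 800 train,
    # int(1000 * 0.1) = 100 validation, and the remaining 100 test records.
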
    def _generate_examples(
        self, data: list[dict[str, Any]]
    ) -> Generator[tuple[int, dict[str, Any]], None, None]:
        # Yield (key, example) pairs; the running index serves as the example key.
        for i, d in enumerate(data):
            yield i, {
                "curid": d["curid"],
                "text": d["text"],
                "entities": d["entities"],
            }
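

# A quick smoke test, run from a directory containing this file (the builder
# methods below are standard `datasets` API; this is a sketch, not part of the
# loading script itself):
#
#   builder = NerWikipediaDataset()
#   builder.download_and_prepare()
#   ds = builder.as_dataset()
#   print(ds)  # DatasetDict with train/validation/test splits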