Languages: Japanese
License: CC-BY-SA 3.0
File size: 3,985 bytes

from __future__ import annotations

import json
import random
from typing import Any, Generator

import datasets

_CITATION = """
@inproceedings{omi-2021-wikipedia,
    title = "Wikipediaを用いた日本語の固有表現抽出のデータセットの構築",
    author = "近江 崇宏",
    booktitle = "言語処理学会第27回年次大会",
    year = "2021",
    url = "https://anlp.jp/proceedings/annual_meeting/2021/pdf_dir/P2-7.pdf",
}
"""
_DESCRIPTION = "This is a dataset of Wikipedia articles with named entity labels created by Stockmark Inc."
_HOMEPAGE = "https://github.com/stockmarkteam/ner-wikipedia-dataset"
_LICENSE = "CC-BY-SA 3.0"
_URL = "https://raw.githubusercontent.com/stockmarkteam/ner-wikipedia-dataset/main/ner.json"


class NerWikipediaDatasetConfig(datasets.BuilderConfig):
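    """BuilderConfig adding shuffle, seed, and train/validation split ratios."""
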
    def __init__(
        self,
        name: str = "default",
        version: datasets.Version | str | None = datasets.Version("0.0.0"),
        data_dir: str | None = None,
        data_files: datasets.data_files.DataFilesDict | None = None,
        description: str | None = _DESCRIPTION,
        shuffle: bool = True,
        seed: int = 42,
        train_ratio: float = 0.8,
        validation_ratio: float = 0.1,
    ) -> None:
        super().__init__(
            name=name,
            version=version,
            data_dir=data_dir,
            data_files=data_files,
            description=description,
        )
        self.shuffle = shuffle
        self.seed = seed
        self.train_ratio = train_ratio
        self.validation_ratio = validation_ratio


class NerWikipediaDataset(datasets.GeneratorBasedBuilder):
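    """Loader for the Stockmark Wikipedia NER data (a single ner.json file)."""
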
    BUILDER_CONFIG_CLASS = NerWikipediaDatasetConfig

    def _info(self) -> datasets.DatasetInfo:
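        """Return the DatasetInfo describing features, homepage, license, and citation."""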
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "curid": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "entities": [
                        {
                            "name": datasets.Value("string"),
                            "span": datasets.Sequence(
                                datasets.Value("int64"), length=2
                            ),
                            "type": datasets.Value("string"),
                        }
                    ],
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> list[datasets.SplitGenerator]:
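        """Download ner.json, optionally shuffle it, and split it into
        train/validation/test according to the configured ratios."""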
        dataset_file = str(dl_manager.download_and_extract(_URL))
        with open(dataset_file, "r", encoding="utf-8") as f:
            data = json.load(f)

        # Optionally shuffle the data deterministically before splitting.
        if self.config.shuffle:
            random.seed(self.config.seed)
            random.shuffle(data)

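        # train_ratio and validation_ratio of the examples go to the train and
        # validation splits; the remainder becomes the test split.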
        num_data = len(data)
        num_train_data = int(num_data * self.config.train_ratio)
        num_validation_data = int(num_data * self.config.validation_ratio)
        train_data = data[:num_train_data]
        validation_data = data[num_train_data : num_train_data + num_validation_data]
        test_data = data[num_train_data + num_validation_data :]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data": train_data},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data": validation_data},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data": test_data},
            ),
        ]

    def _generate_examples(self, data: list[dict[str, Any]]) -> Generator:
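        """Yield (key, example) pairs for one split."""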
        for i, d in enumerate(data):
            yield i, {
                "curid": d["curid"],
                "text": d["text"],
                "entities": d["entities"],
            }
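

# A minimal usage sketch (assumption: the file name "ner_wikipedia_dataset.py"
# and the keyword arguments shown are illustrative; any field of
# NerWikipediaDatasetConfig can be overridden this way):
#
#     from datasets import load_dataset
#
#     dataset = load_dataset(
#         "ner_wikipedia_dataset.py",  # path to this loading script
#         shuffle=True,
#         seed=42,
#         train_ratio=0.8,
#         validation_ratio=0.1,
#     )
#     print(dataset["train"][0])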