Languages: Japanese
License: CC-BY-SA 3.0
Kosuke-Yamada committed
Commit 9d30a18 · 1 Parent(s): 49d75a9

modify file

Files changed (1)
  1. ner-wikipedia-dataset.py +67 -140
ner-wikipedia-dataset.py CHANGED
@@ -1,175 +1,102 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # TODO: Address all TODOs and remove all explanatory comments
- """TODO: Add a description here."""
-
- import csv
  import json
- import os
  import random
-
- import datasets
-
- # TODO: Add BibTeX citation
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = """\
- @InProceedings{huggingface:dataset,
- title = {A great new dataset},
- author={huggingface, Inc.
- },
- year={2020}
  }
  """
-
- # TODO: Add description of the dataset here
- # You can copy an official description
- _DESCRIPTION = """\
- This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
- """
-
- # TODO: Add a link to an official homepage for the dataset here
- _HOMEPAGE = ""
-
- # TODO: Add the licence for the dataset here if you can find it
- _LICENSE = ""
-
- # TODO: Add link to the official dataset URLs here
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
  _URL = "https://raw.githubusercontent.com/stockmarkteam/ner-wikipedia-dataset/main/ner.json"


- class NerWikipediaDatasetConfig(datasets.BuilderConfig):
-     """BuilderConfig for NerWikipediaDataset."""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for NerWikipediaDataset
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(NerWikipediaDatasetConfig, self).__init__(**kwargs)
-
-
- # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
- class NerWikipediaDataset(datasets.GeneratorBasedBuilder):
-     """TODO: Short description of my dataset."""
-
-     VERSION = datasets.Version("1.1.0")
-
-     # This is an example of a dataset with multiple configurations.
-     # If you don't want/need to define several sub-sets in your dataset,
-     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-     # If you need to make complex sub-parts in the datasets with configurable options
-     # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-     # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-     # You will be able to load one or the other configurations in the following list with
-     # data = datasets.load_dataset('my_dataset', 'first_domain')
-     # data = datasets.load_dataset('my_dataset', 'second_domain')
-     # BUILDER_CONFIGS = [
-     #     datasets.BuilderConfig(
-     #         name="all",
-     #         version=VERSION,
-     #         description="This part of my dataset covers a first domain",
-     #     ),
-     # ]
-
-     # DEFAULT_CONFIG_NAME = "all"  # It's not mandatory to have a default configuration. Just use one if it make sense.

-     def _info(self):
-         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
              description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=datasets.Features(
                  {
-                     "curid": datasets.Value("string"),
-                     "text": datasets.Value("string"),
                      "entities": [
                          {
-                             "name": datasets.Value(dtype="string"),
-                             "span": datasets.Sequence(
-                                 datasets.Value(dtype="int64"), length=2
-                             ),
-                             "type": datasets.Value(dtype="string"),
                          }
                      ],
-                     # These are the features of your dataset like images, labels ...
                  }
-             ),  # Here we define them above because they are different between the two configurations
-             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-             # supervised_keys=("sentence", "label"),
-             # Homepage of the dataset for documentation
              homepage=_HOMEPAGE,
-             # License for the dataset if available
              license=_LICENSE,
-             # Citation for the dataset
              citation=_CITATION,
          )

-     def _split_generators(self, dl_manager):
-         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-         data_dir = dl_manager.download_and_extract(_URL)
-
-         # Read the downloaded file and collect all the data
-         with open(data_dir, "r", encoding="utf-8") as f:
              data = json.load(f)

-         # Shuffle the data randomly
          random.seed(42)
          random.shuffle(data)

-         # Split into training, validation, and test data
-         train_ratio = 0.8
-         validation_ratio = 0.1
-         num_examples = len(data)
-         train_split = int(num_examples * train_ratio)
-         validation_split = int(num_examples * (train_ratio + validation_ratio))
-         train_data = data[:train_split]
-         validation_data = data[train_split:validation_split]
-         test_data = data[validation_split:]
-
          return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
                  gen_kwargs={"data": train_data},
              ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
                  gen_kwargs={"data": validation_data},
              ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
                  gen_kwargs={"data": test_data},
              ),
          ]

-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, data):
-         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-         for key, data in enumerate(data):
-             yield key, {
-                 "curid": data["curid"],
-                 "text": data["text"],
-                 "entities": data["entities"],
              }
  import json
  import random
+ from typing import Generator
+
+ from datasets import (
+     BuilderConfig,
+     DatasetInfo,
+     DownloadManager,
+     Features,
+     GeneratorBasedBuilder,
+     Sequence,
+     Split,
+     SplitGenerator,
+     Value,
+     Version,
+ )
+
+ _CITATION = """
+ @inproceedings{omi-2021-wikipedia,
+     title = "Wikipediaを用いた日本語の固有表現抽出のデータセットの構築",
+     author = "近江 崇宏",
+     booktitle = "言語処理学会第27回年次大会",
+     year = "2021",
+     url = "https://anlp.jp/proceedings/annual_meeting/2021/pdf_dir/P2-7.pdf",
  }
  """
+ _DESCRIPTION = "This is a dataset of Wikipedia articles with named entity labels created by Stockmark Inc."
+ _HOMEPAGE = "https://github.com/stockmarkteam/ner-wikipedia-dataset"
+ _LICENSE = "CC-BY-SA 3.0"
  _URL = "https://raw.githubusercontent.com/stockmarkteam/ner-wikipedia-dataset/main/ner.json"


+ class NerWikipediaDataset(GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [
+         BuilderConfig(
+             name="ner-wikipedia-dataset",
+             version=Version("2.0.0"),
+             description=_DESCRIPTION,
+         ),
+     ]

+     def _info(self) -> DatasetInfo:
+         return DatasetInfo(
              description=_DESCRIPTION,
+             features=Features(
                  {
+                     "curid": Value("string"),
+                     "text": Value("string"),
                      "entities": [
                          {
+                             "name": Value("string"),
+                             "span": Sequence(Value("int64"), length=2),
+                             "type": Value("string"),
                          }
                      ],
                  }
+             ),
              homepage=_HOMEPAGE,
              license=_LICENSE,
              citation=_CITATION,
          )

+     def _split_generators(
+         self, dl_manager: DownloadManager
+     ) -> list[SplitGenerator]:
+         dataset_dir = str(dl_manager.download_and_extract(_URL))
+         with open(dataset_dir, "r", encoding="utf-8") as f:
              data = json.load(f)

          random.seed(42)
          random.shuffle(data)

+         num_data = len(data)
+         num_train_data = int(num_data * 0.8)
+         num_validation_data = (num_data - num_train_data) // 2
+         train_data = data[:num_train_data]
+         validation_data = data[
+             num_train_data : num_train_data + num_validation_data
+         ]
+         test_data = data[num_train_data + num_validation_data :]

          return [
+             SplitGenerator(
+                 name=Split.TRAIN,
                  gen_kwargs={"data": train_data},
              ),
+             SplitGenerator(
+                 name=Split.VALIDATION,
                  gen_kwargs={"data": validation_data},
              ),
+             SplitGenerator(
+                 name=Split.TEST,
                  gen_kwargs={"data": test_data},
              ),
          ]

+     def _generate_examples(self, data: list[dict[str, str]]) -> Generator:
+         for i, d in enumerate(data):
+             yield i, {
+                 "curid": d["curid"],
+                 "text": d["text"],
+                 "entities": d["entities"],
              }
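
For context, a minimal usage sketch of the rewritten loading script. Assumptions not taken from the commit: the script above is saved locally as ner-wikipedia-dataset.py (the path is illustrative), a recent `datasets` release is installed, and "span" holds [start, end) character offsets into "text".

# Usage sketch (hypothetical local path; newer `datasets` releases may also
# require trust_remote_code=True to run a loading script like this one).
import datasets

dataset = datasets.load_dataset("./ner-wikipedia-dataset.py")

# _split_generators() above produces three splits: 80% train, with the
# remaining 20% halved into validation and test.
print(dataset)  # DatasetDict with "train", "validation", "test"

# Each example follows the Features declared in _info():
# {"curid": str, "text": str, "entities": [{"name": str, "span": [int, int], "type": str}]}
example = dataset["train"][0]
print(example["curid"], example["text"])
for entity in example["entities"]:
    start, end = entity["span"]
    # Assuming "span" holds [start, end) character offsets into "text",
    # this slice should recover the entity surface form stored in "name".
    print(entity["type"], entity["name"], example["text"][start:end])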