Kosuke-Yamada committed
Commit 9d30a18 • Parent(s): 49d75a9

modify file

Browse files: ner-wikipedia-dataset.py (+67 −140)
ner-wikipedia-dataset.py CHANGED

@@ -1,175 +1,102 @@
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# TODO: Address all TODOs and remove all explanatory comments
-"""TODO: Add a description here."""
-
-import csv
 import json
-import os
 import random
-
-import datasets
-
-
-# TODO: Add BibTeX citation
-# Find for instance the citation on arxiv or on the dataset repo/website
-_CITATION = """\
-@InProceedings{huggingface:dataset,
-title = {A great new dataset},
-author={huggingface, Inc.
-},
-year={2020}
+from typing import Generator
+
+from datasets import (
+    BuilderConfig,
+    DatasetInfo,
+    DownloadManager,
+    Features,
+    GeneratorBasedBuilder,
+    Sequence,
+    Split,
+    SplitGenerator,
+    Value,
+    Version,
+)
+
+_CITATION = """
+@inproceedings{omi-2021-wikipedia,
+    title = "Wikipediaを用いた日本語の固有表現抽出のデータセットの構築",
+    author = "近江 崇宏",
+    booktitle = "言語処理学会第27回年次大会",
+    year = "2021",
+    url = "https://anlp.jp/proceedings/annual_meeting/2021/pdf_dir/P2-7.pdf",
 }
 """
-
-# TODO: Add description of the dataset here
-# You can copy an official description
-_DESCRIPTION = """\
-This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
-"""
-
-# TODO: Add a link to an official homepage for the dataset here
-_HOMEPAGE = ""
-
-# TODO: Add the licence for the dataset here if you can find it
-_LICENSE = ""
-
-# TODO: Add link to the official dataset URLs here
-# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+_DESCRIPTION = "This is a dataset of Wikipedia articles with named entity labels created by Stockmark Inc."
+_HOMEPAGE = "https://github.com/stockmarkteam/ner-wikipedia-dataset"
+_LICENSE = "CC-BY-SA 3.0"
 _URL = "https://raw.githubusercontent.com/stockmarkteam/ner-wikipedia-dataset/main/ner.json"
 
 
-class NerWikipediaDatasetConfig(datasets.BuilderConfig):
-    """BuilderConfig for NerWikipediaDataset."""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for NerWikipediaDataset.
-        Args:
-          **kwargs: keyword arguments forwarded to super.
-        """
-        super(NerWikipediaDatasetConfig, self).__init__(**kwargs)
-
-
-# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
-class NerWikipediaDataset(datasets.GeneratorBasedBuilder):
-    """TODO: Short description of my dataset."""
-
-    VERSION = datasets.Version("1.1.0")
-
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-    # If you need to make complex sub-parts in the datasets with configurable options
-    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-    # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-    # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
-    # BUILDER_CONFIGS = [
-    #     datasets.BuilderConfig(
-    #         name="all",
-    #         version=VERSION,
-    #         description="This part of my dataset covers a first domain",
-    #     ),
-    # ]
-
-    # DEFAULT_CONFIG_NAME = "all"  # It's not mandatory to have a default configuration. Just use one if it make sense.
+class NerWikipediaDataset(GeneratorBasedBuilder):
+    BUILDER_CONFIGS = [
+        BuilderConfig(
+            name="ner-wikipedia-dataset",
+            version=Version("2.0.0"),
+            description=_DESCRIPTION,
+        ),
+    ]
 
-    def _info(self):
-        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
+    def _info(self) -> DatasetInfo:
+        return DatasetInfo(
             description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=datasets.Features(
+            features=Features(
                 {
-                    "curid": datasets.Value(dtype="string"),
-                    "text": datasets.Value(dtype="string"),
+                    "curid": Value("string"),
+                    "text": Value("string"),
                     "entities": [
                         {
-                            "name": datasets.Value(dtype="string"),
-                            "span": datasets.Sequence(
-                                datasets.Value(dtype="int64"), length=2
-                            ),
-                            "type": datasets.Value(dtype="string"),
+                            "name": Value("string"),
+                            "span": Sequence(Value("int64"), length=2),
+                            "type": Value("string"),
                         }
                     ],
-                    # These are the features of your dataset like images, labels ...
                 }
             ),
-            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-            # supervised_keys=("sentence", "label"),
-            # Homepage of the dataset for documentation
             homepage=_HOMEPAGE,
-            # License for the dataset if available
             license=_LICENSE,
-            # Citation for the dataset
             citation=_CITATION,
         )
 
-    def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        data_dir = dl_manager.download_and_extract(_URL)
-
-        # Read the downloaded file and load all of the data
-        with open(data_dir, "r", encoding="utf-8") as f:
+    def _split_generators(
+        self, dl_manager: DownloadManager
+    ) -> list[SplitGenerator]:
+        dataset_dir = str(dl_manager.download_and_extract(_URL))
+        with open(dataset_dir, "r", encoding="utf-8") as f:
             data = json.load(f)
 
-        # Shuffle the data randomly
         random.seed(42)
         random.shuffle(data)
 
-        # Split the data into train, validation, and test sets
-        num_data = len(data)
-        train_split = int(num_data * 0.8)
-        validation_split = int(num_data * 0.9)
-        train_data = data[:train_split]
-        validation_data = data[train_split:validation_split]
-        test_data = data[validation_split:]
-
+        num_data = len(data)
+        num_train_data = int(num_data * 0.8)
+        num_validation_data = (num_data - num_train_data) // 2
+        train_data = data[:num_train_data]
+        validation_data = data[
+            num_train_data : num_train_data + num_validation_data
+        ]
+        test_data = data[num_train_data + num_validation_data :]
         return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
+            SplitGenerator(
+                name=Split.TRAIN,
                 gen_kwargs={"data": train_data},
             ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
+            SplitGenerator(
+                name=Split.VALIDATION,
                 gen_kwargs={"data": validation_data},
             ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
+            SplitGenerator(
+                name=Split.TEST,
                 gen_kwargs={"data": test_data},
             ),
         ]
 
-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, data):
-        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        for key, data in enumerate(data):
-            yield key, {
-                "curid": data["curid"],
-                "text": data["text"],
-                "entities": data["entities"],
+    def _generate_examples(self, data: list[dict[str, str]]) -> Generator:
+        for i, d in enumerate(data):
+            yield i, {
+                "curid": d["curid"],
+                "text": d["text"],
+                "entities": d["entities"],
             }
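
As a sanity check of the new script, here is a minimal usage sketch. It assumes the script is published on the Hub as stockmark/ner-wikipedia-dataset (the repo id is an assumption, not something this commit records) and that a reasonably recent `datasets` release is installed:

# Usage sketch — assumptions: repo id "stockmark/ner-wikipedia-dataset";
# recent `datasets` releases require trust_remote_code=True for script-based datasets.
from datasets import load_dataset

dataset = load_dataset("stockmark/ner-wikipedia-dataset", trust_remote_code=True)

# The script shuffles with seed 42 and splits 80/10/10, so the resulting
# DatasetDict should contain train, validation, and test splits.
print(dataset)

example = dataset["train"][0]
for entity in example["entities"]:
    # "span" holds a pair of character offsets into "text".
    start, end = entity["span"]
    print(entity["type"], entity["name"], example["text"][start:end])

Note that the features declare "entities" with a plain Python list ([{...}]) rather than Sequence({...}); in `datasets`, the plain-list form keeps each example's "entities" as a list of dicts instead of flipping it into a dict of lists, which is what makes the per-entity iteration above straightforward.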
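The rewritten split arithmetic guarantees that every shuffled record lands in exactly one split, with any rounding remainder going to the test split. A standalone re-run of the same logic (the record count below is a stand-in, not the dataset's actual size):

# Standalone check of the 80/10/10 split arithmetic from _split_generators.
data = list(range(1000))  # stand-in for the list loaded from ner.json

num_data = len(data)
num_train_data = int(num_data * 0.8)                    # 800
num_validation_data = (num_data - num_train_data) // 2  # 100

train = data[:num_train_data]
validation = data[num_train_data : num_train_data + num_validation_data]
test = data[num_train_data + num_validation_data :]

# The three slices partition the list: 800 + 100 + 100 == 1000; with an odd
# remainder, test simply receives one more record than validation.
assert len(train) + len(validation) + len(test) == num_data
print(len(train), len(validation), len(test))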