update kobest v1 data loader

Files changed:
- dataset_infos.json (+1, -1)
- kobest_v1.py (+102, -58)
dataset_infos.json CHANGED
@@ -166,7 +166,7 @@
         "num_classes": 4,
         "names": [
           "ending_1",
-          "
+          "ending_2",
           "ending_3",
           "ending_4"
         ],
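The one-line fix above restores the truncated second entry so that the four-way ClassLabel recorded in dataset_infos.json lists all of ending_1 through ending_4. As a minimal illustration (not part of the commit), a datasets.ClassLabel built from that names list maps between ids and names like this:

    from datasets import ClassLabel

    # Sketch only: the repaired "names" list from dataset_infos.json.
    endings = ClassLabel(names=["ending_1", "ending_2", "ending_3", "ending_4"])

    print(endings.num_classes)          # 4
    print(endings.str2int("ending_2"))  # 1 (ids follow list order)
    print(endings.int2str(3))           # ending_4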
kobest_v1.py CHANGED
@@ -16,7 +16,8 @@ _DESCRIPTION = """\
 The dataset contains data for KoBEST dataset
 """
 
-_URL = "https://github.com/SKT-LSL/KoBEST_datarepo"
+_URL = "https://github.com/SKT-LSL/KoBEST_datarepo/raw/main"
+
 
 _DATA_URLS = {
     "boolq": {
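The hunk above points _URL at the repository's raw-content base (/raw/main), so paths built on top of it fetch the data files themselves rather than GitHub's HTML pages. A hedged sketch of that pattern follows; the relative path is invented for illustration and is not taken from _DATA_URLS:

    import posixpath

    _URL = "https://github.com/SKT-LSL/KoBEST_datarepo/raw/main"

    # Hypothetical relative path, for illustration only.
    relative_path = "BoolQ/train.tsv"

    # posixpath.join keeps forward slashes, which is what a URL needs.
    print(posixpath.join(_URL, relative_path))
    # https://github.com/SKT-LSL/KoBEST_datarepo/raw/main/BoolQ/train.tsv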
@@ -46,6 +47,8 @@ _DATA_URLS = {
     },
 }
 
+_LICENSE = "CC-BY-SA-4.0"
+
 
 class KoBESTConfig(datasets.BuilderConfig):
     """Config for building KoBEST"""
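The new _LICENSE constant records the dataset license. The diff does not show where it is consumed, but in Hugging Face dataset scripts such a constant is usually forwarded to DatasetInfo; a hedged sketch, not taken from the commit:

    import datasets

    _LICENSE = "CC-BY-SA-4.0"

    # Sketch only: a typical place where the license string ends up.
    info = datasets.DatasetInfo(description="KoBEST (illustrative)", license=_LICENSE)
    print(info.license)  # CC-BY-SA-4.0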
@@ -66,6 +69,26 @@ class KoBESTConfig(datasets.BuilderConfig):
         self.url = url
 
 
+# class KoBESTConfig(datasets.BuilderConfig):
+#     """BuilderConfig for KoTEST."""
+#
+#     def __init__(
+#         self,
+#         features,
+#         data_url,
+#         file_map,
+#         url,
+#         **kwargs,
+#     ):
+#         """BuilderConfig for KoTEST."""
+#
+#         super(KoBESTConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
+#         self.features = features
+#         self.data_url = data_url
+#         self.file_map = file_map
+#         self.url = url
+
+
 class KoBEST(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIGS = [
         KoBESTConfig(name=name, description=_DESCRIPTION, data_url=_DATA_URLS[name], citation=_CITATAION, url=_URL)
@@ -76,7 +99,7 @@ class KoBEST(datasets.GeneratorBasedBuilder):
     def _info(self):
         features = {}
         if self.config.name == "boolq":
-            labels = ["
+            labels = ["False", "True"]
             features["paragraph"] = datasets.Value("string")
             features["question"] = datasets.Value("string")
             features["label"] = datasets.features.ClassLabel(names=labels)
@@ -90,7 +113,7 @@
             features["label"] = datasets.features.ClassLabel(names=labels)
 
         if self.config.name == "wic":
-            labels = ["
+            labels = ["False", "True"]
             features["word"] = datasets.Value("string")
             features["context_1"] = datasets.Value("string")
             features["context_2"] = datasets.Value("string")
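Both _info hunks replace the previously truncated label lists with explicit ["False", "True"] class names for the boolean tasks (boolq and wic). The rest of _info is not shown in this diff; purely as a hedged sketch, a features dict of this shape is typically wrapped into a DatasetInfo like so:

    import datasets

    # Sketch only; the field names follow the boolq branch shown above.
    features = {
        "paragraph": datasets.Value("string"),
        "question": datasets.Value("string"),
        "label": datasets.features.ClassLabel(names=["False", "True"]),
    }

    info = datasets.DatasetInfo(
        description="KoBEST boolq (illustrative)",
        features=datasets.Features(features),
    )
    print(info.features["label"].names)  # ['False', 'True']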
@@ -127,76 +150,97 @@
             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test, "split": "test"}),
         ]
 
-        # if self.config.name == "boolq":
-        #     train = dl_manager.download_and_extract(self.config.data_url["train"])
-        #     dev = dl_manager.download_and_extract(self.config.data_url["dev"])
-        #     test = dl_manager.download_and_extract(self.config.data_url["test"])
-        #
-        #     return [
-        #         datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train, "split": "train"}),
-        #         datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": dev, "split": "dev"}),
-        #         datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test, "split": "test"}),
-        #     ]
-        #
-
     def _generate_examples(self, filepath, split):
         if self.config.name == "boolq":
             df = pd.read_csv(filepath, sep="\t")
             df = df.dropna()
+            df = df[['Text', 'Question', 'Answer']]
 
-            …
+            df = df.rename(columns={
+                'Text': 'paragraph',
+                'Question': 'question',
+                'Answer': 'label',
+            })
+            df['label'] = [0 if str(s) == 'False' else 1 for s in df['label'].tolist()]
 
-            …
+        elif self.config.name == "copa":
             df = pd.read_csv(filepath, sep="\t")
             df = df.dropna()
-            …
+            df = df[['sentence', 'question', '1', '2', 'Answer']]
+
+            df = df.rename(columns={
+                'sentence': 'premise',
+                'question': 'question',
+                '1': 'alternative_1',
+                '2': 'alternative_2',
+                'Answer': 'label',
+            })
+            df['label'] = [i-1 for i in df['label'].tolist()]
+
+        elif self.config.name == "wic":
             df = pd.read_csv(filepath, sep="\t")
             df = df.dropna()
+            df = df[['Target', 'SENTENCE1', 'SENTENCE2', 'ANSWER']]
 
-            …
+            df = df.rename(columns={
+                'Target': 'word',
+                'SENTENCE1': 'context_1',
+                'SENTENCE2': 'context_2',
+                'ANSWER': 'label',
+            })
+            df['label'] = [0 if str(s) == 'False' else 1 for s in df['label'].tolist()]
 
-            …
+        elif self.config.name == "hellaswag":
             df = pd.read_csv(filepath, sep="\t")
             df = df.dropna()
-            …
+            df = df[['context', 'choice1', 'choice2', 'choice3', 'choice4', 'label']]
+
+            # for id_, row in df.iterrows():
+            #     yield id_, {
+            #         "context": str(row["context"]),
+            #         "ending_1": str(row["choice1"]),
+            #         "ending_2": str(int(row["choice2"])),
+            #         "ending_3": str(int(row["choice3"])),
+            #         "ending_4": str(int(row["choice4"])),
+            #         "label": str(row["label"]),
+            #     }
+            #
+            df = df.rename(columns={
+                'context': 'context',
+                'choice1': 'ending_1',
+                'choice2': 'ending_2',
+                'choice3': 'ending_3',
+                'choice4': 'ending_4',
+                'label': 'label',
+            })
+
+        elif self.config.name == "sentineg":
             df = pd.read_csv(filepath, sep="\t")
             df = df.dropna()
+            df = df[['Text', 'Label']]
+
+            # for id_, row in df.iterrows():
+            #     yield id_, {
+            #         "sentence": str(row["Text"]),
+            #         "label": str(int(row["Label"])),
+            #     }
+
+            df = df.rename(columns={
+                'Text': 'sentence',
+                'Label': 'label',
+            })
+
+        else:
+            raise NotImplementedError
+
+        for id_, row in df.iterrows():
+            features = {key: row[key] for key in row.keys()}
+            yield id_, features
+
 
-            …
+if __name__ == "__main__":
+    for task in ['boolq', 'copa', 'wic', 'hellaswag', 'sentineg']:
+        dataset = datasets.load_dataset("kobest_v1.py", task, ignore_verifications=True)
+        print(dataset)
+        print(dataset['train']['label'])
 
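The rewritten _generate_examples now follows a single pattern for every task: read the TSV, keep the relevant columns, rename them to the feature names declared in _info, normalize the label, and yield each row as a dict. A self-contained sketch of that pattern on a toy boolq-shaped frame (the rows are invented for illustration):

    import pandas as pd

    # Toy rows shaped like the boolq TSV columns used in the diff.
    df = pd.DataFrame({
        "Text": ["first paragraph", "second paragraph"],
        "Question": ["first question?", "second question?"],
        "Answer": [True, False],
    })

    df = df[["Text", "Question", "Answer"]]
    df = df.rename(columns={"Text": "paragraph", "Question": "question", "Answer": "label"})
    # Map the answers onto the 0/1 ids of ClassLabel(names=["False", "True"]).
    df["label"] = [0 if str(s) == "False" else 1 for s in df["label"].tolist()]

    for id_, row in df.iterrows():
        features = {key: row[key] for key in row.keys()}
        print(id_, features)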
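The new __main__ block is a quick smoke test that loads every config through the local script and prints the label column. It uses ignore_verifications=True, as shown in the diff; as an aside (not part of the commit), newer releases of datasets deprecate that argument in favor of verification_mode, so a hedged equivalent would be:

    import datasets

    # Hedged sketch: the same smoke test as the commit's __main__ block, written
    # against the newer verification_mode argument.
    for task in ["boolq", "copa", "wic", "hellaswag", "sentineg"]:
        dataset = datasets.load_dataset("kobest_v1.py", task, verification_mode="no_checks")
        print(dataset)
        print(dataset["train"]["label"])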