holylovenia committed on
Commit
11dfc88
1 Parent(s): 189b294

Upload seaeval.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. seaeval.py +238 -0
seaeval.py ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from pathlib import Path
17
+ from typing import Dict, List, Tuple
18
+
19
+ import datasets
20
+ import pandas as pd
21
+
22
+ from seacrowd.utils import schemas
23
+ from seacrowd.utils.configs import SEACrowdConfig
24
+ from seacrowd.utils.constants import Licenses, Tasks
25
+
26
_CITATION = """\
@article{SeaEval2023,
title={SeaEval for Multilingual Foundation Models: From Cross-Lingual Alignment to Cultural Reasoning},
author={Wang, Bin and Liu, Zhengyuan and Huang, Xin and Jiao, Fangkai and Ding, Yang and Aw, Ai Ti and Chen, Nancy F.},
journal={arXiv preprint arXiv:2309.04766},
year={2023},
url={https://github.com/SeaEval/SeaEval}
}
"""

_DATASETNAME = "seaeval"

# Fixed typos from the previous revision: "alsoc ontains" -> "also contains",
# "Philipines" -> "the Philippines", "Indonesia" -> "Indonesian" (language name).
_DESCRIPTION = """\
SeaEval is a benchmark toolkit for evaluating multilingual LLMs. The benchmark contains 28 datasets,
covering 7 languages. It contains 2 datasets for cross-lingual consistency, each containing parallel
questions for the 7 represented languages. It also contains 4 datasets for cultural reasoning
(multiple choice Q&A) that are in English but focused on regions including Singapore and the Philippines.

This dataloader provides examples for Indonesian, Vietnamese, Malay, and Filipino.
"""

_HOMEPAGE = "https://github.com/SeaEval/SeaEval"

# ISO 639-3 code -> language name; the names match the per-language keys used
# inside the cross-lingual JSON files.
_LANGUAGES = {"ind": "Indonesian", "vie": "Vietnamese", "zlm": "Malay", "fil": "Filipino"}

_LICENSE = Licenses.CC_BY_NC_4_0.value

_LOCAL = False

# Raw JSON files hosted in the SeaEval datasets repository on the HuggingFace Hub.
_URLS = {
    "cross_mmlu": "https://huggingface.co/datasets/SeaEval/SeaEval_datasets/raw/main/cross_mmlu.json",
    "cross_logiqa": "https://huggingface.co/datasets/SeaEval/SeaEval_datasets/raw/main/cross_logiqa.json",
    "sg_eval": "https://huggingface.co/datasets/SeaEval/SeaEval_datasets/raw/main/sg_eval.json",
    "ph_eval": "https://huggingface.co/datasets/SeaEval/SeaEval_datasets/raw/main/ph_eval.json",
}

_SUPPORTED_TASKS = [Tasks.COMMONSENSE_REASONING, Tasks.QUESTION_ANSWERING]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"
67
+
68
+
69
class SeaEvalDataset(datasets.GeneratorBasedBuilder):
    """
    SeaEval is a benchmark for evaluating multilingual LLMs from https://github.com/SeaEval/SeaEval.

    Provides the two cross-lingual-consistency subsets (cross_mmlu, cross_logiqa)
    for each language in ``_LANGUAGES`` and the two English cultural-reasoning
    subsets (sg_eval, ph_eval), each with a "source" and a "seacrowd_qa" schema.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    # Reverse mapping (language name -> ISO code) used to normalize the column
    # names of the cross-lingual JSON files, whose keys are full language names.
    LANGUAGES_EXCHANGED = dict((v, k) for k, v in _LANGUAGES.items())
    SUBSETS_CROSS_MMLU = ["cross_mmlu_" + lang for lang in _LANGUAGES.keys()]
    SUBSETS_CROSS_LOGIQA = ["cross_logiqa_" + lang for lang in _LANGUAGES.keys()]
    # The cultural-reasoning subsets exist only in English.
    SUBSETS = SUBSETS_CROSS_MMLU + SUBSETS_CROSS_LOGIQA + ["sg_eval_eng", "ph_eval_eng"]

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{subset}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME}_{subset} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}_{subset}",
        )
        for subset in SUBSETS
    ]

    BUILDER_CONFIGS += [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{subset}_seacrowd_qa",
            # Fix: seacrowd-schema configs previously carried _SOURCE_VERSION.
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME}_{subset} SEACrowd schema",
            schema="seacrowd_qa",
            subset_id=f"{_DATASETNAME}_{subset}",
        )
        for subset in SUBSETS
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with the features for the selected config.

        Note: ``subset_id`` is e.g. "seaeval_cross_logiqa_ind" (dataset name
        prefix + language suffix), so it must be matched by substring.  The
        previous equality checks (``subset_id == "cross_logiqa"``) could never
        be true, which silently dropped the "context"/"category" fields from
        the source schema.
        """
        if self.config.schema == "source" and "cross_logiqa" in self.config.subset_id:
            # cross_logiqa rows additionally carry a reading-comprehension context.
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "choices": datasets.Sequence(datasets.Value("string")),
                    "answer": datasets.Value("string"),
                }
            )
        elif self.config.schema == "source" and "ph_eval" in self.config.subset_id:
            # ph_eval rows additionally carry a question category.
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "choices": datasets.Sequence(datasets.Value("string")),
                    "answer": datasets.Value("string"),
                    "category": datasets.Value("string"),
                }
            )
        elif self.config.schema == "source":
            # cross_mmlu and sg_eval: plain multiple-choice Q&A.
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "choices": datasets.Sequence(datasets.Value("string")),
                    "answer": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_qa":
            features = schemas.qa_features
        else:
            raise ValueError(f"Unexpected schema received! {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """
        Returns SplitGenerators.

        Downloads only the single JSON file backing this config's subset
        (the previous revision downloaded all four files on every load).
        """
        # subset_id format: "<dataset>_<part1>_<part2>_<lang>", e.g.
        # "seaeval_cross_mmlu_ind" -> file key "cross_mmlu".
        file_key = "_".join(self.config.subset_id.split("_")[1:3])
        paths = {self.config.subset_id: dl_manager.download_and_extract(_URLS[file_key])}

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "paths": paths,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, paths: Dict[str, Path], split: str) -> Tuple[int, Dict]:
        """
        Yields examples as (key, example) tuples.
        """
        # Language code is the last component of the subset_id, e.g.
        # "seaeval_cross_mmlu_ind" -> "ind" (only used for the cross subsets).
        language = self.config.subset_id.split("_")[3]
        examples = None

        for key, path in paths.items():
            if "cross" in key:
                # Cross-lingual files hold one row per question id with a
                # per-language dict of {question, choices, answer}; melt and
                # flatten them, then keep only the requested language.
                data = pd.read_json(path).rename(columns=self.LANGUAGES_EXCHANGED)
                data = pd.melt(data, id_vars=["id"], value_vars=_LANGUAGES.keys(), var_name="language")
                data_flattened = pd.json_normalize(data["value"])
                data_merged = pd.merge(data, data_flattened, left_index=True, right_index=True)
                data_filtered = data_merged[data_merged["language"] == language].drop(columns=["value", "language"])
                examples = data_filtered.to_records()
            elif "eval" in key:
                # sg_eval / ph_eval are already flat per-question JSON.
                examples = pd.read_json(path).to_records()

        # Rows are numpy records; field presence must be tested against
        # ``row.dtype.names`` — ``"context" in row`` iterates the record's
        # *values* (numpy.void has no __contains__), not its field names.
        idx = 0
        if self.config.schema == "source" and "cross_logiqa" in self.config.subset_id:
            for row in examples:
                x = {
                    "id": row["id"],
                    "question": row["question"],
                    "context": row["context"] if "context" in row.dtype.names else None,
                    "choices": row["choices"],
                    "answer": row["answer"],
                }
                yield idx, x
                idx += 1
        elif self.config.schema == "source" and "ph_eval" in self.config.subset_id:
            for row in examples:
                x = {
                    "id": row["id"],
                    "question": row["question"],
                    "choices": row["choices"],
                    "answer": row["answer"],
                    "category": row["category"] if "category" in row.dtype.names else None,
                }
                yield idx, x
                idx += 1
        elif self.config.schema == "source":
            for row in examples:
                x = {
                    "id": row["id"],
                    "question": row["question"],
                    "choices": row["choices"],
                    "answer": row["answer"],
                }
                yield idx, x
                idx += 1
        elif self.config.schema == "seacrowd_qa":
            for row in examples:
                x = {
                    "id": idx,
                    "question_id": row["id"],
                    "document_id": row["id"],
                    "question": row["question"],
                    "type": "multiple_choice",
                    "choices": row["choices"],
                    "context": row["context"] if "context" in row.dtype.names else None,
                    "answer": [row["answer"]],
                    "meta": {},
                }
                yield idx, x
                idx += 1
        else:
            raise ValueError(f"Invalid schema: {self.config.schema}")