yongchanghao committed
Commit
985ffd8
1 Parent(s): 1d17287

Delete loading script

Files changed (1)
  1. LongBench.py +0 -127
LongBench.py DELETED
@@ -1,127 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- import os
-
- import datasets
- import json
-
-
- _DESCRIPTION = """\
- LongBench is a comprehensive benchmark for multilingual and multi-task purposes, with the goal to fully measure and evaluate the ability of pre-trained language models to understand long text. This dataset consists of twenty different tasks, covering key long-text application scenarios such as multi-document QA, single-document QA, summarization, few-shot learning, synthetic tasks, and code completion.
- """
-
- _HOMEPAGE = "https://github.com/THUDM/LongBench"
-
-
- _URL = r"https://huggingface.co/datasets/THUDM/LongBench/resolve/main/data.zip"
-
- task_list = [
-     "narrativeqa",
-     "qasper",
-     "multifieldqa_en",
-     "multifieldqa_zh",
-     "hotpotqa",
-     "2wikimqa",
-     "musique",
-     "dureader",
-     "gov_report",
-     "qmsum",
-     "multi_news",
-     "vcsum",
-     "trec",
-     "triviaqa",
-     "samsum",
-     "lsht",
-     "passage_count",
-     "passage_retrieval_en",
-     "passage_retrieval_zh",
-     "lcc",
-     "repobench-p",
-     "qasper_e",
-     "multifieldqa_en_e",
-     "hotpotqa_e",
-     "2wikimqa_e",
-     "gov_report_e",
-     "multi_news_e",
-     "trec_e",
-     "triviaqa_e",
-     "samsum_e",
-     "passage_count_e",
-     "passage_retrieval_en_e",
-     "lcc_e",
-     "repobench-p_e"
- ]
-
-
- class LongBenchConfig(datasets.BuilderConfig):
-     def __init__(self, **kwargs):
-         super().__init__(version=datasets.Version("1.0.0"), **kwargs)
-
-
- class LongBench(datasets.GeneratorBasedBuilder):
-     BUILDER_CONFIGS = [
-         LongBenchConfig(
-             name=task_name,
-         )
-         for task_name in task_list
-     ]
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "input": datasets.Value("string"),
-                 "context": datasets.Value("string"),
-                 "answers": [datasets.Value("string")],
-                 "length": datasets.Value("int32"),
-                 "dataset": datasets.Value("string"),
-                 "language": datasets.Value("string"),
-                 "all_classes": [datasets.Value("string")],
-                 "_id": datasets.Value("string"),
-             }
-         )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-         )
-
-     def _split_generators(self, dl_manager):
-         data_dir = dl_manager.download_and_extract(_URL)
-         task_name = self.config.name
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "filepath": os.path.join(
-                         data_dir, "data", f"{task_name}.jsonl"
-                     ),
-                 },
-             )
-         ]
-
-     def _generate_examples(self, filepath):
-         with open(filepath, encoding="utf-8") as f:
-             for idx, line in enumerate(f):
-                 key = f"{self.config.name}-{idx}"
-                 item = json.loads(line)
-                 yield key, {
-                     "input": item["input"],
-                     "context": item["context"],
-                     "answers": item["answers"],
-                     "length": item["length"],
-                     "dataset": item["dataset"],
-                     "language": item["language"],
-                     "_id": item["_id"],
-                     "all_classes": item["all_classes"],
-                 }
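
With the loading script gone, the Hub serves LongBench through its standard data-files path, so each task can be loaded directly with `datasets.load_dataset` and no repository code runs at load time. A minimal sketch, assuming the config names still match the `task_list` from the deleted script and that each config keeps the single `test` split the script used to generate:

```python
from datasets import load_dataset

# Load one LongBench task straight from the Hub; no loading script required.
# "narrativeqa" is one of the config names from the deleted task_list.
ds = load_dataset("THUDM/LongBench", "narrativeqa", split="test")

# Each example should keep the schema the deleted _info() declared:
# input, context, answers, length, dataset, language, all_classes, _id.
example = ds[0]
print(example["input"])
print(example["answers"])
```

Since nothing executes at load time anymore, recent versions of the `datasets` library should also no longer require `trust_remote_code=True` for this dataset.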