Chester Palen-Michel committed on
Commit
563b28c
1 Parent(s): 0c18bf0

Add loading script

Files changed (1)
  1. lr-sum.py +160 -0
lr-sum.py ADDED
@@ -0,0 +1,160 @@
"""LR-Sum summarization dataset"""

import json
import os

import datasets

_CITATION = """\
@inproceedings{palen-michel-lignos-2023-lr,
    title = "{LR}-Sum: Summarization for Less-Resourced Languages",
    author = "Palen-Michel, Chester and
      Lignos, Constantine",
    booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
    month = jul,
    year = "2023",
    address = "Toronto, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.findings-acl.427",
    doi = "10.18653/v1/2023.findings-acl.427",
    pages = "6829--6844",
    abstract = "We introduce LR-Sum, a new permissively-licensed dataset created with the goal of enabling further research in automatic summarization for less-resourced languages. LR-Sum contains human-written summaries for 40 languages, many of which are less-resourced. We describe our process for extracting and filtering the dataset from the Multilingual Open Text corpus (Palen-Michel et al., 2022). The source data is public domain newswire collected from Voice of America websites, and LR-Sum is released under a Creative Commons license (CC BY 4.0), making it one of the most openly-licensed multilingual summarization datasets. We describe abstractive and extractive summarization experiments to establish baselines and discuss the limitations of this dataset.",
}
"""

_DESCRIPTION = """\
We introduce LR-Sum, a new permissively-licensed dataset created with the goal of enabling further research in automatic summarization for less-resourced languages.
LR-Sum contains human-written summaries for 40 languages, many of which are less-resourced.
We describe our process for extracting and filtering the dataset from the Multilingual Open Text corpus (Palen-Michel et al., 2022).
The source data is public domain newswire collected from Voice of America websites, and LR-Sum is released under a Creative Commons license (CC BY 4.0), making it one of the most openly-licensed multilingual summarization datasets.
We describe abstractive and extractive summarization experiments to establish baselines and discuss the limitations of this dataset.
"""

_HOMEPAGE = "https://github.com/bltlab"

_LICENSE = "Creative Commons Attribution 4.0 International (CC-BY 4.0)"

_URL = "https://huggingface.co/datasets/bltlab/lr-sum/resolve/main/data/{}.zip"

_LANGUAGES = [
    "amh",
    "aze",
    "ben",
    "bod",
    "bos",
    "ckb",
    "cmn_t",
    "cmn_s",
    "ell",
    "eng",
    "fas",
    "fra",
    "hat",
    "hau",
    "hye",
    "ind",
    "kat",
    "khm",
    "kin",
    "kor",
    "kmr",
    "lao",
    "mkd",
    "mya",
    "nde",
    "por",
    "prs",
    "pus",
    "rus",
    "sna",
    "som",
    "spa",
    "sqi",
    "srp",
    "swh",
    "tha",
    "tir",
    "tur",
    "ukr",
    "urd",
    "uzb",
    "vie",
]


class Lrsum(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        # One BuilderConfig per language code in _LANGUAGES.
        datasets.BuilderConfig(name=lang, version=datasets.Version("1.0.0"))
        for lang in _LANGUAGES
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            version=self.VERSION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        lang = str(self.config.name)
        url = _URL.format(lang)

        data_dir = dl_manager.download_and_extract(url)
        # Every language has a test split; train and validation splits are
        # added only when the corresponding JSONL file exists in the archive.
        ret = [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, lang + "_test.jsonl"),
                },
            )
        ]
        if os.path.exists(os.path.join(data_dir, lang + "_train.jsonl")):
            ret.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, lang + "_train.jsonl"),
                    },
                )
            )
        if os.path.exists(os.path.join(data_dir, lang + "_val.jsonl")):
            ret.append(
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, lang + "_val.jsonl"),
                    },
                )
            )

        return ret

    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            for idx_, row in enumerate(f):
                data = json.loads(row)
                yield idx_, {
                    "id": data["id"],
                    "url": data["url"],
                    "title": data["title"],
                    "summary": data["summary"],
                    "text": data["text"],
                }
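
A loading script like this is normally consumed through `datasets.load_dataset`, passing the Hub repository id and one of the config names declared in `BUILDER_CONFIGS`. A minimal sketch, assuming the `bltlab/lr-sum` repo id from `_URL` above and an installed `datasets` library; the choice of `hau` (Hausa) is illustrative:

from datasets import load_dataset

# Load one language config; config names are the codes in _LANGUAGES
# (mostly ISO 639-3, plus cmn_t/cmn_s for traditional/simplified Chinese).
dataset = load_dataset("bltlab/lr-sum", "hau")

# Every config has a test split; train/validation appear only when the
# corresponding *_train.jsonl / *_val.jsonl files exist in the language's zip.
print(dataset)

# Each example carries the five string fields declared in _info().
example = dataset["test"][0]
print(example["title"])
print(example["summary"])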