"""LR-Sum summarization dataset"""
import json
import os
import datasets

_CITATION = """\
@inproceedings{palen-michel-lignos-2023-lr,
title = "{LR}-Sum: Summarization for Less-Resourced Languages",
author = "Palen-Michel, Chester and
Lignos, Constantine",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.findings-acl.427",
doi = "10.18653/v1/2023.findings-acl.427",
pages = "6829--6844",
abstract = "We introduce LR-Sum, a new permissively-licensed dataset created with the goal of enabling further research in automatic summarization for less-resourced languages.LR-Sum contains human-written summaries for 40 languages, many of which are less-resourced. We describe our process for extracting and filtering the dataset from the Multilingual Open Text corpus (Palen-Michel et al., 2022).The source data is public domain newswire collected from from Voice of America websites, and LR-Sum is released under a Creative Commons license (CC BY 4.0), making it one of the most openly-licensed multilingual summarization datasets. We describe abstractive and extractive summarization experiments to establish baselines and discuss the limitations of this dataset.",
}
"""
_DESCRIPTION = """\
We introduce LR-Sum, a new permissively-licensed dataset created with the goal of enabling further research in automatic summarization for less-resourced languages.
LR-Sum contains human-written summaries for 40 languages, many of which are less-resourced.
We describe our process for extracting and filtering the dataset from the Multilingual Open Text corpus (Palen-Michel et al., 2022).
The source data is public domain newswire collected from Voice of America websites, and LR-Sum is released under a Creative Commons license (CC BY 4.0), making it one of the most openly-licensed multilingual summarization datasets.
We describe abstractive and extractive summarization experiments to establish baselines and discuss the limitations of this dataset.
"""
_HOMEPAGE = "https://github.com/bltlab"
_LICENSE = "Creative Commons Attribution 4.0 International (CC-BY 4.0)"
_URL = "https://huggingface.co/datasets/bltlab/lr-sum/resolve/main/data/{}.zip"
_LANGUAGES = [
"amh",
"aze",
"ben",
"bod",
"bos",
"ckb",
"cmn_t",
"cmn_s",
"ell",
"eng",
"fas",
"fra",
"hat",
"hau",
"hye",
"ind",
"kat",
"khm",
"kin",
"kor",
"kmr",
"lao",
"mkd",
"mya",
"nde",
"por",
"prs",
"pus",
"rus",
"sna",
"som",
"spa",
"sqi",
"srp",
"swh",
"tha",
"tir",
"tur",
"ukr",
"urd",
"uzb",
"vie",
]


class Lrsum(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=lang, version=datasets.Version("1.0.0"))
        for lang in _LANGUAGES
    ]

def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
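            # Schema: one news article per example, with its id, source URL,
            # headline (title), human-written summary, and full article text.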
features=datasets.Features(
{
"id": datasets.Value("string"),
"url": datasets.Value("string"),
"title": datasets.Value("string"),
"summary": datasets.Value("string"),
"text": datasets.Value("string"),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
license=_LICENSE,
version=self.VERSION,
)

    def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
lang = str(self.config.name)
url = _URL.format(lang)
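        # Fetch the per-language zip archive and extract it to a local cache.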
data_dir = dl_manager.download_and_extract(url)
ret = [
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": os.path.join(data_dir, lang + "_test.jsonl"),
},
)
]
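        # The test split is always included; train and validation files are
        # not shipped for every language, so add those splits only if present.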
if os.path.exists(os.path.join(data_dir, lang + "_train.jsonl")):
            ret.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, lang + "_train.jsonl"),
                    },
                )
            )
if os.path.exists(os.path.join(data_dir, lang + "_val.jsonl")):
ret.append(
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath": os.path.join(data_dir, lang + "_val.jsonl"),
},
)
)
return ret

    def _generate_examples(self, filepath):
"""Yields examples as (key, example) tuples."""
with open(filepath, encoding="utf-8") as f:
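            # Each line of the JSONL file is a single article record.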
for idx_, row in enumerate(f):
data = json.loads(row)
yield idx_, {
"id": data["id"],
"url": data["url"],
"title": data["title"],
"summary": data["summary"],
"text": data["text"],
                }
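

if __name__ == "__main__":
    # Minimal smoke test: a sketch assuming this script is hosted as
    # bltlab/lr-sum on the Hugging Face Hub and that the `datasets` library
    # is installed (recent versions may also require trust_remote_code=True).
    from datasets import load_dataset

    lrsum_eng = load_dataset("bltlab/lr-sum", "eng")
    print(lrsum_eng["test"][0]["title"])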