"""LR-Sum summarization dataset."""

import json
import os

import datasets

_CITATION = """\
@inproceedings{palen-michel-lignos-2023-lr,
    title = "{LR}-Sum: Summarization for Less-Resourced Languages",
    author = "Palen-Michel, Chester and
      Lignos, Constantine",
    booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
    month = jul,
    year = "2023",
    address = "Toronto, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.findings-acl.427",
    doi = "10.18653/v1/2023.findings-acl.427",
    pages = "6829--6844",
    abstract = "We introduce LR-Sum, a new permissively-licensed dataset created with the goal of enabling further research in automatic summarization for less-resourced languages. LR-Sum contains human-written summaries for 40 languages, many of which are less-resourced. We describe our process for extracting and filtering the dataset from the Multilingual Open Text corpus (Palen-Michel et al., 2022). The source data is public domain newswire collected from Voice of America websites, and LR-Sum is released under a Creative Commons license (CC BY 4.0), making it one of the most openly-licensed multilingual summarization datasets. We describe abstractive and extractive summarization experiments to establish baselines and discuss the limitations of this dataset.",
}
"""

_DESCRIPTION = """\
We introduce LR-Sum, a new permissively-licensed dataset created with the goal of enabling further research in automatic summarization for less-resourced languages.
LR-Sum contains human-written summaries for 40 languages, many of which are less-resourced.
We describe our process for extracting and filtering the dataset from the Multilingual Open Text corpus (Palen-Michel et al., 2022).
The source data is public domain newswire collected from Voice of America websites, and LR-Sum is released under a Creative Commons license (CC BY 4.0), making it one of the most openly-licensed multilingual summarization datasets.
We describe abstractive and extractive summarization experiments to establish baselines and discuss the limitations of this dataset.
"""

_HOMEPAGE = "https://github.com/bltlab"

_LICENSE = "Creative Commons Attribution 4.0 International (CC-BY 4.0)"
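
# The {} placeholder is filled with a language code; each archive is a zip of
# per-split jsonl files (see _split_generators below).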
_URL = "https://huggingface.co/datasets/bltlab/lr-sum/resolve/main/data/{}.zip"

_LANGUAGES = [
    "amh",
    "aze",
    "ben",
    "bod",
    "bos",
    "ckb",
    "cmn_t",
    "cmn_s",
    "ell",
    "eng",
    "fas",
    "fra",
    "hat",
    "hau",
    "hye",
    "ind",
    "kat",
    "khm",
    "kin",
    "kor",
    "kmr",
    "lao",
    "mkd",
    "mya",
    "nde",
    "por",
    "prs",
    "pus",
    "rus",
    "sna",
    "som",
    "spa",
    "sqi",
    "srp",
    "swh",
    "tha",
    "tir",
    "tur",
    "ukr",
    "urd",
    "uzb",
    "vie",
]


class Lrsum(datasets.GeneratorBasedBuilder):
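    """LR-Sum dataset builder with one configuration per language code."""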

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=lang, version=datasets.Version("1.0.0"))
        for lang in _LANGUAGES
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "url": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            version=self.VERSION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        lang = str(self.config.name)
        url = _URL.format(lang)

        data_dir = dl_manager.download_and_extract(url)
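        # Every language has a test split, but train and validation files are
        # only present for languages with enough data, so probe for them.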
        ret = [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, lang + "_test.jsonl"),
                },
            )
        ]
        if os.path.exists(os.path.join(data_dir, lang + "_train.jsonl")):
            ret.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, lang + "_train.jsonl"),
                    },
                )
            )
        if os.path.exists(os.path.join(data_dir, lang + "_val.jsonl")):
            ret.append(
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, lang + "_val.jsonl"),
                    },
                )
            )

        return ret

    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            for idx_, row in enumerate(f):
                data = json.loads(row)
                yield idx_, {
                    "id": data["id"],
                    "url": data["url"],
                    "title": data["title"],
                    "summary": data["summary"],
                    "text": data["text"],
                }