"""CrossSum cross-lingual abstractive summarization dataset."""
import json
import os
import datasets
_CITATION = """\
@article{hasan2021crosssum,
author = {Tahmid Hasan and Abhik Bhattacharjee and Wasi Uddin Ahmad and Yuan-Fang Li and Yong-bin Kang and Rifat Shahriyar},
title = {CrossSum: Beyond English-Centric Cross-Lingual Abstractive Text Summarization for 1500+ Language Pairs},
journal = {CoRR},
volume = {abs/2112.08804},
year = {2021},
url = {https://arxiv.org/abs/2112.08804},
eprinttype = {arXiv},
eprint = {2112.08804}
}
"""
_DESCRIPTION = """\
We present CrossSum, a large-scale dataset
comprising 1.70 million cross-lingual article-summary samples in 1500+ language pairs
constituting 45 languages. We use the multilingual XL-Sum dataset and align identical
articles written in different languages via cross-lingual retrieval using a language-agnostic
representation model.
"""
_HOMEPAGE = "https://github.com/csebuetnlp/CrossSum"
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"
_URL = "https://huggingface.co/datasets/csebuetnlp/CrossSum/resolve/main/data/{}-{}_CrossSum.tar.bz2"
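# For example, _URL.format("english", "french") resolves to:
# https://huggingface.co/datasets/csebuetnlp/CrossSum/resolve/main/data/english-french_CrossSum.tar.bz2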
_LANGUAGES = [
"oromo",
"french",
"amharic",
"arabic",
"azerbaijani",
"bengali",
"burmese",
"chinese_simplified",
"chinese_traditional",
"welsh",
"english",
"kirundi",
"gujarati",
"hausa",
"hindi",
"igbo",
"indonesian",
"japanese",
"korean",
"kyrgyz",
"marathi",
"spanish",
"scottish_gaelic",
"nepali",
"pashto",
"persian",
"pidgin",
"portuguese",
"punjabi",
"russian",
"serbian_cyrillic",
"serbian_latin",
"sinhala",
"somali",
"swahili",
"tamil",
"telugu",
"thai",
"tigrinya",
"turkish",
"ukrainian",
"urdu",
"uzbek",
"vietnamese",
"yoruba",
]

class Crosssum(datasets.GeneratorBasedBuilder):
    """Builder for the CrossSum cross-lingual abstractive summarization dataset."""

    VERSION = datasets.Version("1.0.0")

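    # One config per ordered (source, target) language pair, named "{src}-{tgt}",
    # e.g. "bengali-english"; with 45 languages this yields 45 * 45 = 2025 configs.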
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="{}-{}".format(src_lang, tgt_lang),
version=datasets.Version("1.0.0")
)
for src_lang in _LANGUAGES
for tgt_lang in _LANGUAGES
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"source_url": datasets.Value("string"),
"target_url": datasets.Value("string"),
"summary": datasets.Value("string"),
"text": datasets.Value("string"),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
license=_LICENSE,
version=self.VERSION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
        lang_pair = self.config.name
        url = _URL.format(*lang_pair.split("-"))
data_dir = dl_manager.download_and_extract(url)
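        # The extracted archive contains one JSONL file per split:
        # {pair}_train.jsonl, {pair}_val.jsonl and {pair}_test.jsonl.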
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": os.path.join(data_dir, lang_pairs + "_train.jsonl"),
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": os.path.join(data_dir, lang_pairs + "_test.jsonl"),
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath": os.path.join(data_dir, lang_pairs + "_val.jsonl"),
},
),
]
def _generate_examples(self, filepath):
"""Yields examples as (key, example) tuples."""
with open(filepath, encoding="utf-8") as f:
            for idx, row in enumerate(f):
                data = json.loads(row)
                yield idx, {
"source_url": data["source_url"],
"target_url": data["target_url"],
"summary": data["summary"],
"text": data["text"],
}
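# A minimal usage sketch, assuming this script is saved locally as "crosssum.py"
# (the config name must be a "{source}-{target}" pair built from _LANGUAGES):
#
#   from datasets import load_dataset
#   ds = load_dataset("crosssum.py", "bengali-english")
#   print(ds["train"][0]["summary"])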