Datasets:

Modalities:
Text
Formats:
parquet
ArXiv:
Libraries:
Datasets
pandas
License:
albertvillanova HF staff committed on
Commit
e1ba69c
1 Parent(s): a6b15b4

Update metadata

Browse files
Files changed (2) hide show
  1. README.md +15 -7
  2. wiki_lingua.py +17 -24
README.md CHANGED
@@ -482,7 +482,7 @@ config_names:
482
 
483
  ### Dataset Summary
484
 
485
- We introduce WikiLingua, a large-scale, multilingual dataset for the evaluation of crosslingual abstractive summarization systems. We extract article and summary pairs in 18 languages from WikiHow, a high quality, collaborative resource of how-to guides on a diverse set of topics written by human authors. We create gold-standard article-summary alignments across languages by aligning the images that are used to describe each how-to step in an article.
486
 
487
  ### Supported Tasks and Leaderboards
488
 
@@ -631,12 +631,20 @@ ______________________________
631
  ### Citation Information
632
 
633
  ```bibtex
634
- @article{ladhak-wiki-2020,
635
- title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},
636
- authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},
637
- journal = {arXiv preprint arXiv:2010.03093},
638
- year = {2020},
639
- url = {https://arxiv.org/abs/2010.03093}
 
 
 
 
 
 
 
 
640
  }
641
  ```
642
 
 
482
 
483
  ### Dataset Summary
484
 
485
+ We introduce WikiLingua, a large-scale, multilingual dataset for the evaluation of cross-lingual abstractive summarization systems. We extract article and summary pairs in 18 languages from WikiHow, a high quality, collaborative resource of how-to guides on a diverse set of topics written by human authors. We create gold-standard article-summary alignments across languages by aligning the images that are used to describe each how-to step in an article.
486
 
487
  ### Supported Tasks and Leaderboards
488
 
 
631
  ### Citation Information
632
 
633
  ```bibtex
634
+ @inproceedings{ladhak-etal-2020-wikilingua,
635
+ title = "{W}iki{L}ingua: A New Benchmark Dataset for Cross-Lingual Abstractive Summarization",
636
+ author = "Ladhak, Faisal and
637
+ Durmus, Esin and
638
+ Cardie, Claire and
639
+ McKeown, Kathleen",
640
+ booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020",
641
+ month = nov,
642
+ year = "2020",
643
+ address = "Online",
644
+ publisher = "Association for Computational Linguistics",
645
+ url = "https://aclanthology.org/2020.findings-emnlp.360",
646
+ doi = "10.18653/v1/2020.findings-emnlp.360",
647
+ pages = "4034--4048",
648
  }
649
  ```
650
 
wiki_lingua.py CHANGED
@@ -12,7 +12,7 @@
12
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
  # See the License for the specific language governing permissions and
14
  # limitations under the License.
15
- """TODO: Add a description here."""
16
 
17
 
18
  import json
@@ -22,18 +22,26 @@ import datasets
22
 
23
  # Find for instance the citation on arxiv or on the dataset repo/website
24
  _CITATION = """\
25
- @article{ladhak-wiki-2020,
26
- title = {WikiLingua: A New Benchmark Dataset for Multilingual Abstractive Summarization},
27
- authors = {Faisal Ladhak, Esin Durmus, Claire Cardie and Kathleen McKeown},
28
- journal = {arXiv preprint arXiv:2010.03093},
29
- year = {2020},
30
- url = {https://arxiv.org/abs/2010.03093}
 
 
 
 
 
 
 
 
31
  }
32
  """
33
 
34
  _DESCRIPTION = """\
35
  WikiLingua is a large-scale multilingual dataset for the evaluation of
36
- crosslingual abstractive summarization systems. The dataset includes ~770k
37
  article and summary pairs in 18 languages from WikiHow. The gold-standard
38
  article-summary alignments across languages was done by aligning the images
39
  that are used to describe each how-to step in an article.
@@ -68,21 +76,10 @@ _LANGUAGES = [
68
 
69
 
70
  class WikiLingua(datasets.GeneratorBasedBuilder):
71
- """TODO: Short description of my dataset."""
72
 
73
  VERSION = datasets.Version("1.1.1")
74
 
75
- # This is an example of a dataset with multiple configurations.
76
- # If you don't want/need to define several sub-sets in your dataset,
77
- # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
78
-
79
- # If you need to make complex sub-parts in the datasets with configurable options
80
- # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
81
- # BUILDER_CONFIG_CLASS = MyBuilderConfig
82
-
83
- # You will be able to load one or the other configurations in the following list with
84
- # data = datasets.load_dataset('my_dataset', 'first_domain')
85
- # data = datasets.load_dataset('my_dataset', 'second_domain')
86
  BUILDER_CONFIGS = [
87
  datasets.BuilderConfig(
88
  name=lang,
@@ -129,10 +126,6 @@ class WikiLingua(datasets.GeneratorBasedBuilder):
129
  description=_DESCRIPTION,
130
  # This defines the different columns of the dataset and their types
131
  features=features, # Here we define them above because they are different between the two configurations
132
- # If there's a common (input, target) tuple from the features,
133
- # specify them here. They'll be used if as_supervised=True in
134
- # builder.as_dataset.
135
- supervised_keys=None,
136
  # Homepage of the dataset for documentation
137
  homepage=_HOMEPAGE,
138
  # License for the dataset if available
 
12
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
  # See the License for the specific language governing permissions and
14
  # limitations under the License.
15
+ """WikiLingua."""
16
 
17
 
18
  import json
 
22
 
23
  # Find for instance the citation on arxiv or on the dataset repo/website
24
  _CITATION = """\
25
+ @inproceedings{ladhak-etal-2020-wikilingua,
26
+ title = "{W}iki{L}ingua: A New Benchmark Dataset for Cross-Lingual Abstractive Summarization",
27
+ author = "Ladhak, Faisal and
28
+ Durmus, Esin and
29
+ Cardie, Claire and
30
+ McKeown, Kathleen",
31
+ booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020",
32
+ month = nov,
33
+ year = "2020",
34
+ address = "Online",
35
+ publisher = "Association for Computational Linguistics",
36
+ url = "https://aclanthology.org/2020.findings-emnlp.360",
37
+ doi = "10.18653/v1/2020.findings-emnlp.360",
38
+ pages = "4034--4048",
39
  }
40
  """
41
 
42
  _DESCRIPTION = """\
43
  WikiLingua is a large-scale multilingual dataset for the evaluation of
44
+ cross-lingual abstractive summarization systems. The dataset includes ~770k
45
  article and summary pairs in 18 languages from WikiHow. The gold-standard
46
  article-summary alignments across languages was done by aligning the images
47
  that are used to describe each how-to step in an article.
 
76
 
77
 
78
  class WikiLingua(datasets.GeneratorBasedBuilder):
79
+ """WikiLingua dataset."""
80
 
81
  VERSION = datasets.Version("1.1.1")
82
 
 
 
 
 
 
 
 
 
 
 
 
83
  BUILDER_CONFIGS = [
84
  datasets.BuilderConfig(
85
  name=lang,
 
126
  description=_DESCRIPTION,
127
  # This defines the different columns of the dataset and their types
128
  features=features, # Here we define them above because they are different between the two configurations
 
 
 
 
129
  # Homepage of the dataset for documentation
130
  homepage=_HOMEPAGE,
131
  # License for the dataset if available