system (HF staff) committed
Commit 53e16a4
1 Parent(s): 54c4b12

Update files from the datasets library (from 1.16.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
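
After this update the dataset loads through the standard API; a minimal sketch (assumes `datasets>=1.16.0` is installed and the Hub is reachable):

```python
from datasets import load_dataset

# "wmt17" with one of the language-pair configs listed in
# dataset_infos.json below (cs-en, de-en, fi-en, lv-en, ru-en, tr-en, zh-en).
dataset = load_dataset("wmt17", "cs-en")
print(dataset["train"][0]["translation"])  # {"cs": "...", "en": "..."}
```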

Files changed (4)
  1. README.md +7 -0
  2. dataset_infos.json +1 -1
  3. dummy/cs-en/1.0.0/dummy_data.zip +2 -2
  4. wmt_utils.py +111 -103
README.md CHANGED
@@ -1,5 +1,12 @@
  ---
+ pretty_name: WMT17
  paperswithcode_id: null
+ multilinguality:
+ - translation
+ task_categories:
+ - conditional-text-generation
+ task_ids:
+ - machine-translation
  ---
 
  # Dataset Card for "wmt17"
dataset_infos.json CHANGED
@@ -1 +1 @@
- {"cs-en": {"description": "Translate dataset based on the data from statmt.org.\n\nVersions exists for the different years using a combination of multiple data\nsources. The base `wmt_translate` allows you to create your own config to choose\nyour own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.\n\n```\nconfig = datasets.wmt.WmtConfig(\n version=\"0.0.1\",\n language_pair=(\"fr\", \"de\"),\n subsets={\n datasets.Split.TRAIN: [\"commoncrawl_frde\"],\n datasets.Split.VALIDATION: [\"euelections_dev2019\"],\n },\n)\nbuilder = datasets.builder(\"wmt_translate\", config=config)\n```\n\n", "citation": "\n@InProceedings{bojar-EtAl:2017:WMT1,\n author = {Bojar, Ond\u000b{r}ej and Chatterjee, Rajen and Federmann, Christian and Graham, Yvette and Haddow, Barry and Huang, Shujian and Huck, Matthias and Koehn, Philipp and Liu, Qun and Logacheva, Varvara and Monz, Christof and Negri, Matteo and Post, Matt and Rubino, Raphael and Specia, Lucia and Turchi, Marco},\n title = {Findings of the 2017 Conference on Machine Translation (WMT17)},\n booktitle = {Proceedings of the Second Conference on Machine Translation, Volume 2: Shared Task Papers},\n month = {September},\n year = {2017},\n address = {Copenhagen, Denmark},\n publisher = {Association for Computational Linguistics},\n pages = {169--214},\n url = {http://www.aclweb.org/anthology/W17-4717}\n}\n", "homepage": "http://www.statmt.org/wmt17/translation-task.html", "license": "", "features": {"translation": {"languages": ["cs", "en"], "id": null, "_type": "Translation"}}, "supervised_keys": {"input": "cs", "output": "en"}, "builder_name": "wmt17", "config_name": "cs-en", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 674430, "num_examples": 3005, "dataset_name": "wmt17"}, "train": {"name": "train", "num_bytes": 300709796, "num_examples": 1018291, "dataset_name": "wmt17"}, "validation": {"name": "validation", "num_bytes": 707870, "num_examples": 2999, "dataset_name": "wmt17"}}, "download_checksums": {"https://huggingface.co/datasets/wmt/wmt13/resolve/main/training-parallel-europarl-v7.tgz": {"num_bytes": 657632379, "checksum": "0224c7c710c8a063dfd893b0cc0830202d61f4c75c17eb8e31836103d27d96e7"}, "https://huggingface.co/datasets/wmt/wmt13/resolve/main/training-parallel-commoncrawl.tgz": {"num_bytes": 918311367, "checksum": "c7a74e2ea01ac6c920123108627e35278d4ccb5701e15428ffa34de86fa3a9e5"}, "https://huggingface.co/datasets/wmt/wmt17/resolve/main/translation-task/training-parallel-nc-v12.tgz": {"num_bytes": 168591139, "checksum": "2b45f30ef1d550d302fd17dd3a5cbe19134ccc4c2cf50c2dae534aee600101a2"}, "https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz": {"num_bytes": 38654961, "checksum": "7a7deccf82ebb05ba508dba5eb21356492224e8f630ec4f992132b029b4b25e7"}}, "download_size": 1783189846, "dataset_size": 302092096, "size_in_bytes": 2085281942}}
+ {"cs-en": {"description": "Translate dataset based on the data from statmt.org.\n\nVersions exists for the different years using a combination of multiple data\nsources. The base `wmt_translate` allows you to create your own config to choose\nyour own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.\n\n```\nconfig = datasets.wmt.WmtConfig(\n version=\"0.0.1\",\n language_pair=(\"fr\", \"de\"),\n subsets={\n datasets.Split.TRAIN: [\"commoncrawl_frde\"],\n datasets.Split.VALIDATION: [\"euelections_dev2019\"],\n },\n)\nbuilder = datasets.builder(\"wmt_translate\", config=config)\n```\n\n", "citation": "\n@InProceedings{bojar-EtAl:2017:WMT1,\n author = {Bojar, Ond\u000b{r}ej and Chatterjee, Rajen and Federmann, Christian and Graham, Yvette and Haddow, Barry and Huang, Shujian and Huck, Matthias and Koehn, Philipp and Liu, Qun and Logacheva, Varvara and Monz, Christof and Negri, Matteo and Post, Matt and Rubino, Raphael and Specia, Lucia and Turchi, Marco},\n title = {Findings of the 2017 Conference on Machine Translation (WMT17)},\n booktitle = {Proceedings of the Second Conference on Machine Translation, Volume 2: Shared Task Papers},\n month = {September},\n year = {2017},\n address = {Copenhagen, Denmark},\n publisher = {Association for Computational Linguistics},\n pages = {169--214},\n url = {http://www.aclweb.org/anthology/W17-4717}\n}\n", "homepage": "http://www.statmt.org/wmt17/translation-task.html", "license": "", "features": {"translation": {"languages": ["cs", "en"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": {"input": "cs", "output": "en"}, "task_templates": null, "builder_name": "wmt17", "config_name": "cs-en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 300698431, "num_examples": 1018291, "dataset_name": "wmt17"}, "validation": {"name": "validation", "num_bytes": 707870, "num_examples": 2999, "dataset_name": "wmt17"}, "test": {"name": "test", "num_bytes": 674430, "num_examples": 3005, "dataset_name": "wmt17"}}, "download_checksums": {"https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-europarl-v7.zip": {"num_bytes": 658092427, "checksum": "5b2d8b32c2396da739b4e731871c597fcc6e75729becd74619d0712eecf7770e"}, "https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-commoncrawl.zip": {"num_bytes": 918734483, "checksum": "5ffe980072ea29adfd84568d099bea366d9f72772b988e670794ae851b4e5627"}, "https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/training-parallel-nc-v12.zip": {"num_bytes": 168699339, "checksum": "a3e922fd19485a25870e628fdecb81b7d621f545e16df21a38fae15127413122"}, "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip": {"num_bytes": 38714274, "checksum": "d796e363740fdc4261aa6f5a3d2f8223e3adaee7d737b7724863325b8956dfd1"}}, "download_size": 1784240523, "post_processing_size": null, "dataset_size": 302080731, "size_in_bytes": 2086321254}, "de-en": {"description": "Translate dataset based on the data from statmt.org.\n\nVersions exists for the different years using a combination of multiple data\nsources. 
The base `wmt_translate` allows you to create your own config to choose\nyour own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.\n\n```\nconfig = datasets.wmt.WmtConfig(\n version=\"0.0.1\",\n language_pair=(\"fr\", \"de\"),\n subsets={\n datasets.Split.TRAIN: [\"commoncrawl_frde\"],\n datasets.Split.VALIDATION: [\"euelections_dev2019\"],\n },\n)\nbuilder = datasets.builder(\"wmt_translate\", config=config)\n```\n\n", "citation": "\n@InProceedings{bojar-EtAl:2017:WMT1,\n author = {Bojar, Ond\u000b{r}ej and Chatterjee, Rajen and Federmann, Christian and Graham, Yvette and Haddow, Barry and Huang, Shujian and Huck, Matthias and Koehn, Philipp and Liu, Qun and Logacheva, Varvara and Monz, Christof and Negri, Matteo and Post, Matt and Rubino, Raphael and Specia, Lucia and Turchi, Marco},\n title = {Findings of the 2017 Conference on Machine Translation (WMT17)},\n booktitle = {Proceedings of the Second Conference on Machine Translation, Volume 2: Shared Task Papers},\n month = {September},\n year = {2017},\n address = {Copenhagen, Denmark},\n publisher = {Association for Computational Linguistics},\n pages = {169--214},\n url = {http://www.aclweb.org/anthology/W17-4717}\n}\n", "homepage": "http://www.statmt.org/wmt17/translation-task.html", "license": "", "features": {"translation": {"languages": ["de", "en"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": {"input": "de", "output": "en"}, "task_templates": null, "builder_name": "wmt17", "config_name": "de-en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1715537443, "num_examples": 5906184, "dataset_name": "wmt17"}, "validation": {"name": "validation", "num_bytes": 735516, "num_examples": 2999, "dataset_name": "wmt17"}, "test": {"name": "test", "num_bytes": 729519, "num_examples": 3004, "dataset_name": "wmt17"}}, "download_checksums": {"https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-europarl-v7.zip": {"num_bytes": 658092427, "checksum": "5b2d8b32c2396da739b4e731871c597fcc6e75729becd74619d0712eecf7770e"}, "https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-commoncrawl.zip": {"num_bytes": 918734483, "checksum": "5ffe980072ea29adfd84568d099bea366d9f72772b988e670794ae851b4e5627"}, "https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/training-parallel-nc-v12.zip": {"num_bytes": 168699339, "checksum": "a3e922fd19485a25870e628fdecb81b7d621f545e16df21a38fae15127413122"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/rapid2016.zip": {"num_bytes": 161141713, "checksum": "93217093c624d9e16023fee98afb089208cca5937c2c08ee7edc707196d09a28"}, "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip": {"num_bytes": 38714274, "checksum": "d796e363740fdc4261aa6f5a3d2f8223e3adaee7d737b7724863325b8956dfd1"}}, "download_size": 1945382236, "post_processing_size": null, "dataset_size": 1717002478, "size_in_bytes": 3662384714}, "fi-en": {"description": "Translate dataset based on the data from statmt.org.\n\nVersions exists for the different years using a combination of multiple data\nsources. 
The base `wmt_translate` allows you to create your own config to choose\nyour own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.\n\n```\nconfig = datasets.wmt.WmtConfig(\n version=\"0.0.1\",\n language_pair=(\"fr\", \"de\"),\n subsets={\n datasets.Split.TRAIN: [\"commoncrawl_frde\"],\n datasets.Split.VALIDATION: [\"euelections_dev2019\"],\n },\n)\nbuilder = datasets.builder(\"wmt_translate\", config=config)\n```\n\n", "citation": "\n@InProceedings{bojar-EtAl:2017:WMT1,\n author = {Bojar, Ond\u000b{r}ej and Chatterjee, Rajen and Federmann, Christian and Graham, Yvette and Haddow, Barry and Huang, Shujian and Huck, Matthias and Koehn, Philipp and Liu, Qun and Logacheva, Varvara and Monz, Christof and Negri, Matteo and Post, Matt and Rubino, Raphael and Specia, Lucia and Turchi, Marco},\n title = {Findings of the 2017 Conference on Machine Translation (WMT17)},\n booktitle = {Proceedings of the Second Conference on Machine Translation, Volume 2: Shared Task Papers},\n month = {September},\n year = {2017},\n address = {Copenhagen, Denmark},\n publisher = {Association for Computational Linguistics},\n pages = {169--214},\n url = {http://www.aclweb.org/anthology/W17-4717}\n}\n", "homepage": "http://www.statmt.org/wmt17/translation-task.html", "license": "", "features": {"translation": {"languages": ["fi", "en"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": {"input": "fi", "output": "en"}, "task_templates": null, "builder_name": "wmt17", "config_name": "fi-en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 743856525, "num_examples": 2656542, "dataset_name": "wmt17"}, "validation": {"name": "validation", "num_bytes": 1410515, "num_examples": 6000, "dataset_name": "wmt17"}, "test": {"name": "test", "num_bytes": 1388828, "num_examples": 6004, "dataset_name": "wmt17"}}, "download_checksums": {"https://huggingface.co/datasets/wmt/wmt16/resolve/main-zip/translation-task/training-parallel-ep-v8.zip": {"num_bytes": 225190342, "checksum": "387e570a6812948e30c64885e64a1d3735a66b7c0bc424fcff1208ef11110149"}, "https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/wiki-titles.zip": {"num_bytes": 9485604, "checksum": "b3134566261b39d830eed345df1be1864039339cfeccf24b1bf86398c9e4a87c"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/rapid2016.zip": {"num_bytes": 161141713, "checksum": "93217093c624d9e16023fee98afb089208cca5937c2c08ee7edc707196d09a28"}, "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip": {"num_bytes": 38714274, "checksum": "d796e363740fdc4261aa6f5a3d2f8223e3adaee7d737b7724863325b8956dfd1"}}, "download_size": 434531933, "post_processing_size": null, "dataset_size": 746655868, "size_in_bytes": 1181187801}, "lv-en": {"description": "Translate dataset based on the data from statmt.org.\n\nVersions exists for the different years using a combination of multiple data\nsources. 
The base `wmt_translate` allows you to create your own config to choose\nyour own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.\n\n```\nconfig = datasets.wmt.WmtConfig(\n version=\"0.0.1\",\n language_pair=(\"fr\", \"de\"),\n subsets={\n datasets.Split.TRAIN: [\"commoncrawl_frde\"],\n datasets.Split.VALIDATION: [\"euelections_dev2019\"],\n },\n)\nbuilder = datasets.builder(\"wmt_translate\", config=config)\n```\n\n", "citation": "\n@InProceedings{bojar-EtAl:2017:WMT1,\n author = {Bojar, Ond\u000b{r}ej and Chatterjee, Rajen and Federmann, Christian and Graham, Yvette and Haddow, Barry and Huang, Shujian and Huck, Matthias and Koehn, Philipp and Liu, Qun and Logacheva, Varvara and Monz, Christof and Negri, Matteo and Post, Matt and Rubino, Raphael and Specia, Lucia and Turchi, Marco},\n title = {Findings of the 2017 Conference on Machine Translation (WMT17)},\n booktitle = {Proceedings of the Second Conference on Machine Translation, Volume 2: Shared Task Papers},\n month = {September},\n year = {2017},\n address = {Copenhagen, Denmark},\n publisher = {Association for Computational Linguistics},\n pages = {169--214},\n url = {http://www.aclweb.org/anthology/W17-4717}\n}\n", "homepage": "http://www.statmt.org/wmt17/translation-task.html", "license": "", "features": {"translation": {"languages": ["lv", "en"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": {"input": "lv", "output": "en"}, "task_templates": null, "builder_name": "wmt17", "config_name": "lv-en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 517419100, "num_examples": 3567528, "dataset_name": "wmt17"}, "validation": {"name": "validation", "num_bytes": 544604, "num_examples": 2003, "dataset_name": "wmt17"}, "test": {"name": "test", "num_bytes": 530474, "num_examples": 2001, "dataset_name": "wmt17"}}, "download_checksums": {"https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/leta.v1.zip": {"num_bytes": 2027044, "checksum": "b30b9a729a41dc1bc6cb6867a1bf8367c5a573fbc321e5de6d545280328f7da8"}, "https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/dcep.lv-en.v1.zip": {"num_bytes": 128577127, "checksum": "a387a8bfbc367d4b6a0db6d1f4ea6499ceba4731f17f41d3dcec28c94925b503"}, "https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/books.lv-en.v1.zip": {"num_bytes": 316099, "checksum": "d1092e19cbc10682859360eb777cc0f9cf32698bcb7181b8b22ca6ca570e7fdf"}, "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip": {"num_bytes": 38714274, "checksum": "d796e363740fdc4261aa6f5a3d2f8223e3adaee7d737b7724863325b8956dfd1"}}, "download_size": 169634544, "post_processing_size": null, "dataset_size": 518494178, "size_in_bytes": 688128722}, "ru-en": {"description": "Translate dataset based on the data from statmt.org.\n\nVersions exists for the different years using a combination of multiple data\nsources. 
The base `wmt_translate` allows you to create your own config to choose\nyour own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.\n\n```\nconfig = datasets.wmt.WmtConfig(\n version=\"0.0.1\",\n language_pair=(\"fr\", \"de\"),\n subsets={\n datasets.Split.TRAIN: [\"commoncrawl_frde\"],\n datasets.Split.VALIDATION: [\"euelections_dev2019\"],\n },\n)\nbuilder = datasets.builder(\"wmt_translate\", config=config)\n```\n\n", "citation": "\n@InProceedings{bojar-EtAl:2017:WMT1,\n author = {Bojar, Ond\u000b{r}ej and Chatterjee, Rajen and Federmann, Christian and Graham, Yvette and Haddow, Barry and Huang, Shujian and Huck, Matthias and Koehn, Philipp and Liu, Qun and Logacheva, Varvara and Monz, Christof and Negri, Matteo and Post, Matt and Rubino, Raphael and Specia, Lucia and Turchi, Marco},\n title = {Findings of the 2017 Conference on Machine Translation (WMT17)},\n booktitle = {Proceedings of the Second Conference on Machine Translation, Volume 2: Shared Task Papers},\n month = {September},\n year = {2017},\n address = {Copenhagen, Denmark},\n publisher = {Association for Computational Linguistics},\n pages = {169--214},\n url = {http://www.aclweb.org/anthology/W17-4717}\n}\n", "homepage": "http://www.statmt.org/wmt17/translation-task.html", "license": "", "features": {"translation": {"languages": ["ru", "en"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": {"input": "ru", "output": "en"}, "task_templates": null, "builder_name": "wmt17", "config_name": "ru-en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 11000075522, "num_examples": 24782720, "dataset_name": "wmt17"}, "validation": {"name": "validation", "num_bytes": 1050677, "num_examples": 2998, "dataset_name": "wmt17"}, "test": {"name": "test", "num_bytes": 1040195, "num_examples": 3001, "dataset_name": "wmt17"}}, "download_checksums": {"https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-commoncrawl.zip": {"num_bytes": 918734483, "checksum": "5ffe980072ea29adfd84568d099bea366d9f72772b988e670794ae851b4e5627"}, "https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/training-parallel-nc-v12.zip": {"num_bytes": 168699339, "checksum": "a3e922fd19485a25870e628fdecb81b7d621f545e16df21a38fae15127413122"}, "https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/wiki-titles.zip": {"num_bytes": 9485604, "checksum": "b3134566261b39d830eed345df1be1864039339cfeccf24b1bf86398c9e4a87c"}, "https://huggingface.co/datasets/wmt/uncorpus/resolve/main-zip/UNv1.0.en-ru.zip": {"num_bytes": 2447006960, "checksum": "72c2670fa6aadb36d541cba91cd26b9da291a976bf1a2748177a57baf8261f4c"}, "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip": {"num_bytes": 38714274, "checksum": "d796e363740fdc4261aa6f5a3d2f8223e3adaee7d737b7724863325b8956dfd1"}}, "download_size": 3582640660, "post_processing_size": null, "dataset_size": 11002166394, "size_in_bytes": 14584807054}, "tr-en": {"description": "Translate dataset based on the data from statmt.org.\n\nVersions exists for the different years using a combination of multiple data\nsources. 
The base `wmt_translate` allows you to create your own config to choose\nyour own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.\n\n```\nconfig = datasets.wmt.WmtConfig(\n version=\"0.0.1\",\n language_pair=(\"fr\", \"de\"),\n subsets={\n datasets.Split.TRAIN: [\"commoncrawl_frde\"],\n datasets.Split.VALIDATION: [\"euelections_dev2019\"],\n },\n)\nbuilder = datasets.builder(\"wmt_translate\", config=config)\n```\n\n", "citation": "\n@InProceedings{bojar-EtAl:2017:WMT1,\n author = {Bojar, Ond\u000b{r}ej and Chatterjee, Rajen and Federmann, Christian and Graham, Yvette and Haddow, Barry and Huang, Shujian and Huck, Matthias and Koehn, Philipp and Liu, Qun and Logacheva, Varvara and Monz, Christof and Negri, Matteo and Post, Matt and Rubino, Raphael and Specia, Lucia and Turchi, Marco},\n title = {Findings of the 2017 Conference on Machine Translation (WMT17)},\n booktitle = {Proceedings of the Second Conference on Machine Translation, Volume 2: Shared Task Papers},\n month = {September},\n year = {2017},\n address = {Copenhagen, Denmark},\n publisher = {Association for Computational Linguistics},\n pages = {169--214},\n url = {http://www.aclweb.org/anthology/W17-4717}\n}\n", "homepage": "http://www.statmt.org/wmt17/translation-task.html", "license": "", "features": {"translation": {"languages": ["tr", "en"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": {"input": "tr", "output": "en"}, "task_templates": null, "builder_name": "wmt17", "config_name": "tr-en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 60416617, "num_examples": 205756, "dataset_name": "wmt17"}, "validation": {"name": "validation", "num_bytes": 732436, "num_examples": 3000, "dataset_name": "wmt17"}, "test": {"name": "test", "num_bytes": 752773, "num_examples": 3007, "dataset_name": "wmt17"}}, "download_checksums": {"https://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-tr.tmx.gz": {"num_bytes": 23548787, "checksum": "23581212dc3267383198a92636219fceb3f23207bfc1d1e78ab60a2cb465eff8"}, "https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip": {"num_bytes": 38714274, "checksum": "d796e363740fdc4261aa6f5a3d2f8223e3adaee7d737b7724863325b8956dfd1"}}, "download_size": 62263061, "post_processing_size": null, "dataset_size": 61901826, "size_in_bytes": 124164887}, "zh-en": {"description": "Translate dataset based on the data from statmt.org.\n\nVersions exists for the different years using a combination of multiple data\nsources. 
The base `wmt_translate` allows you to create your own config to choose\nyour own data/language pair by creating a custom `datasets.translate.wmt.WmtConfig`.\n\n```\nconfig = datasets.wmt.WmtConfig(\n version=\"0.0.1\",\n language_pair=(\"fr\", \"de\"),\n subsets={\n datasets.Split.TRAIN: [\"commoncrawl_frde\"],\n datasets.Split.VALIDATION: [\"euelections_dev2019\"],\n },\n)\nbuilder = datasets.builder(\"wmt_translate\", config=config)\n```\n\n", "citation": "\n@InProceedings{bojar-EtAl:2017:WMT1,\n author = {Bojar, Ond\u000b{r}ej and Chatterjee, Rajen and Federmann, Christian and Graham, Yvette and Haddow, Barry and Huang, Shujian and Huck, Matthias and Koehn, Philipp and Liu, Qun and Logacheva, Varvara and Monz, Christof and Negri, Matteo and Post, Matt and Rubino, Raphael and Specia, Lucia and Turchi, Marco},\n title = {Findings of the 2017 Conference on Machine Translation (WMT17)},\n booktitle = {Proceedings of the Second Conference on Machine Translation, Volume 2: Shared Task Papers},\n month = {September},\n year = {2017},\n address = {Copenhagen, Denmark},\n publisher = {Association for Computational Linguistics},\n pages = {169--214},\n url = {http://www.aclweb.org/anthology/W17-4717}\n}\n", "homepage": "http://www.statmt.org/wmt17/translation-task.html", "license": "", "features": {"translation": {"languages": ["zh", "en"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": {"input": "zh", "output": "en"}, "task_templates": null, "builder_name": "wmt17", "config_name": "zh-en", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5529286149, "num_examples": 25134743, "dataset_name": "wmt17"}, "validation": {"name": "validation", "num_bytes": 589591, "num_examples": 2002, "dataset_name": "wmt17"}, "test": {"name": "test", "num_bytes": 540347, "num_examples": 2001, "dataset_name": "wmt17"}}, "download_checksums": {"https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/training-parallel-nc-v12.zip": {"num_bytes": 168699339, "checksum": "a3e922fd19485a25870e628fdecb81b7d621f545e16df21a38fae15127413122"}, "https://huggingface.co/datasets/wmt/uncorpus/resolve/main-zip/UNv1.0.en-zh.zip": {"num_bytes": 1385832125, "checksum": "97f5ce0892084cdbb2332b52ffcc0299a649ba0a43712d921575fe2b7edfb4b4"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main/cwmt-wmt/casia2015.zip": {"num_bytes": 98159063, "checksum": "c939f1528f96c419e9bbffb9caad869616a969e7704ffac896e245a02aff59a9"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main/cwmt-wmt/casict2011.zip": {"num_bytes": 166957775, "checksum": "606adc0ccc5d8fc7c47f8589991286616342a1a379a571ce3038918731ae0182"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main/cwmt-wmt/casict2015.zip": {"num_bytes": 106836569, "checksum": "eef8e25b297c1aff12ab24719247d3588e756d7a4e2c30d4d34fcb4d05ab1050"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main/cwmt-wmt/datum2015.zip": {"num_bytes": 100118018, "checksum": "654afce6731485c40ce856514ab80cd2bfd836126bcaf48cdb911ebc32b021a4"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main/cwmt-wmt/datum2017.zip": {"num_bytes": 99278067, "checksum": "737455c139596f4abf3b1da73bc38932b3ef9534549328eff47d867e29950ed2"}, "https://huggingface.co/datasets/wmt/wmt18/resolve/main/cwmt-wmt/neu2017.zip": {"num_bytes": 150311715, "checksum": "5c5ea9ac5cbc43c974bd53796a3a29829800865b6398b52cda0a3854cb0d2e03"}, 
"https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip": {"num_bytes": 38714274, "checksum": "d796e363740fdc4261aa6f5a3d2f8223e3adaee7d737b7724863325b8956dfd1"}}, "download_size": 2314906945, "post_processing_size": null, "dataset_size": 5530416087, "size_in_bytes": 7845323032}}
dummy/cs-en/1.0.0/dummy_data.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f37a69bb24dc976e3f647a05baea047a68e5caae41db2c95052db9b653264b40
- size 5906
+ oid sha256:b49de84b7f148d15fdaca2fdb54bc87ceb8daa37ad7150a6689968205f9e4855
+ size 7591
wmt_utils.py CHANGED
@@ -96,7 +96,7 @@ class SubDataset:
  def _inject_language(self, src, strings):
  """Injects languages into (potentially) template strings."""
  if src not in self.sources:
- raise ValueError("Invalid source for '{0}': {1}".format(self.name, src))
+ raise ValueError(f"Invalid source for '{self.name}': {src}")
 
  def _format_string(s):
  if "{0}" in s and "{1}" and "{src}" in s:
@@ -127,7 +127,7 @@ _TRAIN_SUBSETS = [
  name="commoncrawl",
  target="en", # fr-de pair in commoncrawl_frde
  sources={"cs", "de", "es", "fr", "ru"},
- url="https://huggingface.co/datasets/wmt/wmt13/resolve/main/training-parallel-commoncrawl.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-commoncrawl.zip",
  path=("commoncrawl.{src}-en.{src}", "commoncrawl.{src}-en.en"),
  ),
  SubDataset(
@@ -184,14 +184,14 @@ _TRAIN_SUBSETS = [
  name="dcep_v1",
  target="en",
  sources={"lv"},
- url="https://huggingface.co/datasets/wmt/wmt17/resolve/main/translation-task/dcep.lv-en.v1.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/dcep.lv-en.v1.zip",
  path=("dcep.en-lv/dcep.lv", "dcep.en-lv/dcep.en"),
  ),
  SubDataset(
  name="europarl_v7",
  target="en",
  sources={"cs", "de", "es", "fr"},
- url="https://huggingface.co/datasets/wmt/wmt13/resolve/main/training-parallel-europarl-v7.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-europarl-v7.zip",
  path=("training/europarl-v7.{src}-en.{src}", "training/europarl-v7.{src}-en.en"),
  ),
  SubDataset(
@@ -208,14 +208,14 @@ _TRAIN_SUBSETS = [
  name="europarl_v8_18",
  target="en",
  sources={"et", "fi"},
- url="https://huggingface.co/datasets/wmt/wmt18/resolve/main/translation-task/training-parallel-ep-v8.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-ep-v8.zip",
  path=("training/europarl-v8.{src}-en.{src}", "training/europarl-v8.{src}-en.en"),
  ),
  SubDataset(
  name="europarl_v8_16",
  target="en",
  sources={"fi", "ro"},
- url="https://huggingface.co/datasets/wmt/wmt16/resolve/main/translation-task/training-parallel-ep-v8.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt16/resolve/main-zip/translation-task/training-parallel-ep-v8.zip",
  path=("training-parallel-ep-v8/europarl-v8.{src}-en.{src}", "training-parallel-ep-v8/europarl-v8.{src}-en.en"),
  ),
  SubDataset(
@@ -229,7 +229,7 @@ _TRAIN_SUBSETS = [
  name="gigafren",
  target="en",
  sources={"fr"},
- url="https://huggingface.co/datasets/wmt/wmt10/resolve/main/training-giga-fren.tar",
+ url="https://huggingface.co/datasets/wmt/wmt10/resolve/main-zip/training-giga-fren.zip",
  path=("giga-fren.release2.fixed.fr.gz", "giga-fren.release2.fixed.en.gz"),
  ),
  SubDataset(
@@ -244,35 +244,35 @@ _TRAIN_SUBSETS = [
  name="leta_v1",
  target="en",
  sources={"lv"},
- url="https://huggingface.co/datasets/wmt/wmt17/resolve/main/translation-task/leta.v1.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/leta.v1.zip",
  path=("LETA-lv-en/leta.lv", "LETA-lv-en/leta.en"),
  ),
  SubDataset(
  name="multiun",
  target="en",
  sources={"es", "fr"},
- url="https://huggingface.co/datasets/wmt/wmt13/resolve/main/training-parallel-un.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-un.zip",
  path=("un/undoc.2000.{src}-en.{src}", "un/undoc.2000.{src}-en.en"),
  ),
  SubDataset(
  name="newscommentary_v9",
  target="en",
  sources={"cs", "de", "fr", "ru"},
- url="https://huggingface.co/datasets/wmt/wmt14/resolve/main/training-parallel-nc-v9.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt14/resolve/main-zip/training-parallel-nc-v9.zip",
  path=("training/news-commentary-v9.{src}-en.{src}", "training/news-commentary-v9.{src}-en.en"),
  ),
  SubDataset(
  name="newscommentary_v10",
  target="en",
  sources={"cs", "de", "fr", "ru"},
- url="https://huggingface.co/datasets/wmt/wmt15/resolve/main/training-parallel-nc-v10.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/training-parallel-nc-v10.zip",
  path=("news-commentary-v10.{src}-en.{src}", "news-commentary-v10.{src}-en.en"),
  ),
  SubDataset(
  name="newscommentary_v11",
  target="en",
  sources={"cs", "de", "ru"},
- url="https://huggingface.co/datasets/wmt/wmt16/resolve/main/translation-task/training-parallel-nc-v11.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt16/resolve/main-zip/translation-task/training-parallel-nc-v11.zip",
  path=(
  "training-parallel-nc-v11/news-commentary-v11.{src}-en.{src}",
  "training-parallel-nc-v11/news-commentary-v11.{src}-en.en",
@@ -282,14 +282,14 @@ _TRAIN_SUBSETS = [
  name="newscommentary_v12",
  target="en",
  sources={"cs", "de", "ru", "zh"},
- url="https://huggingface.co/datasets/wmt/wmt17/resolve/main/translation-task/training-parallel-nc-v12.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/training-parallel-nc-v12.zip",
  path=("training/news-commentary-v12.{src}-en.{src}", "training/news-commentary-v12.{src}-en.en"),
  ),
  SubDataset(
  name="newscommentary_v13",
  target="en",
  sources={"cs", "de", "ru", "zh"},
- url="https://huggingface.co/datasets/wmt/wmt18/resolve/main/translation-task/training-parallel-nc-v13.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-nc-v13.zip",
  path=(
  "training-parallel-nc-v13/news-commentary-v13.{src}-en.{src}",
  "training-parallel-nc-v13/news-commentary-v13.{src}-en.en",
@@ -313,14 +313,14 @@ _TRAIN_SUBSETS = [
  name="onlinebooks_v1",
  target="en",
  sources={"lv"},
- url="https://huggingface.co/datasets/wmt/wmt17/resolve/main/translation-task/books.lv-en.v1.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/books.lv-en.v1.zip",
  path=("farewell/farewell.lv", "farewell/farewell.en"),
  ),
  SubDataset(
  name="paracrawl_v1",
  target="en",
  sources={"cs", "de", "et", "fi", "ru"},
- url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-{src}.zipporah0-dedup-clean.tgz",
+ url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-{src}.zipporah0-dedup-clean.tgz", # TODO(QL): use gzip for streaming
  path=(
  "paracrawl-release1.en-{src}.zipporah0-dedup-clean.{src}",
  "paracrawl-release1.en-{src}.zipporah0-dedup-clean.en",
@@ -330,7 +330,7 @@ _TRAIN_SUBSETS = [
  name="paracrawl_v1_ru",
  target="en",
  sources={"ru"},
- url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz",
+ url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz", # TODO(QL): use gzip for streaming
  path=(
  "paracrawl-release1.en-ru.zipporah0-dedup-clean.ru",
  "paracrawl-release1.en-ru.zipporah0-dedup-clean.en",
@@ -357,7 +357,7 @@ _TRAIN_SUBSETS = [
  name="rapid_2016",
  target="en",
  sources={"de", "et", "fi"},
- url="https://huggingface.co/datasets/wmt/wmt18/resolve/main/translation-task/rapid2016.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/rapid2016.zip",
  path=("rapid2016.{0}-{1}.{src}", "rapid2016.{0}-{1}.en"),
  ),
  SubDataset(
@@ -385,21 +385,21 @@ _TRAIN_SUBSETS = [
  name="uncorpus_v1",
  target="en",
  sources={"ru", "zh"},
- url="https://huggingface.co/datasets/wmt/uncorpus/resolve/main/UNv1.0.en-{src}.tar.gz",
+ url="https://huggingface.co/datasets/wmt/uncorpus/resolve/main-zip/UNv1.0.en-{src}.zip",
  path=("en-{src}/UNv1.0.en-{src}.{src}", "en-{src}/UNv1.0.en-{src}.en"),
  ),
  SubDataset(
  name="wikiheadlines_fi",
  target="en",
  sources={"fi"},
- url="https://huggingface.co/datasets/wmt/wmt15/resolve/main/wiki-titles.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/wiki-titles.zip",
  path="wiki/fi-en/titles.fi-en",
  ),
  SubDataset(
  name="wikiheadlines_hi",
  target="en",
  sources={"hi"},
- url="https://huggingface.co/datasets/wmt/wmt14/resolve/main/wiki-titles.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt14/resolve/main-zip/wiki-titles.zip",
  path="wiki/hi-en/wiki-titles.hi-en",
  ),
  SubDataset(
@@ -407,7 +407,7 @@ _TRAIN_SUBSETS = [
  name="wikiheadlines_ru",
  target="en",
  sources={"ru"},
- url="https://huggingface.co/datasets/wmt/wmt15/resolve/main/wiki-titles.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/wiki-titles.zip",
  path="wiki/ru-en/wiki.ru-en",
  ),
  SubDataset(
@@ -431,7 +431,7 @@ _TRAIN_SUBSETS = [
  name=ss,
  target="en",
  sources={"zh"},
- url="ftp://cwmt-wmt:cwmt-wmt@datasets.nju.edu.cn/parallel/%s.zip" % ss,
+ url="https://huggingface.co/datasets/wmt/wmt18/resolve/main/cwmt-wmt/%s.zip" % ss,
  path=("%s/*_c[hn].txt" % ss, "%s/*_en.txt" % ss),
  )
  for ss in CWMT_SUBSET_NAMES
@@ -442,175 +442,175 @@ _DEV_SUBSETS = [
  name="euelections_dev2019",
  target="de",
  sources={"fr"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/euelections_dev2019.fr-de.src.fr", "dev/euelections_dev2019.fr-de.tgt.de"),
  ),
  SubDataset(
  name="newsdev2014",
  target="en",
  sources={"hi"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdev2014.hi", "dev/newsdev2014.en"),
  ),
  SubDataset(
  name="newsdev2015",
  target="en",
  sources={"fi"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdev2015-fien-src.{src}.sgm", "dev/newsdev2015-fien-ref.en.sgm"),
  ),
  SubDataset(
  name="newsdiscussdev2015",
  target="en",
  sources={"ro", "tr"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newsdev2016",
  target="en",
  sources={"ro", "tr"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdev2016-{src}en-src.{src}.sgm", "dev/newsdev2016-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newsdev2017",
  target="en",
  sources={"lv", "zh"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdev2017-{src}en-src.{src}.sgm", "dev/newsdev2017-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newsdev2018",
  target="en",
  sources={"et"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdev2018-{src}en-src.{src}.sgm", "dev/newsdev2018-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newsdev2019",
  target="en",
  sources={"gu", "kk", "lt"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdev2019-{src}en-src.{src}.sgm", "dev/newsdev2019-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newsdiscussdev2015",
  target="en",
  sources={"fr"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newsdiscusstest2015",
  target="en",
  sources={"fr"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newssyscomb2009",
  target="en",
  sources={"cs", "de", "es", "fr"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newssyscomb2009.{src}", "dev/newssyscomb2009.en"),
  ),
  SubDataset(
  name="newstest2008",
  target="en",
  sources={"cs", "de", "es", "fr", "hu"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/news-test2008.{src}", "dev/news-test2008.en"),
  ),
  SubDataset(
  name="newstest2009",
  target="en",
  sources={"cs", "de", "es", "fr"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2009.{src}", "dev/newstest2009.en"),
  ),
  SubDataset(
  name="newstest2010",
  target="en",
  sources={"cs", "de", "es", "fr"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2010.{src}", "dev/newstest2010.en"),
  ),
  SubDataset(
  name="newstest2011",
  target="en",
  sources={"cs", "de", "es", "fr"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2011.{src}", "dev/newstest2011.en"),
  ),
  SubDataset(
  name="newstest2012",
  target="en",
  sources={"cs", "de", "es", "fr", "ru"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2012.{src}", "dev/newstest2012.en"),
  ),
  SubDataset(
  name="newstest2013",
  target="en",
  sources={"cs", "de", "es", "fr", "ru"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2013.{src}", "dev/newstest2013.en"),
  ),
  SubDataset(
  name="newstest2014",
  target="en",
  sources={"cs", "de", "es", "fr", "hi", "ru"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2014-{src}en-src.{src}.sgm", "dev/newstest2014-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newstest2015",
  target="en",
  sources={"cs", "de", "fi", "ru"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2015-{src}en-src.{src}.sgm", "dev/newstest2015-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newsdiscusstest2015",
  target="en",
  sources={"fr"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newstest2016",
  target="en",
  sources={"cs", "de", "fi", "ro", "ru", "tr"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2016-{src}en-src.{src}.sgm", "dev/newstest2016-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newstestB2016",
  target="en",
  sources={"fi"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstestB2016-enfi-ref.{src}.sgm", "dev/newstestB2016-enfi-src.en.sgm"),
  ),
  SubDataset(
  name="newstest2017",
  target="en",
  sources={"cs", "de", "fi", "lv", "ru", "tr", "zh"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2017-{src}en-src.{src}.sgm", "dev/newstest2017-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newstestB2017",
  target="en",
  sources={"fi"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstestB2017-fien-src.fi.sgm", "dev/newstestB2017-fien-ref.en.sgm"),
  ),
  SubDataset(
  name="newstest2018",
  target="en",
  sources={"cs", "de", "et", "fi", "ru", "tr", "zh"},
- url="https://huggingface.co/datasets/wmt/wmt19/resolve/main/translation-task/dev.tgz",
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2018-{src}en-src.{src}.sgm", "dev/newstest2018-{src}en-ref.en.sgm"),
  ),
  ]
@@ -658,9 +658,7 @@ class WmtConfig(datasets.BuilderConfig):
  # TODO(PVP): remove when manual dir works
  # +++++++++++++++++++++
  if language_pair[1] in ["cs", "hi", "ru"]:
- assert NotImplementedError(
- "The dataset for {}-en is currently not fully supported.".format(language_pair[1])
- )
  # +++++++++++++++++++++
 
 
@@ -730,7 +728,7 @@ class Wmt(ABC, datasets.GeneratorBasedBuilder):
  if dataset.get_manual_dl_files(source):
  # TODO(PVP): following two lines skip configs that are incomplete for now
  # +++++++++++++++++++++
- logger.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
  continue
  # +++++++++++++++++++++
 
@@ -741,9 +739,7 @@ class Wmt(ABC, datasets.GeneratorBasedBuilder):
  ]
  assert all(
  os.path.exists(path) for path in manual_paths
- ), "For {0}, you must manually download the following file(s) from {1} and place them in {2}: {3}".format(
- dataset.name, dataset.get_url(source), dl_manager.manual_dir, ", ".join(manual_dl_files)
- )
 
  # set manual path for correct subset
  manual_paths_dict[ss_name] = manual_paths
@@ -779,24 +775,36 @@ class Wmt(ABC, datasets.GeneratorBasedBuilder):
  for ex_dir, rel_path in zip(extract_dirs, rel_paths)
  ]
 
  for ss_name in split_subsets:
  # TODO(PVP) remove following five lines when manual data works
  # +++++++++++++++++++++
  dataset = DATASET_MAP[ss_name]
  source, _ = self.config.language_pair
  if dataset.get_manual_dl_files(source):
- logger.info("Skipping {} for now. Incomplete dataset for {}".format(dataset.name, self.config.name))
  continue
  # +++++++++++++++++++++
 
  logger.info("Generating examples from: %s", ss_name)
  dataset = DATASET_MAP[ss_name]
  extract_dirs = extraction_map[ss_name]
  files = _get_local_paths(dataset, extract_dirs)
 
  if ss_name.startswith("czeng"):
  if ss_name.endswith("16pre"):
  sub_generator = functools.partial(_parse_tsv, language_pair=("en", "cs"))
  elif ss_name.endswith("17"):
  filter_path = _get_local_paths(_CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
  sub_generator = functools.partial(_parse_czeng, filter_path=filter_path)
@@ -809,18 +817,21 @@ class Wmt(ABC, datasets.GeneratorBasedBuilder):
  sub_generator = _parse_frde_bitext
  else:
  sub_generator = _parse_parallel_sentences
  elif len(files) == 1:
- fname = files[0]
  # Note: Due to formatting used by `download_manager`, the file
  # extension may not be at the end of the file path.
  if ".tsv" in fname:
  sub_generator = _parse_tsv
  elif (
  ss_name.startswith("newscommentary_v14")
  or ss_name.startswith("europarl_v9")
  or ss_name.startswith("wikititles_v1")
  ):
  sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair)
  elif "tmx" in fname or ss_name.startswith("paracrawl_v3"):
  sub_generator = _parse_tmx
  elif ss_name.startswith("wikiheadlines"):
@@ -830,28 +841,33 @@ class Wmt(ABC, datasets.GeneratorBasedBuilder):
  else:
  raise ValueError("Invalid number of files: %d" % len(files))
 
- for sub_key, ex in sub_generator(*files):
  if not all(ex.values()):
  continue
  # TODO(adarob): Add subset feature.
  # ex["subset"] = subset
- key = "{}/{}".format(ss_name, sub_key)
  if with_translation is True:
  ex = {"translation": ex}
  yield key, ex
 
 
- def _parse_parallel_sentences(f1, f2):
  """Returns examples from parallel SGML or text files, which may be gzipped."""
 
- def _parse_text(path):
  """Returns the sentences from a single text file, which may be gzipped."""
- split_path = path.split(".")
 
  if split_path[-1] == "gz":
  lang = split_path[-2]
- with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g:
- return g.read().decode("utf-8").split("\n"), lang
 
  if split_path[-1] == "txt":
  # CWMT
@@ -859,25 +875,32 @@ def _parse_parallel_sentences(f1, f2):
  lang = "zh" if lang in ("ch", "cn") else lang
  else:
  lang = split_path[-1]
- with open(path, "rb") as f:
- return f.read().decode("utf-8").split("\n"), lang
 
- def _parse_sgm(path):
  """Returns sentences from a single SGML file."""
- lang = path.split(".")[-2]
- sentences = []
  # Note: We can't use the XML parser since some of the files are badly
  # formatted.
  seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
- with open(path, encoding="utf-8") as f:
- for line in f:
- seg_match = re.match(seg_re, line)
- if seg_match:
- assert len(seg_match.groups()) == 1
- sentences.append(seg_match.groups()[0])
- return sentences, lang
 
- parse_file = _parse_sgm if f1.endswith(".sgm") else _parse_text
 
  # Some datasets (e.g., CWMT) contain multiple parallel files specified with
  # a wildcard. We sort both sets to align them and parse them one by one.
@@ -893,34 +916,19 @@ def _parse_parallel_sentences(f1, f2):
  )
 
  for f_id, (f1_i, f2_i) in enumerate(zip(sorted(f1_files), sorted(f2_files))):
- l1_sentences, l1 = parse_file(f1_i)
- l2_sentences, l2 = parse_file(f2_i)
-
- assert len(l1_sentences) == len(l2_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
- len(l1_sentences),
- len(l2_sentences),
- f1_i,
- f2_i,
- )
 
  for line_id, (s1, s2) in enumerate(zip(l1_sentences, l2_sentences)):
- key = "{}/{}".format(f_id, line_id)
  yield key, {l1: s1, l2: s2}
 
 
  def _parse_frde_bitext(fr_path, de_path):
- with open(fr_path, encoding="utf-8") as f:
- fr_sentences = f.read().split("\n")
- with open(de_path, encoding="utf-8") as f:
- de_sentences = f.read().split("\n")
- assert len(fr_sentences) == len(de_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
- len(fr_sentences),
- len(de_sentences),
- fr_path,
- de_path,
- )
- for line_id, (s1, s2) in enumerate(zip(fr_sentences, de_sentences)):
- yield line_id, {"fr": s1, "de": s2}
 
 
  def _parse_tmx(path):
@@ -946,11 +954,11 @@ def _parse_tmx(path):
  elem.clear()
 
 
- def _parse_tsv(path, language_pair=None):
  """Generates examples from TSV file."""
  if language_pair is None:
- lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path)
- assert lang_match is not None, "Invalid TSV filename: %s" % path
  l1, l2 = lang_match.groups()
  else:
  l1, l2 = language_pair
@@ -997,7 +1005,7 @@ def _parse_czeng(*paths, **kwargs):
  block_match = re.match(re_block, id_)
  if block_match and block_match.groups()[0] in bad_blocks:
  continue
- sub_key = "{}/{}".format(filename, line_id)
  yield sub_key, {
  "cs": cs.strip(),
  "en": en.strip(),
 
96
  def _inject_language(self, src, strings):
97
  """Injects languages into (potentially) template strings."""
98
  if src not in self.sources:
99
+ raise ValueError(f"Invalid source for '{self.name}': {src}")
100
 
101
  def _format_string(s):
102
  if "{0}" in s and "{1}" and "{src}" in s:
 
127
  name="commoncrawl",
128
  target="en", # fr-de pair in commoncrawl_frde
129
  sources={"cs", "de", "es", "fr", "ru"},
130
+ url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-commoncrawl.zip",
131
  path=("commoncrawl.{src}-en.{src}", "commoncrawl.{src}-en.en"),
132
  ),
133
  SubDataset(
 
184
  name="dcep_v1",
185
  target="en",
186
  sources={"lv"},
187
+ url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/dcep.lv-en.v1.zip",
188
  path=("dcep.en-lv/dcep.lv", "dcep.en-lv/dcep.en"),
189
  ),
190
  SubDataset(
191
  name="europarl_v7",
192
  target="en",
193
  sources={"cs", "de", "es", "fr"},
194
+ url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-europarl-v7.zip",
195
  path=("training/europarl-v7.{src}-en.{src}", "training/europarl-v7.{src}-en.en"),
196
  ),
197
  SubDataset(
 
208
  name="europarl_v8_18",
209
  target="en",
210
  sources={"et", "fi"},
211
+ url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-ep-v8.zip",
212
  path=("training/europarl-v8.{src}-en.{src}", "training/europarl-v8.{src}-en.en"),
213
  ),
214
  SubDataset(
215
  name="europarl_v8_16",
216
  target="en",
217
  sources={"fi", "ro"},
218
+ url="https://huggingface.co/datasets/wmt/wmt16/resolve/main-zip/translation-task/training-parallel-ep-v8.zip",
219
  path=("training-parallel-ep-v8/europarl-v8.{src}-en.{src}", "training-parallel-ep-v8/europarl-v8.{src}-en.en"),
220
  ),
221
  SubDataset(
 
229
  name="gigafren",
230
  target="en",
231
  sources={"fr"},
232
+ url="https://huggingface.co/datasets/wmt/wmt10/resolve/main-zip/training-giga-fren.zip",
233
  path=("giga-fren.release2.fixed.fr.gz", "giga-fren.release2.fixed.en.gz"),
234
  ),
235
  SubDataset(
 
244
  name="leta_v1",
245
  target="en",
246
  sources={"lv"},
247
+ url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/leta.v1.zip",
248
  path=("LETA-lv-en/leta.lv", "LETA-lv-en/leta.en"),
249
  ),
250
  SubDataset(
251
  name="multiun",
252
  target="en",
253
  sources={"es", "fr"},
254
+ url="https://huggingface.co/datasets/wmt/wmt13/resolve/main-zip/training-parallel-un.zip",
255
  path=("un/undoc.2000.{src}-en.{src}", "un/undoc.2000.{src}-en.en"),
256
  ),
257
  SubDataset(
258
  name="newscommentary_v9",
259
  target="en",
260
  sources={"cs", "de", "fr", "ru"},
261
+ url="https://huggingface.co/datasets/wmt/wmt14/resolve/main-zip/training-parallel-nc-v9.zip",
262
  path=("training/news-commentary-v9.{src}-en.{src}", "training/news-commentary-v9.{src}-en.en"),
263
  ),
264
  SubDataset(
265
  name="newscommentary_v10",
266
  target="en",
267
  sources={"cs", "de", "fr", "ru"},
268
+ url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/training-parallel-nc-v10.zip",
269
  path=("news-commentary-v10.{src}-en.{src}", "news-commentary-v10.{src}-en.en"),
270
  ),
271
  SubDataset(
272
  name="newscommentary_v11",
273
  target="en",
274
  sources={"cs", "de", "ru"},
275
+ url="https://huggingface.co/datasets/wmt/wmt16/resolve/main-zip/translation-task/training-parallel-nc-v11.zip",
276
  path=(
277
  "training-parallel-nc-v11/news-commentary-v11.{src}-en.{src}",
278
  "training-parallel-nc-v11/news-commentary-v11.{src}-en.en",
 
282
  name="newscommentary_v12",
283
  target="en",
284
  sources={"cs", "de", "ru", "zh"},
285
+ url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/training-parallel-nc-v12.zip",
286
  path=("training/news-commentary-v12.{src}-en.{src}", "training/news-commentary-v12.{src}-en.en"),
287
  ),
288
  SubDataset(
289
  name="newscommentary_v13",
290
  target="en",
291
  sources={"cs", "de", "ru", "zh"},
292
+ url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/training-parallel-nc-v13.zip",
293
  path=(
294
  "training-parallel-nc-v13/news-commentary-v13.{src}-en.{src}",
295
  "training-parallel-nc-v13/news-commentary-v13.{src}-en.en",
 
313
  name="onlinebooks_v1",
314
  target="en",
315
  sources={"lv"},
316
+ url="https://huggingface.co/datasets/wmt/wmt17/resolve/main-zip/translation-task/books.lv-en.v1.zip",
317
  path=("farewell/farewell.lv", "farewell/farewell.en"),
318
  ),
319
  SubDataset(
320
  name="paracrawl_v1",
321
  target="en",
322
  sources={"cs", "de", "et", "fi", "ru"},
323
+ url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-{src}.zipporah0-dedup-clean.tgz", # TODO(QL): use gzip for streaming
324
  path=(
325
  "paracrawl-release1.en-{src}.zipporah0-dedup-clean.{src}",
326
  "paracrawl-release1.en-{src}.zipporah0-dedup-clean.en",
 
330
  name="paracrawl_v1_ru",
331
  target="en",
332
  sources={"ru"},
333
+ url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz", # TODO(QL): use gzip for streaming
334
  path=(
335
  "paracrawl-release1.en-ru.zipporah0-dedup-clean.ru",
336
  "paracrawl-release1.en-ru.zipporah0-dedup-clean.en",
 
357
  name="rapid_2016",
358
  target="en",
359
  sources={"de", "et", "fi"},
360
+ url="https://huggingface.co/datasets/wmt/wmt18/resolve/main-zip/translation-task/rapid2016.zip",
361
  path=("rapid2016.{0}-{1}.{src}", "rapid2016.{0}-{1}.en"),
362
  ),
363
  SubDataset(
 
385
  name="uncorpus_v1",
386
  target="en",
387
  sources={"ru", "zh"},
388
+ url="https://huggingface.co/datasets/wmt/uncorpus/resolve/main-zip/UNv1.0.en-{src}.zip",
389
  path=("en-{src}/UNv1.0.en-{src}.{src}", "en-{src}/UNv1.0.en-{src}.en"),
390
  ),
391
  SubDataset(
392
  name="wikiheadlines_fi",
393
  target="en",
394
  sources={"fi"},
395
+ url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/wiki-titles.zip",
396
  path="wiki/fi-en/titles.fi-en",
397
  ),
398
  SubDataset(
399
  name="wikiheadlines_hi",
400
  target="en",
401
  sources={"hi"},
402
+ url="https://huggingface.co/datasets/wmt/wmt14/resolve/main-zip/wiki-titles.zip",
403
  path="wiki/hi-en/wiki-titles.hi-en",
404
  ),
405
  SubDataset(
 
407
  name="wikiheadlines_ru",
408
  target="en",
409
  sources={"ru"},
410
+ url="https://huggingface.co/datasets/wmt/wmt15/resolve/main-zip/wiki-titles.zip",
411
  path="wiki/ru-en/wiki.ru-en",
412
  ),
413
  SubDataset(
 
431
  name=ss,
432
  target="en",
433
  sources={"zh"},
434
+ url="https://huggingface.co/datasets/wmt/wmt18/resolve/main/cwmt-wmt/%s.zip" % ss,
435
  path=("%s/*_c[hn].txt" % ss, "%s/*_en.txt" % ss),
436
  )
437
  for ss in CWMT_SUBSET_NAMES
 
  name="euelections_dev2019",
  target="de",
  sources={"fr"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/euelections_dev2019.fr-de.src.fr", "dev/euelections_dev2019.fr-de.tgt.de"),
  ),
  SubDataset(
  name="newsdev2014",
  target="en",
  sources={"hi"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdev2014.hi", "dev/newsdev2014.en"),
  ),
  SubDataset(
  name="newsdev2015",
  target="en",
  sources={"fi"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdev2015-fien-src.{src}.sgm", "dev/newsdev2015-fien-ref.en.sgm"),
  ),
  SubDataset(
  name="newsdiscussdev2015",
  target="en",
  sources={"ro", "tr"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newsdev2016",
  target="en",
  sources={"ro", "tr"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdev2016-{src}en-src.{src}.sgm", "dev/newsdev2016-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newsdev2017",
  target="en",
  sources={"lv", "zh"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdev2017-{src}en-src.{src}.sgm", "dev/newsdev2017-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newsdev2018",
  target="en",
  sources={"et"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdev2018-{src}en-src.{src}.sgm", "dev/newsdev2018-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newsdev2019",
  target="en",
  sources={"gu", "kk", "lt"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdev2019-{src}en-src.{src}.sgm", "dev/newsdev2019-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newsdiscussdev2015",
  target="en",
  sources={"fr"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newsdiscusstest2015",
  target="en",
  sources={"fr"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newssyscomb2009",
  target="en",
  sources={"cs", "de", "es", "fr"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newssyscomb2009.{src}", "dev/newssyscomb2009.en"),
  ),
  SubDataset(
  name="newstest2008",
  target="en",
  sources={"cs", "de", "es", "fr", "hu"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/news-test2008.{src}", "dev/news-test2008.en"),
  ),
  SubDataset(
  name="newstest2009",
  target="en",
  sources={"cs", "de", "es", "fr"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2009.{src}", "dev/newstest2009.en"),
  ),
  SubDataset(
  name="newstest2010",
  target="en",
  sources={"cs", "de", "es", "fr"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2010.{src}", "dev/newstest2010.en"),
  ),
  SubDataset(
  name="newstest2011",
  target="en",
  sources={"cs", "de", "es", "fr"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2011.{src}", "dev/newstest2011.en"),
  ),
  SubDataset(
  name="newstest2012",
  target="en",
  sources={"cs", "de", "es", "fr", "ru"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2012.{src}", "dev/newstest2012.en"),
  ),
  SubDataset(
  name="newstest2013",
  target="en",
  sources={"cs", "de", "es", "fr", "ru"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2013.{src}", "dev/newstest2013.en"),
  ),
  SubDataset(
  name="newstest2014",
  target="en",
  sources={"cs", "de", "es", "fr", "hi", "ru"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2014-{src}en-src.{src}.sgm", "dev/newstest2014-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newstest2015",
  target="en",
  sources={"cs", "de", "fi", "ru"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2015-{src}en-src.{src}.sgm", "dev/newstest2015-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newsdiscusstest2015",
  target="en",
  sources={"fr"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newstest2016",
  target="en",
  sources={"cs", "de", "fi", "ro", "ru", "tr"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2016-{src}en-src.{src}.sgm", "dev/newstest2016-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newstestB2016",
  target="en",
  sources={"fi"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstestB2016-enfi-ref.{src}.sgm", "dev/newstestB2016-enfi-src.en.sgm"),
  ),
  SubDataset(
  name="newstest2017",
  target="en",
  sources={"cs", "de", "fi", "lv", "ru", "tr", "zh"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2017-{src}en-src.{src}.sgm", "dev/newstest2017-{src}en-ref.en.sgm"),
  ),
  SubDataset(
  name="newstestB2017",
  target="en",
  sources={"fi"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstestB2017-fien-src.fi.sgm", "dev/newstestB2017-fien-ref.en.sgm"),
  ),
  SubDataset(
  name="newstest2018",
  target="en",
  sources={"cs", "de", "et", "fi", "ru", "tr", "zh"},
+ url="https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip",
  path=("dev/newstest2018-{src}en-src.{src}.sgm", "dev/newstest2018-{src}en-ref.en.sgm"),
  ),
  ]
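For orientation, a hedged sketch of how one of the entries above might resolve for a concrete source language, assuming (as their use in `_generate_examples` below suggests) that `get_url` and `get_path` substitute the `{src}` placeholder and return lists:

```
ds = DATASET_MAP["newsdev2017"]
ds.get_url("lv")   # ["https://huggingface.co/datasets/wmt/wmt19/resolve/main-zip/translation-task/dev.zip"]
ds.get_path("lv")  # ["dev/newsdev2017-lven-src.lv.sgm", "dev/newsdev2017-lven-ref.en.sgm"]
```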
 
  # TODO(PVP): remove when manual dir works
  # +++++++++++++++++++++
  if language_pair[1] in ["cs", "hi", "ru"]:
+ raise NotImplementedError(f"The dataset for {language_pair[1]}-en is currently not fully supported.")
  # +++++++++++++++++++++

 
  if dataset.get_manual_dl_files(source):
  # TODO(PVP): following two lines skip configs that are incomplete for now
  # +++++++++++++++++++++
+ logger.info(f"Skipping {dataset.name} for now. Incomplete dataset for {self.config.name}")
  continue
  # +++++++++++++++++++++
 
 
  ]
  assert all(
  os.path.exists(path) for path in manual_paths
+ ), f"For {dataset.name}, you must manually download the following file(s) from {dataset.get_url(source)} and place them in {dl_manager.manual_dir}: {', '.join(manual_dl_files)}"

  # set manual path for correct subset
  manual_paths_dict[ss_name] = manual_paths
 
  for ex_dir, rel_path in zip(extract_dirs, rel_paths)
  ]
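+ # Recover the expected file names for each subset: prefer the relative path
+ # inside the archive, else fall back to the URL's basename. The parsers need
+ # these because the local paths returned by the download manager may not
+ # preserve the original file extension (see the note below).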
 
+ def _get_filenames(dataset):
+ rel_paths = dataset.get_path(source)
+ urls = dataset.get_url(source)
+ if len(urls) == 1:
+ urls = urls * len(rel_paths)
+ return [rel_path if rel_path else os.path.basename(url) for url, rel_path in zip(urls, rel_paths)]
+
  for ss_name in split_subsets:
  # TODO(PVP) remove following five lines when manual data works
  # +++++++++++++++++++++
  dataset = DATASET_MAP[ss_name]
  source, _ = self.config.language_pair
  if dataset.get_manual_dl_files(source):
+ logger.info(f"Skipping {dataset.name} for now. Incomplete dataset for {self.config.name}")
  continue
  # +++++++++++++++++++++
 
  logger.info("Generating examples from: %s", ss_name)
  dataset = DATASET_MAP[ss_name]
  extract_dirs = extraction_map[ss_name]
  files = _get_local_paths(dataset, extract_dirs)
+ filenames = _get_filenames(dataset)
+
+ sub_generator_args = tuple(files)
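+ # Each branch below selects a parser; parsers that need the original file
+ # names (the TSV and parallel-sentence parsers) get `filenames` appended to
+ # their positional arguments.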
 
  if ss_name.startswith("czeng"):
  if ss_name.endswith("16pre"):
  sub_generator = functools.partial(_parse_tsv, language_pair=("en", "cs"))
+ sub_generator_args += tuple(filenames)
  elif ss_name.endswith("17"):
  filter_path = _get_local_paths(_CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0]
  sub_generator = functools.partial(_parse_czeng, filter_path=filter_path)
 
  sub_generator = _parse_frde_bitext
  else:
  sub_generator = _parse_parallel_sentences
+ sub_generator_args += tuple(filenames)
  elif len(files) == 1:
+ fname = filenames[0]
  # Note: Due to formatting used by `download_manager`, the file
  # extension may not be at the end of the file path.
  if ".tsv" in fname:
  sub_generator = _parse_tsv
+ sub_generator_args += tuple(filenames)
  elif (
  ss_name.startswith("newscommentary_v14")
  or ss_name.startswith("europarl_v9")
  or ss_name.startswith("wikititles_v1")
  ):
  sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair)
+ sub_generator_args += tuple(filenames)
  elif "tmx" in fname or ss_name.startswith("paracrawl_v3"):
  sub_generator = _parse_tmx
  elif ss_name.startswith("wikiheadlines"):
 
  else:
  raise ValueError("Invalid number of files: %d" % len(files))

+ for sub_key, ex in sub_generator(*sub_generator_args):
  if not all(ex.values()):
  continue
  # TODO(adarob): Add subset feature.
  # ex["subset"] = subset
+ key = f"{ss_name}/{sub_key}"
  if with_translation is True:
  ex = {"translation": ex}
  yield key, ex
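For a parallel-sentence subset, the yielded key combines the subset name with the parser's file and line indices; a sketch of one yielded item, with placeholder sentence values:

```
(
    "newstest2018/0/0",
    {"translation": {"de": "...", "en": "..."}},
)
```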
 

+ def _parse_parallel_sentences(f1, f2, filename1, filename2):
  """Returns examples from parallel SGML or text files, which may be gzipped."""

+ def _parse_text(path, original_filename):
  """Returns the sentences from a single text file, which may be gzipped."""
+ split_path = original_filename.split(".")

  if split_path[-1] == "gz":
  lang = split_path[-2]
+
+ def gen():
+ with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g:
+ for line in g:
+ yield line.decode("utf-8").rstrip()
+
+ return gen(), lang
 
  if split_path[-1] == "txt":
  # CWMT
  lang = split_path[-2].split("_")[-1]
  lang = "zh" if lang in ("ch", "cn") else lang
  else:
  lang = split_path[-1]

+ def gen():
+ with open(path, "rb") as f:
+ for line in f:
+ yield line.decode("utf-8").rstrip()
+
+ return gen(), lang
+
+ def _parse_sgm(path, original_filename):
  """Returns sentences from a single SGML file."""
+ lang = original_filename.split(".")[-2]
  # Note: We can't use the XML parser since some of the files are badly
  # formatted.
  seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
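  # e.g. '<seg id="1">Guten Morgen.</seg>' yields "Guten Morgen."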
 
 
 
 
 
 
 
+ def gen():
+ with open(path, encoding="utf-8") as f:
+ for line in f:
+ seg_match = re.match(seg_re, line)
+ if seg_match:
+ assert len(seg_match.groups()) == 1
+ yield seg_match.groups()[0]
+
+ return gen(), lang
+
+ parse_file = _parse_sgm if os.path.basename(f1).endswith(".sgm") else _parse_text
 
  # Some datasets (e.g., CWMT) contain multiple parallel files specified with
  # a wildcard. We sort both sets to align them and parse them one by one.

  )

  for f_id, (f1_i, f2_i) in enumerate(zip(sorted(f1_files), sorted(f2_files))):
+ l1_sentences, l1 = parse_file(f1_i, filename1)
+ l2_sentences, l2 = parse_file(f2_i, filename2)

  for line_id, (s1, s2) in enumerate(zip(l1_sentences, l2_sentences)):
+ key = f"{f_id}/{line_id}"
  yield key, {l1: s1, l2: s2}


  def _parse_frde_bitext(fr_path, de_path):
+ with open(fr_path, encoding="utf-8") as fr_f:
+ with open(de_path, encoding="utf-8") as de_f:
+ for line_id, (s1, s2) in enumerate(zip(fr_f, de_f)):
+ yield line_id, {"fr": s1.rstrip(), "de": s2.rstrip()}
 
 
 
 
 
 
 
 


  def _parse_tmx(path):

  elem.clear()

+ def _parse_tsv(path, filename, language_pair=None):
  """Generates examples from TSV file."""
  if language_pair is None:
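+ # e.g. "news-commentary-v14.de-en.tsv" -> ("de", "en")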
+ lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", filename)
+ assert lang_match is not None, "Invalid TSV filename: %s" % filename
  l1, l2 = lang_match.groups()
  else:
  l1, l2 = language_pair
 
  block_match = re.match(re_block, id_)
  if block_match and block_match.groups()[0] in bad_blocks:
  continue
+ sub_key = f"{filename}/{line_id}"
  yield sub_key, {
  "cs": cs.strip(),
  "en": en.strip(),