Datasets: wmt
Modalities: Text
Formats: parquet
Libraries: Datasets, Dask
albertvillanova committed
Commit: d94225b
Parent(s): 798d104

Delete loading script

Files changed (1):
  wmt19.py +0 -80
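With the loading script removed, the Hub serves this dataset directly from its Parquet files, so loading it should no longer require executing repository code. A minimal loading sketch; the repo id "wmt/wmt19" and the "de-en" config name are assumptions inferred from this page and the deleted script, not stated in the commit:

from datasets import load_dataset

# Assumption: the data now lives as Parquet under the repo id "wmt/wmt19",
# with one config per language pair (e.g. "de-en") as in the deleted script.
ds = load_dataset("wmt/wmt19", "de-en", split="validation")

# Each row holds one translation pair keyed by language code.
print(ds[0])  # e.g. {"translation": {"de": "...", "en": "..."}}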
wmt19.py DELETED
@@ -1,80 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""WMT19: Translate dataset."""
-
-import datasets
-
-from .wmt_utils import CWMT_SUBSET_NAMES, Wmt, WmtConfig
-
-
-_URL = "http://www.statmt.org/wmt19/translation-task.html"
-# TODO(adarob): Update with citation of overview paper once it is published.
-_CITATION = """
-@ONLINE {wmt19translate,
-    author = {Wikimedia Foundation},
-    title = {ACL 2019 Fourth Conference on Machine Translation (WMT19), Shared Task: Machine Translation of News},
-    url = {http://www.statmt.org/wmt19/translation-task.html}
-}
-"""
-
-_LANGUAGE_PAIRS = [(lang, "en") for lang in ["cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"]] + [("fr", "de")]
-
-
-class Wmt19(Wmt):
-    """WMT 19 translation datasets for {(xx, "en")} + ("fr", "de") pairs."""
-
-    # Version history:
-    # 1.0.0: S3 (new shuffling, sharding and slicing mechanism).
-    BUILDER_CONFIGS = [
-        WmtConfig(  # pylint:disable=g-complex-comprehension
-            description="WMT 2019 %s-%s translation task dataset." % (l1, l2),
-            url=_URL,
-            citation=_CITATION,
-            language_pair=(l1, l2),
-            version=datasets.Version("1.0.0"),
-        )
-        for l1, l2 in _LANGUAGE_PAIRS
-    ]
-
-    @property
-    def manual_download_instructions(self):
-        if self.config.language_pair[1] in ["cs", "hi", "ru"]:
-            return "Please download the data manually as explained. TODO(PVP)"
-
-    @property
-    def _subsets(self):
-        return {
-            datasets.Split.TRAIN: [
-                "europarl_v9",
-                "europarl_v7_frde",
-                "paracrawl_v3",
-                "paracrawl_v1_ru",
-                "paracrawl_v3_frde",
-                "commoncrawl",
-                "commoncrawl_frde",
-                "newscommentary_v14",
-                "newscommentary_v14_frde",
-                "czeng_17",
-                "yandexcorpus",
-                "wikititles_v1",
-                "uncorpus_v1",
-                "rapid_2016_ltfi",
-                "rapid_2019",
-            ]
-            + CWMT_SUBSET_NAMES,
-            datasets.Split.VALIDATION: ["euelections_dev2019", "newsdev2019", "newstest2018"],
-        }
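For reference, the builder configs the deleted script generated can be reconstructed from its _LANGUAGE_PAIRS list. A small sketch, assuming the "<source>-<target>" config naming that WmtConfig in wmt_utils.py derives from the language pair:

# Reconstruct the config names the deleted script produced (naming is an assumption).
language_pairs = [(lang, "en") for lang in ["cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"]] + [("fr", "de")]
config_names = ["%s-%s" % pair for pair in language_pairs]
print(config_names)
# ['cs-en', 'de-en', 'fi-en', 'gu-en', 'kk-en', 'lt-en', 'ru-en', 'zh-en', 'fr-de']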