Modalities: Text
Formats: parquet
Libraries: Datasets, Dask
albertvillanova (HF staff) committed
Commit: 915521e
Parent: a5b0569

Delete loading script
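
With the script deleted, the Hub serves this dataset from the auto-converted Parquet files noted in the header, so the standard `datasets` API keeps working without executing any repository code. A minimal sketch, assuming the Hub dataset id `opus_paracrawl` and the `el-en` configuration defined in the script below:

    from datasets import load_dataset

    # Loads the pre-converted Parquet files directly; no loading script runs.
    dataset = load_dataset("opus_paracrawl", "el-en", split="train")
    print(dataset[0])  # {"id": "0", "translation": {"el": "...", "en": "..."}}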

Files changed (1):
  1. opus_paracrawl.py +0 -155
opus_paracrawl.py DELETED
@@ -1,155 +0,0 @@
-# coding=utf-8
-# Copyright 2020 HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-import os
-
-import datasets
-
-
-_DESCRIPTION = """\
-Parallel corpora from Web Crawls collected in the ParaCrawl project.
-
-42 languages, 43 bitexts
-total number of files: 59,996
-total number of tokens: 56.11G
-total number of sentence fragments: 3.13G
-"""
-_HOMEPAGE = "http://opus.nlpl.eu/ParaCrawl.php"
-_CITATION = r"""\
-@inproceedings{banon-etal-2020-paracrawl,
-    title = "{P}ara{C}rawl: Web-Scale Acquisition of Parallel Corpora",
-    author = "Ba{\~n}{\'o}n, Marta and
-      Chen, Pinzhen and
-      Haddow, Barry and
-      Heafield, Kenneth and
-      Hoang, Hieu and
-      Espl{\`a}-Gomis, Miquel and
-      Forcada, Mikel L. and
-      Kamran, Amir and
-      Kirefu, Faheem and
-      Koehn, Philipp and
-      Ortiz Rojas, Sergio and
-      Pla Sempere, Leopoldo and
-      Ram{\'\i}rez-S{\'a}nchez, Gema and
-      Sarr{\'\i}as, Elsa and
-      Strelec, Marek and
-      Thompson, Brian and
-      Waites, William and
-      Wiggins, Dion and
-      Zaragoza, Jaume",
-    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
-    month = jul,
-    year = "2020",
-    address = "Online",
-    publisher = "Association for Computational Linguistics",
-    url = "https://aclanthology.org/2020.acl-main.417",
-    doi = "10.18653/v1/2020.acl-main.417",
-    pages = "4555--4567",
-}
-@InProceedings{TIEDEMANN12.463,
-    author = {Jörg Tiedemann},
-    title = {Parallel Data, Tools and Interfaces in OPUS},
-    booktitle = {Proceedings of the Eight International Conference on Language Resources and Evaluation (LREC'12)},
-    year = {2012},
-    month = {may},
-    date = {23-25},
-    address = {Istanbul, Turkey},
-    editor = {Nicoletta Calzolari (Conference Chair) and Khalid Choukri and Thierry Declerck and Mehmet Uğur Doğan and Bente Maegaard and Joseph Mariani and Asuncion Moreno and Jan Odijk and Stelios Piperidis},
-    publisher = {European Language Resources Association (ELRA)},
-    isbn = {978-2-9517408-7-7},
-    language = {english}
-}
-"""
-
-_VERSION = "9.0.0"
-_BASE_NAME = "ParaCrawl.{}.{}"
-_BASE_URL = "https://object.pouta.csc.fi/OPUS-ParaCrawl/v9/moses/{}-{}.txt.zip"
-# Please note that only few pairs are shown here. You can use config to generate data for all language pairs
-_LANGUAGE_PAIRS = [
-    ("el", "en"),
-    ("en", "km"),
-    ("en", "so"),
-    ("de", "pl"),
-    ("fr", "nl"),
-    ("en", "sw"),
-    ("en", "tl"),
-    ("es", "gl"),
-]
-
-
-class ParaCrawlConfig(datasets.BuilderConfig):
-    def __init__(self, *args, lang1=None, lang2=None, **kwargs):
-        super().__init__(
-            *args,
-            name=f"{lang1}-{lang2}",
-            **kwargs,
-        )
-        assert lang1 != lang2, "'language 1' & 'language 2' should be different from each other"
-        self.lang1 = lang1
-        self.lang2 = lang2
-
-
-class OpusParaCrawl(datasets.GeneratorBasedBuilder):
-    BUILDER_CONFIGS = [
-        ParaCrawlConfig(
-            lang1=lang1,
-            lang2=lang2,
-            description=f"Translating {lang1} to {lang2} or vice versa",
-            version=datasets.Version(_VERSION),
-        )
-        for lang1, lang2 in _LANGUAGE_PAIRS
-    ]
-    BUILDER_CONFIG_CLASS = ParaCrawlConfig
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "translation": datasets.Translation(languages=(self.config.lang1, self.config.lang2)),
-                },
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        download_url = _BASE_URL.format(self.config.lang1, self.config.lang2)
-        path = dl_manager.download_and_extract(download_url)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"datapath": path},
-            )
-        ]

-    def _generate_examples(self, datapath):
-        lang1, lang2 = self.config.lang1, self.config.lang2
-        folder = lang1 + "-" + lang2
-        lang1_filename = _BASE_NAME.format(folder, lang1)
-        lang2_filename = _BASE_NAME.format(folder, lang2)
-        lang1_path = os.path.join(datapath, lang1_filename)
-        lang2_path = os.path.join(datapath, lang2_filename)
-        with open(lang1_path, encoding="utf-8") as f1, open(lang2_path, encoding="utf-8") as f2:
-            for id_, (lang1_sentence, lang2_sentence) in enumerate(zip(f1, f2)):
-                lang1_sentence = lang1_sentence.strip()
-                lang2_sentence = lang2_sentence.strip()
-                yield id_, {
-                    "id": str(id_),
-                    "translation": {lang1: lang1_sentence, lang2: lang2_sentence},
-                }
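
For reference, the deleted script's `BUILDER_CONFIG_CLASS = ParaCrawlConfig` hook meant that any OPUS-ParaCrawl language pair could be requested at load time, not only the eight pairs listed in `_LANGUAGE_PAIRS`. A sketch of that old, script-based invocation, which this commit retires (it relied on `load_dataset` forwarding extra keyword arguments to the builder config; the dataset id `opus_paracrawl` is assumed):

    from datasets import load_dataset

    # Extra kwargs were passed through to ParaCrawlConfig, which named the
    # config f"{lang1}-{lang2}" and downloaded the matching OPUS bitext.
    dataset = load_dataset("opus_paracrawl", lang1="de", lang2="en", split="train")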