Commit d7c015d (parent: 1c93867)

Delete loading script

un_multi.py (DELETED, +0 -111)
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MultiUN: Multilingual UN Parallel Text 2000—2009"""


import itertools
import os

import datasets


_CITATION = """\
@inproceedings{eisele-chen-2010-multiun,
    title = "{M}ulti{UN}: A Multilingual Corpus from United Nation Documents",
    author = "Eisele, Andreas and
      Chen, Yu",
    booktitle = "Proceedings of the Seventh International Conference on Language Resources and Evaluation ({LREC}'10)",
    month = may,
    year = "2010",
    address = "Valletta, Malta",
    publisher = "European Language Resources Association (ELRA)",
    url = "http://www.lrec-conf.org/proceedings/lrec2010/pdf/686_Paper.pdf",
    abstract = "This paper describes the acquisition, preparation and properties of a corpus extracted from the official documents of the United Nations (UN). This corpus is available in all 6 official languages of the UN, consisting of around 300 million words per language. We describe the methods we used for crawling, document formatting, and sentence alignment. This corpus also includes a common test set for machine translation. We present the results of a French-Chinese machine translation experiment performed on this corpus.",
}

@InProceedings{TIEDEMANN12.463,
    author = {Jörg Tiedemann},
    title = {Parallel Data, Tools and Interfaces in OPUS},
    booktitle = {Proceedings of the Eight International Conference on Language Resources and Evaluation (LREC'12)},
    year = {2012},
    month = {may},
    date = {23-25},
    address = {Istanbul, Turkey},
    editor = {Nicoletta Calzolari (Conference Chair) and Khalid Choukri and Thierry Declerck and Mehmet Ugur Dogan and Bente Maegaard and Joseph Mariani and Jan Odijk and Stelios Piperidis},
    publisher = {European Language Resources Association (ELRA)},
    isbn = {978-2-9517408-7-7},
}
"""


_DESCRIPTION = """\
This is a collection of translated documents from the United Nations. \
This corpus is available in all 6 official languages of the UN, \
consisting of around 300 million words per language
"""


# Original:
# _HOMEPAGE = "http://www.euromatrixplus.net/multi-un/"
_HOMEPAGE = "https://opus.nlpl.eu/MultiUN/corpus/version/MultiUN"

_LANGUAGES = ["ar", "de", "en", "es", "fr", "ru", "zh"]
_LANGUAGE_PAIRS = list(itertools.combinations(_LANGUAGES, 2))

_BASE_URL = "https://object.pouta.csc.fi/OPUS-MultiUN/v1/moses"
_URLS = {f"{l1}-{l2}": f"{_BASE_URL}/{l1}-{l2}.txt.zip" for l1, l2 in _LANGUAGE_PAIRS}


class UnMulti(datasets.GeneratorBasedBuilder):
    """MultiUN: Multilingual UN Parallel Text 2000—2009"""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=f"{l1}-{l2}", version=datasets.Version("1.0.0"), description=f"MultiUN {l1}-{l2}")
        for l1, l2 in _LANGUAGE_PAIRS
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=tuple(self.config.name.split("-")))}
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        lang_pair = self.config.name.split("-")
        data_dir = dl_manager.download_and_extract(_URLS[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "source_file": os.path.join(data_dir, f"MultiUN.{self.config.name}.{lang_pair[0]}"),
                    "target_file": os.path.join(data_dir, f"MultiUN.{self.config.name}.{lang_pair[1]}"),
                },
            ),
        ]

    def _generate_examples(self, source_file, target_file):
        source, target = tuple(self.config.name.split("-"))
        with open(source_file, encoding="utf-8") as src_f, open(target_file, encoding="utf-8") as tgt_f:
            for idx, (l1, l2) in enumerate(zip(src_f, tgt_f)):
                result = {"translation": {source: l1.strip(), target: l2.strip()}}
                yield idx, result