ecoue committed on
Commit 5dc576d
1 Parent(s): 8f6daea

first iteration of the nordmann2023 dataset on huggingface

Files changed (4)
  1. README.md +52 -0
  2. build_tokenizer.py +79 -0
  3. nordmann2023.py +377 -0
  4. utils.py +341 -0
README.md ADDED
@@ -0,0 +1,52 @@
---
annotations_creators: []
language:
- de
- en
language_creators: []
license:
- unknown
multilinguality:
- translation
pretty_name: nordmann2023
size_categories:
- 1M<n<10M
source_datasets: []
tags:
- europarl
- newscommentary
- wikititles
- ecb
- rapid
- eesc
- ema
- europat
- books
- ted2020
- qed
- eubookshop
task_categories:
- translation
task_ids: []
dataset_info:
  features:
  - name: translation
    dtype:
      translation:
        languages:
        - de
        - en
  config_name: balanced
  splits:
  - name: train
    num_bytes: 1539472445
    num_examples: 5656659
  - name: validation
    num_bytes: 706611
    num_examples: 2754
  - name: test
    num_bytes: 411077
    num_examples: 1831
  download_size: 4076594396
  dataset_size: 1540590133
---
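
For reference, a minimal way to load the dataset described by this card, assuming the repository (with its loading script nordmann2023.py below) is available locally or under the same name on the Hub; the accessed fields follow the `translation` feature declared above:

```python
from datasets import load_dataset

# Load the 'balanced' configuration; the loading script (nordmann2023.py)
# downloads, normalizes and filters the source corpora on first use.
dset = load_dataset(
    path='nordmann2023',
    name='balanced',
    split='validation'
)

# Every example is one de/en pair under the 'translation' feature.
print(dset[0]['translation']['de'])
print(dset[0]['translation']['en'])
```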
build_tokenizer.py ADDED
@@ -0,0 +1,79 @@
from tokenizers.decoders import WordPiece as WordPieceDecoder
from tokenizers.pre_tokenizers import BertPreTokenizer
from tokenizers.normalizers import BertNormalizer
from tokenizers.trainers import WordPieceTrainer
from tokenizers.models import WordPiece as WordPieceModel
from tokenizers import Tokenizer
import itertools

from datasets import load_dataset
from datasets.utils.logging import set_verbosity_error
set_verbosity_error()

from utils import SampleBatch


def unpack_samples(
    batch: SampleBatch
):
    # Flatten a batch of translation pairs into a flat list of
    # German and English strings for tokenizer training.
    iterator = (
        sample.values()
        for sample in batch['translation']
    )

    return list(
        itertools.chain
        .from_iterable(iterator)
    )


def build_tokenizer(
    clean_text: bool = True,
    strip_accents: bool = True,
    lowercase: bool = True
) -> Tokenizer:
    # WordPiece model with BERT-style normalization and pre-tokenization.
    tokenizer = Tokenizer(
        model=WordPieceModel(
            unk_token='<UNK>'
        )
    )
    tokenizer.normalizer = BertNormalizer(
        clean_text=clean_text,
        handle_chinese_chars=True,
        strip_accents=strip_accents,
        lowercase=lowercase
    )
    tokenizer.pre_tokenizer = BertPreTokenizer()
    tokenizer.decoder = WordPieceDecoder()

    return tokenizer


train_dset = load_dataset(
    path='nordmann2023',
    name='balanced',
    split='train'
)

tokenizer = build_tokenizer(
    clean_text=True,
    strip_accents=False,
    lowercase=False
)
tokenizer.train_from_iterator(
    iterator=(
        unpack_samples(batch)
        for batch in train_dset.iter(
            batch_size=10000
        )
    ),
    trainer=WordPieceTrainer(
        vocab_size=40000,
        special_tokens=[
            '<UNK>', '<CLS>', '<SEP>', '<PAD>', '<MASK>'
        ]
    ),
    # every row contributes two texts (de and en)
    length=train_dset.num_rows * 2
)
tokenizer.save(
    path='tokenizer.json'
)
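
As a quick sanity check (not part of the commit), the saved tokenizer can be reloaded and round-tripped; the example sentence is a placeholder:

```python
from tokenizers import Tokenizer

# Reload the trained tokenizer and round-trip a placeholder sentence.
tokenizer = Tokenizer.from_file('tokenizer.json')

encoding = tokenizer.encode('Das Europäische Parlament tritt heute zusammen.')
print(encoding.tokens)
print(encoding.ids)
print(tokenizer.decode(encoding.ids))
```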
nordmann2023.py ADDED
@@ -0,0 +1,377 @@
from typing import Optional, Callable, List, Dict, Any, Tuple, Generator
from dataclasses import dataclass
import itertools
import os
import datasets
from .utils import (
    Sample, list_keyby, parse_tmx, parse_sgm, parse_tsv,
    cleanup, normalize, dict_map, dict_filter_keys, dict_flatten
)

logger = datasets.logging.get_logger(
    name=__name__
)


@dataclass(frozen=True)
class Candidate:
    # One source corpus: download location, files to read within the
    # (extracted) download and a parser that yields translation samples.
    name: str
    url: str
    paths: Tuple[str, ...]
    num_examples: int
    parser: Callable[
        [Tuple[str, ...]], Generator[Sample, None, None]
    ]

    def download_paths(
        self,
        base_path: str
    ):
        return tuple(
            os.path.join(base_path, path)
            for path in self.paths
        )


@dataclass(frozen=True)
class Constraint:
    # Optional slice applied to a candidate's sample stream (see _generate_examples).
    start: Optional[int] = None
    stop: Optional[int] = None
    step: Optional[int] = None


_CANDIDATES = [
    Candidate(
        name='europarl_v10',
        url='https://statmt.org/europarl/v10/training/europarl-v10.de-en.tsv.gz',
        paths=('.',),
        num_examples=1828521,
        parser=lambda filepaths: parse_tsv(
            filepaths=filepaths,
            columns={
                'de': 0, 'en': 1
            }
        )
    ),
    Candidate(
        name='newscommentary_v17',
        url='https://www.statmt.org/news-commentary/v17/training/news-commentary-v17.de-en.tsv.gz',
        paths=('.',),
        num_examples=418621,
        parser=lambda filepaths: parse_tsv(
            filepaths=filepaths,
            columns={
                'de': 0, 'en': 1
            }
        )
    ),
    Candidate(
        name='wikititles_v3',
        url='https://object.pouta.csc.fi/OPUS-WikiTitles/v3/tmx/de-en.tmx.gz',
        paths=('.',),
        num_examples=1386770,
        parser=lambda filepaths: parse_tmx(
            filepaths=filepaths,
            attributes={
                'de': 'xml:lang="de"', 'en': 'xml:lang="en"',
            }
        )
    ),
    Candidate(
        name='ecb_2017',
        url='https://s3-eu-west-1.amazonaws.com/tilde-model/ecb2017.de-en.tmx.zip',
        paths=('ecb2017.UNIQUE.de-en.tmx',),
        num_examples=4147,
        parser=lambda filepaths: parse_tmx(
            filepaths=filepaths,
            attributes={
                'de': 'xml:lang="de"', 'en': 'xml:lang="en"',
            }
        )
    ),
    Candidate(
        name='rapid_2019',
        url='https://s3-eu-west-1.amazonaws.com/tilde-model/rapid2019.de-en.tmx.zip',
        paths=('RAPID_2019.UNIQUE.de-en.tmx',),
        num_examples=939808,
        parser=lambda filepaths: parse_tmx(
            filepaths=filepaths,
            attributes={
                'de': 'xml:lang="de"', 'en': 'xml:lang="en"',
            }
        )
    ),
    Candidate(
        name='eesc_2017',
        url='https://s3-eu-west-1.amazonaws.com/tilde-model/EESC2017.de-en.tmx.zip',
        paths=('EESC.de-en.tmx',),
        num_examples=2857850,
        parser=lambda filepaths: parse_tmx(
            filepaths=filepaths,
            attributes={
                'de': 'xml:lang="de"', 'en': 'xml:lang="en"',
            }
        )
    ),
    Candidate(
        name='ema_2016',
        url='https://s3-eu-west-1.amazonaws.com/tilde-model/EMA2016.de-en.tmx.zip',
        paths=('EMEA2016.de-en.tmx',),
        num_examples=347631,
        parser=lambda filepaths: parse_tmx(
            filepaths=filepaths,
            attributes={
                'de': 'xml:lang="de"', 'en': 'xml:lang="en"',
            }
        )
    ),
    Candidate(
        name='europat_v3',
        url='https://web-language-models.s3.amazonaws.com/europat/release3/de-en.txt.gz',
        paths=('.',),
        num_examples=19734742,
        parser=lambda filepaths: parse_tsv(
            filepaths=filepaths,
            columns={
                'de': 0, 'en': 1
            }
        )
    ),
    Candidate(
        name='books_v1',
        url='https://object.pouta.csc.fi/OPUS-Books/v1/tmx/de-en.tmx.gz',
        paths=('.',),
        num_examples=51106,
        parser=lambda filepaths: parse_tmx(
            filepaths=filepaths,
            attributes={
                'de': 'xml:lang="de"', 'en': 'xml:lang="en"',
            }
        )
    ),
    Candidate(
        name='ted2020_v1',
        url='https://object.pouta.csc.fi/OPUS-TED2020/v1/tmx/de-en.tmx.gz',
        paths=('.',),
        num_examples=289374,
        parser=lambda filepaths: parse_tmx(
            filepaths=filepaths,
            attributes={
                'de': 'xml:lang="de"', 'en': 'xml:lang="en"',
            }
        )
    ),
    Candidate(
        name='qed_v2',
        url='https://object.pouta.csc.fi/OPUS-QED/v2.0a/tmx/de-en.tmx.gz',
        paths=('.',),
        num_examples=492811,
        parser=lambda filepaths: parse_tmx(
            filepaths=filepaths,
            attributes={
                'de': 'xml:lang="de"', 'en': 'xml:lang="en"',
            }
        )
    ),
    Candidate(
        name='eubookshop_v2',
        url='https://object.pouta.csc.fi/OPUS-EUbookshop/v2/tmx/de-en.tmx.gz',
        paths=('.',),
        num_examples=8312724,
        parser=lambda filepaths: parse_tmx(
            filepaths=filepaths,
            attributes={
                'de': 'xml:lang="de"', 'en': 'xml:lang="en"',
            }
        )
    ),
    Candidate(
        name='newstest2018',
        url='https://data.statmt.org/wmt22/translation-task/dev.tgz',
        paths=('dev/sgm/newstest2018-deen-src.de.sgm',
               'dev/sgm/newstest2018-deen-ref.en.sgm'),
        num_examples=2998,
        parser=lambda filepaths: parse_sgm(
            filepaths=filepaths,
            files={
                'de': 0, 'en': 1
            }
        )
    ),
    Candidate(
        name='newstest2019',
        url='https://data.statmt.org/wmt22/translation-task/dev.tgz',
        paths=('dev/sgm/newstest2019-deen-src.de.sgm',
               'dev/sgm/newstest2019-deen-ref.en.sgm'),
        num_examples=2000,
        parser=lambda filepaths: parse_sgm(
            filepaths=filepaths,
            files={
                'de': 0, 'en': 1
            }
        )
    )
]

_CANDIDATES_BY_NAME = list_keyby(
    input=_CANDIDATES,
    key_fn=lambda candidate: candidate.name
)


class NordmannConfig(
    datasets.BuilderConfig
):
    def __init__(
        self,
        splits: Dict[datasets.NamedSplit, List[str]],
        constraints: Dict[str, Constraint],
        normalizer: Callable[[Sample], Sample],
        filter: Callable[[Sample], bool],
        **kwargs: Any
    ):
        assert splits

        datasets.BuilderConfig.__init__(
            self, **kwargs
        )

        # resolve the candidate names listed per split into Candidate objects
        self.splits = dict_map(
            input=splits, map_fn=lambda key, value: (
                key, dict_filter_keys(
                    input=_CANDIDATES_BY_NAME, keys=value
                )
            )
        )
        self.constraints = constraints
        self.normalizer = normalizer
        self.filter = filter


class Nordmann(
    datasets.GeneratorBasedBuilder
):
    BUILDER_CONFIG_CLASS = NordmannConfig
    BUILDER_CONFIGS = [
        NordmannConfig(
            name='balanced',
            description='NORDMANN 2023 (balanced) translation task dataset.',
            version=datasets.Version(
                version_str='0.0.1'
            ),
            splits={
                datasets.Split.TRAIN: [
                    'europarl_v10',
                    'newscommentary_v17',
                    'wikititles_v3',
                    'europat_v3',
                    'books_v1',
                    'ted2020_v1',
                    'qed_v2',
                    'eubookshop_v2'
                ],
                datasets.Split.VALIDATION: [
                    'newstest2018'
                ],
                datasets.Split.TEST: [
                    'newstest2019'
                ]
            },
            constraints={
                'europat_v3': Constraint(stop=1000000),
                'eubookshop_v2': Constraint(stop=2000000)
            },
            normalizer=normalize(
                strip_whitespaces=True,
                clean_control_characters=True,
                enforce_unicode_form='NFC'
            ),
            filter=cleanup(
                length_min=4,
                length_max=4096,
                length_ratio_max=1.33,
                alpha_ratio_min=.5
            )
        )
    ]

    def _info(
        self
    ):
        features = {
            'translation': datasets.features.Translation(
                languages=['de', 'en']
            )
        }

        return datasets.DatasetInfo(
            description='Translation dataset based on statmt.org',
            features=datasets.Features(features)
        )

    def _split_generators(
        self,
        dl_manager: datasets.DownloadManager
    ):
        self.config: NordmannConfig

        # one download URL per candidate, flattened across all splits
        urls = dict_map(
            input=dict_flatten(
                input=self.config.splits
            ),
            map_fn=lambda key, value: (
                key, value.url
            )
        )

        base_paths: Dict[str, str]
        base_paths = dl_manager.download_and_extract(
            url_or_urls=urls
        )

        generators: List[datasets.SplitGenerator]
        generators = list()
        for split, split_candidates in self.config.splits.items():
            generators.append(
                datasets.SplitGenerator(
                    name=str(split),
                    gen_kwargs={
                        'candidates': split_candidates,
                        'base_paths': base_paths
                    }
                )
            )

        return generators

    def _generate_examples(
        self,
        candidates: Dict[str, Candidate],
        base_paths: Dict[str, str]
    ):
        self.config: NordmannConfig

        for name, candidate in candidates.items():
            constraint = (
                self.config.constraints[name]
                if name in self.config.constraints else Constraint()
            )

            samples = candidate.parser(
                candidate.download_paths(
                    base_path=base_paths[name]
                )
            )

            # apply the per-candidate constraint as a slice over the sample
            # stream, then normalize and filter every remaining sample
            for sample_num, sample in enumerate(
                itertools.islice(
                    samples,
                    constraint.start,
                    constraint.stop,
                    constraint.step
                )
            ):
                normalized_sample = self.config.normalizer(sample)

                if not self.config.filter(normalized_sample):
                    continue

                yield candidate.name + '_' + str(sample_num), normalized_sample

            samples.close()
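
To make the constraints mechanism concrete, here is a minimal, self-contained sketch (toy data, with Constraint copied from the script above) of how a per-candidate Constraint caps a corpus via itertools.islice before normalization and filtering:

```python
import itertools
from dataclasses import dataclass
from typing import Optional


@dataclass(frozen=True)
class Constraint:
    start: Optional[int] = None
    stop: Optional[int] = None
    step: Optional[int] = None


# Toy sample stream standing in for a parser's generator.
samples = (
    {'translation': {'de': f'Satz {i}', 'en': f'Sentence {i}'}}
    for i in range(10)
)
constraint = Constraint(stop=3)

limited = itertools.islice(
    samples, constraint.start, constraint.stop, constraint.step
)
print(len(list(limited)))  # 3: only the first three samples survive the slice
```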
utils.py ADDED
@@ -0,0 +1,341 @@
from typing import Dict, TypeVar, Callable, List, Hashable, Literal, Union, Optional, Tuple, Collection, Iterable
from xml.etree import ElementTree
import unicodedata
from pathlib import Path
import re

Paths = Tuple[str, ...]
Language = Literal['de', 'en']
Translation = Dict[
    Language, str
]
Sample = Dict[
    Literal['translation'], Translation
]
SampleBatch = Dict[
    Literal['translation'], List[Translation]
]

_H1 = TypeVar('_H1', bound=Hashable)
_H2 = TypeVar('_H2', bound=Hashable)
_T1 = TypeVar('_T1')
_T2 = TypeVar('_T2')


def dict_filter_keys(
    input: Dict[_H1, _T1],
    keys: List[_H1]
) -> Dict[_H1, _T1]:
    return dict(
        (key, input[key])
        for key in keys
    )


def dict_flatten(
    input: Dict[_H1, Dict[_H2, _T2]]
) -> Dict[_H2, _T2]:
    return dict(
        items for values in input.values()
        for items in values.items()
    )


def dict_map(
    input: Dict[_H1, _T1],
    map_fn: Callable[[_H1, _T1], Tuple[_H2, _T2]]
) -> Dict[_H2, _T2]:
    return dict(
        map_fn(key, value)
        for key, value in input.items()
    )


def list_keyby(
    input: List[_T1],
    key_fn: Callable[[_T1], _H1]
) -> Dict[_H1, _T1]:
    return dict(
        (key_fn(value), value)
        for value in input
    )


def expand_path(
    path: Path
):
    if path.is_file():
        return [path]

    return list(
        path.iterdir()
    )


def lenif(
    input: Collection[_T1],
    predicate_fn: Callable[[_T1], bool]
):
    # number of elements for which the predicate holds
    return sum(
        predicate_fn(value)
        for value in input
    )


def len_alpha(
    string: str
):
    return lenif(
        input=string,
        predicate_fn=lambda character: character.isalpha()
    )


# character ranges treated as control/formatting characters,
# stripped by normalize() below
unicode_control_characters = (
    r'\x00-\x1F\x7F-\x9F\xAD\u0600-\u0605\u061C\u06DD\u070F\u0890-\u0891'
    r'\u08E2\u180E\u200B-\u200F\u202A-\u202E\u2060-\u2064\u2066-\u206F\uFEFF\uFFF9-\uFFFB'
    r'\U000110BD\U000110CD\U00013430-\U0001343F\U0001BCA0-\U0001BCA3\U0001D173-\U0001D17A'
    r'\U000E0001\U000E0020-\U000E007F\uE000\uF8FF\U000F0000\U000FFFFD\U00100000\U0010FFFD'
)


def normalize(
    strip_whitespaces: bool,
    clean_control_characters: bool,
    enforce_unicode_form: Optional[
        Literal['NFC', 'NFKC', 'NFD', 'NFKD']
    ] = None
):
    regex_pattern = re.compile(
        pattern='[' + unicode_control_characters + ']+'
    )

    def normalize_fn(
        sample: Sample
    ):
        translation = sample['translation']

        if strip_whitespaces:
            translation = dict_map(
                input=translation,
                map_fn=lambda key, value: (
                    key, value.strip()
                )
            )

        if clean_control_characters:
            translation = dict_map(
                input=translation, map_fn=lambda key, value: (
                    key, regex_pattern.sub(
                        repl='', string=value
                    )
                )
            )

        if enforce_unicode_form is not None:
            translation = dict_map(
                input=translation, map_fn=lambda key, value: (
                    key, unicodedata.normalize(
                        enforce_unicode_form, value
                    )
                )
            )

        sample['translation'] = translation
        return sample

    return normalize_fn


def cleanup(
    length_min: int,
    length_max: int,
    length_ratio_max: Union[int, float],
    alpha_ratio_min: Union[int, float]
):
    # returns a predicate that keeps a sample only if both sides have a sane
    # length, a sufficient share of alphabetic characters and a bounded
    # length ratio between the two sides
    def cleanup_fn(
        sample: Sample
    ):
        translation = sample['translation']

        lengths = list(
            len(value) for value in translation.values()
        )
        alpha_lengths = list(
            len_alpha(value) for value in translation.values()
        )

        return all(
            length_min < length < length_max and alpha_ratio_min < alpha_length / length
            for length, alpha_length in zip(lengths, alpha_lengths)
        ) and 1 / length_ratio_max < lengths[0] / lengths[1] < length_ratio_max

    return cleanup_fn


class NoResultFound(Exception):
    pass


class MultipleResultsFound(Exception):
    pass


def one(
    iterable: Iterable[_T1]
) -> _T1:
    # return the single element of the iterable, raising otherwise
    iterator = iter(iterable)

    try:
        value = next(iterator)
    except StopIteration as e:
        raise NoResultFound from e

    try:
        next(iterator)
    except StopIteration:
        pass
    else:
        raise MultipleResultsFound

    return value


def match_one(
    pattern: Union[str, re.Pattern[str]],
    string: str,
    flags: int = 0
):
    return one(
        iterable=re.finditer(
            pattern=pattern,
            string=string,
            flags=flags
        )
    )


def parse_sgm(
    filepaths: Paths,
    files: Dict[Language, int],
    encoding: str = 'utf-8'
):
    assert len(filepaths) == 2

    def read_lines_regex(
        filepath: str,
        pattern: re.Pattern[str]
    ):
        with open(
            file=filepath,
            encoding=encoding,
            mode='r'
        ) as file:
            for string in file:
                try:
                    match = match_one(
                        pattern=pattern,
                        string=string
                    )
                    groups = match.groups(
                        default=''
                    )
                    yield groups[0]
                except (NoResultFound, MultipleResultsFound):
                    # lines without exactly one <seg> (e.g. document markup)
                    # yield an empty segment so both files stay aligned
                    yield ''

    regex = re.compile(
        pattern=r'<seg id="\d+">(.*)</seg>'
    )

    for lines in zip(
        read_lines_regex(
            filepath=filepaths[0],
            pattern=regex
        ),
        read_lines_regex(
            filepath=filepaths[1],
            pattern=regex
        )
    ):
        translation: Translation
        translation = dict(
            (language, lines[index])
            for language, index in files.items()
        )

        sample: Sample = dict()
        sample['translation'] = translation
        yield sample


def parse_tsv(
    filepaths: Paths,
    columns: Dict[Language, int],
    encoding: str = 'utf-8'
):
    assert len(filepaths) == 1

    len_columns = len(columns)

    with open(
        file=filepaths[0],
        encoding=encoding,
        mode='r'
    ) as file:
        for line in file:
            parts = line.split('\t')

            if len(parts) < len_columns:
                continue

            translation: Translation = dict()
            for language, index in columns.items():
                translation[language] = parts[index]

            sample: Sample = dict()
            sample['translation'] = translation
            yield sample


def parse_tmx(
    filepaths: Paths,
    attributes: Dict[Language, str],
    encoding: str = 'utf-8',
):
    assert len(filepaths) == 1

    element: ElementTree.Element
    namespaces = {
        'xml': 'http://www.w3.org/XML/1998/namespace'
    }

    with open(
        file=filepaths[0],
        encoding=encoding,
        mode='r'
    ) as file:
        for _, element in ElementTree.iterparse(file):
            if not element.tag == 'tu':
                continue

            translation: Translation = dict()
            for language, selector in attributes.items():
                path = 'tuv[@' + selector + ']'

                segs = element.findall(
                    path=path + '/seg', namespaces=namespaces
                )
                if not len(segs) == 1:
                    continue

                translation[language] = segs[0].text or ''

            element.clear()

            if not len(translation) == 2:
                continue

            sample: Sample = dict()
            sample['translation'] = translation
            yield sample
+ yield sample