# nordmann2023/nordmann2023.py
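"""Dataset loading script for NORDMANN 2023, a German-English translation
task assembled from publicly available parallel corpora (Europarl, News
Commentary, OPUS exports, Tilde MODEL corpora, EuroPat and the WMT
newstest dev sets). Samples are normalized, filtered and exposed as
train/validation/test splits."""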
from typing import Optional, Callable, List, Dict, Any, Tuple, Generator
from dataclasses import dataclass
import itertools
import os
import datasets
from .utils import (
    Sample, list_keyby, parse_tmx, parse_sgm, parse_tsv,
    cleanup, normalize, dict_map, dict_filter_keys, dict_flatten
)
logger = datasets.logging.get_logger(
name=__name__
)
@dataclass(frozen=True)
class Candidate:
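    """A single downloadable corpus: where to fetch it, which files inside
    the download to read, and the parser that turns those files into
    `Sample`s."""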
name: str
url: str
paths: Tuple[str, ...]
num_examples: int
parser: Callable[
[Tuple[str, ...]], Generator[Sample, None, None]
]
def download_paths(
self,
base_path: str
):
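        """Resolve the candidate's relative `paths` against the location
        the download manager returned for it."""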
return tuple(
os.path.join(base_path, path)
for path in self.paths
)
@dataclass(frozen=True)
class Constraint:
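    """Optional slice bounds, passed to `itertools.islice`, that limit
    which samples are taken from a corpus."""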
start: Optional[int] = None
stop: Optional[int] = None
step: Optional[int] = None
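
# Every corpus the dataset can draw from. The `num_examples` values
# document the corpus sizes; they are not enforced during generation.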
_CANDIDATES = [
Candidate(
name='europarl_v10',
url='https://statmt.org/europarl/v10/training/europarl-v10.de-en.tsv.gz',
paths=('.',),
num_examples=1828521,
parser=lambda filepaths: parse_tsv(
filepaths=filepaths,
columns={
'de': 0, 'en': 1
}
)
),
Candidate(
name='newscommentary_v17',
url='https://www.statmt.org/news-commentary/v17/training/news-commentary-v17.de-en.tsv.gz',
paths=('.',),
num_examples=418621,
parser=lambda filepaths: parse_tsv(
filepaths=filepaths,
columns={
'de': 0, 'en': 1
}
)
),
Candidate(
name='wikititles_v3',
url='https://object.pouta.csc.fi/OPUS-WikiTitles/v3/tmx/de-en.tmx.gz',
paths=('.',),
num_examples=1386770,
parser=lambda filepaths: parse_tmx(
filepaths=filepaths,
attributes={
'de': 'xml:lang="de"', 'en': 'xml:lang="en"',
}
)
),
Candidate(
name='ecb_2017',
url='https://s3-eu-west-1.amazonaws.com/tilde-model/ecb2017.de-en.tmx.zip',
paths=('ecb2017.UNIQUE.de-en.tmx',),
num_examples=4147,
parser=lambda filepaths: parse_tmx(
filepaths=filepaths,
attributes={
'de': 'xml:lang="de"', 'en': 'xml:lang="en"',
}
)
),
Candidate(
name='rapid_2019',
url='https://s3-eu-west-1.amazonaws.com/tilde-model/rapid2019.de-en.tmx.zip',
paths=('RAPID_2019.UNIQUE.de-en.tmx',),
num_examples=939808,
parser=lambda filepaths: parse_tmx(
filepaths=filepaths,
attributes={
'de': 'xml:lang="de"', 'en': 'xml:lang="en"',
}
)
),
Candidate(
name='eesc_2017',
url='https://s3-eu-west-1.amazonaws.com/tilde-model/EESC2017.de-en.tmx.zip',
paths=('EESC.de-en.tmx',),
num_examples=2857850,
parser=lambda filepaths: parse_tmx(
filepaths=filepaths,
attributes={
'de': 'xml:lang="de"', 'en': 'xml:lang="en"',
}
)
),
Candidate(
name='ema_2016',
url='https://s3-eu-west-1.amazonaws.com/tilde-model/EMA2016.de-en.tmx.zip',
paths=('EMEA2016.de-en.tmx',),
num_examples=347631,
parser=lambda filepaths: parse_tmx(
filepaths=filepaths,
attributes={
'de': 'xml:lang="de"', 'en': 'xml:lang="en"',
}
)
),
Candidate(
name='europat_v3',
url='https://web-language-models.s3.amazonaws.com/europat/release3/de-en.txt.gz',
paths=('.',),
num_examples=19734742,
parser=lambda filepaths: parse_tsv(
filepaths=filepaths,
columns={
'de': 0, 'en': 1
}
)
),
Candidate(
name='books_v1',
url='https://object.pouta.csc.fi/OPUS-Books/v1/tmx/de-en.tmx.gz',
paths=('.',),
num_examples=51106,
parser=lambda filepaths: parse_tmx(
filepaths=filepaths,
attributes={
'de': 'xml:lang="de"', 'en': 'xml:lang="en"',
}
)
),
Candidate(
name='ted2020_v1',
url='https://object.pouta.csc.fi/OPUS-TED2020/v1/tmx/de-en.tmx.gz',
paths=('.',),
num_examples=289374,
parser=lambda filepaths: parse_tmx(
filepaths=filepaths,
attributes={
'de': 'xml:lang="de"', 'en': 'xml:lang="en"',
}
)
),
Candidate(
name='qed_v2',
url='https://object.pouta.csc.fi/OPUS-QED/v2.0a/tmx/de-en.tmx.gz',
paths=('.',),
num_examples=492811,
parser=lambda filepaths: parse_tmx(
filepaths=filepaths,
attributes={
'de': 'xml:lang="de"', 'en': 'xml:lang="en"',
}
)
),
Candidate(
name='eubookshop_v2',
url='https://object.pouta.csc.fi/OPUS-EUbookshop/v2/tmx/de-en.tmx.gz',
paths=('.',),
num_examples=8312724,
parser=lambda filepaths: parse_tmx(
filepaths=filepaths,
attributes={
'de': 'xml:lang="de"', 'en': 'xml:lang="en"',
}
)
),
Candidate(
name='newstest2018',
url='https://data.statmt.org/wmt22/translation-task/dev.tgz',
paths=('dev/sgm/newstest2018-deen-src.de.sgm',
'dev/sgm/newstest2018-deen-ref.en.sgm'),
num_examples=2998,
parser=lambda filepaths: parse_sgm(
filepaths=filepaths,
files={
'de': 0, 'en': 1
}
)
),
Candidate(
name='newstest2019',
url='https://data.statmt.org/wmt22/translation-task/dev.tgz',
paths=('dev/sgm/newstest2019-deen-src.de.sgm',
'dev/sgm/newstest2019-deen-ref.en.sgm'),
num_examples=2000,
parser=lambda filepaths: parse_sgm(
filepaths=filepaths,
files={
'de': 0, 'en': 1
}
)
)
]
_CANDIDATES_BY_NAME = list_keyby(
input=_CANDIDATES,
key_fn=lambda candidate: candidate.name
)
class NordmannConfig(
datasets.BuilderConfig
):
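    """BuilderConfig that declares which corpora form each split, optional
    per-corpus slice constraints, and the normalizer/filter callables
    applied to every sample."""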
def __init__(
self,
splits: Dict[datasets.NamedSplit, List[str]],
constraints: Dict[str, Constraint],
normalizer: Callable[[Sample], Sample],
filter: Callable[[Sample], bool],
**kwargs: Any
):
assert splits
datasets.BuilderConfig.__init__(
self, **kwargs
)
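        # Resolve each split's list of corpus names into the corresponding
        # Candidate objects.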
self.splits = dict_map(
input=splits, map_fn=lambda key, value: (
key, dict_filter_keys(
input=_CANDIDATES_BY_NAME, keys=value
)
)
)
self.constraints = constraints
self.normalizer = normalizer
self.filter = filter
class Nordmann(
datasets.GeneratorBasedBuilder
):
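    """Builder that downloads, parses, normalizes and filters the corpora
    configured for each split."""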
BUILDER_CONFIG_CLASS = NordmannConfig
BUILDER_CONFIGS = [
NordmannConfig(
name='balanced',
description='NORDMANN 2023 (balanced) translation task dataset.',
version=datasets.Version(
version_str='0.0.1'
),
splits={
datasets.Split.TRAIN: [
'europarl_v10',
'newscommentary_v17',
'wikititles_v3',
'europat_v3',
'books_v1',
'ted2020_v1',
'qed_v2',
'eubookshop_v2'
],
datasets.Split.VALIDATION: [
'newstest2018'
],
datasets.Split.TEST: [
'newstest2019'
]
},
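            # Cap the two largest corpora; presumably this is what keeps
            # the 'balanced' mix from being dominated by them.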
constraints={
'europat_v3': Constraint(stop=1000000),
'eubookshop_v2': Constraint(stop=2000000)
},
normalizer=normalize(
strip_whitespaces=True,
clean_control_characters=True,
enforce_unicode_form='NFC'
),
filter=cleanup(
length_min=4,
length_max=4096,
length_ratio_max=1.33,
alpha_ratio_min=.5
)
)
]
def _info(
self
):
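        """Each example carries a single `translation` feature with `de`
        and `en` texts."""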
features = {
'translation': datasets.features.Translation(
languages=['de', 'en']
)
}
return datasets.DatasetInfo(
description='Translation dataset based on statmt.org',
features=datasets.Features(features)
)
def _split_generators(
self,
dl_manager: datasets.DownloadManager
):
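        """Download every referenced corpus once, then emit one
        SplitGenerator per configured split."""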
self.config: NordmannConfig
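        # Flatten {split: {name: Candidate}} into {name: Candidate} and map
        # each candidate to its download URL.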
urls = dict_map(
input=dict_flatten(
input=self.config.splits
),
map_fn=lambda key, value: (
key, value.url
)
)
base_paths: Dict[str, str]
base_paths = dl_manager.download_and_extract( # pyright: ignore
url_or_urls=urls
)
generators: List[datasets.SplitGenerator]
generators = list()
for split, split_candidates in self.config.splits.items():
generators.append(
datasets.SplitGenerator(
name=str(split),
gen_kwargs={
'candidates': split_candidates,
'base_paths': base_paths
}
)
)
return generators
def _generate_examples( # pyright: ignore
self,
candidates: Dict[str, Candidate],
base_paths: Dict[str, str]
):
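        """Yield `(key, example)` pairs for each candidate, applying the
        per-corpus constraint, then the normalizer and the filter."""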
self.config: NordmannConfig
for name, candidate in candidates.items():
            constraint = self.config.constraints.get(
                name, Constraint()
            )
samples = candidate.parser(
candidate.download_paths(
base_path=base_paths[name]
)
)
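            # Slice before normalization/filtering, so the constraint
            # bounds apply to the raw sample stream.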
for sample_num, sample in enumerate(
itertools.islice(
samples,
constraint.start,
constraint.stop,
constraint.step
)
):
normalized_sample = self.config.normalizer(sample)
if not self.config.filter(normalized_sample):
continue
yield candidate.name + '_' + str(sample_num), normalized_sample
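            # Close the parser generator explicitly so open file handles
            # are released even when `islice` stopped early.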
samples.close()
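

# A minimal usage sketch (an assumption about how the script is consumed,
# not part of the loading logic): placed in a local dataset directory, the
# script can be loaded by name with the 'balanced' config defined above.
#
#     import datasets
#
#     dataset = datasets.load_dataset(
#         path='nordmann2023', name='balanced'
#     )
#     print(dataset['train'][0]['translation'])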