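"""Helpers for building and cleaning de-en translation datasets: small
dict/list utilities, Unicode normalization and filtering of samples, and
parsers for SGML, TSV and TMX bitext formats."""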
from typing import Dict, TypeVar, Callable, List, Hashable, Union, Optional, Tuple, Collection, Iterable
from xml.etree import ElementTree
import unicodedata
from pathlib import Path
import re
Paths = Tuple[str, ...]
Language = str # Literal['de', 'en']
Translation = Dict[
Language, str
]
Sample = Dict[
str, Translation # Literal['translation'], Translation
]
SampleBatch = Dict[
str, List[Translation] # Literal['translation'], List[Translation]
]
_H1 = TypeVar('_H1', bound=Hashable)
_H2 = TypeVar('_H2', bound=Hashable)
_T1 = TypeVar('_T1')
_T2 = TypeVar('_T2')
def dict_filter_keys(
input: Dict[_H1, _T1],
keys: List[_H1]
) -> Dict[_H1, _T1]:
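    """Return a new dict restricted to ``keys``; raises KeyError on a missing key.

    >>> dict_filter_keys({'a': 1, 'b': 2, 'c': 3}, keys=['a', 'c'])
    {'a': 1, 'c': 3}
    """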
return dict(
(key, input[key])
for key in keys
)
def dict_flatten(
input: Dict[_H1, Dict[_H2, _T2]]
) -> Dict[_H2, _T2]:
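    """Merge the inner dicts of a nested dict; later values win on key collisions.

    >>> dict_flatten({'x': {'a': 1}, 'y': {'b': 2}})
    {'a': 1, 'b': 2}
    """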
return dict(
items for values in input.values()
for items in values.items()
)
def dict_map(
input: Dict[_H1, _T1],
map_fn: Callable[[_H1, _T1], Tuple[_H2, _T2]]
) -> Dict[_H2, _T2]:
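    """Build a new dict by applying ``map_fn`` to every (key, value) pair.

    >>> dict_map({'a': 1, 'b': 2}, map_fn=lambda key, value: (key.upper(), value * 10))
    {'A': 10, 'B': 20}
    """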
return dict(
map_fn(key, value)
for key, value in input.items()
)
def list_keyby(
input: List[_T1],
key_fn: Callable[[_T1], _H1]
) -> Dict[_H1, _T1]:
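    """Index a list by ``key_fn``; later elements win on duplicate keys.

    >>> list_keyby([(1, 'a'), (2, 'b')], key_fn=lambda pair: pair[0])
    {1: (1, 'a'), 2: (2, 'b')}
    """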
return dict(
(key_fn(value), value)
for value in input
)
def expand_path(
path: Path
) -> List[Path]:
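    """Return ``[path]`` for a file, or the immediate children of a directory."""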
if path.is_file():
return [path]
return list(
path.iterdir()
)
def lenif(
input: Collection[_T1],
predicate_fn: Callable[[_T1], bool]
) -> int:
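    """Count the elements of ``input`` for which ``predicate_fn`` is true.

    >>> lenif([1, -2, 3], predicate_fn=lambda value: value > 0)
    2
    """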
return sum(
predicate_fn(value)
for value in input
)
def len_alpha(
string: str
) -> int:
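    """Count the alphabetic characters in ``string``.

    >>> len_alpha('abc123')
    3
    """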
return lenif(
input=string,
predicate_fn=lambda character: character.isalpha()
)
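# Character-class body for a regex matching Unicode control and format code
# points (C0/C1 controls, soft hyphen, bidi controls and isolates, zero-width
# characters, interlinear annotation marks, tag characters, and private-use
# boundary code points); used by normalize() below to strip invisible text.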
unicode_control_characters = (
r'\x00-\x1F\x7F-\x9F\xAD\u0600-\u0605\u061C\u06DD\u070F\u0890-\u0891'
r'\u08E2\u180E\u200B-\u200F\u202A-\u202E\u2060-\u2064\u2066-\u206F\uFEFF\uFFF9-\uFFFB'
r'\U000110BD\U000110CD\U00013430-\U0001343F\U0001BCA0-\U0001BCA3\U0001D173-\U0001D17A'
r'\U000E0001\U000E0020-\U000E007F\uE000\uF8FF\U000F0000\U000FFFFD\U00100000\U0010FFFD'
)
def normalize(
strip_whitespaces: bool,
clean_control_characters: bool,
enforce_unicode_form: Optional[str] = None # Optional[Literal['NFC', 'NFKC', 'NFD', 'NFKD']] = None
) -> Callable[[Sample], Sample]:
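    """Build a Sample -> Sample function that normalizes both sides of a translation.

    Applies, in order: whitespace stripping, removal of Unicode control and
    format characters, and Unicode normalization to the requested form
    ('NFC', 'NFKC', 'NFD' or 'NFKD').
    """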
regex_pattern = re.compile(
pattern='[' + unicode_control_characters + ']+'
)
def normalize_fn(
sample: Sample
):
translation = sample['translation']
if strip_whitespaces:
translation = dict_map(
input=translation,
map_fn=lambda key, value: (
key, value.strip()
)
)
if clean_control_characters:
translation = dict_map(
input=translation, map_fn=lambda key, value: (
key, regex_pattern.sub(
repl='', string=value
)
)
)
if enforce_unicode_form is not None:
translation = dict_map(
input=translation, map_fn=lambda key, value: (
key, unicodedata.normalize(
enforce_unicode_form, value # pyright: ignore
)
)
)
sample['translation'] = translation # pyright: ignore
return sample
return normalize_fn
def cleanup(
length_min: int,
length_max: int,
length_ratio_max: Union[int, float],
alpha_ratio_min: Union[int, float]
) -> Callable[[Sample], bool]:
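    """Build a Sample -> bool predicate for filtering translation pairs.

    A pair is kept when both sides satisfy the (exclusive) length bounds and
    the minimum ratio of alphabetic characters, and when the two lengths
    differ by less than a factor of ``length_ratio_max``.
    """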
def cleanup_fn(
sample: Sample
):
translation = sample['translation']
        lengths = list(
            len(value) for value in translation.values()
        )
        alpha_lengths = list(
            len_alpha(value) for value in translation.values()
        )
        return all(
            length_min < length < length_max and alpha_ratio_min < alpha_length / length
            for length, alpha_length in zip(lengths, alpha_lengths)
        ) and 1 / length_ratio_max < lengths[0] / lengths[1] < length_ratio_max
return cleanup_fn
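# Raised by one() when an iterable contains zero or more than one element.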
class NoResultFound(Exception):
pass
class MultipleResultsFound(Exception):
pass
def one(
iterable: Iterable[_T1]
) -> _T1:
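    """Return the single element of ``iterable``.

    Raises NoResultFound for an empty iterable and MultipleResultsFound when
    more than one element is present.

    >>> one(iter([42]))
    42
    """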
iterator = iter(iterable)
try:
value = next(iterator)
except StopIteration as e:
raise NoResultFound from e
try:
next(iterator)
except StopIteration:
pass
else:
raise MultipleResultsFound
return value
def match_one(
pattern: Union[str, re.Pattern], # pyright: ignore # Union[str, re.Pattern[str]],
string: str,
flags: int = 0
):
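    """Return the single match of ``pattern`` in ``string``.

    Unlike re.search, this raises NoResultFound when nothing matches and
    MultipleResultsFound when the pattern matches more than once.
    """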
return one(
iterable=re.finditer(
pattern=pattern, # pyright: ignore
string=string,
flags=flags
)
)
def parse_sgm(
filepaths: Paths,
files: Dict[Language, int],
encoding: str = 'utf-8'
):
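    """Yield Samples from a pair of line-aligned SGML files.

    ``files`` maps each language to its index in ``filepaths``. Lines without
    exactly one ``<seg>`` element yield an empty string, so the two files
    stay line-aligned.
    """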
assert len(filepaths) == 2
def read_lines_regex(
filepath: str,
pattern: re.Pattern # pyright: ignore # re.Pattern[str]
):
with open(
file=filepath,
encoding=encoding,
mode='r'
) as file:
for string in file:
try:
match = match_one(
pattern=pattern,
string=string
)
groups = match.groups(
default=''
)
yield groups[0]
                except (NoResultFound, MultipleResultsFound):
yield ''
regex = re.compile(
pattern=r'<seg id="\d+">(.*)</seg>'
)
for lines in zip(
read_lines_regex(
filepath=filepaths[0],
pattern=regex
),
read_lines_regex(
filepath=filepaths[1],
pattern=regex
)
):
translation: Translation
translation = dict(
(language, lines[index])
for language, index in files.items()
)
sample: Sample = dict()
sample['translation'] = translation
yield sample
def parse_tsv(
filepaths: Paths,
columns: Dict[Language, int],
encoding: str = 'utf-8'
):
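    """Yield Samples from a single tab-separated file.

    ``columns`` maps each language to its column index; lines with fewer
    than ``len(columns)`` fields are skipped.
    """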
assert len(filepaths) == 1
len_columns = len(columns)
with open(
file=filepaths[0],
encoding=encoding,
mode='r'
) as file:
for line in file:
            # strip the trailing newline so the final column stays clean
            parts = line.rstrip('\n').split('\t')
if len(parts) < len_columns:
continue
translation: Translation = dict()
for language, index in columns.items():
translation[language] = parts[index]
sample: Sample = dict()
sample['translation'] = translation
yield sample
def parse_tmx(
filepaths: Paths,
attributes: Dict[Language, str],
encoding: str = 'utf-8',
):
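    """Yield Samples from a single TMX file via incremental XML parsing.

    ``attributes`` maps each language to the attribute selector of its
    ``tuv`` element, e.g. 'xml:lang="de"'. Translation units without exactly
    one ``seg`` per language are skipped.
    """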
assert len(filepaths) == 1
element: ElementTree.Element
namespaces = {
'xml': 'http://www.w3.org/XML/1998/namespace'
}
with open(
file=filepaths[0],
encoding=encoding,
mode='r'
) as file:
for _, element in ElementTree.iterparse(file):
            if element.tag != 'tu':
continue
translation: Translation = dict()
for language, selector in attributes.items():
path = 'tuv[@' + selector + ']'
segs = element.findall(
path=path + '/seg', namespaces=namespaces
)
                if len(segs) != 1:
continue
translation[language] = segs[0].text or ''
element.clear()
            if len(translation) != 2:
continue
sample: Sample = dict()
sample['translation'] = translation
yield sample
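
# Minimal usage sketch (illustrative only; the thresholds below are made-up
# example values, not defaults of this project): compose normalize() and
# cleanup() on an inline sample.
if __name__ == '__main__':
    demo_sample: Sample = {
        'translation': {'de': ' Hallo Welt! ', 'en': ' Hello world! '}
    }
    normalize_fn = normalize(
        strip_whitespaces=True,
        clean_control_characters=True,
        enforce_unicode_form='NFC'
    )
    cleanup_fn = cleanup(
        length_min=3,
        length_max=512,
        length_ratio_max=2.0,
        alpha_ratio_min=0.5
    )
    demo_sample = normalize_fn(demo_sample)
    # prints the stripped sample and True (it passes every cleanup filter)
    print(demo_sample, cleanup_fn(demo_sample))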