# norwegian-xsum/translator.py
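"""Translate the text columns of a Hugging Face dataset with Facebook's NLLB models.

Written to produce the Norwegian Bokmål (nob_Latn) and Nynorsk (nno_Latn)
versions of XSum, but works for any dataset, columns, and NLLB language codes.
Each target language gets its own translated copy of the dataset, saved both
with `save_to_disk` and as gzip-compressed JSON-lines files per split.

Example invocation (dataset name and column names are illustrative):

    python translator.py EdinburghNLP/xsum document,summary --batch_size 24 -o ./

"""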
import argparse
import re
from functools import partial
from pathlib import Path
from typing import Optional, Union
import nltk
import torch
from datasets import load_dataset
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
# Make sure the "punkt" models used by nltk.sent_tokenize are available
nltk.download("punkt", quiet=True)

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def to_lang_code(texts, lang_code, model, tokenizer, max_words=500):
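    """Translate a string or a list of strings into `lang_code`.

    Texts longer than `max_words` (counted as whitespace gaps) are split into
    sentence chunks with NLTK, translated chunk by chunk, and merged back, so
    the number and order of outputs always matches the inputs.
    """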
is_string = isinstance(texts, str)
if is_string:
texts = [texts]
batch_size = len(texts)
to_translate = []
merges = []
    for text in texts:
        # Split the text into sentence chunks if it is too long to translate at once
        merges.append(0)
        if text.count(" ") > max_words:
            sentences = nltk.sent_tokenize(text, "norwegian")
            text_to_translate = ""
            for sentence in sentences:
                spaces = (text_to_translate + " " + sentence).count(" ")
                if spaces >= max_words and text_to_translate.strip():
                    # Flush the accumulated chunk and start a new one with this sentence
                    to_translate.append(text_to_translate.strip())
                    merges[-1] += 1
                    text_to_translate = sentence + " "
                else:
                    text_to_translate += sentence + " "
            # Flush whatever is left after the last sentence
            if text_to_translate.strip():
                to_translate.append(text_to_translate.strip())
                merges[-1] += 1
        else:
            to_translate.append(text)
translated_texts = []
    # Split into batches for translation
    to_translate_batches = [to_translate[i:i + batch_size] for i in range(0, len(to_translate), batch_size)]
    for to_translate_batch in to_translate_batches:
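        # Tokenize the batch and force the target language code as the first
        # generated token, which is how NLLB selects the output language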
inputs = tokenizer(to_translate_batch, return_tensors="pt", padding=True, truncation=True).to(DEVICE)
translated_tokens = model.generate(
**inputs,
forced_bos_token_id=tokenizer.lang_code_to_id[lang_code],
max_length=int(len(inputs.tokens()) * 1.25) # 25% more tokens for the translation just in case
)
translated_texts += tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
    # Merge chunk translations back into a single output per original text
    outputs = []
    cursor = 0
    for merge in merges:
        if merge:
            # This text was split into `merge` chunks: join their translations
            outputs.append(" ".join(translated_texts[cursor:cursor + merge]).strip())
            cursor += merge
        else:
            outputs.append(translated_texts[cursor].strip())
            cursor += 1
return outputs[0] if is_string else outputs
def main(
dataset_name: str,
dataset_columns: Union[list, tuple],
model_name: Optional[str]="facebook/nllb-200-3.3B", # "facebook/nllb-200-distilled-600M"
model_revision: Optional[str]=None,
dataset_splits: Union[list, tuple]=("test", "validation", "train"),
dataset_config: Optional[str]=None,
dataset_revision: Optional[str]=None,
source_lang: Optional[str]="eng_Latn",
target_langs: Optional[Union[list, tuple]]=("nob_Latn", "nno_Latn"),
batch_size: Optional[int]=24,
output_dir: Optional[Path]=Path("./"),
) -> None:
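    """Load the NLLB model and tokenizer, translate `dataset_columns` of
    `dataset_name` into every language in `target_langs`, and write the
    results under `output_dir`.
    """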
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_name, revision=model_revision, use_auth_token=True, torch_dtype=torch.float32
    )
model.to(DEVICE, torch.float32, True)
tokenizer = AutoTokenizer.from_pretrained(
model_name, revision=model_revision, use_auth_token=True, src_lang=source_lang,
)
ds = load_dataset(dataset_name, name=dataset_config, revision=dataset_revision)
dss = {}
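    # Translate each target language in turn; partial() fixes the language,
    # model and tokenizer so that ds.map only passes the batched column values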
for lang_code in target_langs:
translate = partial(to_lang_code, lang_code=lang_code, model=model, tokenizer=tokenizer)
dss[lang_code] = ds.map(
lambda batch: {col: translate(batch[col]) for col in dataset_columns},
batched=True,
batch_size=batch_size,
desc=f"Translating to {lang_code}",
)
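        # Save the translated dataset to disk and export every split as
        # gzip-compressed JSON lines (pandas infers gzip from the .gz extension)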
lang_code_short = re.split(r"[-_ /]", lang_code)[0]
dss[lang_code].save_to_disk(output_dir / lang_code_short, max_shard_size="1GB")
for split in dataset_splits:
json_filename = f"{lang_code_short}_{split}.json.tar.gz".lower()
dss[lang_code][split].to_pandas().to_json(
output_dir / lang_code_short / json_filename, orient='records', lines=True
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Translate datasets using Facebook's NLLB models")
parser.add_argument('dataset_name')
parser.add_argument('dataset_columns', help="Comma separated column names to translate")
parser.add_argument('--dataset_splits', default="test,validation,train", help="Comma separated splits to translate")
parser.add_argument('--dataset_config')
parser.add_argument('--dataset_revision')
parser.add_argument('--model_name', default="facebook/nllb-200-3.3B")
parser.add_argument('--model_revision')
parser.add_argument('--source_lang', default="eng_Latn")
parser.add_argument('--target_langs', default="nob_Latn,nno_Latn", help="Comma separated target languages to translate to")
parser.add_argument('--batch_size', '-bs', default=24, type=int, help='Number of inputs per batch for prediction')
parser.add_argument('--output_dir', '-o', default="./", type=str)
args = parser.parse_args()
main(
dataset_name=args.dataset_name,
dataset_columns=args.dataset_columns.split(","),
dataset_splits=args.dataset_splits.split(","),
dataset_config=args.dataset_config,
dataset_revision=args.dataset_revision,
model_name=args.model_name,
model_revision=args.model_revision,
source_lang=args.source_lang,
target_langs=args.target_langs.split(","),
batch_size=args.batch_size,
output_dir=Path(args.output_dir),
)