versae committed
Commit
c076d9b
1 Parent(s): 929c657

Bokmål version of xsum

nob/dataset_dict.json ADDED
@@ -0,0 +1 @@
+ {"splits": ["train", "validation", "test"]}
nob/nob_test.json.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e53b90d8579baebfb94b1cfea5695a75633d71c38bac43ad3d5c06586e2204e7
+ size 3363690
nob/nob_train.json.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c533b8c8596718c7f3954c0578562efa314e141b3880cb43145d6c7e3ef11053
+ size 60171284
nob/nob_validation.json.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51ecfd130e61fb1860448427b83a18a64ca6f779f1785517366faeb8d5d790de
+ size 3280742
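These .json.tar.gz files are written by the translation script below with pandas' to_json(orient="records", lines=True), so each one is expected to hold a compressed JSON-lines export of a single split rather than a real tar archive; which compression pandas actually applied depends on its version, so the sketch below assumes plain gzip:

import pandas as pd

# Read one split back as JSON lines; the compression is spelled out explicitly
# under the assumption that the ".tar.gz" suffix hides a plain gzip stream.
df = pd.read_json("nob/nob_test.json.tar.gz", orient="records", lines=True, compression="gzip")
print(df.columns.tolist())               # expected columns: document, summary, id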
nob/test/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b6b9eb616116b75a8c23eabde80b9ab21227c7f3f09738e9461548a13670c06
+ size 48076152
nob/test/dataset_info.json ADDED
@@ -0,0 +1,65 @@
+ {
+   "builder_name": "xsum",
+   "citation": "\n@article{Narayan2018DontGM,\n title={Don't Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization},\n author={Shashi Narayan and Shay B. Cohen and Mirella Lapata},\n journal={ArXiv},\n year={2018},\n volume={abs/1808.08745}\n}\n",
+   "config_name": "default",
+   "dataset_size": 532255381,
+   "description": "\nExtreme Summarization (XSum) Dataset.\n\nThere are three features:\n - document: Input news article.\n - summary: One sentence summary of the article.\n - id: BBC ID of the article.\n\n",
+   "download_checksums": {
+     "data/XSUM-EMNLP18-Summary-Data-Original.tar.gz": {
+       "num_bytes": 254582292,
+       "checksum": "10b48aa187fc9c904b30f76ca97e2da0de8d3a1238acc26acadef93e2001af90"
+     },
+     "https://raw.githubusercontent.com/EdinburghNLP/XSum/master/XSum-Dataset/XSum-TRAINING-DEV-TEST-SPLIT-90-5-5.json": {
+       "num_bytes": 2720574,
+       "checksum": "9c0c5d8f048a90bd68b19a34e4c30577ed270d3247b2119fa06a04ef46292068"
+     }
+   },
+   "download_size": 257302866,
+   "features": {
+     "document": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "summary": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "id": {
+       "dtype": "string",
+       "_type": "Value"
+     }
+   },
+   "homepage": "https://github.com/EdinburghNLP/XSum/tree/master/XSum-Dataset",
+   "license": "",
+   "size_in_bytes": 789558247,
+   "splits": {
+     "train": {
+       "name": "train",
+       "num_bytes": 479206363,
+       "num_examples": 204045,
+       "dataset_name": "xsum"
+     },
+     "validation": {
+       "name": "validation",
+       "num_bytes": 26292877,
+       "num_examples": 11332,
+       "dataset_name": "xsum"
+     },
+     "test": {
+       "name": "test",
+       "num_bytes": 26756141,
+       "num_examples": 11334,
+       "dataset_name": "xsum"
+     }
+   },
+   "supervised_keys": {
+     "input": "document",
+     "output": "summary"
+   },
+   "version": {
+     "version_str": "1.2.0",
+     "major": 1,
+     "minor": 2,
+     "patch": 0
+   }
+ }
nob/test/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "0cd0deb949ec246b",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": "test"
+ }
nob/train/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e799e9bdddcdf2997088f16298df4614f8adb6068f634d52b576b11a66d3e04
+ size 833308256
nob/train/dataset_info.json ADDED
@@ -0,0 +1,65 @@
+ {
+   "builder_name": "xsum",
+   "citation": "\n@article{Narayan2018DontGM,\n title={Don't Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization},\n author={Shashi Narayan and Shay B. Cohen and Mirella Lapata},\n journal={ArXiv},\n year={2018},\n volume={abs/1808.08745}\n}\n",
+   "config_name": "default",
+   "dataset_size": 532255381,
+   "description": "\nExtreme Summarization (XSum) Dataset.\n\nThere are three features:\n - document: Input news article.\n - summary: One sentence summary of the article.\n - id: BBC ID of the article.\n\n",
+   "download_checksums": {
+     "data/XSUM-EMNLP18-Summary-Data-Original.tar.gz": {
+       "num_bytes": 254582292,
+       "checksum": "10b48aa187fc9c904b30f76ca97e2da0de8d3a1238acc26acadef93e2001af90"
+     },
+     "https://raw.githubusercontent.com/EdinburghNLP/XSum/master/XSum-Dataset/XSum-TRAINING-DEV-TEST-SPLIT-90-5-5.json": {
+       "num_bytes": 2720574,
+       "checksum": "9c0c5d8f048a90bd68b19a34e4c30577ed270d3247b2119fa06a04ef46292068"
+     }
+   },
+   "download_size": 257302866,
+   "features": {
+     "document": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "summary": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "id": {
+       "dtype": "string",
+       "_type": "Value"
+     }
+   },
+   "homepage": "https://github.com/EdinburghNLP/XSum/tree/master/XSum-Dataset",
+   "license": "",
+   "size_in_bytes": 789558247,
+   "splits": {
+     "train": {
+       "name": "train",
+       "num_bytes": 479206363,
+       "num_examples": 204045,
+       "dataset_name": "xsum"
+     },
+     "validation": {
+       "name": "validation",
+       "num_bytes": 26292877,
+       "num_examples": 11332,
+       "dataset_name": "xsum"
+     },
+     "test": {
+       "name": "test",
+       "num_bytes": 26756141,
+       "num_examples": 11334,
+       "dataset_name": "xsum"
+     }
+   },
+   "supervised_keys": {
+     "input": "document",
+     "output": "summary"
+   },
+   "version": {
+     "version_str": "1.2.0",
+     "major": 1,
+     "minor": 2,
+     "patch": 0
+   }
+ }
nob/train/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "09e18f5a398d83ac",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": "train"
+ }
nob/validation/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0dccddfe9fc0209ae45e4ccf9279b0330b8a480dd5d1819a2ba8136e3695a320
+ size 44194752
nob/validation/dataset_info.json ADDED
@@ -0,0 +1,65 @@
+ {
+   "builder_name": "xsum",
+   "citation": "\n@article{Narayan2018DontGM,\n title={Don't Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization},\n author={Shashi Narayan and Shay B. Cohen and Mirella Lapata},\n journal={ArXiv},\n year={2018},\n volume={abs/1808.08745}\n}\n",
+   "config_name": "default",
+   "dataset_size": 532255381,
+   "description": "\nExtreme Summarization (XSum) Dataset.\n\nThere are three features:\n - document: Input news article.\n - summary: One sentence summary of the article.\n - id: BBC ID of the article.\n\n",
+   "download_checksums": {
+     "data/XSUM-EMNLP18-Summary-Data-Original.tar.gz": {
+       "num_bytes": 254582292,
+       "checksum": "10b48aa187fc9c904b30f76ca97e2da0de8d3a1238acc26acadef93e2001af90"
+     },
+     "https://raw.githubusercontent.com/EdinburghNLP/XSum/master/XSum-Dataset/XSum-TRAINING-DEV-TEST-SPLIT-90-5-5.json": {
+       "num_bytes": 2720574,
+       "checksum": "9c0c5d8f048a90bd68b19a34e4c30577ed270d3247b2119fa06a04ef46292068"
+     }
+   },
+   "download_size": 257302866,
+   "features": {
+     "document": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "summary": {
+       "dtype": "string",
+       "_type": "Value"
+     },
+     "id": {
+       "dtype": "string",
+       "_type": "Value"
+     }
+   },
+   "homepage": "https://github.com/EdinburghNLP/XSum/tree/master/XSum-Dataset",
+   "license": "",
+   "size_in_bytes": 789558247,
+   "splits": {
+     "train": {
+       "name": "train",
+       "num_bytes": 479206363,
+       "num_examples": 204045,
+       "dataset_name": "xsum"
+     },
+     "validation": {
+       "name": "validation",
+       "num_bytes": 26292877,
+       "num_examples": 11332,
+       "dataset_name": "xsum"
+     },
+     "test": {
+       "name": "test",
+       "num_bytes": 26756141,
+       "num_examples": 11334,
+       "dataset_name": "xsum"
+     }
+   },
+   "supervised_keys": {
+     "input": "document",
+     "output": "summary"
+   },
+   "version": {
+     "version_str": "1.2.0",
+     "major": 1,
+     "minor": 2,
+     "patch": 0
+   }
+ }
nob/validation/state.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "_data_files": [
+     {
+       "filename": "data-00000-of-00001.arrow"
+     }
+   ],
+   "_fingerprint": "3d58f12b55350560",
+   "_format_columns": null,
+   "_format_kwargs": {},
+   "_format_type": null,
+   "_output_all_columns": false,
+   "_split": "validation"
+ }
translator.py ADDED
@@ -0,0 +1,127 @@
+ import argparse
+ import re
+ from functools import partial
+ from pathlib import Path
+ from typing import Optional, Union
+
+ import nltk
+ import torch
+ from datasets import load_dataset
+ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+
+ DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ # The sentence splitter used below needs the punkt models
+ nltk.download("punkt", quiet=True)
+
+
+ def to_lang_code(texts, lang_code, model, tokenizer, max_words=500):
+     """Translate a string or a batch of strings into the given NLLB language code."""
+     is_string = isinstance(texts, str)
+     if is_string:
+         texts = [texts]
+     batch_size = len(texts)
+     to_translate = []
+     merges = []  # number of chunks each input text was split into (0 = not split)
+     for text in texts:
+         merges.append(0)
+         if text.count(" ") > max_words:
+             # Split overly long texts into sentence chunks of at most ~max_words words
+             sentences = nltk.sent_tokenize(text, "norwegian")
+             text_to_translate = ""
+             for sentence in sentences:
+                 spaces = (text_to_translate + " " + sentence).count(" ")
+                 if spaces >= max_words and text_to_translate:
+                     # Flush the current chunk and start a new one with this sentence
+                     to_translate.append(text_to_translate.strip())
+                     merges[-1] += 1
+                     text_to_translate = sentence + " "
+                 else:
+                     text_to_translate += sentence + " "
+             if text_to_translate.strip():
+                 # Flush the last, partially filled chunk
+                 to_translate.append(text_to_translate.strip())
+                 merges[-1] += 1
+         else:
+             to_translate.append(text)
+     # Translate in batches
+     translated_texts = []
+     to_translate_batches = [to_translate[i:i + batch_size] for i in range(0, len(to_translate), batch_size)]
+     for to_translate_batch in to_translate_batches:
+         inputs = tokenizer(to_translate_batch, return_tensors="pt", padding=True, truncation=True).to(DEVICE)
+         translated_tokens = model.generate(
+             **inputs,
+             forced_bos_token_id=tokenizer.lang_code_to_id[lang_code],
+             # Allow ~25% more tokens than the (padded) input length, just in case
+             max_length=int(inputs["input_ids"].shape[-1] * 1.25),
+         )
+         translated_texts += tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
+     # Merge chunk translations back into one output per input text
+     outputs = []
+     cursor = 0
+     for merge in merges:
+         if merge:
+             outputs.append(" ".join(translated_texts[cursor:cursor + merge]).strip())
+             cursor += merge
+         else:
+             outputs.append(translated_texts[cursor].strip())
+             cursor += 1
+     return outputs[0] if is_string else outputs
+
+
+ def main(
+     dataset_name: str,
+     dataset_columns: Union[list, tuple],
+     model_name: Optional[str] = "facebook/nllb-200-3.3B",  # or "facebook/nllb-200-distilled-600M"
+     model_revision: Optional[str] = None,
+     dataset_splits: Union[list, tuple] = ("test", "validation", "train"),
+     dataset_config: Optional[str] = None,
+     dataset_revision: Optional[str] = None,
+     source_lang: Optional[str] = "eng_Latn",
+     target_langs: Optional[Union[list, tuple]] = ("nob_Latn", "nno_Latn"),
+     batch_size: Optional[int] = 24,
+     output_dir: Optional[Path] = Path("./"),
+ ) -> None:
+     model = AutoModelForSeq2SeqLM.from_pretrained(
+         model_name, revision=model_revision, use_auth_token=True, torch_dtype=torch.float32
+     )
+     model.to(DEVICE)
+     tokenizer = AutoTokenizer.from_pretrained(
+         model_name, revision=model_revision, use_auth_token=True, src_lang=source_lang,
+     )
+
+     ds = load_dataset(dataset_name, name=dataset_config, revision=dataset_revision)
+     dss = {}
+     for lang_code in target_langs:
+         translate = partial(to_lang_code, lang_code=lang_code, model=model, tokenizer=tokenizer)
+         dss[lang_code] = ds.map(
+             lambda batch: {col: translate(batch[col]) for col in dataset_columns},
+             batched=True,
+             batch_size=batch_size,
+             desc=f"Translating to {lang_code}",
+         )
+         lang_code_short = re.split(r"[-_ /]", lang_code)[0]
+         dss[lang_code].save_to_disk(output_dir / lang_code_short, max_shard_size="1GB")
+         for split in dataset_splits:
+             json_filename = f"{lang_code_short}_{split}.json.tar.gz".lower()
+             dss[lang_code][split].to_pandas().to_json(
+                 output_dir / lang_code_short / json_filename, orient="records", lines=True
+             )
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(description="Translate datasets using Facebook's NLLB models")
+     parser.add_argument("dataset_name")
+     parser.add_argument("dataset_columns", help="Comma separated column names to translate")
+     parser.add_argument("--dataset_splits", default="test,validation,train", help="Comma separated splits to translate")
+     parser.add_argument("--dataset_config")
+     parser.add_argument("--dataset_revision")
+     parser.add_argument("--model_name", default="facebook/nllb-200-3.3B")
+     parser.add_argument("--model_revision")
+     parser.add_argument("--source_lang", default="eng_Latn")
+     parser.add_argument("--target_langs", default="nob_Latn,nno_Latn", help="Comma separated target languages to translate to")
+     parser.add_argument("--batch_size", "-bs", default=24, type=int, help="Number of inputs per batch for prediction")
+     parser.add_argument("--output_dir", "-o", default="./", type=str)
+     args = parser.parse_args()
+     main(
+         dataset_name=args.dataset_name,
+         dataset_columns=args.dataset_columns.split(","),
+         dataset_splits=args.dataset_splits.split(","),
+         dataset_config=args.dataset_config,
+         dataset_revision=args.dataset_revision,
+         model_name=args.model_name,
+         model_revision=args.model_revision,
+         source_lang=args.source_lang,
+         target_langs=args.target_langs.split(","),
+         batch_size=args.batch_size,
+         output_dir=Path(args.output_dir),
+     )
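The nob/ folder above is the kind of output this script produces when pointed at XSum. A hypothetical invocation, mirroring the argparse interface (the dataset id "xsum" and the column names are assumptions based on the dataset_info.json metadata above):

from pathlib import Path
from translator import main

# Equivalent to something like:
#   python translator.py xsum document,summary --target_langs nob_Latn --output_dir ./
main(
    dataset_name="xsum",                      # assumption: the English XSum dataset on the Hub
    dataset_columns=["document", "summary"],  # columns to translate, per the features above
    target_langs=["nob_Latn"],                # only the Bokmål output is added in this commit
    output_dir=Path("./"),
)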