teragron committed
Commit 15bfe66
1 Parent(s): 9e452b1

Create tokenizer.py

Files changed (1)
  1. tokenizer.py +164 -0
tokenizer.py ADDED
@@ -0,0 +1,164 @@
+import argparse
+import glob
+import json
+import os
+import random
+from typing import List
+from concurrent.futures import ProcessPoolExecutor
+from functools import partial
+
+import numpy as np
+import requests
+import sentencepiece as spm
+import torch
+import torch.distributed as dist
+from tqdm import tqdm
+
+from tokenizer import Tokenizer
+
+DATA_CACHE_DIR = "data"
+
+
+
+def process_shard(args, vocab_size):
+    shard_id, shard = args
+    tokenizer_model = get_tokenizer_model_path(vocab_size)
+    enc = Tokenizer(tokenizer_model)
+
+    try:
+        print(f"Processing shard {shard_id} - {shard}")
+
+        with open(shard, "r") as f:
+            data = json.load(f)
+
+        all_tokens = []
+        for example in tqdm(data, position=shard_id):
+            text = example["story"]
+            text = text.strip()  # get rid of leading/trailing whitespace
+            tokens = enc.encode(text, bos=True, eos=False)  # encode the text, use BOS
+            all_tokens.extend(tokens)
+
+        # convert to uint16 nparray
+        all_tokens = np.array(all_tokens, dtype=np.uint16)
+
+        if vocab_size == 0:
+            # if we're using Llama 2, just save the tokenized file in the same dir
+            tokenized_filename = shard.replace(".json", ".bin")
+        else:
+            # save .bin files into a new tok{N} directory
+            bin_dir = os.path.join(DATA_CACHE_DIR, f"tok{vocab_size}")
+            shard_basename = os.path.basename(shard)
+            bin_basename = shard_basename.replace(".json", ".bin")
+            tokenized_filename = os.path.join(bin_dir, bin_basename)
+
+        # write the bytes
+        with open(tokenized_filename, "wb") as f:
+            f.write(all_tokens.tobytes())
+
+        # calculate the average sequence length (they are separated by BOS=1)
+        avg_seq_len = all_tokens.size / ((all_tokens == 1).sum())
+        print(f"Saved {tokenized_filename}, average seqlen: {avg_seq_len:.2f}")
+
+    except Exception as e:
+        print(f"Error processing shard {shard_id}: {str(e)}")
+
+def pretokenize(vocab_size):
+    # iterate the shards and tokenize all of them one by one
+    data_dir = os.path.join(DATA_CACHE_DIR, "TinyStories_all_data")
+    shard_filenames = sorted(glob.glob(os.path.join(data_dir, "*.json")))
+
+    if vocab_size > 0:
+        # .bin files will be saved into tok{N} directory, create it once here
+        bin_dir = os.path.join(DATA_CACHE_DIR, f"tok{vocab_size}")
+        os.makedirs(bin_dir, exist_ok=True)
+
+    # process all the shards in a process pool
+    fun = partial(process_shard, vocab_size=vocab_size)
+    with ProcessPoolExecutor() as executor:
+        executor.map(fun, enumerate(shard_filenames))
+
+    print("Done.")
+
+# Call pretokenize with your desired vocab_size
+
+
+
+class PretokDataset(torch.utils.data.IterableDataset):
+    """Loads pretokenized examples from disk and yields them as PyTorch tensors."""
+
+    def __init__(self, split, max_seq_len, vocab_size, vocab_source):
+        super().__init__()
+        self.split = split
+        self.max_seq_len = max_seq_len
+        self.vocab_size = vocab_size
+        self.vocab_source = vocab_source
+
+    def __iter__(self):
+        # get worker info within a DataLoader
+        worker_info = torch.utils.data.get_worker_info()
+        worker_id = worker_info.id if worker_info else 0
+        # get DDP rank info
+        rank = dist.get_rank() if dist.is_initialized() else 0
+        # combine the worker_id and worker_rank to create a unique seed for rng
+        seed = 42 + worker_id + 1337 * rank
+        rng = random.Random(seed)
+        print(f"Created a PretokDataset with rng seed {seed}")
+        if self.vocab_source == "llama2":
+            # the .bin files are right along the .json files
+            bin_dir = os.path.join(DATA_CACHE_DIR, "TinyStories_all_data")
+            shard_filenames = sorted(glob.glob(os.path.join(bin_dir, "*.bin")))
+        elif self.vocab_source == "custom":
+            # the .bin files are in tok{N} directory
+            bin_dir = os.path.join(DATA_CACHE_DIR, f"tok{self.vocab_size}")
+            shard_filenames = sorted(glob.glob(os.path.join(bin_dir, "*.bin")))
+        # train/test split. let's use only shard 0 for test split, rest train
+        shard_filenames = shard_filenames[1:] if self.split == "train" else shard_filenames[:1]
+        assert len(shard_filenames) > 0, f"No bin files found in {bin_dir}"
+        while True:
+            rng.shuffle(shard_filenames)
+            for shard in shard_filenames:
+                # open the dataset for reading but keep it on disk with memmap
+                m = np.memmap(shard, dtype=np.uint16, mode="r")
+                num_batches = len(m) // self.max_seq_len
+                num_batches -= 1  # drop the last partial batch
+                assert num_batches > 0, "this shard is way too small? investigate."
+                ixs = list(range(num_batches))
+                rng.shuffle(ixs)
+                for ix in ixs:
+                    start = ix * self.max_seq_len
+                    end = start + self.max_seq_len + 1
+                    # calling .astype will copy the data into a new numpy array, now in RAM
+                    chunk = torch.from_numpy((m[start:end]).astype(np.int64))
+                    x = chunk[:-1]
+                    y = chunk[1:]
+                    yield x, y
+
+# -----------------------------------------------------------------------------
+# public interface functions
+
+def get_tokenizer_model_path(vocab_size):
+    """
+    Returns path to the sentencepiece tokenizer model for a given vocab size
+    vocab_size = 0 designates the default Llama 2 tokenizer, in that case
+    None is returned.
+    """
+    if vocab_size == 0:
+        return None
+    else:
+        return os.path.join(DATA_CACHE_DIR, f"tok{vocab_size}.model")
+
+class Task:
+
+    @staticmethod
+    def iter_batches(batch_size, device, num_workers=0, **dataset_kwargs):
+        ds = PretokDataset(**dataset_kwargs)
+        dl = torch.utils.data.DataLoader(
+            ds, batch_size=batch_size, pin_memory=True, num_workers=num_workers
+        )
+        for x, y in dl:
+            x = x.to(device, non_blocking=True)
+            y = y.to(device, non_blocking=True)
+            yield x, y
+
+if __name__ == '__main__':
+    pretokenize(vocab_size=0)
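
For reference, a minimal usage sketch of the two entry points in this file, not part of the commit: it assumes the TinyStories JSON shards already exist under data/TinyStories_all_data and that the Tokenizer class imported above is available; the batch_size, max_seq_len, and device values are illustrative assumptions, not values from the commit.

# Usage sketch: pretokenize with the default Llama 2 vocabulary (vocab_size=0),
# then stream (x, y) batches for training.
pretokenize(vocab_size=0)  # writes .bin files next to the .json shards

batch_iter = Task.iter_batches(
    batch_size=32,       # illustrative
    device="cpu",        # or "cuda" if available
    num_workers=0,
    split="train",
    max_seq_len=256,     # illustrative
    vocab_size=0,
    vocab_source="llama2",
)
x, y = next(batch_iter)  # int64 tensors, each of shape (32, 256)

Note that iter_batches loops over the shards forever (the dataset's while True), so training code consumes it for a fixed number of steps rather than for a fixed number of epochs.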
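
The on-disk format the loader expects is simply a flat stream of uint16 token ids, with stories delimited by the BOS token (id 1 for Llama 2, per the comment in process_shard). A quick sanity check of one shard, mirroring the avg_seq_len computation above; the shard path is a placeholder:

import numpy as np

# Inspect one pretokenized shard (path is a placeholder).
m = np.memmap("data/TinyStories_all_data/data00.bin", dtype=np.uint16, mode="r")
num_stories = int((m == 1).sum())  # BOS=1 marks the start of each story
print(f"{len(m)} tokens, {num_stories} stories, "
      f"avg seqlen {len(m) / num_stories:.2f}")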