TheBloke committed on
Commit
b643476
1 Parent(s): 36e6fa5

Initial GPTQ model commit

Browse files
Files changed (1) hide show
  1. tokenization_baichuan.py +258 -0
tokenization_baichuan.py ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright (c) 2023, Baichuan Intelligent Technology. All rights reserved.

import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm
from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
from transformers.utils import logging


logger = logging.get_logger(__name__)

# File name of the SentencePiece model expected next to the tokenizer config.
VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}

# No hub-hosted vocabulary URLs are registered; files are resolved from the
# local checkpoint directory.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {},
    "tokenizer_file": {},
}
# No per-checkpoint maximum input sizes are declared.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
23
class BaichuanTokenizer(PreTrainedTokenizer):
    """
    Construct a Baichuan tokenizer. Based on byte-level Byte-Pair-Encoding
    backed by a SentencePiece model.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file (a SentencePiece ``tokenizer.model``).
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            Token used for out-of-vocabulary pieces.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            Beginning-of-sequence token.
        eos_token (`str`, *optional*, defaults to `"</s>"`):
            End-of-sequence token.
        pad_token (`str`, *optional*):
            Padding token; `None` means no padding token is defined.
        sp_model_kwargs (`Dict[str, Any]`, *optional*):
            Extra keyword arguments forwarded to
            `sentencepiece.SentencePieceProcessor`.
        add_bos_token (`bool`, *optional*, defaults to `True`):
            Whether to prepend the BOS token when building model inputs.
        add_eos_token (`bool`, *optional*, defaults to `False`):
            Whether to append the EOS token when building model inputs.
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
            Whether to clean up artifact spaces when decoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        add_bos_token=True,
        add_eos_token=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        bos_token = (
            AddedToken(bos_token, lstrip=False, rstrip=False)
            if isinstance(bos_token, str)
            else bos_token
        )
        eos_token = (
            AddedToken(eos_token, lstrip=False, rstrip=False)
            if isinstance(eos_token, str)
            else eos_token
        )
        unk_token = (
            AddedToken(unk_token, lstrip=False, rstrip=False)
            if isinstance(unk_token, str)
            else unk_token
        )
        pad_token = (
            AddedToken(pad_token, lstrip=False, rstrip=False)
            if isinstance(pad_token, str)
            else pad_token
        )
        # Load the SentencePiece model *before* calling the parent constructor:
        # `PreTrainedTokenizer.__init__` in transformers >= 4.34 calls
        # `get_vocab()` (via added-token registration), which dereferences
        # `self.sp_model`. Initializing it afterwards raises
        # `AttributeError: 'BaichuanTokenizer' object has no attribute 'sp_model'`.
        self.vocab_file = vocab_file
        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            add_bos_token=add_bos_token,
            add_eos_token=add_eos_token,
            sp_model_kwargs=self.sp_model_kwargs,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload
        # from `self.vocab_file` on unpickling.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    def vocab_size(self):
        """Returns vocab size"""
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Returns vocab as a dict"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        """Returns a tokenized string."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for i, token in enumerate(tokens):
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # Insert a separating space between a normal piece and a
                # following special token (except at the very start).
                if not prev_is_special and i != 0:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def save_vocabulary(
        self, save_directory, filename_prefix: Optional[str] = None
    ) -> Tuple[str]:
        """
        Save the vocabulary and special tokens file to a directory.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.
            filename_prefix (`str`, *optional*):
                Optional prefix prepended to the saved file name.

        Returns:
            `Tuple(str)`: Paths to the files saved, or `None` if
            `save_directory` is not a directory (an error is logged).
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "")
            + VOCAB_FILES_NAMES["vocab_file"],
        )

        # Copy the original model file when it exists on disk and is not
        # already the destination; otherwise re-serialize the loaded model.
        if os.path.abspath(self.vocab_file) != os.path.abspath(
            out_vocab_file
        ) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add BOS/EOS (per `add_bos_token`/`add_eos_token`) around each sequence."""
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = bos_token_id + token_ids_0 + eos_token_id

        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id

        return output

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0,
                token_ids_1=token_ids_1,
                already_has_special_tokens=True,
            )

        bos_token_id = [1] if self.add_bos_token else []
        eos_token_id = [1] if self.add_eos_token else []

        if token_ids_1 is None:
            return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
        return (
            bos_token_id
            + ([0] * len(token_ids_0))
            + eos_token_id
            + bos_token_id
            + ([0] * len(token_ids_1))
            + eos_token_id
        )

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        if token_ids_1 is None, only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of ids.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)

        if token_ids_1 is not None:
            output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)

        return output