Crystalcareai committed on
Commit
3148b5a
1 Parent(s): c4e57ac

Upload tokenization_quiet.py

Files changed (1)
  1. tokenization_quiet.py +487 -0
tokenization_quiet.py ADDED
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tokenization classes for Quiet."""
import os
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple

import numpy as np  # required by `batch_decode` below
import sentencepiece as spm
import torch  # required by `batch_decode` below

from transformers.convert_slow_tokenizer import import_protobuf
from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
from transformers.utils import logging


if TYPE_CHECKING:
    from transformers.tokenization_utils_base import TextInput

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}

SPIECE_UNDERLINE = "▁"

B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"

# fmt: off
DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your \
answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure\
 that your responses are socially unbiased and positive in nature.

If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \
correct. If you don't know the answer to a question, please don't share false information."""
# fmt: on
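
# Illustration (added for clarity, not part of the upstream file): with the
# markers above, `default_chat_template` below renders a first user turn that
# carries a system prompt roughly as:
#   <s>[INST] <<SYS>>\n{system}\n<</SYS>>\n\n{user} [/INST]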


class QuietTokenizer(PreTrainedTokenizer):
    """
    Construct a Quiet tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is
    no padding token in the original model.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.
        eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
            The end of sequence token.
        pad_token (`str` or `tokenizers.AddedToken`, *optional*):
            A special token used to make arrays of tokens the same size for batching purposes. Will then be ignored by
            attention mechanisms or loss computation.
        sp_model_kwargs (`Dict[str, Any]`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assumes that nbest_size is infinite and samples from all hypotheses (lattice)
                using the forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.

        add_bos_token (`bool`, *optional*, defaults to `True`):
            Whether or not to add a `bos_token` at the start of sequences.
        add_eos_token (`bool`, *optional*, defaults to `False`):
            Whether or not to add an `eos_token` at the end of sequences.
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
            Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
            extra spaces.
        use_default_system_prompt (`bool`, *optional*, defaults to `False`):
            Whether or not the default system prompt for Quiet should be used.
        spaces_between_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not to add spaces between special tokens.
        legacy (`bool`, *optional*):
            Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622
            and #25224, which include fixes to properly handle tokens that appear after special tokens. A simple
            example:

            - `legacy=True`:
            ```python
            >>> from transformers import T5Tokenizer

            >>> tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-base", legacy=True)
            >>> tokenizer.encode("Hello <extra_id_0>.")
            [8774, 32099, 3, 5, 1]
            ```
            - `legacy=False`:
            ```python
            >>> from transformers import T5Tokenizer

            >>> tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-base", legacy=False)
            >>> tokenizer.encode("Hello <extra_id_0>.")  # the extra space `[3]` is no longer here
            [8774, 32099, 5, 1]
            ```
            Check out the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
        add_prefix_space (`bool`, *optional*, defaults to `True`):
            Whether or not to add an initial space to the input. This allows treating the leading word just like any
            other word.

    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        add_bos_token=True,
        add_eos_token=False,
        clean_up_tokenization_spaces=False,
        use_default_system_prompt=False,
        spaces_between_special_tokens=False,
        legacy=None,
        add_prefix_space=True,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token

        if legacy is None:
            logger.warning_once(
                f"You are using the default legacy behaviour of the {self.__class__}. This is"
                " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you."
                " If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it"
                " means, and thoroughly read the reason why this was added as explained in"
                " https://github.com/huggingface/transformers/pull/24565"
            )
            legacy = True

        self.legacy = legacy
        self.vocab_file = vocab_file
        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token
        self.use_default_system_prompt = use_default_system_prompt
        self.sp_model = self.get_spm_processor(kwargs.pop("from_slow", False))
        self.add_prefix_space = add_prefix_space

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            add_bos_token=add_bos_token,
            add_eos_token=add_eos_token,
            sp_model_kwargs=self.sp_model_kwargs,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            use_default_system_prompt=use_default_system_prompt,
            spaces_between_special_tokens=spaces_between_special_tokens,
            legacy=legacy,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

    @property
    def unk_token_length(self):
        return len(self.sp_model.encode(str(self.unk_token)))

    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor
    def get_spm_processor(self, from_slow=False):
        tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        if self.legacy or from_slow:  # no dependency on protobuf
            tokenizer.Load(self.vocab_file)
            return tokenizer

        with open(self.vocab_file, "rb") as f:
            sp_model = f.read()
            model_pb2 = import_protobuf(f"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)")
            model = model_pb2.ModelProto.FromString(sp_model)
            normalizer_spec = model_pb2.NormalizerSpec()
            normalizer_spec.add_dummy_prefix = False
            model.normalizer_spec.MergeFrom(normalizer_spec)
            sp_model = model.SerializeToString()
            tokenizer.LoadFromSerializedProto(sp_model)
        return tokenizer
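
    # Note (added for clarity, not in the upstream file): with `legacy=False`
    # the serialized proto is patched so SentencePiece stops prepending its own
    # dummy "▁" prefix; `tokenize`/`_tokenize` below add and strip that prefix
    # manually instead.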

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
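
    # Pickling round-trip sketch (illustrative): the SentencePiece processor
    # itself is not picklable, so `__getstate__` stores the serialized proto
    # and `__setstate__` rebuilds the processor from it, e.g.
    #   import pickle
    #   tok2 = pickle.loads(pickle.dumps(tok))  # sp_model restored from proto bytes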

    @property
    def vocab_size(self):
        """Returns vocab size"""
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Returns vocab as a dict"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize
    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        """
        Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the
        first token is special.
        """
        if self.legacy or len(text) == 0:
            return super().tokenize(text, **kwargs)

        text = text.replace(SPIECE_UNDERLINE, " ")
        if self.add_prefix_space:
            text = SPIECE_UNDERLINE + text

        tokens = super().tokenize(text, **kwargs)

        if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:
            tokens = tokens[1:]
        return tokens
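
    # Behavior sketch (token strings are illustrative; they depend on the
    # vocabulary): with `legacy=False` and `add_prefix_space=True`,
    #   tok.tokenize("Hello world")
    # encodes "▁Hello world" so the leading word is treated like any other,
    # and a lone leading "▁" before a special token is dropped again.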

    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize
    def _tokenize(self, text, **kwargs):
        """
        Returns a tokenized string.

        We deactivated the `add_dummy_prefix` option, so the sentencepiece internals will always strip any
        SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
        `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
        `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`:
        `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
        """
        tokens = self.sp_model.encode(text, out_type=str)
        if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")):
            return tokens

        # 1. Encode string + prefix ex: "<unk> Hey"
        tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
        # 2. Remove self.unk_token from ['<','unk','>', '▁Hey']
        return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens
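
    # Worked example of the `unk_token` trick above (piece boundaries depend on
    # the vocabulary): `sp_model.encode("<unk>▁Hey")` might yield
    # ['<', 'unk', '>', '▁Hey']; slicing off the first `unk_token_length`
    # pieces leaves ['▁Hey'], keeping the leading prefix that a bare `encode`
    # would otherwise strip.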

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        # since we manually add the prefix space, we have to remove it when decoding
        if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space:
            tokens[0] = tokens[0][1:]

        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for i, token in enumerate(tokens):
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special and i != 0 and self.legacy:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                if prev_is_special and i == 1 and self.add_prefix_space and not token.startswith(SPIECE_UNDERLINE):
                    out_string += " "
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save the vocabulary and special tokens file to a directory.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = bos_token_id + token_ids_0 + eos_token_id

        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id

        return output
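
    # Example (assuming `add_bos_token=True`, `add_eos_token=False` and
    # `bos_token_id == 1`; the ids are illustrative):
    #   build_inputs_with_special_tokens([5, 6])       -> [1, 5, 6]
    #   build_inputs_with_special_tokens([5, 6], [7])  -> [1, 5, 6, 1, 7]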

    def batch_decode(self, token_ids, skip_special_tokens=False):
        """
        Batch version of `decode`.
        """
        # Accept torch tensors and numpy arrays as well as nested lists of ids.
        if isinstance(token_ids, (torch.Tensor, np.ndarray)):
            token_ids = token_ids.tolist()

        decoded_texts = []
        for ids in token_ids:
            decoded_texts.append(self.decode(ids, skip_special_tokens=skip_special_tokens))

        return decoded_texts
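
    # Usage sketch (illustrative; padding requires a pad token to be set):
    #   batch = tok(["hi there", "bye"], padding=True, return_tensors="pt")
    #   texts = tok.batch_decode(batch.input_ids, skip_special_tokens=True)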

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        bos_token_id = [1] if self.add_bos_token else []
        eos_token_id = [1] if self.add_eos_token else []

        if token_ids_1 is None:
            return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
        return (
            bos_token_id
            + ([0] * len(token_ids_0))
            + eos_token_id
            + bos_token_id
            + ([0] * len(token_ids_1))
            + eos_token_id
        )
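
    # Example (with `add_bos_token=True`, `add_eos_token=False`; ids illustrative):
    #   get_special_tokens_mask([5, 6])      -> [1, 0, 0]
    #   get_special_tokens_mask([5, 6], [7]) -> [1, 0, 0, 1, 0]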

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of ids.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)

        if token_ids_1 is not None:
            output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)

        return output
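
    # Example (with `add_bos_token=True`, `add_eos_token=False`; ids illustrative):
    #   create_token_type_ids_from_sequences([5, 6], [7, 8]) -> [0, 0, 0, 1, 1, 1]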

    @property
    def default_chat_template(self):
        """
        Quiet uses [INST] and [/INST] to indicate user messages, and <<SYS>> and <</SYS>> to indicate system messages.
        Assistant messages do not have special tokens, because Quiet chat models are generally trained with strict
        user/assistant/user/assistant message ordering, and so assistant messages can be identified from the ordering
        rather than needing special tokens. The system message is partly 'embedded' in the first user message, which
        results in an unusual token ordering when it is present. This template should definitely be changed if you wish
        to fine-tune a model with more flexible role ordering!

        The output should look something like:

        <bos>[INST] B_SYS SystemPrompt E_SYS Prompt [/INST] Answer <eos><bos>[INST] Prompt [/INST] Answer <eos>
        <bos>[INST] Prompt [/INST]

        The reference for this chat template is [this code
        snippet](https://github.com/facebookresearch/quiet/blob/556949fdfb72da27c2f4a40b7f0e4cf0b8153a28/quiet/generation.py#L320-L362)
        in the original repository.
        """
        logger.warning_once(
            "\nNo chat template is defined for this tokenizer - using the default template "
            f"for the {self.__class__.__name__} class. If the default is not appropriate for "
            "your model, please set `tokenizer.chat_template` to an appropriate template. "
            "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
        )
        template = (
            "{% if messages[0]['role'] == 'system' %}"
            "{% set loop_messages = messages[1:] %}"  # Extract system message if it's present
            "{% set system_message = messages[0]['content'] %}"
            "{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}"
            "{% set loop_messages = messages %}"  # Or use the default system message if the flag is set
            "{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}"
            "{% else %}"
            "{% set loop_messages = messages %}"
            "{% set system_message = false %}"
            "{% endif %}"
            "{% for message in loop_messages %}"  # Loop over all non-system messages
            "{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}"
            "{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}"
            "{% endif %}"
            "{% if loop.index0 == 0 and system_message != false %}"  # Embed system message in first message
            "{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}"
            "{% else %}"
            "{% set content = message['content'] %}"
            "{% endif %}"
            "{% if message['role'] == 'user' %}"  # After all of that, handle messages/roles in a fairly normal way
            "{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}"
            "{% elif message['role'] == 'system' %}"
            "{{ '<<SYS>>\\n' + content.strip() + '\\n<</SYS>>\\n\\n' }}"
            "{% elif message['role'] == 'assistant' %}"
            "{{ ' ' + content.strip() + ' ' + eos_token }}"
            "{% endif %}"
            "{% endfor %}"
        )
        template = template.replace("USE_DEFAULT_PROMPT", "true" if self.use_default_system_prompt else "false")
        default_message = DEFAULT_SYSTEM_PROMPT.replace("\n", "\\n").replace("'", "\\'")
        template = template.replace("DEFAULT_SYSTEM_MESSAGE", default_message)

        return template
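
    # End-to-end usage sketch (added for illustration; the vocab file path and
    # the rendered string are assumptions, not output from this repository):
    #   tok = QuietTokenizer("tokenizer.model", use_default_system_prompt=False)
    #   chat = [
    #       {"role": "user", "content": "Hi"},
    #       {"role": "assistant", "content": "Hello!"},
    #       {"role": "user", "content": "How are you?"},
    #   ]
    #   prompt = tok.apply_chat_template(chat, tokenize=False)
    #   # -> "<s>[INST] Hi [/INST] Hello! </s><s>[INST] How are you? [/INST]"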