jeiku committed on
Commit f825d6f
1 Parent(s): e2a3a4d

Delete modeling_stablelm_epoch.py

Files changed (1)
  1. modeling_stablelm_epoch.py +0 -918
modeling_stablelm_epoch.py DELETED
@@ -1,918 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 Stability AI, EleutherAI, and The HuggingFace Inc. team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- #
16
- # This code is based off the following work:
17
- # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py
18
- # https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py
19
- """ PyTorch StableLM Epoch model. """
20
- from typing import Optional, Tuple, Union
21
- import math
22
- import warnings
23
-
24
- import torch
25
- import torch.nn.functional as F
26
- import torch.utils.checkpoint
27
- from torch import nn
28
- from torch.nn import CrossEntropyLoss
29
-
30
- import transformers
31
-
32
- from transformers.cache_utils import Cache
33
- from transformers.modeling_outputs import (
34
- BaseModelOutputWithPast,
35
- CausalLMOutputWithPast,
36
- )
37
- from transformers.modeling_utils import PreTrainedModel
38
- from transformers.utils import logging, is_flash_attn_greater_or_equal_2_10
39
-
40
- from .configuration_stablelm_epoch import StableLMEpochConfig
41
-
42
- try:
43
- from flash_attn import flash_attn_func, flash_attn_varlen_func
44
- from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input
45
- except ImportError:
46
- flash_attn_func, flash_attn_varlen_func = None, None
47
- index_first_axis, pad_input, unpad_input = None, None, None
48
-
49
-
50
- logger = logging.get_logger(__name__)
51
-
52
-
53
- # Copied from transformers.models.llama.modeling_llama._get_unpad_data
54
- def _get_unpad_data(attention_mask):
55
- seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
56
- indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
57
- max_seqlen_in_batch = seqlens_in_batch.max().item()
58
- cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
59
- return (
60
- indices,
61
- cu_seqlens,
62
- max_seqlen_in_batch,
63
- )
64
-
65
-
66
- # Copied from transformers.models.bart.modeling_bart._make_causal_mask
67
- def _make_causal_mask(
68
- input_ids_shape: torch.Size,
69
- dtype: torch.dtype,
70
- device: torch.device,
71
- past_key_values_length: int = 0,
72
- ):
73
- """Make causal mask used for bi-directional self-attention."""
74
- batch_size, tgt_len = input_ids_shape
75
- mask = torch.full((tgt_len, tgt_len), torch.finfo(torch.float16).min, device=device)
76
- mask_cond = torch.arange(mask.size(-1), device=device)
77
- mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
78
- mask = mask.to(dtype)
79
- if past_key_values_length > 0:
80
- mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
81
- return mask[None, None, :, :].expand(batch_size, 1, tgt_len, tgt_len + past_key_values_length)
82
-
83
-
84
- # Copied from transformers.models.bart.modeling_bart._expand_mask
85
- def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
86
- """Expands attention_mask from `[batch_size, seq_len]` to `[batch_size, 1, tgt_seq_len, src_seq_len]`."""
87
- batch_size, src_len = mask.size()
88
- tgt_len = tgt_len if tgt_len is not None else src_len
89
-
90
- expanded_mask = mask[:, None, None, :].expand(batch_size, 1, tgt_len, src_len).to(dtype)
91
- inverted_mask = 1.0 - expanded_mask
92
-
93
- return inverted_mask.masked_fill(
94
- inverted_mask.to(torch.bool), torch.finfo(dtype).min
95
- )
96
-
97
-
98
- class RotaryEmbedding(nn.Module):
99
- def __init__(
100
- self,
101
- dim: int,
102
- max_position_embeddings: int,
103
- base: int = 10_000,
104
- device: Optional[torch.device] = None,
105
- ):
106
- super().__init__()
107
-
108
- self.dim = dim
109
- self.max_position_embeddings = max_position_embeddings
110
- self.base = base
111
- inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, device=device, dtype=torch.float32) / self.dim))
112
- self.register_buffer("inv_freq", inv_freq, persistent=False)
113
-
114
- # Build here to make `torch.jit.trace` work.
115
- self._set_cos_sin_cache(
116
- seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype(),
117
- )
118
-
119
- def _set_cos_sin_cache(self, seq_len: int, device: torch.device, dtype: torch.dtype):
120
- self.max_seq_len_cached = seq_len
121
- t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.float32)
122
-
123
- # Don't do einsum, it converts fp32 to fp16 under AMP
124
- # freqs = torch.einsum("i,j->ij", t, self.inv_freq)
125
- freqs = torch.outer(t, self.inv_freq)
126
- # Different from paper, but it uses a different permutation in order to obtain the same calculation
127
- emb = torch.cat((freqs, freqs), dim=-1)
128
- self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
129
- self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
130
-
131
- def forward(self, x: torch.Tensor, seq_len: Optional[int] = None):
132
- # x: [batch_size, num_heads, seq_len, head_size]
133
- if seq_len > self.max_seq_len_cached:
134
- self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=torch.get_default_dtype())
135
- return (
136
- self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
137
- self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
138
- )
139
-
140
-
141
- def rotate_half(x: torch.Tensor):
142
- """Rotates half the hidden dims of the input."""
143
- x1, x2 = torch.chunk(x, 2, dim=-1)
144
- return torch.cat((-x2, x1), dim=-1)
145
-
146
-
147
- def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
148
- # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
149
- cos = cos.squeeze(1).squeeze(0) # [seq_len, dim]
150
- sin = sin.squeeze(1).squeeze(0) # [seq_len, dim]
151
- cos = cos[position_ids].unsqueeze(1) # [batch_size, 1, seq_len, dim]
152
- sin = sin[position_ids].unsqueeze(1) # [batch_size, 1, seq_len, dim]
153
- q_embed = (q * cos) + (rotate_half(q) * sin)
154
- k_embed = (k * cos) + (rotate_half(k) * sin)
155
- return q_embed, k_embed
156
-
157
-
158
- class MLP(nn.Module):
159
- def __init__(self, config: StableLMEpochConfig):
160
- super().__init__()
161
- self.config = config
162
- self.hidden_size = config.hidden_size
163
- self.intermediate_size = config.intermediate_size
164
- self.gate_proj = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
165
- self.up_proj = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
166
- self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
167
- self.act_fn = nn.SiLU()
168
-
169
- def forward(self, x: torch.Tensor) -> torch.Tensor:
170
- return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
171
-
172
-
173
- def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
174
- """
175
- This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
176
- num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
177
- """
178
- batch, num_key_value_heads, slen, head_dim = hidden_states.shape
179
- if n_rep == 1:
180
- return hidden_states
181
- hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
182
- return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
183
-
184
-
185
- class Attention(nn.Module):
186
- def __init__(self, config: StableLMEpochConfig):
187
- super().__init__()
188
- self.config = config
189
- self.hidden_size = config.hidden_size
190
- self.num_heads = config.num_attention_heads
191
- self.head_dim = self.hidden_size // self.num_heads
192
- self.num_key_value_heads = config.num_key_value_heads
193
- self.num_key_value_groups = self.num_heads // self.num_key_value_heads
194
- self.max_position_embeddings = config.max_position_embeddings
195
- self.is_causal = True
196
-
197
- if (self.head_dim * self.num_heads) != self.hidden_size:
198
- raise ValueError(
199
- f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
200
- f" and `num_heads`: {self.num_heads})."
201
- )
202
- self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
203
- self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
204
- self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
205
- self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
206
-
207
- self._init_rope()
208
-
209
- def _init_rope(self):
210
- self.rotary_ndims = int(self.head_dim * self.config.rope_pct)
211
- self.rotary_emb = RotaryEmbedding(
212
- self.rotary_ndims,
213
- max_position_embeddings=self.config.max_position_embeddings,
214
- base=self.config.rope_theta,
215
- )
216
-
217
- def forward(
218
- self,
219
- hidden_states: torch.FloatTensor,
220
- attention_mask: torch.FloatTensor,
221
- position_ids: torch.LongTensor,
222
- past_key_value: Optional[Tuple[torch.Tensor]] = None,
223
- output_attentions: Optional[bool] = False,
224
- use_cache: Optional[bool] = False,
225
- ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
226
- bsz, q_len, _ = hidden_states.size()
227
-
228
- query_states = self.q_proj(hidden_states)
229
- key_states = self.k_proj(hidden_states)
230
- value_states = self.v_proj(hidden_states)
231
-
232
- query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
233
- key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
234
- value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
235
-
236
- query_rot = query_states[..., : self.rotary_ndims]
237
- query_pass = query_states[..., self.rotary_ndims :]
238
- key_rot = key_states[..., : self.rotary_ndims]
239
- key_pass = key_states[..., self.rotary_ndims :]
240
-
241
- kv_seq_len = key_states.shape[-2]
242
- if past_key_value is not None:
243
- kv_seq_len += past_key_value[0].shape[-2]
244
- cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
245
- query_states, key_states = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
246
-
247
- # [batch_size, num_heads, seq_len, head_dim]
248
- query_states = torch.cat((query_states, query_pass), dim=-1)
249
- key_states = torch.cat((key_states, key_pass), dim=-1)
250
-
251
- if past_key_value is not None:
252
- # Reuse k, v, self_attention
253
- key_states = torch.cat((past_key_value[0], key_states), dim=2)
254
- value_states = torch.cat((past_key_value[1], value_states), dim=2)
255
-
256
- past_key_value = (key_states, value_states) if use_cache else None
257
-
258
- # Repeat k/v heads if n_kv_heads < n_heads
259
- key_states = repeat_kv(key_states, self.num_key_value_groups)
260
- value_states = repeat_kv(value_states, self.num_key_value_groups)
261
-
262
- attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
263
-
264
- if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
265
- raise ValueError(
266
- f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
267
- f" {attn_weights.size()}"
268
- )
269
-
270
- if attention_mask is not None:
271
- if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
272
- raise ValueError(
273
- f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
274
- )
275
- attn_weights = attn_weights + attention_mask
276
-
277
- # Upcast attention to fp32
278
- attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
279
- attn_output = torch.matmul(attn_weights, value_states)
280
-
281
- if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
282
- raise ValueError(
283
- f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
284
- f" {attn_output.size()}"
285
- )
286
-
287
- # Merge heads
288
- attn_output = attn_output.transpose(1, 2).contiguous()
289
- attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
290
-
291
- # Final linear projection
292
- attn_output = self.o_proj(attn_output)
293
-
294
- if not output_attentions:
295
- attn_weights = None
296
-
297
- return attn_output, attn_weights, past_key_value
298
-
299
-
300
- class FlashAttention2(Attention):
301
- """
302
- Reference: https://github.com/huggingface/transformers/blob/5d36025ca13d05151b7a0c761e90d429c4644a30/src/transformers/models/llama/modeling_llama.py#L456
303
- """
304
-
305
- def __init__(self, *args, **kwargs):
306
- super().__init__(*args, **kwargs)
307
-
308
- # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
309
- # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which became the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
310
- # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
311
- self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
312
-
313
- def forward(
314
- self,
315
- hidden_states: torch.Tensor,
316
- attention_mask: Optional[torch.LongTensor] = None,
317
- position_ids: Optional[torch.LongTensor] = None,
318
- past_key_value: Optional[Cache] = None,
319
- output_attentions: bool = False,
320
- use_cache: bool = False,
321
- **kwargs,
322
- ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
323
- # FlashAttention2 attention does not support output_attentions
324
- if "padding_mask" in kwargs:
325
- warnings.warn(
326
- "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
327
- )
328
-
329
- # overwrite attention_mask with padding_mask
330
- attention_mask = kwargs.pop("padding_mask")
331
-
332
- output_attentions = False
333
-
334
- bsz, q_len, _ = hidden_states.size()
335
-
336
- query_states = self.q_proj(hidden_states)
337
- key_states = self.k_proj(hidden_states)
338
- value_states = self.v_proj(hidden_states)
339
-
340
- # Flash attention requires the input to have the shape
341
- # batch_size x seq_length x head_dim x hidden_dim
342
- # therefore we just need to keep the original shape
343
- query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
344
- key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
345
- value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
346
-
347
- query_rot = query_states[..., : self.rotary_ndims]
348
- query_pass = query_states[..., self.rotary_ndims :]
349
- key_rot = key_states[..., : self.rotary_ndims]
350
- key_pass = key_states[..., self.rotary_ndims :]
351
-
352
- kv_seq_len = key_states.shape[-2]
353
- if past_key_value is not None:
354
- kv_seq_len += past_key_value[0].shape[-2]
355
- cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
356
- query_states, key_states = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
357
-
358
- # [batch_size, num_heads, seq_len, head_dim]
359
- query_states = torch.cat((query_states, query_pass), dim=-1)
360
- key_states = torch.cat((key_states, key_pass), dim=-1)
361
-
362
- if past_key_value is not None:
363
- # Reuse k, v, self_attention
364
- key_states = torch.cat((past_key_value[0], key_states), dim=2)
365
- value_states = torch.cat((past_key_value[1], value_states), dim=2)
366
-
367
- past_key_value = (key_states, value_states) if use_cache else None
368
-
369
- # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
370
- # to be able to avoid many of these transpose/reshape/view.
371
- query_states = query_states.transpose(1, 2)
372
- key_states = key_states.transpose(1, 2)
373
- value_states = value_states.transpose(1, 2)
374
-
375
- dropout_rate = self.attention_dropout if self.training else 0.0
376
-
377
- attn_output = self._flash_attention_forward(
378
- query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
379
- )
380
- attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
381
- attn_output = self.o_proj(attn_output)
382
-
383
- if not output_attentions:
384
- attn_weights = None
385
-
386
- return attn_output, attn_weights, past_key_value
387
-
388
- def _flash_attention_forward(
389
- self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
390
- ):
391
- """
392
- Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
393
- first unpad the input, then computes the attention scores and pad the final attention scores.
394
-
395
- Args:
396
- query_states (`torch.Tensor`):
397
- Input query states to be passed to Flash Attention API
398
- key_states (`torch.Tensor`):
399
- Input key states to be passed to Flash Attention API
400
- value_states (`torch.Tensor`):
401
- Input value states to be passed to Flash Attention API
402
- attention_mask (`torch.Tensor`):
403
- The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
404
- position of padding tokens and 1 for the position of non-padding tokens.
405
- dropout (`int`, *optional*):
406
- Attention dropout
407
- softmax_scale (`float`, *optional*):
408
- The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
409
- """
410
- if not self._flash_attn_uses_top_left_mask:
411
- causal = self.is_causal
412
- else:
413
- # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in FlashAttention2 __init__.
414
- causal = self.is_causal and query_length != 1
415
-
416
- # Contains at least one padding token in the sequence
417
- if attention_mask is not None:
418
- batch_size = query_states.shape[0]
419
- query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
420
- query_states, key_states, value_states, attention_mask, query_length
421
- )
422
-
423
- cu_seqlens_q, cu_seqlens_k = cu_seq_lens
424
- max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
425
-
426
- attn_output_unpad = flash_attn_varlen_func(
427
- query_states,
428
- key_states,
429
- value_states,
430
- cu_seqlens_q=cu_seqlens_q,
431
- cu_seqlens_k=cu_seqlens_k,
432
- max_seqlen_q=max_seqlen_in_batch_q,
433
- max_seqlen_k=max_seqlen_in_batch_k,
434
- dropout_p=dropout,
435
- softmax_scale=softmax_scale,
436
- causal=causal,
437
- )
438
-
439
- attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
440
- else:
441
- attn_output = flash_attn_func(
442
- query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
443
- )
444
-
445
- return attn_output
446
-
447
- def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
448
- indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
449
- batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
450
-
451
- key_layer = index_first_axis(
452
- key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
453
- )
454
- value_layer = index_first_axis(
455
- value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
456
- )
457
- if query_length == kv_seq_len:
458
- query_layer = index_first_axis(
459
- query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
460
- )
461
- cu_seqlens_q = cu_seqlens_k
462
- max_seqlen_in_batch_q = max_seqlen_in_batch_k
463
- indices_q = indices_k
464
- elif query_length == 1:
465
- max_seqlen_in_batch_q = 1
466
- cu_seqlens_q = torch.arange(
467
- batch_size + 1, dtype=torch.int32, device=query_layer.device
468
- ) # There is a memcpy here, which is very bad.
469
- indices_q = cu_seqlens_q[:-1]
470
- query_layer = query_layer.squeeze(1)
471
- else:
472
- # The -q_len: slice assumes left padding.
473
- attention_mask = attention_mask[:, -query_length:]
474
- query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
475
-
476
- return (
477
- query_layer,
478
- key_layer,
479
- value_layer,
480
- indices_q,
481
- (cu_seqlens_q, cu_seqlens_k),
482
- (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
483
- )
484
-
485
-
486
- ATTENTION_CLASSES = {
487
- "eager": Attention,
488
- "flash_attention_2": FlashAttention2,
489
- }
490
-
491
-
492
- class DecoderLayer(nn.Module):
493
- def __init__(self, config: StableLMEpochConfig):
494
- super().__init__()
495
- self.self_attn = ATTENTION_CLASSES[config._attn_implementation](config=config)
496
- self.mlp = MLP(config)
497
- self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps)
498
- self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps)
499
-
500
- def forward(
501
- self,
502
- hidden_states: Optional[torch.FloatTensor],
503
- attention_mask: Optional[torch.FloatTensor] = None,
504
- position_ids: Optional[torch.LongTensor] = None,
505
- past_key_value: Optional[Tuple[torch.Tensor]] = None,
506
- output_attentions: Optional[bool] = False,
507
- use_cache: Optional[bool] = False,
508
- ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
509
- residual = hidden_states
510
-
511
- hidden_states = self.input_layernorm(hidden_states)
512
-
513
- # Self Attention
514
- hidden_states, self_attn_weights, present_key_value = self.self_attn(
515
- hidden_states=hidden_states,
516
- attention_mask=attention_mask,
517
- position_ids=position_ids,
518
- past_key_value=past_key_value,
519
- output_attentions=output_attentions,
520
- use_cache=use_cache,
521
- )
522
- hidden_states = residual + hidden_states
523
-
524
- # Fully Connected
525
- residual = hidden_states
526
- hidden_states = self.post_attention_layernorm(hidden_states)
527
- hidden_states = self.mlp(hidden_states)
528
- hidden_states = residual + hidden_states
529
-
530
- outputs = (hidden_states,)
531
-
532
- if output_attentions:
533
- outputs += (self_attn_weights,)
534
-
535
- if use_cache:
536
- outputs += (present_key_value,)
537
-
538
- return outputs
539
-
540
-
541
- class StableLMEpochPreTrainedModel(PreTrainedModel):
542
- """An abstract class to handle weights initialization and a simple interface
543
- for downloading and loading pretrained models.
544
- """
545
-
546
- config_class = StableLMEpochConfig
547
- base_model_prefix = "transformer"
548
- supports_gradient_checkpointing = True
549
- _no_split_modules = ["DecoderLayer"]
550
- _skip_keys_device_placement = "past_key_values"
551
- _supports_flash_attn_2 = True
552
-
553
- def _init_weights(self, module: nn.Module):
554
- """Initialize the weights"""
555
- if isinstance(module, nn.Linear):
556
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
557
- if module.bias is not None:
558
- module.bias.data.zero_()
559
- elif isinstance(module, nn.Embedding):
560
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
561
- if module.padding_idx is not None:
562
- module.weight.data[module.padding_idx].zero_()
563
- elif isinstance(module, nn.LayerNorm):
564
- module.bias.data.zero_()
565
- module.weight.data.fill_(1.0)
566
-
567
- def _set_gradient_checkpointing(self, module: nn.Module, value=False):
568
- if isinstance(module, StableLMEpochModel):
569
- module.gradient_checkpointing = value
570
-
571
-
572
- class StableLMEpochModel(StableLMEpochPreTrainedModel):
573
- def __init__(self, config: StableLMEpochConfig):
574
- super().__init__(config)
575
- self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
576
- self.layers = nn.ModuleList([DecoderLayer(config) for _ in range(config.num_hidden_layers)])
577
- self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps)
578
-
579
- self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
580
- self.gradient_checkpointing = False
581
- # Initialize weights and apply final processing
582
- self.post_init()
583
-
584
- def get_input_embeddings(self):
585
- return self.embed_tokens
586
-
587
- def set_input_embeddings(self, value: nn.Module):
588
- self.embed_tokens = value
589
-
590
- # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
591
- def _prepare_decoder_attention_mask(
592
- self,
593
- attention_mask: torch.Tensor,
594
- input_shape: torch.Size,
595
- inputs_embeds: torch.Tensor,
596
- past_key_values_length: int,
597
- ):
598
- # Create causal mask
599
- # [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
600
- combined_attention_mask = None
601
- if input_shape[-1] > 1:
602
- combined_attention_mask = _make_causal_mask(
603
- input_shape,
604
- inputs_embeds.dtype,
605
- device=inputs_embeds.device,
606
- past_key_values_length=past_key_values_length,
607
- )
608
-
609
- if attention_mask is not None:
610
- # [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
611
- expanded_attn_mask = _expand_mask(
612
- attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
613
- ).to(inputs_embeds.device)
614
- combined_attention_mask = expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
615
-
616
- return combined_attention_mask
617
-
618
- def forward(
619
- self,
620
- input_ids: Optional[torch.LongTensor] = None,
621
- attention_mask: Optional[torch.FloatTensor] = None,
622
- position_ids: Optional[torch.LongTensor] = None,
623
- past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
624
- inputs_embeds: Optional[torch.FloatTensor] = None,
625
- use_cache: Optional[bool] = None,
626
- output_attentions: Optional[bool] = None,
627
- output_hidden_states: Optional[bool] = None,
628
- return_dict: Optional[bool] = None,
629
- ) -> Union[Tuple, BaseModelOutputWithPast]:
630
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
631
- output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
632
- use_cache = use_cache if use_cache is not None else self.config.use_cache
633
-
634
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
635
-
636
- # Retrieve input_ids and inputs_embeds
637
- if input_ids is not None and inputs_embeds is not None:
638
- raise ValueError(
639
- "You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time"
640
- )
641
- elif input_ids is not None:
642
- batch_size, seq_length = input_ids.shape
643
- elif inputs_embeds is not None:
644
- batch_size, seq_length, _ = inputs_embeds.shape
645
- else:
646
- raise ValueError(
647
- "You have to specify either decoder_input_ids or decoder_inputs_embeds"
648
- )
649
-
650
- seq_length_with_past = seq_length
651
- past_key_values_length = 0
652
-
653
- if position_ids is None:
654
- device = input_ids.device if input_ids is not None else inputs_embeds.device
655
- position_ids = torch.arange(
656
- past_key_values_length,
657
- seq_length + past_key_values_length,
658
- dtype=torch.long,
659
- device=device,
660
- )
661
- position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
662
- else:
663
- position_ids = position_ids.view(-1, seq_length).long()
664
-
665
- if inputs_embeds is None:
666
- inputs_embeds = self.embed_tokens(input_ids)
667
- # Embed positions
668
- if self._use_flash_attention_2:
669
- # 2d mask is passed through the layers
670
- attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
671
- else:
672
- if attention_mask is None:
673
- attention_mask = torch.ones(
674
- (batch_size, seq_length_with_past),
675
- dtype=torch.bool,
676
- device=inputs_embeds.device,
677
- )
678
- attention_mask = self._prepare_decoder_attention_mask(
679
- attention_mask,
680
- (batch_size, seq_length),
681
- inputs_embeds,
682
- past_key_values_length,
683
- )
684
-
685
- hidden_states = inputs_embeds
686
-
687
- if self.gradient_checkpointing and self.training:
688
- if use_cache:
689
- logger.warning(
690
- "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
691
- )
692
- use_cache = False
693
-
694
- # Decoder layers
695
- all_hidden_states = () if output_hidden_states else None
696
- all_self_attns = () if output_attentions else None
697
- next_decoder_cache = () if use_cache else None
698
-
699
- for idx, decoder_layer in enumerate(self.layers):
700
- if output_hidden_states:
701
- all_hidden_states += (hidden_states,)
702
-
703
- past_key_value = (
704
- past_key_values[idx] if past_key_values is not None else None
705
- )
706
-
707
- if self.gradient_checkpointing and self.training:
708
-
709
- def create_custom_forward(module):
710
- def custom_forward(*inputs):
711
- # None for past_key_value
712
- return module(*inputs, past_key_value, output_attentions)
713
-
714
- return custom_forward
715
-
716
- layer_outputs = torch.utils.checkpoint.checkpoint(
717
- create_custom_forward(decoder_layer),
718
- hidden_states,
719
- attention_mask,
720
- position_ids,
721
- )
722
- else:
723
- layer_outputs = decoder_layer(
724
- hidden_states,
725
- attention_mask=attention_mask,
726
- position_ids=position_ids,
727
- past_key_value=past_key_value,
728
- output_attentions=output_attentions,
729
- use_cache=use_cache,
730
- )
731
-
732
- hidden_states = layer_outputs[0]
733
-
734
- if use_cache:
735
- next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
736
-
737
- if output_attentions:
738
- all_self_attns += (layer_outputs[1],)
739
-
740
- hidden_states = self.norm(hidden_states)
741
-
742
- # Add hidden states from the last decoder layer
743
- if output_hidden_states:
744
- all_hidden_states += (hidden_states,)
745
-
746
- next_cache = next_decoder_cache if use_cache else None
747
- if not return_dict:
748
- return tuple(
749
- v
750
- for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]
751
- if v is not None
752
- )
753
- return BaseModelOutputWithPast(
754
- last_hidden_state=hidden_states,
755
- past_key_values=next_cache,
756
- hidden_states=all_hidden_states,
757
- attentions=all_self_attns,
758
- )
759
-
760
-
761
- class StableLMEpochForCausalLM(StableLMEpochPreTrainedModel):
762
- _tied_weights_keys = ["lm_head.weight"]
763
-
764
- def __init__(self, config: StableLMEpochConfig):
765
- super().__init__(config)
766
-
767
- self.model = StableLMEpochModel(config)
768
- self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
769
-
770
- # Initialize weights and apply final processing
771
- self.post_init()
772
-
773
- def get_input_embeddings(self):
774
- return self.model.embed_tokens
775
-
776
- def set_input_embeddings(self, value):
777
- self.model.embed_tokens = value
778
-
779
- def get_output_embeddings(self):
780
- return self.lm_head
781
-
782
- def set_output_embeddings(self, new_embeddings: nn.Module):
783
- self.lm_head = new_embeddings
784
-
785
- def get_decoder(self):
786
- return self.model
787
-
788
- def set_decoder(self, decoder):
789
- self.model = decoder
790
-
791
- def forward(
792
- self,
793
- input_ids: Optional[torch.LongTensor] = None,
794
- attention_mask: Optional[torch.FloatTensor] = None,
795
- position_ids: Optional[torch.LongTensor] = None,
796
- past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
797
- inputs_embeds: Optional[torch.FloatTensor] = None,
798
- labels: Optional[torch.LongTensor] = None,
799
- use_cache: Optional[bool] = None,
800
- output_attentions: Optional[bool] = None,
801
- output_hidden_states: Optional[bool] = None,
802
- return_dict: Optional[bool] = None,
803
- ) -> Union[Tuple, CausalLMOutputWithPast]:
804
- output_attentions = (
805
- output_attentions
806
- if output_attentions is not None
807
- else self.config.output_attentions
808
- )
809
- output_hidden_states = (
810
- output_hidden_states
811
- if output_hidden_states is not None
812
- else self.config.output_hidden_states
813
- )
814
- return_dict = (
815
- return_dict if return_dict is not None else self.config.use_return_dict
816
- )
817
-
818
- # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
819
- outputs = self.model(
820
- input_ids,
821
- attention_mask=attention_mask,
822
- position_ids=position_ids,
823
- past_key_values=past_key_values,
824
- inputs_embeds=inputs_embeds,
825
- use_cache=use_cache,
826
- output_attentions=output_attentions,
827
- output_hidden_states=output_hidden_states,
828
- return_dict=return_dict,
829
- )
830
-
831
- hidden_states = outputs[0]
832
- logits = self.lm_head(hidden_states).float()
833
-
834
- loss = None
835
- if labels is not None:
836
- # Shift so that tokens < n predict n
837
- shift_logits = logits[..., :-1, :].contiguous()
838
- shift_labels = labels[..., 1:].contiguous()
839
- # Flatten the tokens
840
- loss_fct = CrossEntropyLoss()
841
- shift_logits = shift_logits.view(-1, self.config.vocab_size)
842
- shift_labels = shift_labels.view(-1)
843
- # Enable model parallelism
844
- shift_labels = shift_labels.to(shift_logits.device)
845
- loss = loss_fct(shift_logits, shift_labels)
846
-
847
- if not return_dict:
848
- output = (logits,) + outputs[1:]
849
- return (loss,) + output if loss is not None else output
850
-
851
- return CausalLMOutputWithPast(
852
- loss=loss,
853
- logits=logits,
854
- past_key_values=outputs.past_key_values,
855
- hidden_states=outputs.hidden_states,
856
- attentions=outputs.attentions,
857
- )
858
-
859
- def prepare_inputs_for_generation(
860
- self,
861
- input_ids,
862
- past_key_values: Optional[torch.Tensor] = None,
863
- attention_mask: Optional[torch.Tensor] = None,
864
- inputs_embeds: Optional[torch.Tensor] = None,
865
- **kwargs,
866
- ):
867
- # Trim decoder_input_ids if past is used
868
- if past_key_values is not None:
869
- past_length = past_key_values[0][0].shape[2]
870
-
871
- # Some generation methods already pass only the last input ID
872
- if input_ids.shape[1] > past_length:
873
- remove_prefix_length = past_length
874
- else:
875
- # Default to old behavior: keep only final ID
876
- remove_prefix_length = input_ids.shape[1] - 1
877
-
878
- input_ids = input_ids[:, remove_prefix_length:]
879
-
880
- position_ids = kwargs.get("position_ids", None)
881
- if attention_mask is not None and position_ids is None:
882
- # Create position_ids on the fly for batch generation
883
- position_ids = attention_mask.long().cumsum(-1) - 1
884
- position_ids.masked_fill_(attention_mask == 0, 1)
885
- if past_key_values:
886
- position_ids = position_ids[:, -1].unsqueeze(-1)
887
-
888
- # If `inputs_embeds` are passed, we only want to use them in the 1st generation step
889
- if inputs_embeds is not None and past_key_values is None:
890
- model_inputs = {"inputs_embeds": inputs_embeds}
891
- else:
892
- model_inputs = {"input_ids": input_ids}
893
-
894
- model_inputs.update(
895
- {
896
- "attention_mask": attention_mask,
897
- "past_key_values": past_key_values,
898
- "use_cache": kwargs.get("use_cache"),
899
- "position_ids": position_ids,
900
- }
901
- )
902
- return model_inputs
903
-
904
- @staticmethod
905
- def _reorder_cache(past_key_values, beam_idx):
906
- reordered_past = ()
907
- for layer_past in past_key_values:
908
- reordered_past += (
909
- tuple(
910
- past_state.index_select(0, beam_idx.to(past_state.device))
911
- for past_state in layer_past
912
- ),
913
- )
914
- return reordered_past
915
-
916
-
917
- StableLMEpochConfig.register_for_auto_class()
918
- StableLMEpochForCausalLM.register_for_auto_class("AutoModelForCausalLM")
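
For context, the deleted module registered its config and model classes for auto loading via the two `register_for_auto_class` calls above. Below is a minimal, hypothetical sketch of how a checkpoint that ships such a custom modeling file is typically loaded; the repository id is a placeholder (not this repo), and only standard `transformers` API calls are used.

# Hypothetical usage sketch: loading a checkpoint that bundles a custom
# modeling file registered with `register_for_auto_class`.
# `trust_remote_code=True` is what allows the bundled
# modeling_stablelm_epoch.py to be downloaded, imported, and used.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-org/your-stablelm-epoch-model"  # placeholder repo id, not this repository

tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)

inputs = tokenizer("Hello, world!", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))

After this deletion, what `trust_remote_code=True` resolves to would depend on whichever modeling source the repository's auto_map still points to.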