""" XVERSE model configuration""" |
|
|
|
from transformers.configuration_utils import PretrainedConfig |
|
from transformers.utils import logging |
|
|
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
XVERSE_PRETRAINED_CONFIG_ARCHIVE_MAP = {} |
|
|
|
|
|
class XverseConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`XverseModel`]. It is used to instantiate an
    Xverse model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the XVERSE-65B model.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 100534):
            Vocabulary size of the XVERSE model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`XverseModel`].
        hidden_size (`int`, *optional*, defaults to 8192):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 22016):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 80):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 64):
            Number of attention heads for each attention layer in the Transformer decoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 16384):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case.
        max_tokenizer_truncation (`int`, *optional*, defaults to 16384):
            The maximum sequence length to which the tokenizer truncates its inputs.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings.

    Example:

    ```python
    >>> from transformers import XverseModel, XverseConfig

    >>> # Initializing an Xverse XVERSE-65B style configuration
    >>> configuration = XverseConfig()

    >>> # Initializing a model from the XVERSE-65B style configuration
    >>> model = XverseModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "xverse"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=100534,
        hidden_size=8192,
        intermediate_size=22016,
        num_hidden_layers=80,
        num_attention_heads=64,
        hidden_act="silu",
        max_position_embeddings=16384,
        max_tokenizer_truncation=16384,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.max_tokenizer_truncation = max_tokenizer_truncation

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
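

# Minimal usage sketch (an illustrative addition, not required by the class above): it assumes
# the `transformers` library is installed and that this module is importable. It shows how the
# defaults can be overridden for a deliberately tiny debug-sized configuration, and how a config
# round-trips through the `PretrainedConfig.save_pretrained` / `from_pretrained` machinery that
# `XverseConfig` inherits. The output directory name is only an example.
if __name__ == "__main__":
    # Every keyword below maps to one of the __init__ arguments; any unknown keys would be
    # forwarded to PretrainedConfig via **kwargs.
    tiny_config = XverseConfig(
        vocab_size=1000,
        hidden_size=64,
        intermediate_size=128,
        num_hidden_layers=2,
        num_attention_heads=4,
        max_position_embeddings=256,
    )

    # PretrainedConfig provides JSON serialization out of the box: save_pretrained writes a
    # config.json into the directory, and from_pretrained reads it back.
    tiny_config.save_pretrained("./tiny-xverse-config")
    reloaded = XverseConfig.from_pretrained("./tiny-xverse-config")
    assert reloaded.hidden_size == tiny_config.hidden_size
    print(reloaded)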