Abhaykoul committed
Commit 301ffed
1 Parent(s): 3f29dd1

Delete configuration_HelpingAI.py

Files changed (1)
  1. configuration_HelpingAI.py +0 -95
configuration_HelpingAI.py DELETED
@@ -1,95 +0,0 @@
- """ HelpingAI model configuration"""
-
- from transformers import PretrainedConfig
- from transformers.utils import logging
-
-
- logger = logging.get_logger(__name__)
-
-
- class HelpingAIConfig(PretrainedConfig):
-     model_type = "HelpingAI"
-     keys_to_ignore_at_inference = ["past_key_values"]
-
-     def __init__(
-         self,
-         vocab_size=50281,
-         hidden_size=2560,
-         num_hidden_layers=32,
-         num_attention_heads=32,
-         head_dim=256,
-         num_local_experts=8,
-         num_experts_per_tok=2,
-         intermediate_size=6912,
-         hidden_act="silu",
-         hidden_dropout=0.0,
-         attention_dropout=0.0,
-         classifier_dropout=0.1,
-         max_position_embeddings=4096,
-         initializer_range=0.02,
-         rms_norm_eps=1e-6,
-         layer_norm_eps=1e-5,
-         use_cache=False,
-         bos_token_id=50278,
-         eos_token_id=50279,
-         pad_token_id=50279,
-         tie_word_embeddings=False,
-         rope_pct=0.25,
-         rope_theta=10000,
-         partial_rotary_factor=0.25,
-         use_qkv_bias=False,
-         output_router_logits=False,
-         router_aux_loss_coef=0.02,
-         **kwargs,
-     ):
-         super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
-         self.vocab_size = vocab_size
-         self.max_position_embeddings = max_position_embeddings
-         self.hidden_size = hidden_size
-         self.num_hidden_layers = num_hidden_layers
-         self.num_attention_heads = num_attention_heads
-         self.head_dim = head_dim
-         self.num_local_experts = num_local_experts
-         self.num_experts_per_tok = num_experts_per_tok
-         self.intermediate_size = intermediate_size
-         self.hidden_act = hidden_act
-         self.hidden_dropout = hidden_dropout
-         self.attention_dropout = attention_dropout
-         self.classifier_dropout = classifier_dropout
-         self.initializer_range = initializer_range
-         self.rms_norm_eps = rms_norm_eps
-         self.layer_norm_eps = layer_norm_eps
-         self.use_cache = use_cache
-         self.tie_word_embeddings = tie_word_embeddings
-         self.rope_pct = rope_pct
-         self.rope_theta = rope_theta
-         self.partial_rotary_factor = partial_rotary_factor
-         self.use_qkv_bias = use_qkv_bias
-         self.output_router_logits = output_router_logits
-         self.router_aux_loss_coef = router_aux_loss_coef
-
-         if self.hidden_size % self.num_attention_heads != 0:
-             raise ValueError(
-                 "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
-             )
-
-     # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
-     def _rope_scaling_validation(self):
-         """
-         Validate the `rope_scaling` configuration.
-         """
-         if self.rope_scaling is None:
-             return
-
-         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
-             raise ValueError(
-                 "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
-             )
-         rope_scaling_type = self.rope_scaling.get("type", None)
-         rope_scaling_factor = self.rope_scaling.get("factor", None)
-         if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
-             raise ValueError(
-                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
-             )
-         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
-             raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
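For reference, a minimal sketch (not part of this commit) of how the removed HelpingAIConfig could be instantiated. It assumes configuration_HelpingAI.py is still available locally, for example from the parent commit 3f29dd1; the parameter values simply mirror the defaults visible in the deleted file.

    # Hypothetical usage of the deleted config class; requires a copy of
    # configuration_HelpingAI.py on the Python path (e.g. from commit 3f29dd1).
    from configuration_HelpingAI import HelpingAIConfig

    config = HelpingAIConfig(
        vocab_size=50281,        # defaults shown in the removed file
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,  # hidden_size must be divisible by this
    )
    print(config.model_type)         # "HelpingAI"
    print(config.num_local_experts)  # 8 (MoE defaults from the removed file)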