RanchiZhao committed · Commit fd8ff8f · Parent(s): d6ca5a3
Update configuration_minicpm.py

configuration_minicpm.py  CHANGED  (+1 -30)
@@ -33,11 +33,8 @@ class MiniCPM3Config(PretrainedConfig):
     This is the configuration class to store the configuration of a [`MiniCPMModel`]. It is used to instantiate an MiniCPM
     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
     defaults will yield a similar configuration to that of the MiniCPM-7B.
-
     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
     documentation from [`PretrainedConfig`] for more information.
-
-
     Args:
         vocab_size (`int`, *optional*, defaults to 32000):
             Vocabulary size of the MiniCPM model. Defines the number of different tokens that can be represented by the
@@ -97,16 +94,12 @@
             Whether to use a bias in the query, key, value and output projection layers during self-attention.
         attention_dropout (`float`, *optional*, defaults to 0.0):
             The dropout ratio for the attention probabilities.
-
     ```python
     >>> from transformers import MiniCPMModel, MiniCPMConfig
-
     >>> # Initializing a MiniCPM minicpm-7b style configuration
     >>> configuration = MiniCPMConfig()
-
     >>> # Initializing a model from the minicpm-7b style configuration
     >>> model = MiniCPMModel(configuration)
-
     >>> # Accessing the model configuration
     >>> configuration = model.config
     ```"""
@@ -174,7 +167,6 @@
         self.use_cache = use_cache
         self.rope_theta = rope_theta
         self.rope_scaling = rope_scaling
-        self._rope_scaling_validation()
         self.attention_bias = attention_bias
         self.attention_dropout = attention_dropout
         self.scale_emb = scale_emb
@@ -193,25 +185,4 @@
             import flash_attn
             self._attn_implementation = "flash_attention_2"
         except:
-            pass
-
-    def _rope_scaling_validation(self):
-        """
-        Validate the `rope_scaling` configuration.
-        """
-        if self.rope_scaling is None:
-            return
-
-        # if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
-        #     raise ValueError(
-        #         "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
-        #         f"got {self.rope_scaling}"
-        #     )
-        # rope_scaling_type = self.rope_scaling.get("type", None)
-        # rope_scaling_factor = self.rope_scaling.get("factor", None)
-        # if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
-        #     raise ValueError(
-        #         f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
-        #     )
-        # if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
-        #     raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
+            pass
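For reference, the deleted `_rope_scaling_validation` (whose body was already commented out) enforced the shape of the `rope_scaling` dict. Below is a minimal standalone sketch of those same checks; `validate_rope_scaling` is a hypothetical helper, not part of this commit, for anyone who still wants the validation after this change:

```python
# Hypothetical helper, not part of this commit: reproduces the checks from
# the deleted _rope_scaling_validation so the field can still be validated
# outside the config class.
def validate_rope_scaling(rope_scaling):
    if rope_scaling is None:
        return
    # Must be a dict with exactly the two fields `type` and `factor`.
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(
            "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
            f"got {rope_scaling}"
        )
    rope_scaling_type = rope_scaling.get("type", None)
    rope_scaling_factor = rope_scaling.get("factor", None)
    # Only linear and dynamic scaling were recognized.
    if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
        raise ValueError(
            f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
        )
    # The factor must be a float strictly greater than 1.
    if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")

validate_rope_scaling({"type": "linear", "factor": 2.0})  # passes silently
```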
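The single added line keeps the silent fallback around the optional `flash_attn` import. Here is a minimal sketch of that detection pattern in isolation; the `"eager"` default and the standalone variable are illustrative assumptions, and only the `"flash_attention_2"` string comes from the diff:

```python
# Sketch of the detection pattern retained by the commit: prefer
# flash_attention_2 when the flash_attn package is importable, otherwise
# fall back silently. Catching ImportError is narrower than the bare
# `except:` in the original, which also swallows unrelated errors.
attn_implementation = "eager"  # assumed default when flash_attn is absent
try:
    import flash_attn  # noqa: F401 -- imported only to test availability
    attn_implementation = "flash_attention_2"
except ImportError:
    pass

print(attn_implementation)
```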