Fix config.json
config.json  +4 -4
@@ -8,9 +8,9 @@
     "AutoConfig": "configuration_qwen.QWenConfig",
     "AutoModelForCausalLM": "modeling_qwen.QWenLMHeadModel"
   },
-  "bf16":
+  "bf16": false,
   "emb_dropout_prob": 0.0,
-  "fp16":
+  "fp16": true,
   "fp32": false,
   "hidden_size": 5120,
   "initializer_range": 0.02,
@@ -60,7 +60,7 @@
   "use_cache_kernel": false,
   "use_cache_quantization": false,
   "use_dynamic_ntk": true,
-  "use_flash_attn":
+  "use_flash_attn": "auto",
   "use_logn_attn": true,
   "vocab_size": 152064
-}
+}
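The change switches the checkpoint's precision flags from bf16 to fp16 and sets use_flash_attn to "auto", leaving it to the repo's remote code to decide whether flash attention can be used at load time. Below is a minimal sketch of how a checkpoint carrying this config is typically loaded with transformers; the repo id is an assumption (the diff does not name the model), and trust_remote_code is required because auto_map points at configuration_qwen.py and modeling_qwen.py shipped inside the repo rather than at classes built into transformers.

```python
# Sketch only: "Qwen/Qwen-14B-Chat" is an assumed repo id, inferred from
# hidden_size 5120 and the QWen* auto_map entries; substitute the real repo.
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

model_id = "Qwen/Qwen-14B-Chat"  # assumption, not stated in the diff

# The custom QWenConfig from configuration_qwen.py is resolved via auto_map,
# so trust_remote_code must be enabled for both the config and the model.
config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
print(config.bf16, config.fp16, config.use_flash_attn)  # expect: False True auto

tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    config=config,
    device_map="auto",
    trust_remote_code=True,
).eval()
```

In the Qwen remote modeling code, "auto" for use_flash_attn generally means flash attention is enabled only when the flash-attn package is installed and the model is running in fp16 or bf16, while the bf16/fp16/fp32 flags select the dtype the weights are cast to when the caller does not pass one explicitly.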