marksverdhei committed
Commit 07fad64 • 1 Parent(s): 7baec41
Add quantization config to config.json
config.json: +13 -2
config.json CHANGED
@@ -22,5 +22,16 @@
   "torch_dtype": "float16",
   "transformers_version": "4.31.0",
   "use_cache": true,
-  "vocab_size": 32000
-}
+  "vocab_size": 32000,
+  "quantization_config": {
+    "bits": 4,
+    "group_size": 128,
+    "damp_percent": 0.01,
+    "desc_act": false,
+    "sym": true,
+    "true_sequential": true,
+    "model_name_or_path": null,
+    "model_file_base_name": "model",
+    "quant_method": "gptq"
+  }
+}
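With the `quantization_config` block embedded in config.json, recent versions of `transformers` (with `optimum` and `auto-gptq` installed) can pick up the GPTQ settings automatically when the checkpoint is loaded. A minimal sketch, assuming a placeholder repository id since the actual model id is not shown in this commit:

```python
# Minimal sketch: loading a GPTQ-quantized checkpoint whose config.json
# carries a "quantization_config" block like the one added above.
# Assumes `transformers`, `optimum`, and `auto-gptq` are installed.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "marksverdhei/some-gptq-model"  # hypothetical placeholder id

tokenizer = AutoTokenizer.from_pretrained(repo_id)

# The GPTQ settings (bits=4, group_size=128, ...) are read from the
# "quantization_config" entry in config.json, so no extra quantization
# arguments are needed here.
model = AutoModelForCausalLM.from_pretrained(repo_id, device_map="auto")

inputs = tokenizer("Hello, world!", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0]))
```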