PEFT
natnitaract committed
Commit
4f71e0f
1 Parent(s): 667de24

Uploading model to Hugging Face Hub.

Files changed (3)
  1. README.md +39 -0
  2. adapter_config.json +6 -6
  3. adapter_model.bin +1 -1
README.md CHANGED
@@ -124,6 +124,42 @@ The following `bitsandbytes` quantization config was used during training:
  - bnb_4bit_use_double_quant: False
  - bnb_4bit_compute_dtype: float32
 
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+
  The following `bitsandbytes` quantization config was used during training:
  - quant_method: bitsandbytes
  - load_in_8bit: False
@@ -147,5 +183,8 @@ The following `bitsandbytes` quantization config was used during training:
  - PEFT 0.5.0
  - PEFT 0.5.0
  - PEFT 0.5.0
+ - PEFT 0.5.0
+ - PEFT 0.5.0
+ - PEFT 0.5.0
 
  - PEFT 0.5.0
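The block added to the README above is a standard QLoRA-style 4-bit setup (NF4 quantization, no double quantization, float32 compute); PEFT 0.5.0 appends this block and a "PEFT 0.5.0" framework line to the model card on each save, which appears to be why the same block repeats three times. As a non-authoritative sketch, the same settings expressed as a `transformers.BitsAndBytesConfig` would look roughly like this — the base model id is taken from the adapter_config.json change below, not from any training code in this repo:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the README block: 4-bit NF4, single quantization, fp32 compute.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float32,
)

# Base model id comes from the adapter_config.json diff in this commit.
base_model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1",
    quantization_config=bnb_config,
    device_map="auto",
)
```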
adapter_config.json CHANGED
@@ -1,6 +1,6 @@
  {
    "auto_mapping": null,
-   "base_model_name_or_path": "mistralai/Mistral-7B-Instruct-v0.1",
+   "base_model_name_or_path": "mistralai/Mistral-7B-v0.1",
    "bias": "none",
    "fan_in_fan_out": false,
    "inference_mode": true,
@@ -14,13 +14,13 @@
    "r": 64,
    "revision": null,
    "target_modules": [
-     "k_proj",
-     "q_proj",
-     "v_proj",
-     "gate_proj",
      "down_proj",
      "o_proj",
-     "up_proj"
+     "gate_proj",
+     "v_proj",
+     "up_proj",
+     "k_proj",
+     "q_proj"
    ],
    "task_type": "CAUSAL_LM"
  }
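The reordering of `target_modules` still targets the same seven Mistral projection layers; the substantive change is the base model switching from `mistralai/Mistral-7B-Instruct-v0.1` to `mistralai/Mistral-7B-v0.1`. Below is a minimal sketch of a `peft.LoraConfig` that would serialize to roughly this JSON; `lora_alpha` and `lora_dropout` fall outside the shown hunks, so those values are placeholders, not the repo's actual settings:

```python
from peft import LoraConfig

lora_config = LoraConfig(
    r=64,                  # rank, as in the diff
    lora_alpha=16,         # assumption: not visible in the shown hunks
    lora_dropout=0.05,     # assumption: not visible in the shown hunks
    bias="none",
    fan_in_fan_out=False,
    task_type="CAUSAL_LM",
    target_modules=[
        "down_proj", "o_proj", "gate_proj",
        "v_proj", "up_proj", "k_proj", "q_proj",
    ],
)
```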
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7c08924efae3ed321fababeb3cb46e017c15f3de6e7a25c291141d49687fbbc6
+ oid sha256:606fdcafd079e58fc594ac04c423c8d14b78549114267ab3affdc41488deae20
  size 671250189
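Only the LFS pointer's sha256 changes here, i.e. new adapter weights of identical byte size. A hedged sketch of loading the updated adapter onto the quantized `base_model` from the first snippet; `natnitaract/<repo-name>` is a placeholder, since the commit view does not show the repository id:

```python
from peft import PeftModel

# Placeholder repo id -- the actual id is not visible in this commit view.
model = PeftModel.from_pretrained(base_model, "natnitaract/<repo-name>")
model.eval()
```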