automerger committed
Commit: 4aca482
Parent: f3ca0a0

Upload folder using huggingface_hub
README.md CHANGED
@@ -6,28 +6,34 @@ tags:
 - lazymergekit
 - automerger
 base_model:
+- yam-peleg/Experiment26-7B
 - yam-peleg/Experiment28-7B
 ---
 
 # Experiment26Experiment28-7B
 
 Experiment26Experiment28-7B is an automated merge created by [Maxime Labonne](https://huggingface.co/mlabonne) using the following configuration.
+* [yam-peleg/Experiment26-7B](https://huggingface.co/yam-peleg/Experiment26-7B)
 * [yam-peleg/Experiment28-7B](https://huggingface.co/yam-peleg/Experiment28-7B)
 
 ## 🧩 Configuration
 
 ```yaml
-models:
-  - model: rwitz/experiment26-truthy-iter-0
-    # No parameters necessary for base model
-  - model: yam-peleg/Experiment28-7B
-    parameters:
-      density: 0.53
-      weight: 0.6
-merge_method: dare_ties
-base_model: rwitz/experiment26-truthy-iter-0
+slices:
+  - sources:
+      - model: yam-peleg/Experiment26-7B
+        layer_range: [0, 32]
+      - model: yam-peleg/Experiment28-7B
+        layer_range: [0, 32]
+merge_method: slerp
+base_model: yam-peleg/Experiment26-7B
 parameters:
-  int8_mask: true
+  t:
+    - filter: self_attn
+      value: [0, 0.5, 0.3, 0.7, 1]
+    - filter: mlp
+      value: [1, 0.5, 0.7, 0.3, 0]
+    - value: 0.5
 dtype: bfloat16
 random_seed: 0
 ```
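As a reading aid: the recipe above switches the merge method from dare_ties to slerp (spherical linear interpolation). Each pair of matching weight tensors is blended along the arc between them rather than along a straight line, and the value lists under t (e.g. [0, 0.5, 0.3, 0.7, 1] for self_attn) are anchor points that mergekit interpolates across the [0, 32] layer range, with t=0 keeping the base model (Experiment26-7B) and t=1 taking Experiment28-7B. Below is a minimal sketch of the interpolation itself, not mergekit's implementation:

```python
import torch

def slerp(t: float, a: torch.Tensor, b: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    """Spherical linear interpolation between two weight tensors.

    Treats each tensor as one flat vector; falls back to plain linear
    interpolation when the two directions are nearly parallel.
    """
    a_flat, b_flat = a.flatten().float(), b.flatten().float()
    a_dir = a_flat / (a_flat.norm() + eps)
    b_dir = b_flat / (b_flat.norm() + eps)
    dot = torch.clamp(a_dir @ b_dir, -1.0, 1.0)
    if 1.0 - dot.abs() < eps:
        # Nearly colinear: slerp degenerates, so lerp is the stable choice.
        out = (1 - t) * a_flat + t * b_flat
    else:
        omega = torch.acos(dot)  # angle between the two weight directions
        so = torch.sin(omega)
        out = (torch.sin((1 - t) * omega) / so) * a_flat \
            + (torch.sin(t * omega) / so) * b_flat
    return out.reshape(a.shape).to(a.dtype)
```

mergekit applies this per tensor, picking t for each layer from the self_attn or mlp schedule when the tensor name matches that filter, and from the default value: 0.5 otherwise.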
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "rwitz/experiment26-truthy-iter-0",
+  "_name_or_path": "yam-peleg/Experiment26-7B",
   "architectures": [
     "MistralForCausalLM"
   ],
@@ -20,7 +20,7 @@
   "sliding_window": 4096,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.38.2",
+  "transformers_version": "4.39.0",
   "use_cache": true,
   "vocab_size": 32000
 }
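The config.json change above only repoints _name_or_path at the new base model and records a newer transformers version; architecture, dtype, and vocabulary are unchanged. To confirm what a checkpoint declares without downloading its weights, transformers' AutoConfig fetches just config.json; a small sketch (the repo id is assumed from the model name, it is not stated in this commit):

```python
from transformers import AutoConfig

# Assumed repo id for this merge; substitute the actual repository.
cfg = AutoConfig.from_pretrained("automerger/Experiment26Experiment28-7B")
print(cfg.architectures)  # expected: ['MistralForCausalLM']
print(cfg.torch_dtype)    # expected: bfloat16
print(cfg.vocab_size)     # expected: 32000
```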
mergekit_config.yml CHANGED
@@ -1,15 +1,19 @@
 
-models:
-  - model: rwitz/experiment26-truthy-iter-0
-    # No parameters necessary for base model
-  - model: yam-peleg/Experiment28-7B
-    parameters:
-      density: 0.53
-      weight: 0.6
-merge_method: dare_ties
-base_model: rwitz/experiment26-truthy-iter-0
+slices:
+  - sources:
+      - model: yam-peleg/Experiment26-7B
+        layer_range: [0, 32]
+      - model: yam-peleg/Experiment28-7B
+        layer_range: [0, 32]
+merge_method: slerp
+base_model: yam-peleg/Experiment26-7B
 parameters:
-  int8_mask: true
+  t:
+    - filter: self_attn
+      value: [0, 0.5, 0.3, 0.7, 1]
+    - filter: mlp
+      value: [1, 0.5, 0.7, 0.3, 0]
+    - value: 0.5
 dtype: bfloat16
 random_seed: 0
 
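mergekit_config.yml is the exact recipe the automerger passed to mergekit, so the merge should be reproducible from this file alone. Below is a sketch of replaying it through mergekit's documented Python entry point; the MergeOptions fields shown are a subset and may vary across mergekit versions (the mergekit-yaml CLI is the simpler route):

```python
import torch
import yaml
from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

# Parse the recipe exactly as committed in this repository.
with open("mergekit_config.yml", "r", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

run_merge(
    merge_config,
    out_path="./Experiment26Experiment28-7B",  # output directory (assumed name)
    options=MergeOptions(
        cuda=torch.cuda.is_available(),  # merge on GPU when one is available
        copy_tokenizer=True,             # ship the base model's tokenizer
    ),
)
```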
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b43aa5a9d908e87c45a6e8d0adf530f5bfb4490659cab6b0f4cec1340c8ec0f4
+oid sha256:7c2527ba2538f1e5f1a8593810370e6bf72f61f18976d1e7cdcf84e14a008754
 size 9942981696
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c241eecd76a700d76e563b52e90c89ccdd21eb9e5787416e8f7594d0031e2625
+oid sha256:6f8fd4b147717993bcf934c946412a23fc166f42a1192c4479e8b8d688b71388
 size 4540516344
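Both .safetensors entries are Git LFS pointers: only the sha256 oid changed, while the shard sizes stayed identical, as expected for a merge that preserves the architecture and parameter count. To verify a downloaded shard against its pointer, hash it locally; a plain hashlib sketch, using the first shard's new oid from above:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks so multi-GB shards never sit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid copied from the new LFS pointer above.
expected = "7c2527ba2538f1e5f1a8593810370e6bf72f61f18976d1e7cdcf84e14a008754"
assert sha256_of("model-00001-of-00002.safetensors") == expected
```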