HuggingFaceTB/SmolLM-360M-Instruct
Browse files
- README.md +5 -24
- adapter_config.json +4 -4
- adapter_model.safetensors +1 -1
- runs/Aug29_09-53-03_algo-2/events.out.tfevents.1724925199.algo-2.67.0 +3 -0
- training_args.bin +1 -1
README.md
CHANGED
@@ -20,7 +20,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [HuggingFaceTB/SmolLM-360M-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM-360M-Instruct) on the generator dataset.
 It achieves the following results on the evaluation set:
-- Loss:
+- Loss: 2.1967
 
 ## Model description
 
@@ -51,32 +51,13 @@ The following hyperparameters were used during training:
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_ratio: 0.03
-- num_epochs:
+- num_epochs: 1
 
 ### Training results
 
-| Training Loss | Epoch   | Step | Validation Loss |
-|:-------------:|:-------:|:----:|:---------------:|
-| 2.
-| 2.1107 | 2.0 | 21 | 2.0371 |
-| 2.0147 | 2.9524 | 31 | 1.9717 |
-| 1.9577 | 4.0 | 42 | 1.9244 |
-| 1.9087 | 4.9524 | 52 | 1.8953 |
-| 1.8825 | 6.0 | 63 | 1.8716 |
-| 1.8667 | 6.9524 | 73 | 1.8558 |
-| 1.8488 | 8.0 | 84 | 1.8429 |
-| 1.8284 | 8.9524 | 94 | 1.8343 |
-| 1.8201 | 10.0 | 105 | 1.8270 |
-| 1.8129 | 10.9524 | 115 | 1.8219 |
-| 1.8028 | 12.0 | 126 | 1.8179 |
-| 1.7987 | 12.9524 | 136 | 1.8154 |
-| 1.7938 | 14.0 | 147 | 1.8137 |
-| 1.79 | 14.9524 | 157 | 1.8130 |
-| 1.7903 | 16.0 | 168 | 1.8125 |
-| 1.7884 | 16.9524 | 178 | 1.8125 |
-| 1.7892 | 18.0 | 189 | 1.8124 |
-| 1.7825 | 18.9524 | 199 | 1.8124 |
-| 1.7906 | 19.0476 | 200 | 1.8124 |
+| Training Loss | Epoch  | Step | Validation Loss |
+|:-------------:|:------:|:----:|:---------------:|
+| 2.281         | 0.9524 | 10   | 2.1967          |
 
 
 ### Framework versions
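For context, the hyperparameters listed in the README map onto a `transformers.TrainingArguments` object roughly as in the sketch below. Only the values visible in the diff (Adam betas and epsilon, cosine scheduler, warmup ratio 0.03, one epoch, TensorBoard logging) come from the source; the output directory, learning rate, and batch size are placeholder assumptions, since those fields fall outside the hunk shown above.

```python
# Minimal sketch of the training setup implied by the README hunk above.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="smollm-360m-instruct-ft",  # assumption: any local path works
    num_train_epochs=1,                    # from the diff: num_epochs: 1
    lr_scheduler_type="cosine",            # from the diff
    warmup_ratio=0.03,                     # from the diff
    adam_beta1=0.9,                        # from the diff: Adam betas=(0.9, 0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-8,                     # from the diff
    learning_rate=2e-4,                    # assumption: not shown in this hunk
    per_device_train_batch_size=4,         # assumption: not shown in this hunk
    eval_strategy="epoch",                 # assumption (`evaluation_strategy` on older transformers)
    logging_steps=10,
    report_to=["tensorboard"],             # consistent with the runs/ event file below
)
```

With roughly ten optimizer steps per epoch in the new run, evaluating once per epoch is consistent with the single table row above (step 10, validation loss 2.1967).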
adapter_config.json
CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "down_proj",
-    "up_proj",
     "k_proj",
     "o_proj",
-    "
+    "up_proj",
     "q_proj",
-    "
+    "down_proj",
+    "v_proj",
+    "gate_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
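The updated `target_modules` list corresponds to a PEFT `LoraConfig` along the lines of the sketch below. The module names, task type, and `use_dora` flag come from the config shown above; the rank, alpha, and dropout values are placeholder assumptions, since those fields fall outside the hunk.

```python
# Minimal sketch of a LoRA configuration matching the updated adapter_config.json.
from peft import LoraConfig

lora_config = LoraConfig(
    task_type="CAUSAL_LM",        # from the config
    target_modules=[              # from the new target_modules list
        "k_proj", "o_proj", "up_proj", "q_proj",
        "down_proj", "v_proj", "gate_proj",
    ],
    r=16,                         # assumption: rank not shown in the hunk
    lora_alpha=32,                # assumption
    lora_dropout=0.05,            # assumption
    use_dora=False,               # from the config
)
```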
adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:88536d07a980a23f28eddfbbda484919ec2112f4a032cb129724c5eab6eddb1e
 size 17426248
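The safetensors file is stored as a Git LFS pointer; once fetched, the adapter can be applied to the base model with PEFT, roughly as sketched below. The adapter path is a placeholder for a local checkout of this repository or its Hub id.

```python
# Minimal sketch of loading the updated adapter on top of the base model.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "HuggingFaceTB/SmolLM-360M-Instruct"
base = AutoModelForCausalLM.from_pretrained(base_id)
tokenizer = AutoTokenizer.from_pretrained(base_id)

# Placeholder: point this at the directory (or Hub id) containing
# adapter_config.json and adapter_model.safetensors.
model = PeftModel.from_pretrained(base, "path-or-hub-id-of-this-adapter")
model.eval()
```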
runs/Aug29_09-53-03_algo-2/events.out.tfevents.1724925199.algo-2.67.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b4df0fb9a426ddf9482bc08abad68b5aae9cab475d3722f229e33a1639fc4e6
+size 6227
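The added file is a TensorBoard event log from this run, also stored as an LFS pointer. A minimal sketch of reading it with TensorBoard's `EventAccumulator` follows, assuming the actual file has been pulled via Git LFS; the scalar tag name is the usual `Trainer` tag and is an assumption, not a value read from the file.

```python
# Minimal sketch of inspecting the added TensorBoard event file.
from tensorboard.backend.event_processing import event_accumulator

path = "runs/Aug29_09-53-03_algo-2/events.out.tfevents.1724925199.algo-2.67.0"
ea = event_accumulator.EventAccumulator(path)
ea.Reload()

print(ea.Tags()["scalars"])             # list the scalar tags actually logged
for event in ea.Scalars("train/loss"):  # assumed tag; pick one printed above
    print(event.step, event.value)
```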
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:61da54db0dec494feaa5d19e3c84a34d16e393b20453c129162579f377e03995
 size 5240
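`training_args.bin` is the pickled `TrainingArguments` object that the `Trainer` saves alongside a run. A minimal sketch of inspecting it, assuming the file has been pulled via Git LFS; `weights_only=False` is needed on recent PyTorch versions because the file is a pickle rather than a plain tensor checkpoint.

```python
# Minimal sketch of inspecting the saved TrainingArguments.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.lr_scheduler_type, args.warmup_ratio)
```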