willtensora committed
Training in progress, step 3, checkpoint

Files changed:
- last-checkpoint/adapter_config.json +3 -3
- last-checkpoint/adapter_model.safetensors +1 -1
- last-checkpoint/optimizer.pt +1 -1
- last-checkpoint/rng_state_0.pth +2 -2
- last-checkpoint/rng_state_1.pth +2 -2
- last-checkpoint/rng_state_2.pth +3 -0
- last-checkpoint/rng_state_3.pth +3 -0
- last-checkpoint/rng_state_4.pth +3 -0
- last-checkpoint/rng_state_5.pth +3 -0
- last-checkpoint/rng_state_6.pth +3 -0
- last-checkpoint/rng_state_7.pth +3 -0
- last-checkpoint/scheduler.pt +1 -1
- last-checkpoint/trainer_state.json +23 -88
- last-checkpoint/training_args.bin +1 -1
last-checkpoint/adapter_config.json CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "down_proj",
-    "o_proj",
     "q_proj",
+    "down_proj",
+    "k_proj",
     "v_proj",
     "up_proj",
     "gate_proj",
-    "
+    "o_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
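For reference, a minimal PEFT sketch that reproduces the updated target_modules list from the new adapter_config.json. Only target_modules, task_type, and use_dora are taken from the diff; r, lora_alpha, and lora_dropout are placeholders, not values from this checkpoint.

from peft import LoraConfig

# Sketch of the adapter configuration implied by the new adapter_config.json.
# target_modules, task_type, and use_dora come from the diff above;
# r, lora_alpha, and lora_dropout are placeholder values.
lora_config = LoraConfig(
    r=8,               # placeholder rank
    lora_alpha=16,     # placeholder scaling
    lora_dropout=0.05, # placeholder dropout
    target_modules=[
        "q_proj",
        "down_proj",
        "k_proj",
        "v_proj",
        "up_proj",
        "gate_proj",
        "o_proj",
    ],
    task_type="CAUSAL_LM",
    use_dora=False,
)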
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:a8a206fd6fc82dc47865ca6972a848f653d5bb4c2cc530c6dc040a0487086171
 size 14696
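Each of these binary files is stored as a Git LFS pointer (version line, sha256 oid, size). A quick way to confirm that a locally downloaded object matches its pointer is to hash it; a small sketch, assuming the checkpoint has been fetched into a local last-checkpoint/ directory:

import hashlib

# Compute the sha256 of a downloaded LFS object and compare it with the
# oid recorded in the pointer shown above (the local path is an assumption).
def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "a8a206fd6fc82dc47865ca6972a848f653d5bb4c2cc530c6dc040a0487086171"
actual = sha256_of("last-checkpoint/adapter_model.safetensors")
print("match" if actual == expected else "mismatch")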
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:1904d8aee5cd393e6742756d030dadbcc2a508b70f966870e41930044f0afabe
 size 39398
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:49d56c6fd229b79724ed3dcdb4b672517c1648204c72ac049f2492d414e60de1
+size 15920
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:ce7631bd4ab517ce695982f0c187210a288656770e5c42ef4faa0f4c7edd7a3b
+size 15984
last-checkpoint/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82b85acc8eedcc692a4ea50ce2ab6c5db7c647c8c83776e141cce4bb67023ceb
+size 15984
last-checkpoint/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:44e5a47f6caf446465a431fdda949366f3e32ce41cb8af2309a1630b254ebe18
+size 15984
last-checkpoint/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d17c75edd95d4e6d6a49098208613a584190e481ff384794811ac9f97688a1f4
+size 15984
last-checkpoint/rng_state_5.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55f9a29d74bc179bd8107b8481e9c3e9e5582d1833bda5a5f55c2b0024f64b20
+size 15984
last-checkpoint/rng_state_6.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d1c4d9a201fe2b5cd47cdb7e914822e96d14ce765e17bad5e47ded0b3392b99
+size 15984
last-checkpoint/rng_state_7.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7969b1d053e2a68474c1dcbcff3192620975ee90d4433cd6a1c7abbad15fa205
+size 15984
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ffd8c58e5d02492554dbaa495f8cf80dff41fabc0e1288cb2fd18a103d402219
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,114 +1,49 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.
+  "epoch": 0.013452914798206279,
   "eval_steps": 3,
-  "global_step":
+  "global_step": 3,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.
-      "grad_norm": 0.
+      "epoch": 0.004484304932735426,
+      "grad_norm": 0.006755814887583256,
       "learning_rate": 2e-05,
-      "loss": 11.
+      "loss": 11.9309,
       "step": 1
     },
     {
-      "epoch": 0.
-      "eval_loss": 11.
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "epoch": 0.004484304932735426,
+      "eval_loss": 11.931302070617676,
+      "eval_runtime": 0.5278,
+      "eval_samples_per_second": 1422.827,
+      "eval_steps_per_second": 89.045,
       "step": 1
     },
     {
-      "epoch": 0.
-      "grad_norm": 0.
+      "epoch": 0.008968609865470852,
+      "grad_norm": 0.008150096982717514,
       "learning_rate": 4e-05,
-      "loss": 11.
+      "loss": 11.9311,
       "step": 2
     },
     {
-      "epoch": 0.
-      "grad_norm": 0.
+      "epoch": 0.013452914798206279,
+      "grad_norm": 0.00839826837182045,
       "learning_rate": 6e-05,
-      "loss": 11.
+      "loss": 11.9313,
       "step": 3
     },
     {
-      "epoch": 0.
-      "eval_loss": 11.
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "epoch": 0.013452914798206279,
+      "eval_loss": 11.931297302246094,
+      "eval_runtime": 0.521,
+      "eval_samples_per_second": 1441.543,
+      "eval_steps_per_second": 90.216,
       "step": 3
-    },
-    {
-      "epoch": 0.002243409983174425,
-      "grad_norm": 0.01584225706756115,
-      "learning_rate": 8e-05,
-      "loss": 11.9328,
-      "step": 4
-    },
-    {
-      "epoch": 0.0028042624789680315,
-      "grad_norm": 0.01661496050655842,
-      "learning_rate": 0.0001,
-      "loss": 11.9248,
-      "step": 5
-    },
-    {
-      "epoch": 0.0033651149747616375,
-      "grad_norm": 0.008873275481164455,
-      "learning_rate": 0.00012,
-      "loss": 11.926,
-      "step": 6
-    },
-    {
-      "epoch": 0.0033651149747616375,
-      "eval_loss": 11.931286811828613,
-      "eval_runtime": 2.8054,
-      "eval_samples_per_second": 267.701,
-      "eval_steps_per_second": 134.029,
-      "step": 6
-    },
-    {
-      "epoch": 0.003925967470555244,
-      "grad_norm": 0.011052712798118591,
-      "learning_rate": 0.00014,
-      "loss": 11.9324,
-      "step": 7
-    },
-    {
-      "epoch": 0.00448681996634885,
-      "grad_norm": 0.005740743596106768,
-      "learning_rate": 0.00016,
-      "loss": 11.935,
-      "step": 8
-    },
-    {
-      "epoch": 0.005047672462142457,
-      "grad_norm": 0.011872372590005398,
-      "learning_rate": 0.00018,
-      "loss": 11.9287,
-      "step": 9
-    },
-    {
-      "epoch": 0.005047672462142457,
-      "eval_loss": 11.93126392364502,
-      "eval_runtime": 2.7223,
-      "eval_samples_per_second": 275.873,
-      "eval_steps_per_second": 138.12,
-      "step": 9
-    },
-    {
-      "epoch": 0.005608524957936063,
-      "grad_norm": 0.007526675704866648,
-      "learning_rate": 0.0002,
-      "loss": 11.9335,
-      "step": 10
     }
   ],
   "logging_steps": 1,
@@ -123,12 +58,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop":
+        "should_training_stop": false
       },
       "attributes": {}
     }
   },
-  "total_flos":
+  "total_flos": 2894954496.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
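The trainer_state.json above follows the standard Hugging Face Trainer layout, so the logged metrics can be read back directly; a small sketch, assuming the checkpoint directory has been downloaded locally:

import json

# Print the training and eval metrics recorded in trainer_state.json
# (the path is an assumption; point it at the downloaded checkpoint).
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(f"global_step={state['global_step']}, epoch={state['epoch']}")
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']}: loss={entry['loss']} lr={entry['learning_rate']}")
    elif "eval_loss" in entry:
        print(f"step {entry['step']}: eval_loss={entry['eval_loss']}")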
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:b480b5981b44b1a685dac017205977827e4a56934f64c3c150725d74276fd6fb
 size 6776