leixa committed on
Commit 1ee58a6 · verified · 1 Parent(s): e401187

Training in progress, step 62, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d16ed9ab9c4bfd98f29bf5f819bde84ba824e2c54a8db2353da1e7189ad0cb07
+oid sha256:31c49463f3821582d9d78755f5eb97a42b46bfdd8f9601d6971f4f497316456e
 size 639691872
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ef2f01e8f872a1305ddc3fab247debab31a6dcb442e8b21b5af235956dfb761a
+oid sha256:255c08b2f79fe4bae4226d36f7cf03b9c43f1a10cbb716e63d3c74250abc8ff2
 size 325339796
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b9b6d5df961b682e8c36756802b4652eb8397fb64bca4fff86cbd89ba5e2ecf2
+oid sha256:149f5934ccd723cc5ecad7bf2a9242c754be0eb33e2819fc4e2d0c12aedee181
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:24fc915c71652348fb7923ecbe3044804dd3d0b1630ceca627060bf9ca491be1
+oid sha256:0d1b8a36fcf0317e6a9c8738d638639c228d4b5886701e2af2c36b54e118f8ba
 size 1064
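
The four files above are Git LFS pointers: each records only a sha256 object id and a byte size, while the tensors themselves live in LFS storage. As a minimal sketch (assuming the checkpoint has already been pulled locally; the path and expected hash are copied from the adapter_model.safetensors diff above), a downloaded file can be checked against its pointer's oid like this:

import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    # Stream in 1 MiB chunks so a multi-hundred-MB checkpoint never sits fully in RAM.
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid taken from the new adapter_model.safetensors pointer above (illustrative local path).
expected = "31c49463f3821582d9d78755f5eb97a42b46bfdd8f9601d6971f4f497316456e"
actual = sha256_of(Path("last-checkpoint/adapter_model.safetensors"))
print("hash match" if actual == expected else f"hash mismatch: {actual}")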
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.2515212981744422,
+  "epoch": 0.5030425963488844,
   "eval_steps": 31,
-  "global_step": 31,
+  "global_step": 62,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -93,6 +93,84 @@
       "eval_samples_per_second": 5.902,
       "eval_steps_per_second": 0.738,
       "step": 31
+    },
+    {
+      "epoch": 0.26774847870182555,
+      "grad_norm": 5.427604675292969,
+      "learning_rate": 9.899623523104149e-05,
+      "loss": 2.4006,
+      "step": 33
+    },
+    {
+      "epoch": 0.2920892494929006,
+      "grad_norm": 7.537416458129883,
+      "learning_rate": 9.871850323926177e-05,
+      "loss": 2.7397,
+      "step": 36
+    },
+    {
+      "epoch": 0.31643002028397565,
+      "grad_norm": 8.674805641174316,
+      "learning_rate": 9.84073820189054e-05,
+      "loss": 2.4828,
+      "step": 39
+    },
+    {
+      "epoch": 0.3407707910750507,
+      "grad_norm": 8.167730331420898,
+      "learning_rate": 9.806308479691595e-05,
+      "loss": 2.5302,
+      "step": 42
+    },
+    {
+      "epoch": 0.36511156186612576,
+      "grad_norm": 10.937932014465332,
+      "learning_rate": 9.768584753741134e-05,
+      "loss": 2.3383,
+      "step": 45
+    },
+    {
+      "epoch": 0.3894523326572008,
+      "grad_norm": 3.8246636390686035,
+      "learning_rate": 9.727592877996585e-05,
+      "loss": 2.212,
+      "step": 48
+    },
+    {
+      "epoch": 0.41379310344827586,
+      "grad_norm": 4.667800426483154,
+      "learning_rate": 9.683360946241989e-05,
+      "loss": 2.3763,
+      "step": 51
+    },
+    {
+      "epoch": 0.4381338742393509,
+      "grad_norm": 4.990477561950684,
+      "learning_rate": 9.635919272833938e-05,
+      "loss": 2.3995,
+      "step": 54
+    },
+    {
+      "epoch": 0.46247464503042596,
+      "grad_norm": 9.09072494506836,
+      "learning_rate": 9.58530037192562e-05,
+      "loss": 2.545,
+      "step": 57
+    },
+    {
+      "epoch": 0.486815415821501,
+      "grad_norm": 5.338695049285889,
+      "learning_rate": 9.53153893518325e-05,
+      "loss": 2.3183,
+      "step": 60
+    },
+    {
+      "epoch": 0.5030425963488844,
+      "eval_loss": 0.5645254850387573,
+      "eval_runtime": 13.4155,
+      "eval_samples_per_second": 15.504,
+      "eval_steps_per_second": 1.938,
+      "step": 62
     }
   ],
   "logging_steps": 3,
@@ -112,7 +190,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4.124536900209869e+16,
+  "total_flos": 8.249073800419738e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null