leixa committed
Commit 184e16b · verified · 1 Parent(s): 1b4b8de

Training in progress, step 78, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4e27c7ca276dab3b694b57a434df8e3d6893b7178979b1cac675c9ec8f60e479
+ oid sha256:c1a71acfb6d4b2e67e14a0a94145dd571edcee7a4a0a7465524db480b4f1d9fa
  size 671149168
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d34c31e1a088ba21fbeeda48d254b861c5ec0f4870c287a673f116c47f5ce695
+ oid sha256:a36fb42f8a18ebd6fe10bfd465c98efa16fe456c6841de6fd1a7d506f68c0fbd
  size 341314196
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:de1efa6b5fe80dcfbe062f5d7edc55d1dd44809f7d923ce612aa87101548ec21
+ oid sha256:6c92c501f0c4a9123fc8a015c0ef0273054caea0a6d5ebb564fbcba98901fbea
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fb438ab9499d5178d553290d304fad6bd105cd4f59f8f03d9657a249b7bd5f14
+ oid sha256:a4d6e5f76805b36e4d76ee2a3b48ba3bedb1c2bda79be4f5c70f809dd0d57438
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
    "best_metric": null,
    "best_model_checkpoint": null,
-   "epoch": 1.1818181818181819,
+   "epoch": 1.4181818181818182,
    "eval_steps": 13,
-   "global_step": 65,
+   "global_step": 78,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -202,6 +202,49 @@
      "eval_samples_per_second": 9.959,
      "eval_steps_per_second": 1.285,
      "step": 65
+     },
+     {
+       "epoch": 1.2,
+       "grad_norm": 0.6527738571166992,
+       "learning_rate": 6.545084971874738e-05,
+       "loss": 0.179,
+       "step": 66
+     },
+     {
+       "epoch": 1.2545454545454544,
+       "grad_norm": 0.7428808808326721,
+       "learning_rate": 6.22170203068947e-05,
+       "loss": 0.174,
+       "step": 69
+     },
+     {
+       "epoch": 1.309090909090909,
+       "grad_norm": 0.6538995504379272,
+       "learning_rate": 5.8927844739931834e-05,
+       "loss": 0.16,
+       "step": 72
+     },
+     {
+       "epoch": 1.3636363636363638,
+       "grad_norm": 0.6126785278320312,
+       "learning_rate": 5.559822380516539e-05,
+       "loss": 0.1336,
+       "step": 75
+     },
+     {
+       "epoch": 1.4181818181818182,
+       "grad_norm": 0.718612551689148,
+       "learning_rate": 5.2243241517525754e-05,
+       "loss": 0.132,
+       "step": 78
+     },
+     {
+       "epoch": 1.4181818181818182,
+       "eval_loss": 0.23752006888389587,
+       "eval_runtime": 9.3452,
+       "eval_samples_per_second": 9.952,
+       "eval_steps_per_second": 1.284,
+       "step": 78
      }
    ],
    "logging_steps": 3,
@@ -221,7 +264,7 @@
        "attributes": {}
      }
    },
-   "total_flos": 1.4684436868104192e+17,
+   "total_flos": 1.7663751186127258e+17,
    "train_batch_size": 8,
    "trial_name": null,
    "trial_params": null
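
Each pointer file above stores only a sha256 oid and a byte size for the binary tracked by git-lfs; the weights themselves live in LFS storage. As a minimal sketch (assuming the repository has been cloned with git-lfs so `last-checkpoint/` holds the real binaries; the helper name is illustrative, not part of this repo), the updated adapter weights can be checked against the pointer recorded in this commit:

```python
import hashlib
import os

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded file against the oid/size from its git-lfs pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        # Hash in 1 MiB chunks to avoid loading the whole checkpoint into memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# oid and size taken from the updated adapter_model.safetensors pointer above.
print(verify_lfs_object(
    "last-checkpoint/adapter_model.safetensors",
    "c1a71acfb6d4b2e67e14a0a94145dd571edcee7a4a0a7465524db480b4f1d9fa",
    671149168,
))
```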