leixa committed
Commit 9d842fc · verified · 1 Parent(s): 68aab8f

Training in progress, step 104, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:91178314d33fe4afb30171470054c3838d439aeb15a938f2197669aa0f5393ad
+oid sha256:8df7bdccf9b9ea018cf08637989d48944be5e3ee8a0970bee5233527cdc8082c
 size 671149168
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:aac2b837938592cae880bcac77ee86985aa64ee15912e19a5994c92cef802b1f
+oid sha256:abf811487858e391fe9c39fee5a148ac7ba91980cf9e43cff52525b26848fb97
 size 341314196
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a663f94f9a2a2ab6df61c276b200d0d2fd085e9bf8a9f2e6c62a991d9b2f00cf
+oid sha256:919715e521e6fafa91c21eb71f1ef59a2a480a7d0fc641c8aac7133bb8caf2b2
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e37872b45362e9c63412b1b167580155e0789faf44bbc81b017091d0c0912d87
+oid sha256:c89b61481eb2980a04190d12a0165f95c9fc0a4db20ad8cc3d895835eac42f68
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 1.6545454545454545,
+  "epoch": 1.8909090909090909,
   "eval_steps": 13,
-  "global_step": 91,
+  "global_step": 104,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -281,6 +281,42 @@
       "eval_samples_per_second": 9.964,
       "eval_steps_per_second": 1.286,
       "step": 91
+    },
+    {
+      "epoch": 1.690909090909091,
+      "grad_norm": 0.5781804323196411,
+      "learning_rate": 3.562003362839914e-05,
+      "loss": 0.1118,
+      "step": 93
+    },
+    {
+      "epoch": 1.7454545454545456,
+      "grad_norm": 0.557227373123169,
+      "learning_rate": 3.243125879593286e-05,
+      "loss": 0.0887,
+      "step": 96
+    },
+    {
+      "epoch": 1.8,
+      "grad_norm": 0.6238749623298645,
+      "learning_rate": 2.932207475167398e-05,
+      "loss": 0.0828,
+      "step": 99
+    },
+    {
+      "epoch": 1.8545454545454545,
+      "grad_norm": 0.6486019492149353,
+      "learning_rate": 2.630656687635007e-05,
+      "loss": 0.1015,
+      "step": 102
+    },
+    {
+      "epoch": 1.8909090909090909,
+      "eval_loss": 0.15355004370212555,
+      "eval_runtime": 9.3338,
+      "eval_samples_per_second": 9.964,
+      "eval_steps_per_second": 1.286,
+      "step": 104
     }
   ],
   "logging_steps": 3,
@@ -300,7 +336,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.068077834361897e+17,
+  "total_flos": 2.362237982217339e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null