lesso03 committed
Commit 2bbbe5f
Parent: da4b4ac

Training in progress, step 33, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b63498a2660f30b0a8c089ba171ebf547dfa7198092728a160419855d9a54285
+oid sha256:3d18355b73610ce0117af90f511f34cd12129e14a63abd3deee67f153508f406
 size 250422888
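
Only the Git LFS pointer changes here: the 250 MB safetensors blob itself lives in LFS storage, and the diff swaps the three-line pointer (version, oid, size) to reference the new object. A minimal sketch of parsing such a pointer and checking a downloaded blob against it (file paths in the usage comments are illustrative):

```python
import hashlib
from pathlib import Path

def read_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file (version / oid / size lines) into a dict."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def matches_pointer(pointer: dict, blob_path: str) -> bool:
    """True if the downloaded blob has the size and sha256 recorded in the pointer."""
    expected_oid = pointer["oid"].removeprefix("sha256:")
    data = Path(blob_path).read_bytes()
    return (len(data) == int(pointer["size"])
            and hashlib.sha256(data).hexdigest() == expected_oid)

# Illustrative usage:
# ptr = read_lfs_pointer("adapter_model.safetensors")                # pointer as stored in git
# ok  = matches_pointer(ptr, "downloads/adapter_model.safetensors")  # the actual 250 MB blob
```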
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:02067ea159020e6efe3a1f4a59871f44cf527be7da24b8514ccb07a5958e517f
+oid sha256:8b014d5ddeb01cfc7e54d5aabb38c2a2f1bf011ae92b0e154bafc4b95dc9bc16
 size 501168482
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:38de67135451dc9d085c3f7b16c13ead71106b6555c04a7ef9f50f76037e78c1
+oid sha256:c22c11b99d235cc38475fa1ebfdb7919352b0451c1412d19b7d8d73482a9b10d
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:457a0a52259cced6b76c949d89a7eea5b5ea9329c8fcb41a32547784be1dd99e
+oid sha256:3cd3f6e9267a2b7ffad41dddfa33b74e45aa7c480b8283eebbe1a013b2da49f9
 size 1064
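
optimizer.pt, scheduler.pt, and rng_state.pth are the snapshots the transformers Trainer uses to resume a run exactly where it left off (via trainer.train(resume_from_checkpoint="last-checkpoint")). A rough sketch of inspecting them directly, assuming the checkpoint directory has been downloaded locally:

```python
import torch

# Load the snapshots on CPU for inspection; on recent PyTorch versions you may
# need weights_only=False, since rng_state.pth stores more than bare tensors.
optimizer_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu")
scheduler_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu")
rng_state = torch.load("last-checkpoint/rng_state.pth", map_location="cpu")

print(optimizer_state.keys())  # typically dict_keys(['state', 'param_groups'])
print(scheduler_state)         # small dict, e.g. last_epoch and the last learning rate
print(type(rng_state))         # RNG states captured when the checkpoint was written
```

The ~500 MB optimizer file is roughly twice the size of the 250 MB adapter, which is what an Adam-style optimizer's two moment buffers would suggest, though the optimizer type is not stated in this commit.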
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 2.2727272727272725,
+  "epoch": 3.0,
   "eval_steps": 3,
-  "global_step": 25,
+  "global_step": 33,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -254,6 +254,86 @@
       "learning_rate": 2.6996748113442394e-05,
       "loss": 2.7049,
       "step": 25
+    },
+    {
+      "epoch": 2.3636363636363638,
+      "grad_norm": 0.3256295323371887,
+      "learning_rate": 2.1165983894256647e-05,
+      "loss": 2.9422,
+      "step": 26
+    },
+    {
+      "epoch": 2.4545454545454546,
+      "grad_norm": 0.3431509733200073,
+      "learning_rate": 1.5872342839067306e-05,
+      "loss": 2.6983,
+      "step": 27
+    },
+    {
+      "epoch": 2.4545454545454546,
+      "eval_loss": 1.3413066864013672,
+      "eval_runtime": 5.3602,
+      "eval_samples_per_second": 1.866,
+      "eval_steps_per_second": 0.373,
+      "step": 27
+    },
+    {
+      "epoch": 2.5454545454545454,
+      "grad_norm": 0.34741801023483276,
+      "learning_rate": 1.1214435464779006e-05,
+      "loss": 2.7043,
+      "step": 28
+    },
+    {
+      "epoch": 2.6363636363636362,
+      "grad_norm": 0.370277464389801,
+      "learning_rate": 7.2790297726755716e-06,
+      "loss": 2.5969,
+      "step": 29
+    },
+    {
+      "epoch": 2.7272727272727275,
+      "grad_norm": 0.39455196261405945,
+      "learning_rate": 4.139434924727359e-06,
+      "loss": 2.7283,
+      "step": 30
+    },
+    {
+      "epoch": 2.7272727272727275,
+      "eval_loss": 1.3317468166351318,
+      "eval_runtime": 5.3596,
+      "eval_samples_per_second": 1.866,
+      "eval_steps_per_second": 0.373,
+      "step": 30
+    },
+    {
+      "epoch": 2.8181818181818183,
+      "grad_norm": 0.4511341452598572,
+      "learning_rate": 1.8541356326100433e-06,
+      "loss": 2.5703,
+      "step": 31
+    },
+    {
+      "epoch": 2.909090909090909,
+      "grad_norm": 0.3779066801071167,
+      "learning_rate": 4.6570269818346224e-07,
+      "loss": 2.6689,
+      "step": 32
+    },
+    {
+      "epoch": 3.0,
+      "grad_norm": 0.39674925804138184,
+      "learning_rate": 0.0,
+      "loss": 2.6383,
+      "step": 33
+    },
+    {
+      "epoch": 3.0,
+      "eval_loss": 1.3296959400177002,
+      "eval_runtime": 5.3524,
+      "eval_samples_per_second": 1.868,
+      "eval_steps_per_second": 0.374,
+      "step": 33
     }
   ],
   "logging_steps": 1,
@@ -268,12 +348,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 6.3478299623424e+16,
+  "total_flos": 8.379135550291968e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null