RoyJoy committed
Commit 34d99a4 · verified · 1 Parent(s): 3a54e8f

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0e7c4e406b6b492e5b1cd44134448ffe93a128d82c3fe83e24d35b3488a4113f
+oid sha256:06d63e1cb1fdb0b6ca5fecb1669687ba0a79fff92708ed2efb572d77f306c1bb
 size 50624
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4df340a64944e8a232195ab8c6c7bdf8deeaf83b8b70e6c98c874309b57771ba
+oid sha256:e4f0ba882b4b5a3d2c9b16f849d82d52936054968a9dfe94e1efc2955ffebc38
 size 118090
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ccd5f96a0c54d9c7fc696cd2de657ab87643082dd5b551287b669ca224896764
+oid sha256:68d8240f8b64502e023794a3da2d8c6aa526ac0f753e37b389fdd87097fabd55
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b163f39dfe27dc2b3dbffe608a672c5c24782a59c01d78ba19fb42e537b1c880
+oid sha256:f117663bca47e6dbbfa0c77eb3eb3ed1ed85a3dd0c9f866227e737a73eb5c422
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d3c2e9f653665898599ba92c14fead2834af70cd76abf31d5deed9f0b6254a84
+oid sha256:7a7d87a0b1f18c774b632725605bde69fd2b07215dcf9ac48e19e6caa47a6c61
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b04fb1bf3df1d2f6147ab7478673cead0a366e5db2e2dbeb350bb77196eaef95
+oid sha256:f3daeca24213bd8e7f686bd89b9bb6ba8b6995471fd68b15febf485beb753100
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8c931ece4da598a541d357f6c98f67481603252e193960022d37ddd49c584b1f
+oid sha256:a89ffc445067fef9d6d02bb3ff9e61d5e3209e6fa67c7259b3b364b90dbaa2cd
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.1366120218579235,
+  "epoch": 0.273224043715847,
   "eval_steps": 9,
-  "global_step": 25,
+  "global_step": 50,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -206,6 +206,205 @@
       "learning_rate": 9.330127018922194e-05,
       "loss": 10.3781,
       "step": 25
+    },
+    {
+      "epoch": 0.14207650273224043,
+      "grad_norm": 0.02928001433610916,
+      "learning_rate": 9.24024048078213e-05,
+      "loss": 10.3773,
+      "step": 26
+    },
+    {
+      "epoch": 0.14754098360655737,
+      "grad_norm": 0.027695804834365845,
+      "learning_rate": 9.145187862775209e-05,
+      "loss": 10.3766,
+      "step": 27
+    },
+    {
+      "epoch": 0.14754098360655737,
+      "eval_loss": 10.377593994140625,
+      "eval_runtime": 0.8333,
+      "eval_samples_per_second": 739.27,
+      "eval_steps_per_second": 24.002,
+      "step": 27
+    },
+    {
+      "epoch": 0.15300546448087432,
+      "grad_norm": 0.0281513761729002,
+      "learning_rate": 9.045084971874738e-05,
+      "loss": 10.3775,
+      "step": 28
+    },
+    {
+      "epoch": 0.15846994535519127,
+      "grad_norm": 0.028438687324523926,
+      "learning_rate": 8.940053768033609e-05,
+      "loss": 10.3773,
+      "step": 29
+    },
+    {
+      "epoch": 0.16393442622950818,
+      "grad_norm": 0.02885403297841549,
+      "learning_rate": 8.83022221559489e-05,
+      "loss": 10.3769,
+      "step": 30
+    },
+    {
+      "epoch": 0.16939890710382513,
+      "grad_norm": 0.02971421554684639,
+      "learning_rate": 8.715724127386972e-05,
+      "loss": 10.3776,
+      "step": 31
+    },
+    {
+      "epoch": 0.17486338797814208,
+      "grad_norm": 0.033431265503168106,
+      "learning_rate": 8.596699001693255e-05,
+      "loss": 10.3759,
+      "step": 32
+    },
+    {
+      "epoch": 0.18032786885245902,
+      "grad_norm": 0.03296530991792679,
+      "learning_rate": 8.473291852294987e-05,
+      "loss": 10.3772,
+      "step": 33
+    },
+    {
+      "epoch": 0.18579234972677597,
+      "grad_norm": 0.03332926705479622,
+      "learning_rate": 8.345653031794292e-05,
+      "loss": 10.377,
+      "step": 34
+    },
+    {
+      "epoch": 0.1912568306010929,
+      "grad_norm": 0.03217661380767822,
+      "learning_rate": 8.213938048432697e-05,
+      "loss": 10.3761,
+      "step": 35
+    },
+    {
+      "epoch": 0.19672131147540983,
+      "grad_norm": 0.033675920218229294,
+      "learning_rate": 8.07830737662829e-05,
+      "loss": 10.3767,
+      "step": 36
+    },
+    {
+      "epoch": 0.19672131147540983,
+      "eval_loss": 10.376678466796875,
+      "eval_runtime": 0.8446,
+      "eval_samples_per_second": 729.317,
+      "eval_steps_per_second": 23.679,
+      "step": 36
+    },
+    {
+      "epoch": 0.20218579234972678,
+      "grad_norm": 0.0315808467566967,
+      "learning_rate": 7.938926261462366e-05,
+      "loss": 10.3776,
+      "step": 37
+    },
+    {
+      "epoch": 0.20765027322404372,
+      "grad_norm": 0.038899537175893784,
+      "learning_rate": 7.795964517353735e-05,
+      "loss": 10.3754,
+      "step": 38
+    },
+    {
+      "epoch": 0.21311475409836064,
+      "grad_norm": 0.03773730248212814,
+      "learning_rate": 7.649596321166024e-05,
+      "loss": 10.3778,
+      "step": 39
+    },
+    {
+      "epoch": 0.2185792349726776,
+      "grad_norm": 0.04042154923081398,
+      "learning_rate": 7.500000000000001e-05,
+      "loss": 10.3765,
+      "step": 40
+    },
+    {
+      "epoch": 0.22404371584699453,
+      "grad_norm": 0.039911359548568726,
+      "learning_rate": 7.347357813929454e-05,
+      "loss": 10.3754,
+      "step": 41
+    },
+    {
+      "epoch": 0.22950819672131148,
+      "grad_norm": 0.04403088241815567,
+      "learning_rate": 7.191855733945387e-05,
+      "loss": 10.3759,
+      "step": 42
+    },
+    {
+      "epoch": 0.23497267759562843,
+      "grad_norm": 0.03892216086387634,
+      "learning_rate": 7.033683215379002e-05,
+      "loss": 10.3762,
+      "step": 43
+    },
+    {
+      "epoch": 0.24043715846994534,
+      "grad_norm": 0.038504015654325485,
+      "learning_rate": 6.873032967079561e-05,
+      "loss": 10.3756,
+      "step": 44
+    },
+    {
+      "epoch": 0.2459016393442623,
+      "grad_norm": 0.04363120719790459,
+      "learning_rate": 6.710100716628344e-05,
+      "loss": 10.376,
+      "step": 45
+    },
+    {
+      "epoch": 0.2459016393442623,
+      "eval_loss": 10.375645637512207,
+      "eval_runtime": 0.8348,
+      "eval_samples_per_second": 737.924,
+      "eval_steps_per_second": 23.959,
+      "step": 45
+    },
+    {
+      "epoch": 0.25136612021857924,
+      "grad_norm": 0.043945375829935074,
+      "learning_rate": 6.545084971874738e-05,
+      "loss": 10.3751,
+      "step": 46
+    },
+    {
+      "epoch": 0.2568306010928962,
+      "grad_norm": 0.04311802238225937,
+      "learning_rate": 6.378186779084995e-05,
+      "loss": 10.3757,
+      "step": 47
+    },
+    {
+      "epoch": 0.26229508196721313,
+      "grad_norm": 0.04382125660777092,
+      "learning_rate": 6.209609477998338e-05,
+      "loss": 10.3757,
+      "step": 48
+    },
+    {
+      "epoch": 0.2677595628415301,
+      "grad_norm": 0.04658404737710953,
+      "learning_rate": 6.0395584540887963e-05,
+      "loss": 10.3758,
+      "step": 49
+    },
+    {
+      "epoch": 0.273224043715847,
+      "grad_norm": 0.045132722705602646,
+      "learning_rate": 5.868240888334653e-05,
+      "loss": 10.3745,
+      "step": 50
     }
   ],
   "logging_steps": 1,
@@ -225,7 +424,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 10460489318400.0,
+  "total_flos": 20920978636800.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null