RoyJoy committed
Commit: f7cc80a
Parent: e1fd0b0

Training in progress, step 100, checkpoint

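Note: each checkpoint file in this commit is stored as a Git LFS pointer, so the diffs below only touch the recorded sha256 oid and byte size, never the binary contents themselves. As a minimal illustrative sketch (the local paths are hypothetical and not part of this repo), a pulled blob can be checked against its pointer like this:

```python
# Sketch: verify a downloaded checkpoint blob against its Git LFS pointer.
# Paths are hypothetical; the pointer format ("oid sha256:<hex>", "size <bytes>")
# is the one shown in the diffs below.
import hashlib
from pathlib import Path

def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

pointer = parse_lfs_pointer(Path("adapter_model.pointer").read_text())  # hypothetical saved copy of the pointer text
blob = Path("last-checkpoint/adapter_model.safetensors")                # the actual blob after `git lfs pull`

print("size ok:", blob.stat().st_size == int(pointer["size"]))
print("oid ok: ", sha256_of(blob) == pointer["oid"].removeprefix("sha256:"))
```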
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4a3ffcbaa20b7adbe3fb13d9c5ea00dbdbae59c512774db3d51955792d1ebe7a
+ oid sha256:0586752f3979653235538cb76d586e933e30062b1a35ebf40ce0480b2ae556b4
  size 78480072
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:63651ce56714645983f45e2492848593058dab03a9c536764e4e5cf34a8e37e1
+ oid sha256:8b976f4f7ca39bcc598e3150657e41d22566bfff384b274ef60bea64b58e4ba2
  size 157104826
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2080f537fe1f5b09c5545e0deb689d18a7c273db36d4eb7e485a02ec70332d29
+ oid sha256:2b3c850cd945b590d66f8306a5dc2bcd4719c74f40e07cce9a03515237e59666
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:14c54e2808505d707e404604e9ed2ad38d20d45b7d9957640670391eec62b327
+ oid sha256:7929042630bdf72e45e8596f43c5141f02517487a96e15f8463617c2259ba7ce
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ae9a47a29badd350d6d01c3321234fd35296302a9338745a084148705487be3a
+ oid sha256:afc2344264333f58c1f67871a8080082f1e7869b84cb88c38366c734877c212e
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1fcc8134a255b5c925280b9faeb8ef8acd9edf7ac109ec4b34e9b03a05777d45
+ oid sha256:ef2c5cf3ce45f3fb7bf3784c5e4fb05a993b91767ce8e1730eb9c27ddb38b6c6
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7ad2841b888ce0ae948634757c3fcacf0119c249e0fec8f3ca61ea266369ef92
+ oid sha256:c5d2a6c6aafc669cea03b9634666f204de949a3d45ce2f48a07e7e3eaf18c715
  size 1064
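The trainer_state.json diff that follows extends the training log with steps 51-100 and records a new best eval loss of 0.08561168611049652 at miner_id_24/checkpoint-100. A minimal sketch of summarizing the updated state after download, assuming the standard log_history layout written by the Hugging Face Trainer and a hypothetical local path:

```python
# Sketch: summarize the checkpoint's trainer_state.json (path is hypothetical).
import json
from pathlib import Path

state = json.loads(Path("last-checkpoint/trainer_state.json").read_text())

print("global_step:     ", state["global_step"])
print("epoch:           ", round(state["epoch"], 4))
print("best_metric:     ", state["best_metric"])
print("best checkpoint: ", state["best_model_checkpoint"])

# Eval entries in log_history carry "eval_loss"; training entries carry "loss".
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"step {entry['step']:>4}: eval_loss={entry['eval_loss']:.4f}")
```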
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.1013789251446724,
- "best_model_checkpoint": "miner_id_24/checkpoint-50",
- "epoch": 0.19436345966958213,
+ "best_metric": 0.08561168611049652,
+ "best_model_checkpoint": "miner_id_24/checkpoint-100",
+ "epoch": 0.38872691933916426,
  "eval_steps": 25,
- "global_step": 50,
+ "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -381,6 +381,372 @@
  "eval_samples_per_second": 65.71,
  "eval_steps_per_second": 2.628,
  "step": 50
+ },
+ {
+ "epoch": 0.19825072886297376,
+ "grad_norm": 0.13819019496440887,
+ "learning_rate": 0.0002668315918143169,
+ "loss": 0.0541,
+ "step": 51
+ },
+ {
+ "epoch": 0.2021379980563654,
+ "grad_norm": 0.1316405087709427,
+ "learning_rate": 0.00026526016662852886,
+ "loss": 0.0405,
+ "step": 52
+ },
+ {
+ "epoch": 0.20602526724975703,
+ "grad_norm": 0.06152060255408287,
+ "learning_rate": 0.00026365723046405023,
+ "loss": 0.0351,
+ "step": 53
+ },
+ {
+ "epoch": 0.2099125364431487,
+ "grad_norm": 0.053634870797395706,
+ "learning_rate": 0.0002620232215476231,
+ "loss": 0.0469,
+ "step": 54
+ },
+ {
+ "epoch": 0.21379980563654033,
+ "grad_norm": 0.06233103573322296,
+ "learning_rate": 0.0002603585866009697,
+ "loss": 0.0538,
+ "step": 55
+ },
+ {
+ "epoch": 0.21768707482993196,
+ "grad_norm": 0.07677210867404938,
+ "learning_rate": 0.00025866378071866334,
+ "loss": 0.0547,
+ "step": 56
+ },
+ {
+ "epoch": 0.22157434402332363,
+ "grad_norm": 0.09678474068641663,
+ "learning_rate": 0.00025693926724370956,
+ "loss": 0.0678,
+ "step": 57
+ },
+ {
+ "epoch": 0.22546161321671526,
+ "grad_norm": 0.12184121459722519,
+ "learning_rate": 0.00025518551764087326,
+ "loss": 0.1066,
+ "step": 58
+ },
+ {
+ "epoch": 0.2293488824101069,
+ "grad_norm": 0.1470523178577423,
+ "learning_rate": 0.00025340301136778483,
+ "loss": 0.1323,
+ "step": 59
+ },
+ {
+ "epoch": 0.23323615160349853,
+ "grad_norm": 0.18343396484851837,
+ "learning_rate": 0.00025159223574386114,
+ "loss": 0.1909,
+ "step": 60
+ },
+ {
+ "epoch": 0.2371234207968902,
+ "grad_norm": 0.18938356637954712,
+ "learning_rate": 0.0002497536858170772,
+ "loss": 0.2124,
+ "step": 61
+ },
+ {
+ "epoch": 0.24101068999028183,
+ "grad_norm": 0.34483832120895386,
+ "learning_rate": 0.00024788786422862526,
+ "loss": 0.36,
+ "step": 62
+ },
+ {
+ "epoch": 0.24489795918367346,
+ "grad_norm": 0.07590112835168839,
+ "learning_rate": 0.00024599528107549745,
+ "loss": 0.0741,
+ "step": 63
+ },
+ {
+ "epoch": 0.2487852283770651,
+ "grad_norm": 0.0571330301463604,
+ "learning_rate": 0.00024407645377103054,
+ "loss": 0.05,
+ "step": 64
+ },
+ {
+ "epoch": 0.25267249757045673,
+ "grad_norm": 0.045787546783685684,
+ "learning_rate": 0.00024213190690345018,
+ "loss": 0.0402,
+ "step": 65
+ },
+ {
+ "epoch": 0.2565597667638484,
+ "grad_norm": 0.05736127123236656,
+ "learning_rate": 0.00024016217209245374,
+ "loss": 0.0411,
+ "step": 66
+ },
+ {
+ "epoch": 0.26044703595724006,
+ "grad_norm": 0.07004929333925247,
+ "learning_rate": 0.00023816778784387094,
+ "loss": 0.0401,
+ "step": 67
+ },
+ {
+ "epoch": 0.26433430515063167,
+ "grad_norm": 0.07149740308523178,
+ "learning_rate": 0.0002361492994024415,
+ "loss": 0.0527,
+ "step": 68
+ },
+ {
+ "epoch": 0.26822157434402333,
+ "grad_norm": 0.08593209832906723,
+ "learning_rate": 0.0002341072586027509,
+ "loss": 0.0663,
+ "step": 69
+ },
+ {
+ "epoch": 0.272108843537415,
+ "grad_norm": 0.09691403806209564,
+ "learning_rate": 0.00023204222371836405,
+ "loss": 0.091,
+ "step": 70
+ },
+ {
+ "epoch": 0.2759961127308066,
+ "grad_norm": 0.10673736780881882,
+ "learning_rate": 0.00022995475930919905,
+ "loss": 0.1131,
+ "step": 71
+ },
+ {
+ "epoch": 0.27988338192419826,
+ "grad_norm": 0.12909558415412903,
+ "learning_rate": 0.00022784543606718227,
+ "loss": 0.1338,
+ "step": 72
+ },
+ {
+ "epoch": 0.28377065111758987,
+ "grad_norm": 0.18475858867168427,
+ "learning_rate": 0.00022571483066022657,
+ "loss": 0.1931,
+ "step": 73
+ },
+ {
+ "epoch": 0.28765792031098153,
+ "grad_norm": 0.24942803382873535,
+ "learning_rate": 0.0002235635255745762,
+ "loss": 0.2827,
+ "step": 74
+ },
+ {
+ "epoch": 0.2915451895043732,
+ "grad_norm": 0.34233248233795166,
+ "learning_rate": 0.00022139210895556104,
+ "loss": 0.3967,
+ "step": 75
+ },
+ {
+ "epoch": 0.2915451895043732,
+ "eval_loss": 0.09369731694459915,
+ "eval_runtime": 0.7613,
+ "eval_samples_per_second": 65.673,
+ "eval_steps_per_second": 2.627,
+ "step": 75
+ },
+ {
+ "epoch": 0.2954324586977648,
+ "grad_norm": 0.0579577200114727,
+ "learning_rate": 0.00021920117444680317,
+ "loss": 0.0548,
+ "step": 76
+ },
+ {
+ "epoch": 0.29931972789115646,
+ "grad_norm": 0.05240903049707413,
+ "learning_rate": 0.00021699132102792097,
+ "loss": 0.0393,
+ "step": 77
+ },
+ {
+ "epoch": 0.3032069970845481,
+ "grad_norm": 0.061587151139974594,
+ "learning_rate": 0.0002147631528507739,
+ "loss": 0.0334,
+ "step": 78
+ },
+ {
+ "epoch": 0.30709426627793973,
+ "grad_norm": 0.05840449780225754,
+ "learning_rate": 0.00021251727907429355,
+ "loss": 0.039,
+ "step": 79
+ },
+ {
+ "epoch": 0.3109815354713314,
+ "grad_norm": 0.060217294842004776,
+ "learning_rate": 0.0002102543136979454,
+ "loss": 0.0456,
+ "step": 80
+ },
+ {
+ "epoch": 0.31486880466472306,
+ "grad_norm": 0.08354919403791428,
+ "learning_rate": 0.0002079748753938678,
+ "loss": 0.067,
+ "step": 81
+ },
+ {
+ "epoch": 0.31875607385811466,
+ "grad_norm": 0.08509140461683273,
+ "learning_rate": 0.0002056795873377331,
+ "loss": 0.0761,
+ "step": 82
+ },
+ {
+ "epoch": 0.3226433430515063,
+ "grad_norm": 0.09227221459150314,
+ "learning_rate": 0.00020336907703837748,
+ "loss": 0.0854,
+ "step": 83
+ },
+ {
+ "epoch": 0.32653061224489793,
+ "grad_norm": 0.11894426494836807,
+ "learning_rate": 0.00020104397616624645,
+ "loss": 0.1199,
+ "step": 84
+ },
+ {
+ "epoch": 0.3304178814382896,
+ "grad_norm": 0.12877412140369415,
+ "learning_rate": 0.00019870492038070252,
+ "loss": 0.1401,
+ "step": 85
+ },
+ {
+ "epoch": 0.33430515063168126,
+ "grad_norm": 0.2026694416999817,
+ "learning_rate": 0.0001963525491562421,
+ "loss": 0.2156,
+ "step": 86
+ },
+ {
+ "epoch": 0.33819241982507287,
+ "grad_norm": 0.25777146220207214,
+ "learning_rate": 0.0001939875056076697,
+ "loss": 0.2802,
+ "step": 87
+ },
+ {
+ "epoch": 0.34207968901846453,
+ "grad_norm": 0.07381570339202881,
+ "learning_rate": 0.00019161043631427666,
+ "loss": 0.0682,
+ "step": 88
+ },
+ {
+ "epoch": 0.3459669582118562,
+ "grad_norm": 0.047046344727277756,
+ "learning_rate": 0.00018922199114307294,
+ "loss": 0.0355,
+ "step": 89
+ },
+ {
+ "epoch": 0.3498542274052478,
+ "grad_norm": 0.07161912322044373,
+ "learning_rate": 0.00018682282307111987,
+ "loss": 0.0368,
+ "step": 90
+ },
+ {
+ "epoch": 0.35374149659863946,
+ "grad_norm": 0.04765067622065544,
+ "learning_rate": 0.00018441358800701273,
+ "loss": 0.0342,
+ "step": 91
+ },
+ {
+ "epoch": 0.3576287657920311,
+ "grad_norm": 0.05586829036474228,
+ "learning_rate": 0.00018199494461156203,
+ "loss": 0.0383,
+ "step": 92
+ },
+ {
+ "epoch": 0.36151603498542273,
+ "grad_norm": 0.05112822353839874,
+ "learning_rate": 0.000179567554117722,
+ "loss": 0.0457,
+ "step": 93
+ },
+ {
+ "epoch": 0.3654033041788144,
+ "grad_norm": 0.06726202368736267,
+ "learning_rate": 0.00017713208014981648,
+ "loss": 0.0575,
+ "step": 94
+ },
+ {
+ "epoch": 0.369290573372206,
+ "grad_norm": 0.11716607213020325,
+ "learning_rate": 0.00017468918854211007,
+ "loss": 0.0752,
+ "step": 95
+ },
+ {
+ "epoch": 0.37317784256559766,
+ "grad_norm": 0.12760676443576813,
+ "learning_rate": 0.00017223954715677627,
+ "loss": 0.1276,
+ "step": 96
+ },
+ {
+ "epoch": 0.3770651117589893,
+ "grad_norm": 0.14696328341960907,
+ "learning_rate": 0.00016978382570131034,
+ "loss": 0.14,
+ "step": 97
+ },
+ {
+ "epoch": 0.38095238095238093,
+ "grad_norm": 0.16512975096702576,
+ "learning_rate": 0.00016732269554543794,
+ "loss": 0.2012,
+ "step": 98
+ },
+ {
+ "epoch": 0.3848396501457726,
+ "grad_norm": 0.22031673789024353,
+ "learning_rate": 0.00016485682953756942,
+ "loss": 0.2332,
+ "step": 99
+ },
+ {
+ "epoch": 0.38872691933916426,
+ "grad_norm": 0.332925021648407,
+ "learning_rate": 0.00016238690182084986,
+ "loss": 0.394,
+ "step": 100
+ },
+ {
+ "epoch": 0.38872691933916426,
+ "eval_loss": 0.08561168611049652,
+ "eval_runtime": 0.7609,
+ "eval_samples_per_second": 65.715,
+ "eval_steps_per_second": 2.629,
+ "step": 100
  }
  ],
  "logging_steps": 1,
@@ -409,7 +775,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1.1654663750077645e+17,
+ "total_flos": 2.328842228371292e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null