{
"best_metric": 2.242912530899048,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.07110479068527242,
"eval_steps": 25,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0003555239534263621,
"grad_norm": 5.148736953735352,
"learning_rate": 2.9999999999999997e-05,
"loss": 10.6832,
"step": 1
},
{
"epoch": 0.0003555239534263621,
"eval_loss": 4.129520893096924,
"eval_runtime": 3.3586,
"eval_samples_per_second": 14.887,
"eval_steps_per_second": 2.084,
"step": 1
},
{
"epoch": 0.0007110479068527242,
"grad_norm": 9.437398910522461,
"learning_rate": 5.9999999999999995e-05,
"loss": 13.3188,
"step": 2
},
{
"epoch": 0.0010665718602790863,
"grad_norm": 11.877067565917969,
"learning_rate": 8.999999999999999e-05,
"loss": 13.3741,
"step": 3
},
{
"epoch": 0.0014220958137054485,
"grad_norm": 10.118707656860352,
"learning_rate": 0.00011999999999999999,
"loss": 15.4794,
"step": 4
},
{
"epoch": 0.0017776197671318106,
"grad_norm": 10.598353385925293,
"learning_rate": 0.00015,
"loss": 14.2279,
"step": 5
},
{
"epoch": 0.0021331437205581727,
"grad_norm": 10.02908706665039,
"learning_rate": 0.00017999999999999998,
"loss": 14.693,
"step": 6
},
{
"epoch": 0.002488667673984535,
"grad_norm": 9.211532592773438,
"learning_rate": 0.00020999999999999998,
"loss": 13.2508,
"step": 7
},
{
"epoch": 0.002844191627410897,
"grad_norm": 7.770359039306641,
"learning_rate": 0.00023999999999999998,
"loss": 11.5868,
"step": 8
},
{
"epoch": 0.003199715580837259,
"grad_norm": 7.963329315185547,
"learning_rate": 0.00027,
"loss": 11.9802,
"step": 9
},
{
"epoch": 0.003555239534263621,
"grad_norm": 7.712958335876465,
"learning_rate": 0.0003,
"loss": 11.9173,
"step": 10
},
{
"epoch": 0.003910763487689983,
"grad_norm": 9.166488647460938,
"learning_rate": 0.0002999794957488703,
"loss": 10.7235,
"step": 11
},
{
"epoch": 0.004266287441116345,
"grad_norm": 7.82598352432251,
"learning_rate": 0.0002999179886011389,
"loss": 11.9476,
"step": 12
},
{
"epoch": 0.0046218113945427075,
"grad_norm": 6.442850589752197,
"learning_rate": 0.0002998154953722457,
"loss": 11.8718,
"step": 13
},
{
"epoch": 0.00497733534796907,
"grad_norm": 6.2762250900268555,
"learning_rate": 0.00029967204408281613,
"loss": 11.2581,
"step": 14
},
{
"epoch": 0.005332859301395432,
"grad_norm": 6.239474296569824,
"learning_rate": 0.00029948767395100045,
"loss": 11.5523,
"step": 15
},
{
"epoch": 0.005688383254821794,
"grad_norm": 6.828975200653076,
"learning_rate": 0.0002992624353817517,
"loss": 10.8527,
"step": 16
},
{
"epoch": 0.006043907208248156,
"grad_norm": 5.866279125213623,
"learning_rate": 0.0002989963899530457,
"loss": 10.2987,
"step": 17
},
{
"epoch": 0.006399431161674518,
"grad_norm": 6.213174819946289,
"learning_rate": 0.00029868961039904624,
"loss": 11.1893,
"step": 18
},
{
"epoch": 0.00675495511510088,
"grad_norm": 6.3075947761535645,
"learning_rate": 0.00029834218059022024,
"loss": 11.7585,
"step": 19
},
{
"epoch": 0.007110479068527242,
"grad_norm": 5.69662618637085,
"learning_rate": 0.00029795419551040833,
"loss": 10.9056,
"step": 20
},
{
"epoch": 0.0074660030219536044,
"grad_norm": 5.039904594421387,
"learning_rate": 0.00029752576123085736,
"loss": 10.9972,
"step": 21
},
{
"epoch": 0.007821526975379967,
"grad_norm": 7.389419078826904,
"learning_rate": 0.0002970569948812214,
"loss": 10.7294,
"step": 22
},
{
"epoch": 0.008177050928806329,
"grad_norm": 5.38754940032959,
"learning_rate": 0.0002965480246175399,
"loss": 11.4038,
"step": 23
},
{
"epoch": 0.00853257488223269,
"grad_norm": 4.84633207321167,
"learning_rate": 0.0002959989895872009,
"loss": 11.3331,
"step": 24
},
{
"epoch": 0.008888098835659053,
"grad_norm": 8.040166854858398,
"learning_rate": 0.0002954100398908995,
"loss": 9.1027,
"step": 25
},
{
"epoch": 0.008888098835659053,
"eval_loss": 2.6589479446411133,
"eval_runtime": 3.4083,
"eval_samples_per_second": 14.67,
"eval_steps_per_second": 2.054,
"step": 25
},
{
"epoch": 0.009243622789085415,
"grad_norm": 6.918544292449951,
"learning_rate": 0.0002947813365416023,
"loss": 8.9439,
"step": 26
},
{
"epoch": 0.009599146742511777,
"grad_norm": 7.847255229949951,
"learning_rate": 0.0002941130514205272,
"loss": 11.8663,
"step": 27
},
{
"epoch": 0.00995467069593814,
"grad_norm": 7.098354816436768,
"learning_rate": 0.0002934053672301536,
"loss": 10.7316,
"step": 28
},
{
"epoch": 0.010310194649364501,
"grad_norm": 6.682184219360352,
"learning_rate": 0.00029265847744427303,
"loss": 11.5576,
"step": 29
},
{
"epoch": 0.010665718602790863,
"grad_norm": 6.960466384887695,
"learning_rate": 0.00029187258625509513,
"loss": 12.3166,
"step": 30
},
{
"epoch": 0.011021242556217226,
"grad_norm": 5.996150493621826,
"learning_rate": 0.00029104790851742417,
"loss": 10.4602,
"step": 31
},
{
"epoch": 0.011376766509643588,
"grad_norm": 4.758652687072754,
"learning_rate": 0.0002901846696899191,
"loss": 10.4963,
"step": 32
},
{
"epoch": 0.01173229046306995,
"grad_norm": 7.130350112915039,
"learning_rate": 0.00028928310577345606,
"loss": 10.7115,
"step": 33
},
{
"epoch": 0.012087814416496312,
"grad_norm": 6.909444332122803,
"learning_rate": 0.0002883434632466077,
"loss": 11.7185,
"step": 34
},
{
"epoch": 0.012443338369922674,
"grad_norm": 5.900872230529785,
"learning_rate": 0.00028736599899825856,
"loss": 11.569,
"step": 35
},
{
"epoch": 0.012798862323349036,
"grad_norm": 6.807727336883545,
"learning_rate": 0.00028635098025737434,
"loss": 11.2401,
"step": 36
},
{
"epoch": 0.013154386276775398,
"grad_norm": 5.605060577392578,
"learning_rate": 0.00028529868451994384,
"loss": 10.745,
"step": 37
},
{
"epoch": 0.01350991023020176,
"grad_norm": 6.011505126953125,
"learning_rate": 0.0002842093994731145,
"loss": 11.2352,
"step": 38
},
{
"epoch": 0.013865434183628123,
"grad_norm": 5.524464130401611,
"learning_rate": 0.00028308342291654174,
"loss": 11.7443,
"step": 39
},
{
"epoch": 0.014220958137054485,
"grad_norm": 6.286143779754639,
"learning_rate": 0.00028192106268097334,
"loss": 11.7776,
"step": 40
},
{
"epoch": 0.014576482090480847,
"grad_norm": 6.927269458770752,
"learning_rate": 0.00028072263654409154,
"loss": 11.5985,
"step": 41
},
{
"epoch": 0.014932006043907209,
"grad_norm": 6.525967121124268,
"learning_rate": 0.0002794884721436361,
"loss": 10.8744,
"step": 42
},
{
"epoch": 0.015287529997333571,
"grad_norm": 8.965846061706543,
"learning_rate": 0.00027821890688783083,
"loss": 11.2533,
"step": 43
},
{
"epoch": 0.015643053950759933,
"grad_norm": 5.708492755889893,
"learning_rate": 0.0002769142878631403,
"loss": 11.8151,
"step": 44
},
{
"epoch": 0.015998577904186295,
"grad_norm": 6.520906925201416,
"learning_rate": 0.00027557497173937923,
"loss": 12.0673,
"step": 45
},
{
"epoch": 0.016354101857612657,
"grad_norm": 6.278893947601318,
"learning_rate": 0.000274201324672203,
"loss": 11.4272,
"step": 46
},
{
"epoch": 0.01670962581103902,
"grad_norm": 6.297929763793945,
"learning_rate": 0.00027279372220300385,
"loss": 12.42,
"step": 47
},
{
"epoch": 0.01706514976446538,
"grad_norm": 6.474391460418701,
"learning_rate": 0.0002713525491562421,
"loss": 12.3248,
"step": 48
},
{
"epoch": 0.017420673717891744,
"grad_norm": 7.879404067993164,
"learning_rate": 0.00026987819953423867,
"loss": 11.704,
"step": 49
},
{
"epoch": 0.017776197671318106,
"grad_norm": 11.09743595123291,
"learning_rate": 0.00026837107640945905,
"loss": 11.4028,
"step": 50
},
{
"epoch": 0.017776197671318106,
"eval_loss": 2.671022653579712,
"eval_runtime": 3.4117,
"eval_samples_per_second": 14.655,
"eval_steps_per_second": 2.052,
"step": 50
},
{
"epoch": 0.018131721624744468,
"grad_norm": 7.684964179992676,
"learning_rate": 0.0002668315918143169,
"loss": 9.6814,
"step": 51
},
{
"epoch": 0.01848724557817083,
"grad_norm": 8.576638221740723,
"learning_rate": 0.00026526016662852886,
"loss": 9.9166,
"step": 52
},
{
"epoch": 0.018842769531597192,
"grad_norm": 6.030575275421143,
"learning_rate": 0.00026365723046405023,
"loss": 9.4847,
"step": 53
},
{
"epoch": 0.019198293485023554,
"grad_norm": 5.481552600860596,
"learning_rate": 0.0002620232215476231,
"loss": 10.3622,
"step": 54
},
{
"epoch": 0.019553817438449916,
"grad_norm": 6.241498947143555,
"learning_rate": 0.0002603585866009697,
"loss": 10.2548,
"step": 55
},
{
"epoch": 0.01990934139187628,
"grad_norm": 6.387746334075928,
"learning_rate": 0.00025866378071866334,
"loss": 11.7413,
"step": 56
},
{
"epoch": 0.02026486534530264,
"grad_norm": 5.217421054840088,
"learning_rate": 0.00025693926724370956,
"loss": 10.0964,
"step": 57
},
{
"epoch": 0.020620389298729003,
"grad_norm": 5.437652587890625,
"learning_rate": 0.00025518551764087326,
"loss": 10.6227,
"step": 58
},
{
"epoch": 0.020975913252155365,
"grad_norm": 5.892682075500488,
"learning_rate": 0.00025340301136778483,
"loss": 10.518,
"step": 59
},
{
"epoch": 0.021331437205581727,
"grad_norm": 5.065958499908447,
"learning_rate": 0.00025159223574386114,
"loss": 10.1033,
"step": 60
},
{
"epoch": 0.02168696115900809,
"grad_norm": 4.1559953689575195,
"learning_rate": 0.0002497536858170772,
"loss": 10.8079,
"step": 61
},
{
"epoch": 0.02204248511243445,
"grad_norm": 4.143509387969971,
"learning_rate": 0.00024788786422862526,
"loss": 10.3888,
"step": 62
},
{
"epoch": 0.022398009065860813,
"grad_norm": 4.2139363288879395,
"learning_rate": 0.00024599528107549745,
"loss": 10.6368,
"step": 63
},
{
"epoch": 0.022753533019287175,
"grad_norm": 4.801663398742676,
"learning_rate": 0.00024407645377103054,
"loss": 9.8445,
"step": 64
},
{
"epoch": 0.023109056972713538,
"grad_norm": 5.6707024574279785,
"learning_rate": 0.00024213190690345018,
"loss": 9.8583,
"step": 65
},
{
"epoch": 0.0234645809261399,
"grad_norm": 4.797868728637695,
"learning_rate": 0.00024016217209245374,
"loss": 10.7598,
"step": 66
},
{
"epoch": 0.023820104879566262,
"grad_norm": 4.8325371742248535,
"learning_rate": 0.00023816778784387094,
"loss": 9.2898,
"step": 67
},
{
"epoch": 0.024175628832992624,
"grad_norm": 4.890848159790039,
"learning_rate": 0.0002361492994024415,
"loss": 10.3457,
"step": 68
},
{
"epoch": 0.024531152786418986,
"grad_norm": 7.144769191741943,
"learning_rate": 0.0002341072586027509,
"loss": 9.929,
"step": 69
},
{
"epoch": 0.024886676739845348,
"grad_norm": 5.622339248657227,
"learning_rate": 0.00023204222371836405,
"loss": 9.5537,
"step": 70
},
{
"epoch": 0.02524220069327171,
"grad_norm": 8.047707557678223,
"learning_rate": 0.00022995475930919905,
"loss": 10.8773,
"step": 71
},
{
"epoch": 0.025597724646698072,
"grad_norm": 5.305599689483643,
"learning_rate": 0.00022784543606718227,
"loss": 10.2004,
"step": 72
},
{
"epoch": 0.025953248600124434,
"grad_norm": 5.336524963378906,
"learning_rate": 0.00022571483066022657,
"loss": 9.5412,
"step": 73
},
{
"epoch": 0.026308772553550797,
"grad_norm": 5.118494987487793,
"learning_rate": 0.0002235635255745762,
"loss": 9.6813,
"step": 74
},
{
"epoch": 0.02666429650697716,
"grad_norm": 4.906371116638184,
"learning_rate": 0.00022139210895556104,
"loss": 10.4721,
"step": 75
},
{
"epoch": 0.02666429650697716,
"eval_loss": 2.505891799926758,
"eval_runtime": 3.4096,
"eval_samples_per_second": 14.665,
"eval_steps_per_second": 2.053,
"step": 75
},
{
"epoch": 0.02701982046040352,
"grad_norm": 4.981451511383057,
"learning_rate": 0.00021920117444680317,
"loss": 8.8563,
"step": 76
},
{
"epoch": 0.027375344413829883,
"grad_norm": 4.70904016494751,
"learning_rate": 0.00021699132102792097,
"loss": 10.5626,
"step": 77
},
{
"epoch": 0.027730868367256245,
"grad_norm": 5.6545233726501465,
"learning_rate": 0.0002147631528507739,
"loss": 11.0533,
"step": 78
},
{
"epoch": 0.028086392320682607,
"grad_norm": 5.137275218963623,
"learning_rate": 0.00021251727907429355,
"loss": 10.8668,
"step": 79
},
{
"epoch": 0.02844191627410897,
"grad_norm": 5.4811553955078125,
"learning_rate": 0.0002102543136979454,
"loss": 10.9318,
"step": 80
},
{
"epoch": 0.02879744022753533,
"grad_norm": 5.466274261474609,
"learning_rate": 0.0002079748753938678,
"loss": 10.401,
"step": 81
},
{
"epoch": 0.029152964180961694,
"grad_norm": 5.636516094207764,
"learning_rate": 0.0002056795873377331,
"loss": 10.0704,
"step": 82
},
{
"epoch": 0.029508488134388056,
"grad_norm": 5.315571308135986,
"learning_rate": 0.00020336907703837748,
"loss": 11.0001,
"step": 83
},
{
"epoch": 0.029864012087814418,
"grad_norm": 6.53507661819458,
"learning_rate": 0.00020104397616624645,
"loss": 11.992,
"step": 84
},
{
"epoch": 0.03021953604124078,
"grad_norm": 5.709775924682617,
"learning_rate": 0.00019870492038070252,
"loss": 10.4158,
"step": 85
},
{
"epoch": 0.030575059994667142,
"grad_norm": 5.086690425872803,
"learning_rate": 0.0001963525491562421,
"loss": 10.1491,
"step": 86
},
{
"epoch": 0.030930583948093504,
"grad_norm": 5.827716827392578,
"learning_rate": 0.0001939875056076697,
"loss": 11.3729,
"step": 87
},
{
"epoch": 0.031286107901519866,
"grad_norm": 5.376121520996094,
"learning_rate": 0.00019161043631427666,
"loss": 10.2509,
"step": 88
},
{
"epoch": 0.03164163185494623,
"grad_norm": 5.2902445793151855,
"learning_rate": 0.00018922199114307294,
"loss": 9.9306,
"step": 89
},
{
"epoch": 0.03199715580837259,
"grad_norm": 6.116251468658447,
"learning_rate": 0.00018682282307111987,
"loss": 11.4962,
"step": 90
},
{
"epoch": 0.03235267976179895,
"grad_norm": 5.817269325256348,
"learning_rate": 0.00018441358800701273,
"loss": 10.5293,
"step": 91
},
{
"epoch": 0.032708203715225315,
"grad_norm": 5.698713779449463,
"learning_rate": 0.00018199494461156203,
"loss": 9.9072,
"step": 92
},
{
"epoch": 0.03306372766865168,
"grad_norm": 7.766204357147217,
"learning_rate": 0.000179567554117722,
"loss": 10.4333,
"step": 93
},
{
"epoch": 0.03341925162207804,
"grad_norm": 6.0432939529418945,
"learning_rate": 0.00017713208014981648,
"loss": 11.2163,
"step": 94
},
{
"epoch": 0.0337747755755044,
"grad_norm": 6.411512851715088,
"learning_rate": 0.00017468918854211007,
"loss": 11.3715,
"step": 95
},
{
"epoch": 0.03413029952893076,
"grad_norm": 6.964004039764404,
"learning_rate": 0.00017223954715677627,
"loss": 10.0495,
"step": 96
},
{
"epoch": 0.034485823482357125,
"grad_norm": 5.977689743041992,
"learning_rate": 0.00016978382570131034,
"loss": 10.9753,
"step": 97
},
{
"epoch": 0.03484134743578349,
"grad_norm": 6.7813825607299805,
"learning_rate": 0.00016732269554543794,
"loss": 9.5063,
"step": 98
},
{
"epoch": 0.03519687138920985,
"grad_norm": 6.9034318923950195,
"learning_rate": 0.00016485682953756942,
"loss": 10.1179,
"step": 99
},
{
"epoch": 0.03555239534263621,
"grad_norm": 10.123339653015137,
"learning_rate": 0.00016238690182084986,
"loss": 9.7543,
"step": 100
},
{
"epoch": 0.03555239534263621,
"eval_loss": 2.6253039836883545,
"eval_runtime": 3.4096,
"eval_samples_per_second": 14.665,
"eval_steps_per_second": 2.053,
"step": 100
},
{
"epoch": 0.035907919296062574,
"grad_norm": 8.50604248046875,
"learning_rate": 0.0001599135876488549,
"loss": 10.522,
"step": 101
},
{
"epoch": 0.036263443249488936,
"grad_norm": 10.837066650390625,
"learning_rate": 0.00015743756320098332,
"loss": 8.8138,
"step": 102
},
{
"epoch": 0.0366189672029153,
"grad_norm": 6.720249176025391,
"learning_rate": 0.0001549595053975962,
"loss": 7.2665,
"step": 103
},
{
"epoch": 0.03697449115634166,
"grad_norm": 6.120709419250488,
"learning_rate": 0.00015248009171495378,
"loss": 10.0905,
"step": 104
},
{
"epoch": 0.03733001510976802,
"grad_norm": 5.360714912414551,
"learning_rate": 0.00015,
"loss": 10.3304,
"step": 105
},
{
"epoch": 0.037685539063194384,
"grad_norm": 6.324580669403076,
"learning_rate": 0.00014751990828504622,
"loss": 11.3479,
"step": 106
},
{
"epoch": 0.038041063016620746,
"grad_norm": 5.683630466461182,
"learning_rate": 0.00014504049460240375,
"loss": 10.5964,
"step": 107
},
{
"epoch": 0.03839658697004711,
"grad_norm": 4.51971435546875,
"learning_rate": 0.00014256243679901663,
"loss": 9.617,
"step": 108
},
{
"epoch": 0.03875211092347347,
"grad_norm": 6.3022565841674805,
"learning_rate": 0.00014008641235114508,
"loss": 10.4929,
"step": 109
},
{
"epoch": 0.03910763487689983,
"grad_norm": 6.827676296234131,
"learning_rate": 0.00013761309817915014,
"loss": 10.6284,
"step": 110
},
{
"epoch": 0.039463158830326195,
"grad_norm": 5.795304298400879,
"learning_rate": 0.00013514317046243058,
"loss": 9.0099,
"step": 111
},
{
"epoch": 0.03981868278375256,
"grad_norm": 5.27096700668335,
"learning_rate": 0.00013267730445456208,
"loss": 9.5003,
"step": 112
},
{
"epoch": 0.04017420673717892,
"grad_norm": 5.735016345977783,
"learning_rate": 0.00013021617429868963,
"loss": 10.5266,
"step": 113
},
{
"epoch": 0.04052973069060528,
"grad_norm": 5.17439079284668,
"learning_rate": 0.00012776045284322368,
"loss": 9.5267,
"step": 114
},
{
"epoch": 0.04088525464403164,
"grad_norm": 5.072749614715576,
"learning_rate": 0.00012531081145788987,
"loss": 9.9992,
"step": 115
},
{
"epoch": 0.041240778597458005,
"grad_norm": 4.426019191741943,
"learning_rate": 0.00012286791985018355,
"loss": 8.5191,
"step": 116
},
{
"epoch": 0.04159630255088437,
"grad_norm": 5.0171027183532715,
"learning_rate": 0.00012043244588227796,
"loss": 10.0848,
"step": 117
},
{
"epoch": 0.04195182650431073,
"grad_norm": 5.059338092803955,
"learning_rate": 0.00011800505538843798,
"loss": 9.6589,
"step": 118
},
{
"epoch": 0.04230735045773709,
"grad_norm": 4.898255825042725,
"learning_rate": 0.00011558641199298727,
"loss": 9.9281,
"step": 119
},
{
"epoch": 0.042662874411163454,
"grad_norm": 4.372053146362305,
"learning_rate": 0.00011317717692888012,
"loss": 9.323,
"step": 120
},
{
"epoch": 0.043018398364589816,
"grad_norm": 5.065223693847656,
"learning_rate": 0.00011077800885692702,
"loss": 11.5742,
"step": 121
},
{
"epoch": 0.04337392231801618,
"grad_norm": 5.381391525268555,
"learning_rate": 0.00010838956368572334,
"loss": 10.2001,
"step": 122
},
{
"epoch": 0.04372944627144254,
"grad_norm": 4.845911026000977,
"learning_rate": 0.0001060124943923303,
"loss": 9.9141,
"step": 123
},
{
"epoch": 0.0440849702248689,
"grad_norm": 5.539061069488525,
"learning_rate": 0.0001036474508437579,
"loss": 9.1523,
"step": 124
},
{
"epoch": 0.044440494178295264,
"grad_norm": 4.919478893280029,
"learning_rate": 0.00010129507961929748,
"loss": 8.4648,
"step": 125
},
{
"epoch": 0.044440494178295264,
"eval_loss": 2.34893536567688,
"eval_runtime": 3.4101,
"eval_samples_per_second": 14.662,
"eval_steps_per_second": 2.053,
"step": 125
},
{
"epoch": 0.04479601813172163,
"grad_norm": 5.182336807250977,
"learning_rate": 9.895602383375353e-05,
"loss": 9.3801,
"step": 126
},
{
"epoch": 0.04515154208514799,
"grad_norm": 6.5174994468688965,
"learning_rate": 9.663092296162251e-05,
"loss": 9.7324,
"step": 127
},
{
"epoch": 0.04550706603857435,
"grad_norm": 6.033017635345459,
"learning_rate": 9.432041266226686e-05,
"loss": 9.4699,
"step": 128
},
{
"epoch": 0.04586258999200071,
"grad_norm": 5.28855562210083,
"learning_rate": 9.202512460613219e-05,
"loss": 10.3632,
"step": 129
},
{
"epoch": 0.046218113945427075,
"grad_norm": 5.92414665222168,
"learning_rate": 8.97456863020546e-05,
"loss": 10.1867,
"step": 130
},
{
"epoch": 0.04657363789885344,
"grad_norm": 5.728801250457764,
"learning_rate": 8.748272092570646e-05,
"loss": 9.5754,
"step": 131
},
{
"epoch": 0.0469291618522798,
"grad_norm": 5.550567150115967,
"learning_rate": 8.523684714922608e-05,
"loss": 10.5313,
"step": 132
},
{
"epoch": 0.04728468580570616,
"grad_norm": 5.983530521392822,
"learning_rate": 8.300867897207903e-05,
"loss": 8.9917,
"step": 133
},
{
"epoch": 0.047640209759132524,
"grad_norm": 6.547121047973633,
"learning_rate": 8.079882555319684e-05,
"loss": 10.6116,
"step": 134
},
{
"epoch": 0.047995733712558886,
"grad_norm": 6.877899169921875,
"learning_rate": 7.860789104443896e-05,
"loss": 10.4791,
"step": 135
},
{
"epoch": 0.04835125766598525,
"grad_norm": 6.698514461517334,
"learning_rate": 7.643647442542382e-05,
"loss": 10.5741,
"step": 136
},
{
"epoch": 0.04870678161941161,
"grad_norm": 5.82236385345459,
"learning_rate": 7.428516933977347e-05,
"loss": 10.0061,
"step": 137
},
{
"epoch": 0.04906230557283797,
"grad_norm": 7.110109806060791,
"learning_rate": 7.215456393281776e-05,
"loss": 11.2455,
"step": 138
},
{
"epoch": 0.049417829526264334,
"grad_norm": 5.919317245483398,
"learning_rate": 7.004524069080096e-05,
"loss": 10.2149,
"step": 139
},
{
"epoch": 0.049773353479690696,
"grad_norm": 5.82124662399292,
"learning_rate": 6.795777628163599e-05,
"loss": 11.2277,
"step": 140
},
{
"epoch": 0.05012887743311706,
"grad_norm": 5.727226734161377,
"learning_rate": 6.58927413972491e-05,
"loss": 10.3604,
"step": 141
},
{
"epoch": 0.05048440138654342,
"grad_norm": 6.384329795837402,
"learning_rate": 6.385070059755846e-05,
"loss": 10.9996,
"step": 142
},
{
"epoch": 0.05083992533996978,
"grad_norm": 7.032529354095459,
"learning_rate": 6.183221215612904e-05,
"loss": 9.4377,
"step": 143
},
{
"epoch": 0.051195449293396145,
"grad_norm": 6.148966312408447,
"learning_rate": 5.983782790754623e-05,
"loss": 9.4402,
"step": 144
},
{
"epoch": 0.05155097324682251,
"grad_norm": 7.128761291503906,
"learning_rate": 5.786809309654982e-05,
"loss": 9.2494,
"step": 145
},
{
"epoch": 0.05190649720024887,
"grad_norm": 5.945568084716797,
"learning_rate": 5.592354622896944e-05,
"loss": 9.6517,
"step": 146
},
{
"epoch": 0.05226202115367523,
"grad_norm": 6.259884834289551,
"learning_rate": 5.40047189245025e-05,
"loss": 9.2707,
"step": 147
},
{
"epoch": 0.05261754510710159,
"grad_norm": 6.439622402191162,
"learning_rate": 5.211213577137469e-05,
"loss": 9.4341,
"step": 148
},
{
"epoch": 0.052973069060527955,
"grad_norm": 6.414124011993408,
"learning_rate": 5.024631418292274e-05,
"loss": 9.6553,
"step": 149
},
{
"epoch": 0.05332859301395432,
"grad_norm": 9.953978538513184,
"learning_rate": 4.840776425613886e-05,
"loss": 6.9357,
"step": 150
},
{
"epoch": 0.05332859301395432,
"eval_loss": 2.314272403717041,
"eval_runtime": 3.4086,
"eval_samples_per_second": 14.669,
"eval_steps_per_second": 2.054,
"step": 150
},
{
"epoch": 0.05368411696738068,
"grad_norm": 8.713624954223633,
"learning_rate": 4.659698863221513e-05,
"loss": 8.3145,
"step": 151
},
{
"epoch": 0.05403964092080704,
"grad_norm": 7.966327667236328,
"learning_rate": 4.481448235912671e-05,
"loss": 8.6993,
"step": 152
},
{
"epoch": 0.054395164874233404,
"grad_norm": 6.538430690765381,
"learning_rate": 4.306073275629044e-05,
"loss": 7.6628,
"step": 153
},
{
"epoch": 0.054750688827659766,
"grad_norm": 8.117944717407227,
"learning_rate": 4.133621928133665e-05,
"loss": 9.9395,
"step": 154
},
{
"epoch": 0.05510621278108613,
"grad_norm": 7.285843849182129,
"learning_rate": 3.964141339903026e-05,
"loss": 9.6924,
"step": 155
},
{
"epoch": 0.05546173673451249,
"grad_norm": 7.61773681640625,
"learning_rate": 3.797677845237696e-05,
"loss": 10.1125,
"step": 156
},
{
"epoch": 0.05581726068793885,
"grad_norm": 5.755801200866699,
"learning_rate": 3.634276953594982e-05,
"loss": 9.539,
"step": 157
},
{
"epoch": 0.056172784641365214,
"grad_norm": 5.638586521148682,
"learning_rate": 3.473983337147118e-05,
"loss": 9.2763,
"step": 158
},
{
"epoch": 0.056528308594791576,
"grad_norm": 5.6890549659729,
"learning_rate": 3.316840818568315e-05,
"loss": 9.0826,
"step": 159
},
{
"epoch": 0.05688383254821794,
"grad_norm": 5.357155799865723,
"learning_rate": 3.162892359054098e-05,
"loss": 10.1697,
"step": 160
},
{
"epoch": 0.0572393565016443,
"grad_norm": 5.9341630935668945,
"learning_rate": 3.0121800465761293e-05,
"loss": 9.6203,
"step": 161
},
{
"epoch": 0.05759488045507066,
"grad_norm": 5.837774753570557,
"learning_rate": 2.8647450843757897e-05,
"loss": 10.0702,
"step": 162
},
{
"epoch": 0.057950404408497025,
"grad_norm": 5.2682785987854,
"learning_rate": 2.7206277796996144e-05,
"loss": 9.561,
"step": 163
},
{
"epoch": 0.05830592836192339,
"grad_norm": 5.310613632202148,
"learning_rate": 2.5798675327796993e-05,
"loss": 9.7458,
"step": 164
},
{
"epoch": 0.05866145231534975,
"grad_norm": 5.741307258605957,
"learning_rate": 2.4425028260620715e-05,
"loss": 10.4844,
"step": 165
},
{
"epoch": 0.05901697626877611,
"grad_norm": 6.544122219085693,
"learning_rate": 2.3085712136859668e-05,
"loss": 8.8046,
"step": 166
},
{
"epoch": 0.05937250022220247,
"grad_norm": 6.212630271911621,
"learning_rate": 2.178109311216913e-05,
"loss": 10.0538,
"step": 167
},
{
"epoch": 0.059728024175628835,
"grad_norm": 5.299189567565918,
"learning_rate": 2.0511527856363912e-05,
"loss": 10.2602,
"step": 168
},
{
"epoch": 0.0600835481290552,
"grad_norm": 5.7751145362854,
"learning_rate": 1.927736345590839e-05,
"loss": 9.5055,
"step": 169
},
{
"epoch": 0.06043907208248156,
"grad_norm": 5.401315212249756,
"learning_rate": 1.8078937319026654e-05,
"loss": 10.03,
"step": 170
},
{
"epoch": 0.06079459603590792,
"grad_norm": 6.487131118774414,
"learning_rate": 1.6916577083458228e-05,
"loss": 10.216,
"step": 171
},
{
"epoch": 0.061150119989334284,
"grad_norm": 5.753161907196045,
"learning_rate": 1.579060052688548e-05,
"loss": 9.377,
"step": 172
},
{
"epoch": 0.061505643942760646,
"grad_norm": 6.108590602874756,
"learning_rate": 1.4701315480056164e-05,
"loss": 9.7962,
"step": 173
},
{
"epoch": 0.06186116789618701,
"grad_norm": 6.228653907775879,
"learning_rate": 1.3649019742625623e-05,
"loss": 9.6701,
"step": 174
},
{
"epoch": 0.06221669184961337,
"grad_norm": 4.956625461578369,
"learning_rate": 1.2634001001741373e-05,
"loss": 8.4442,
"step": 175
},
{
"epoch": 0.06221669184961337,
"eval_loss": 2.2359719276428223,
"eval_runtime": 3.4056,
"eval_samples_per_second": 14.682,
"eval_steps_per_second": 2.055,
"step": 175
},
{
"epoch": 0.06257221580303973,
"grad_norm": 5.659938335418701,
"learning_rate": 1.1656536753392287e-05,
"loss": 9.1614,
"step": 176
},
{
"epoch": 0.0629277397564661,
"grad_norm": 5.632331371307373,
"learning_rate": 1.0716894226543953e-05,
"loss": 9.2837,
"step": 177
},
{
"epoch": 0.06328326370989246,
"grad_norm": 6.158392906188965,
"learning_rate": 9.815330310080887e-06,
"loss": 10.6873,
"step": 178
},
{
"epoch": 0.06363878766331882,
"grad_norm": 5.655581951141357,
"learning_rate": 8.952091482575824e-06,
"loss": 9.3116,
"step": 179
},
{
"epoch": 0.06399431161674518,
"grad_norm": 5.882752895355225,
"learning_rate": 8.127413744904804e-06,
"loss": 9.7047,
"step": 180
},
{
"epoch": 0.06434983557017154,
"grad_norm": 6.070702075958252,
"learning_rate": 7.34152255572697e-06,
"loss": 10.2734,
"step": 181
},
{
"epoch": 0.0647053595235979,
"grad_norm": 6.517852306365967,
"learning_rate": 6.594632769846353e-06,
"loss": 10.6647,
"step": 182
},
{
"epoch": 0.06506088347702427,
"grad_norm": 6.227158069610596,
"learning_rate": 5.886948579472778e-06,
"loss": 9.2133,
"step": 183
},
{
"epoch": 0.06541640743045063,
"grad_norm": 5.783818244934082,
"learning_rate": 5.218663458397715e-06,
"loss": 10.3477,
"step": 184
},
{
"epoch": 0.06577193138387699,
"grad_norm": 5.751105308532715,
"learning_rate": 4.589960109100444e-06,
"loss": 9.6947,
"step": 185
},
{
"epoch": 0.06612745533730335,
"grad_norm": 5.619855880737305,
"learning_rate": 4.001010412799138e-06,
"loss": 10.0627,
"step": 186
},
{
"epoch": 0.06648297929072972,
"grad_norm": 5.333779335021973,
"learning_rate": 3.451975382460109e-06,
"loss": 9.7599,
"step": 187
},
{
"epoch": 0.06683850324415608,
"grad_norm": 6.2255988121032715,
"learning_rate": 2.9430051187785962e-06,
"loss": 10.7451,
"step": 188
},
{
"epoch": 0.06719402719758244,
"grad_norm": 6.961033344268799,
"learning_rate": 2.4742387691426445e-06,
"loss": 10.2383,
"step": 189
},
{
"epoch": 0.0675495511510088,
"grad_norm": 6.666347026824951,
"learning_rate": 2.0458044895916513e-06,
"loss": 10.132,
"step": 190
},
{
"epoch": 0.06790507510443516,
"grad_norm": 7.37328577041626,
"learning_rate": 1.6578194097797258e-06,
"loss": 10.2189,
"step": 191
},
{
"epoch": 0.06826059905786153,
"grad_norm": 7.116367340087891,
"learning_rate": 1.3103896009537207e-06,
"loss": 9.9206,
"step": 192
},
{
"epoch": 0.06861612301128789,
"grad_norm": 6.1720404624938965,
"learning_rate": 1.0036100469542786e-06,
"loss": 10.1334,
"step": 193
},
{
"epoch": 0.06897164696471425,
"grad_norm": 9.900176048278809,
"learning_rate": 7.375646182482875e-07,
"loss": 9.7241,
"step": 194
},
{
"epoch": 0.06932717091814061,
"grad_norm": 6.8333048820495605,
"learning_rate": 5.123260489995229e-07,
"loss": 10.0152,
"step": 195
},
{
"epoch": 0.06968269487156697,
"grad_norm": 6.908794403076172,
"learning_rate": 3.2795591718381975e-07,
"loss": 10.0908,
"step": 196
},
{
"epoch": 0.07003821882499334,
"grad_norm": 7.333555698394775,
"learning_rate": 1.8450462775428942e-07,
"loss": 10.8558,
"step": 197
},
{
"epoch": 0.0703937427784197,
"grad_norm": 7.865172863006592,
"learning_rate": 8.201139886109264e-08,
"loss": 10.5989,
"step": 198
},
{
"epoch": 0.07074926673184606,
"grad_norm": 8.003969192504883,
"learning_rate": 2.0504251129649374e-08,
"loss": 9.9646,
"step": 199
},
{
"epoch": 0.07110479068527242,
"grad_norm": 9.733394622802734,
"learning_rate": 0.0,
"loss": 7.8091,
"step": 200
},
{
"epoch": 0.07110479068527242,
"eval_loss": 2.242912530899048,
"eval_runtime": 3.4081,
"eval_samples_per_second": 14.671,
"eval_steps_per_second": 2.054,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.7806688547858022e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}