{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 732,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 9.658415693590747,
"learning_rate": 2.702702702702703e-07,
"loss": 1.4394,
"step": 1
},
{
"epoch": 0.01,
"grad_norm": 9.876292569045443,
"learning_rate": 1.3513513513513515e-06,
"loss": 1.4271,
"step": 5
},
{
"epoch": 0.01,
"grad_norm": 9.586588537619404,
"learning_rate": 2.702702702702703e-06,
"loss": 1.4014,
"step": 10
},
{
"epoch": 0.02,
"grad_norm": 3.2928588175082862,
"learning_rate": 4.0540540540540545e-06,
"loss": 1.242,
"step": 15
},
{
"epoch": 0.03,
"grad_norm": 1.7916899568159483,
"learning_rate": 5.405405405405406e-06,
"loss": 1.1312,
"step": 20
},
{
"epoch": 0.03,
"grad_norm": 0.9818778768043777,
"learning_rate": 6.7567567567567575e-06,
"loss": 1.0505,
"step": 25
},
{
"epoch": 0.04,
"grad_norm": 0.8394686945518413,
"learning_rate": 8.108108108108109e-06,
"loss": 1.0424,
"step": 30
},
{
"epoch": 0.05,
"grad_norm": 0.7551190468266619,
"learning_rate": 9.45945945945946e-06,
"loss": 1.0518,
"step": 35
},
{
"epoch": 0.05,
"grad_norm": 0.6711648718562863,
"learning_rate": 1.0810810810810812e-05,
"loss": 1.0121,
"step": 40
},
{
"epoch": 0.06,
"grad_norm": 0.6200076782701016,
"learning_rate": 1.2162162162162164e-05,
"loss": 1.057,
"step": 45
},
{
"epoch": 0.07,
"grad_norm": 0.658782644893673,
"learning_rate": 1.3513513513513515e-05,
"loss": 1.013,
"step": 50
},
{
"epoch": 0.08,
"grad_norm": 0.5356817576419274,
"learning_rate": 1.4864864864864865e-05,
"loss": 1.0,
"step": 55
},
{
"epoch": 0.08,
"grad_norm": 0.5048984401091999,
"learning_rate": 1.6216216216216218e-05,
"loss": 1.0219,
"step": 60
},
{
"epoch": 0.09,
"grad_norm": 0.517547275458162,
"learning_rate": 1.756756756756757e-05,
"loss": 1.0327,
"step": 65
},
{
"epoch": 0.1,
"grad_norm": 0.4925361764186976,
"learning_rate": 1.891891891891892e-05,
"loss": 1.0107,
"step": 70
},
{
"epoch": 0.1,
"grad_norm": 0.4945886669805455,
"learning_rate": 1.999988602302209e-05,
"loss": 1.0252,
"step": 75
},
{
"epoch": 0.11,
"grad_norm": 0.49947937008697635,
"learning_rate": 1.9995897101594454e-05,
"loss": 0.9825,
"step": 80
},
{
"epoch": 0.12,
"grad_norm": 0.48799350342765896,
"learning_rate": 1.99862119291555e-05,
"loss": 0.9848,
"step": 85
},
{
"epoch": 0.12,
"grad_norm": 0.49925057798584527,
"learning_rate": 1.997083602488702e-05,
"loss": 0.9982,
"step": 90
},
{
"epoch": 0.13,
"grad_norm": 0.533633849127977,
"learning_rate": 1.994977815088504e-05,
"loss": 1.0089,
"step": 95
},
{
"epoch": 0.14,
"grad_norm": 0.5205221053880554,
"learning_rate": 1.9923050307166655e-05,
"loss": 1.0036,
"step": 100
},
{
"epoch": 0.14,
"grad_norm": 0.4728771654069777,
"learning_rate": 1.989066772483171e-05,
"loss": 1.0057,
"step": 105
},
{
"epoch": 0.15,
"grad_norm": 0.5330378379531007,
"learning_rate": 1.9852648857383224e-05,
"loss": 1.0004,
"step": 110
},
{
"epoch": 0.16,
"grad_norm": 0.5474915550296676,
"learning_rate": 1.9809015370211505e-05,
"loss": 0.9582,
"step": 115
},
{
"epoch": 0.16,
"grad_norm": 0.5065591909160228,
"learning_rate": 1.9759792128247922e-05,
"loss": 0.9875,
"step": 120
},
{
"epoch": 0.17,
"grad_norm": 0.5041379472534961,
"learning_rate": 1.9705007181795416e-05,
"loss": 0.9839,
"step": 125
},
{
"epoch": 0.18,
"grad_norm": 0.5152712578505293,
"learning_rate": 1.964469175054377e-05,
"loss": 0.9817,
"step": 130
},
{
"epoch": 0.18,
"grad_norm": 0.5181637583730708,
"learning_rate": 1.9578880205778793e-05,
"loss": 0.9601,
"step": 135
},
{
"epoch": 0.19,
"grad_norm": 0.5082366273570456,
"learning_rate": 1.950761005079556e-05,
"loss": 0.9594,
"step": 140
},
{
"epoch": 0.2,
"grad_norm": 0.5163440396909443,
"learning_rate": 1.9430921899526786e-05,
"loss": 0.9601,
"step": 145
},
{
"epoch": 0.2,
"grad_norm": 0.5286098804973952,
"learning_rate": 1.934885945339865e-05,
"loss": 0.9603,
"step": 150
},
{
"epoch": 0.21,
"grad_norm": 0.5128368272170564,
"learning_rate": 1.9261469476427122e-05,
"loss": 0.9959,
"step": 155
},
{
"epoch": 0.22,
"grad_norm": 0.5153476529706232,
"learning_rate": 1.916880176856909e-05,
"loss": 0.9494,
"step": 160
},
{
"epoch": 0.23,
"grad_norm": 0.4950657638957343,
"learning_rate": 1.907090913734341e-05,
"loss": 0.9522,
"step": 165
},
{
"epoch": 0.23,
"grad_norm": 0.48067724327955486,
"learning_rate": 1.896784736773805e-05,
"loss": 0.9881,
"step": 170
},
{
"epoch": 0.24,
"grad_norm": 0.5010564864661735,
"learning_rate": 1.885967519042054e-05,
"loss": 0.9664,
"step": 175
},
{
"epoch": 0.25,
"grad_norm": 0.5157306256625271,
"learning_rate": 1.8746454248269777e-05,
"loss": 0.992,
"step": 180
},
{
"epoch": 0.25,
"grad_norm": 0.5051241069978706,
"learning_rate": 1.862824906124826e-05,
"loss": 0.9569,
"step": 185
},
{
"epoch": 0.26,
"grad_norm": 0.5095950283674965,
"learning_rate": 1.850512698963485e-05,
"loss": 0.9741,
"step": 190
},
{
"epoch": 0.27,
"grad_norm": 0.5227451618958809,
"learning_rate": 1.8377158195638877e-05,
"loss": 0.9331,
"step": 195
},
{
"epoch": 0.27,
"grad_norm": 0.5181101956320976,
"learning_rate": 1.8244415603417603e-05,
"loss": 0.9603,
"step": 200
},
{
"epoch": 0.28,
"grad_norm": 0.510093680653842,
"learning_rate": 1.8106974857519737e-05,
"loss": 0.9562,
"step": 205
},
{
"epoch": 0.29,
"grad_norm": 0.5408000977823474,
"learning_rate": 1.7964914279778716e-05,
"loss": 0.9525,
"step": 210
},
{
"epoch": 0.29,
"grad_norm": 0.5221005548724054,
"learning_rate": 1.78183148246803e-05,
"loss": 0.9461,
"step": 215
},
{
"epoch": 0.3,
"grad_norm": 0.5228654219110396,
"learning_rate": 1.7667260033229953e-05,
"loss": 0.93,
"step": 220
},
{
"epoch": 0.31,
"grad_norm": 0.5156616142594198,
"learning_rate": 1.751183598534625e-05,
"loss": 0.9523,
"step": 225
},
{
"epoch": 0.31,
"grad_norm": 0.4992025615094016,
"learning_rate": 1.7352131250807466e-05,
"loss": 0.9447,
"step": 230
},
{
"epoch": 0.32,
"grad_norm": 0.47695347755887807,
"learning_rate": 1.7188236838779297e-05,
"loss": 0.933,
"step": 235
},
{
"epoch": 0.33,
"grad_norm": 0.5219986591713203,
"learning_rate": 1.702024614595248e-05,
"loss": 0.9359,
"step": 240
},
{
"epoch": 0.33,
"grad_norm": 0.5247969957734948,
"learning_rate": 1.6848254903319866e-05,
"loss": 0.9378,
"step": 245
},
{
"epoch": 0.34,
"grad_norm": 0.5260544851199538,
"learning_rate": 1.6672361121623238e-05,
"loss": 0.919,
"step": 250
},
{
"epoch": 0.35,
"grad_norm": 0.5249428172457546,
"learning_rate": 1.6492665035501048e-05,
"loss": 0.9369,
"step": 255
},
{
"epoch": 0.36,
"grad_norm": 0.5162356214174872,
"learning_rate": 1.6309269046368777e-05,
"loss": 0.9515,
"step": 260
},
{
"epoch": 0.36,
"grad_norm": 0.5010926857825899,
"learning_rate": 1.612227766406461e-05,
"loss": 0.9609,
"step": 265
},
{
"epoch": 0.37,
"grad_norm": 0.5382068418300701,
"learning_rate": 1.5931797447293553e-05,
"loss": 0.9447,
"step": 270
},
{
"epoch": 0.38,
"grad_norm": 0.5295330507632734,
"learning_rate": 1.5737936942904025e-05,
"loss": 0.9561,
"step": 275
},
{
"epoch": 0.38,
"grad_norm": 0.5785403455994167,
"learning_rate": 1.554080662403144e-05,
"loss": 0.928,
"step": 280
},
{
"epoch": 0.39,
"grad_norm": 0.49666012212229643,
"learning_rate": 1.5340518827144145e-05,
"loss": 0.9481,
"step": 285
},
{
"epoch": 0.4,
"grad_norm": 0.5289929313831263,
"learning_rate": 1.5137187688027437e-05,
"loss": 0.9284,
"step": 290
},
{
"epoch": 0.4,
"grad_norm": 0.4842301638025414,
"learning_rate": 1.4930929076742317e-05,
"loss": 0.9154,
"step": 295
},
{
"epoch": 0.41,
"grad_norm": 0.5192170165075863,
"learning_rate": 1.4721860531595868e-05,
"loss": 0.922,
"step": 300
},
{
"epoch": 0.42,
"grad_norm": 0.503974988503286,
"learning_rate": 1.451010119216102e-05,
"loss": 0.9149,
"step": 305
},
{
"epoch": 0.42,
"grad_norm": 0.507463468927673,
"learning_rate": 1.4295771731383799e-05,
"loss": 0.9264,
"step": 310
},
{
"epoch": 0.43,
"grad_norm": 0.4963800283400335,
"learning_rate": 1.4078994286816768e-05,
"loss": 0.9354,
"step": 315
},
{
"epoch": 0.44,
"grad_norm": 0.5178048187839067,
"learning_rate": 1.3859892391017867e-05,
"loss": 0.9248,
"step": 320
},
{
"epoch": 0.44,
"grad_norm": 0.4961104164383763,
"learning_rate": 1.3638590901154276e-05,
"loss": 0.947,
"step": 325
},
{
"epoch": 0.45,
"grad_norm": 0.5205766773557013,
"learning_rate": 1.341521592785145e-05,
"loss": 0.9416,
"step": 330
},
{
"epoch": 0.46,
"grad_norm": 0.5453988235731033,
"learning_rate": 1.3189894763327851e-05,
"loss": 0.9288,
"step": 335
},
{
"epoch": 0.46,
"grad_norm": 0.5021959145630082,
"learning_rate": 1.2962755808856341e-05,
"loss": 0.9545,
"step": 340
},
{
"epoch": 0.47,
"grad_norm": 0.5269775945473103,
"learning_rate": 1.2733928501593587e-05,
"loss": 0.9568,
"step": 345
},
{
"epoch": 0.48,
"grad_norm": 0.5460646239634884,
"learning_rate": 1.2503543240819127e-05,
"loss": 0.9281,
"step": 350
},
{
"epoch": 0.48,
"grad_norm": 0.5290020549006504,
"learning_rate": 1.227173131362619e-05,
"loss": 0.9178,
"step": 355
},
{
"epoch": 0.49,
"grad_norm": 0.517202069917391,
"learning_rate": 1.2038624820106572e-05,
"loss": 0.8777,
"step": 360
},
{
"epoch": 0.5,
"grad_norm": 0.5016455236354784,
"learning_rate": 1.1804356598072223e-05,
"loss": 0.9372,
"step": 365
},
{
"epoch": 0.51,
"grad_norm": 0.49187913515804305,
"learning_rate": 1.1569060147356441e-05,
"loss": 0.8909,
"step": 370
},
{
"epoch": 0.51,
"grad_norm": 0.5137148402464546,
"learning_rate": 1.133286955373779e-05,
"loss": 0.9219,
"step": 375
},
{
"epoch": 0.52,
"grad_norm": 0.48367513170223,
"learning_rate": 1.1095919412530136e-05,
"loss": 0.9152,
"step": 380
},
{
"epoch": 0.53,
"grad_norm": 0.5431723767746645,
"learning_rate": 1.0858344751882304e-05,
"loss": 0.9299,
"step": 385
},
{
"epoch": 0.53,
"grad_norm": 0.499436195510236,
"learning_rate": 1.0620280955831088e-05,
"loss": 0.9133,
"step": 390
},
{
"epoch": 0.54,
"grad_norm": 0.4798245609936736,
"learning_rate": 1.038186368715145e-05,
"loss": 0.9202,
"step": 395
},
{
"epoch": 0.55,
"grad_norm": 0.5224088939393612,
"learning_rate": 1.0143228810047877e-05,
"loss": 0.9039,
"step": 400
},
{
"epoch": 0.55,
"grad_norm": 0.5281235095829454,
"learning_rate": 9.904512312730948e-06,
"loss": 0.9149,
"step": 405
},
{
"epoch": 0.56,
"grad_norm": 0.527644785869163,
"learning_rate": 9.665850229923258e-06,
"loss": 0.9152,
"step": 410
},
{
"epoch": 0.57,
"grad_norm": 0.5060754191665021,
"learning_rate": 9.4273785653388e-06,
"loss": 0.9283,
"step": 415
},
{
"epoch": 0.57,
"grad_norm": 0.47138126227952987,
"learning_rate": 9.189233214180057e-06,
"loss": 0.8978,
"step": 420
},
{
"epoch": 0.58,
"grad_norm": 0.4952269194383896,
"learning_rate": 8.951549885696889e-06,
"loss": 0.9172,
"step": 425
},
{
"epoch": 0.59,
"grad_norm": 0.524189618003053,
"learning_rate": 8.714464025851428e-06,
"loss": 0.923,
"step": 430
},
{
"epoch": 0.59,
"grad_norm": 0.48690727308343434,
"learning_rate": 8.478110740132971e-06,
"loss": 0.926,
"step": 435
},
{
"epoch": 0.6,
"grad_norm": 0.5143235296126093,
"learning_rate": 8.242624716566928e-06,
"loss": 0.9351,
"step": 440
},
{
"epoch": 0.61,
"grad_norm": 0.5131865229287261,
"learning_rate": 8.008140148961642e-06,
"loss": 0.9169,
"step": 445
},
{
"epoch": 0.61,
"grad_norm": 0.49549144988639615,
"learning_rate": 7.774790660436857e-06,
"loss": 0.9089,
"step": 450
},
{
"epoch": 0.62,
"grad_norm": 0.48031633340251334,
"learning_rate": 7.542709227277396e-06,
"loss": 0.923,
"step": 455
},
{
"epoch": 0.63,
"grad_norm": 0.5244467963752784,
"learning_rate": 7.312028103155426e-06,
"loss": 0.8991,
"step": 460
},
{
"epoch": 0.64,
"grad_norm": 0.5448301982574317,
"learning_rate": 7.0828787437645455e-06,
"loss": 0.8748,
"step": 465
},
{
"epoch": 0.64,
"grad_norm": 0.5320333401965247,
"learning_rate": 6.8553917319085676e-06,
"loss": 0.9169,
"step": 470
},
{
"epoch": 0.65,
"grad_norm": 0.4778407461352207,
"learning_rate": 6.629696703087755e-06,
"loss": 0.9127,
"step": 475
},
{
"epoch": 0.66,
"grad_norm": 0.5225492873477832,
"learning_rate": 6.405922271624874e-06,
"loss": 0.9112,
"step": 480
},
{
"epoch": 0.66,
"grad_norm": 0.5123626688586312,
"learning_rate": 6.184195957373176e-06,
"loss": 0.9097,
"step": 485
},
{
"epoch": 0.67,
"grad_norm": 0.5025191272317765,
"learning_rate": 5.964644113048079e-06,
"loss": 0.9147,
"step": 490
},
{
"epoch": 0.68,
"grad_norm": 0.48965471328713384,
"learning_rate": 5.74739185222394e-06,
"loss": 0.9222,
"step": 495
},
{
"epoch": 0.68,
"grad_norm": 0.5288978316966069,
"learning_rate": 5.532562978036964e-06,
"loss": 0.8984,
"step": 500
},
{
"epoch": 0.69,
"grad_norm": 0.5102011962342545,
"learning_rate": 5.320279912634907e-06,
"loss": 0.8971,
"step": 505
},
{
"epoch": 0.7,
"grad_norm": 0.5046037978403557,
"learning_rate": 5.110663627413695e-06,
"loss": 0.8965,
"step": 510
},
{
"epoch": 0.7,
"grad_norm": 0.5527850768232141,
"learning_rate": 4.903833574080825e-06,
"loss": 0.9333,
"step": 515
},
{
"epoch": 0.71,
"grad_norm": 0.5015924720632916,
"learning_rate": 4.6999076165847214e-06,
"loss": 0.9128,
"step": 520
},
{
"epoch": 0.72,
"grad_norm": 0.5259030791021958,
"learning_rate": 4.499001963948929e-06,
"loss": 0.8977,
"step": 525
},
{
"epoch": 0.72,
"grad_norm": 0.4922610443183964,
"learning_rate": 4.301231104049359e-06,
"loss": 0.8812,
"step": 530
},
{
"epoch": 0.73,
"grad_norm": 0.513772562818194,
"learning_rate": 4.106707738372357e-06,
"loss": 0.9142,
"step": 535
},
{
"epoch": 0.74,
"grad_norm": 0.48791049706062967,
"learning_rate": 3.915542717790759e-06,
"loss": 0.9158,
"step": 540
},
{
"epoch": 0.74,
"grad_norm": 0.5169968541006628,
"learning_rate": 3.727844979394526e-06,
"loss": 0.9277,
"step": 545
},
{
"epoch": 0.75,
"grad_norm": 0.5077943410753647,
"learning_rate": 3.543721484411976e-06,
"loss": 0.8959,
"step": 550
},
{
"epoch": 0.76,
"grad_norm": 0.4817889325039375,
"learning_rate": 3.3632771572569878e-06,
"loss": 0.9101,
"step": 555
},
{
"epoch": 0.77,
"grad_norm": 0.5385996666793137,
"learning_rate": 3.1866148257368666e-06,
"loss": 0.8938,
"step": 560
},
{
"epoch": 0.77,
"grad_norm": 0.5225728816923544,
"learning_rate": 3.0138351624550165e-06,
"loss": 0.8894,
"step": 565
},
{
"epoch": 0.78,
"grad_norm": 0.5124915386197431,
"learning_rate": 2.845036627441755e-06,
"loss": 0.9064,
"step": 570
},
{
"epoch": 0.79,
"grad_norm": 0.5061857842019366,
"learning_rate": 2.6803154120460007e-06,
"loss": 0.8816,
"step": 575
},
{
"epoch": 0.79,
"grad_norm": 0.5114177573176913,
"learning_rate": 2.5197653841197546e-06,
"loss": 0.9156,
"step": 580
},
{
"epoch": 0.8,
"grad_norm": 0.49787558407973875,
"learning_rate": 2.3634780345266805e-06,
"loss": 0.9244,
"step": 585
},
{
"epoch": 0.81,
"grad_norm": 0.5208494862823159,
"learning_rate": 2.211542425005223e-06,
"loss": 0.8811,
"step": 590
},
{
"epoch": 0.81,
"grad_norm": 0.5079793748028331,
"learning_rate": 2.064045137415982e-06,
"loss": 0.9066,
"step": 595
},
{
"epoch": 0.82,
"grad_norm": 0.49224088294773566,
"learning_rate": 1.9210702244022616e-06,
"loss": 0.9038,
"step": 600
},
{
"epoch": 0.83,
"grad_norm": 0.5159941101908895,
"learning_rate": 1.7826991614919264e-06,
"loss": 0.9188,
"step": 605
},
{
"epoch": 0.83,
"grad_norm": 0.5064241877692456,
"learning_rate": 1.6490108006678495e-06,
"loss": 0.8913,
"step": 610
},
{
"epoch": 0.84,
"grad_norm": 0.504152765761692,
"learning_rate": 1.5200813254334013e-06,
"loss": 0.9058,
"step": 615
},
{
"epoch": 0.85,
"grad_norm": 0.4798941893934502,
"learning_rate": 1.3959842073986085e-06,
"loss": 0.9029,
"step": 620
},
{
"epoch": 0.85,
"grad_norm": 0.5155303614528817,
"learning_rate": 1.2767901644116943e-06,
"loss": 0.9095,
"step": 625
},
{
"epoch": 0.86,
"grad_norm": 0.4959233986544881,
"learning_rate": 1.1625671202598875e-06,
"loss": 0.8903,
"step": 630
},
{
"epoch": 0.87,
"grad_norm": 0.5554850290932043,
"learning_rate": 1.0533801659624531e-06,
"loss": 0.9355,
"step": 635
},
{
"epoch": 0.87,
"grad_norm": 0.5083400957738834,
"learning_rate": 9.492915226779809e-07,
"loss": 0.9263,
"step": 640
},
{
"epoch": 0.88,
"grad_norm": 0.45712422940481173,
"learning_rate": 8.503605062471187e-07,
"loss": 0.9049,
"step": 645
},
{
"epoch": 0.89,
"grad_norm": 0.5031609635595137,
"learning_rate": 7.566434933909006e-07,
"loss": 0.9123,
"step": 650
},
{
"epoch": 0.89,
"grad_norm": 0.4981707548553113,
"learning_rate": 6.681938895839746e-07,
"loss": 0.8784,
"step": 655
},
{
"epoch": 0.9,
"grad_norm": 0.4720191005790808,
"learning_rate": 5.850620986210198e-07,
"loss": 0.8619,
"step": 660
},
{
"epoch": 0.91,
"grad_norm": 0.5135344387921242,
"learning_rate": 5.072954938936925e-07,
"loss": 0.9377,
"step": 665
},
{
"epoch": 0.92,
"grad_norm": 0.5221400934605558,
"learning_rate": 4.3493839139447716e-07,
"loss": 0.9072,
"step": 670
},
{
"epoch": 0.92,
"grad_norm": 0.46747238598666885,
"learning_rate": 3.6803202446282217e-07,
"loss": 0.8966,
"step": 675
},
{
"epoch": 0.93,
"grad_norm": 0.5058287813526128,
"learning_rate": 3.0661452028795335e-07,
"loss": 0.922,
"step": 680
},
{
"epoch": 0.94,
"grad_norm": 0.4891511781301968,
"learning_rate": 2.507208781817638e-07,
"loss": 0.9009,
"step": 685
},
{
"epoch": 0.94,
"grad_norm": 0.4800266914233775,
"learning_rate": 2.0038294963413251e-07,
"loss": 0.8974,
"step": 690
},
{
"epoch": 0.95,
"grad_norm": 0.4962683953232167,
"learning_rate": 1.556294201620734e-07,
"loss": 0.9288,
"step": 695
},
{
"epoch": 0.96,
"grad_norm": 0.529691888573167,
"learning_rate": 1.1648579296304252e-07,
"loss": 0.8941,
"step": 700
},
{
"epoch": 0.96,
"grad_norm": 0.4622837679446212,
"learning_rate": 8.297437438170797e-08,
"loss": 0.9002,
"step": 705
},
{
"epoch": 0.97,
"grad_norm": 0.5095413176345687,
"learning_rate": 5.51142611984834e-08,
"loss": 0.9122,
"step": 710
},
{
"epoch": 0.98,
"grad_norm": 0.5169397392729471,
"learning_rate": 3.2921329747056527e-08,
"loss": 0.8869,
"step": 715
},
{
"epoch": 0.98,
"grad_norm": 0.4761774021864735,
"learning_rate": 1.6408226867118404e-08,
"loss": 0.8698,
"step": 720
},
{
"epoch": 0.99,
"grad_norm": 0.49969690572300623,
"learning_rate": 5.584362697453882e-09,
"loss": 0.9178,
"step": 725
},
{
"epoch": 1.0,
"grad_norm": 0.5099959778794521,
"learning_rate": 4.5590531348227443e-10,
"loss": 0.9086,
"step": 730
},
{
"epoch": 1.0,
"eval_loss": 0.8863584399223328,
"eval_runtime": 9.4646,
"eval_samples_per_second": 52.828,
"eval_steps_per_second": 1.691,
"step": 732
},
{
"epoch": 1.0,
"step": 732,
"total_flos": 106034689474560.0,
"train_loss": 0.9459957421803084,
"train_runtime": 6147.7675,
"train_samples_per_second": 15.232,
"train_steps_per_second": 0.119
}
],
"logging_steps": 5,
"max_steps": 732,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 106034689474560.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}