Mistral-7B-Ours-SFT / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 1089,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 482.9424331713062,
"learning_rate": 9.174311926605506e-08,
"loss": 3.4943,
"step": 1
},
{
"epoch": 0.01,
"grad_norm": 464.5597009797656,
"learning_rate": 4.587155963302753e-07,
"loss": 3.4503,
"step": 5
},
{
"epoch": 0.03,
"grad_norm": 100.13740932500578,
"learning_rate": 9.174311926605506e-07,
"loss": 2.2737,
"step": 10
},
{
"epoch": 0.04,
"grad_norm": 39.17620710874241,
"learning_rate": 1.3761467889908258e-06,
"loss": 1.0059,
"step": 15
},
{
"epoch": 0.06,
"grad_norm": 18.07390105117993,
"learning_rate": 1.8348623853211011e-06,
"loss": 0.8065,
"step": 20
},
{
"epoch": 0.07,
"grad_norm": 9.975454018413377,
"learning_rate": 2.2935779816513764e-06,
"loss": 0.6968,
"step": 25
},
{
"epoch": 0.08,
"grad_norm": 7.488660433798423,
"learning_rate": 2.7522935779816517e-06,
"loss": 0.6259,
"step": 30
},
{
"epoch": 0.1,
"grad_norm": 5.334133955551258,
"learning_rate": 3.211009174311927e-06,
"loss": 0.588,
"step": 35
},
{
"epoch": 0.11,
"grad_norm": 3.6202860181289047,
"learning_rate": 3.6697247706422022e-06,
"loss": 0.5455,
"step": 40
},
{
"epoch": 0.12,
"grad_norm": 3.1626175750631953,
"learning_rate": 4.128440366972478e-06,
"loss": 0.5367,
"step": 45
},
{
"epoch": 0.14,
"grad_norm": 2.696473869878566,
"learning_rate": 4.587155963302753e-06,
"loss": 0.5142,
"step": 50
},
{
"epoch": 0.15,
"grad_norm": 2.618221659772195,
"learning_rate": 5.045871559633028e-06,
"loss": 0.508,
"step": 55
},
{
"epoch": 0.17,
"grad_norm": 2.4524484169619005,
"learning_rate": 5.504587155963303e-06,
"loss": 0.4905,
"step": 60
},
{
"epoch": 0.18,
"grad_norm": 2.2548682855618027,
"learning_rate": 5.963302752293578e-06,
"loss": 0.4843,
"step": 65
},
{
"epoch": 0.19,
"grad_norm": 2.3040454952673666,
"learning_rate": 6.422018348623854e-06,
"loss": 0.472,
"step": 70
},
{
"epoch": 0.21,
"grad_norm": 2.2392018621959497,
"learning_rate": 6.880733944954129e-06,
"loss": 0.4704,
"step": 75
},
{
"epoch": 0.22,
"grad_norm": 2.178936820032253,
"learning_rate": 7.3394495412844045e-06,
"loss": 0.4701,
"step": 80
},
{
"epoch": 0.23,
"grad_norm": 4.848234290009287,
"learning_rate": 7.79816513761468e-06,
"loss": 0.4521,
"step": 85
},
{
"epoch": 0.25,
"grad_norm": 2.16061767352121,
"learning_rate": 8.256880733944956e-06,
"loss": 0.4399,
"step": 90
},
{
"epoch": 0.26,
"grad_norm": 2.252938346732769,
"learning_rate": 8.71559633027523e-06,
"loss": 0.4419,
"step": 95
},
{
"epoch": 0.28,
"grad_norm": 2.096290173421098,
"learning_rate": 9.174311926605506e-06,
"loss": 0.4448,
"step": 100
},
{
"epoch": 0.29,
"grad_norm": 2.174963846986537,
"learning_rate": 9.633027522935781e-06,
"loss": 0.4426,
"step": 105
},
{
"epoch": 0.3,
"grad_norm": 2.1492247832364035,
"learning_rate": 9.999974308631955e-06,
"loss": 0.4351,
"step": 110
},
{
"epoch": 0.32,
"grad_norm": 2.408895333478705,
"learning_rate": 9.99907513847195e-06,
"loss": 0.4279,
"step": 115
},
{
"epoch": 0.33,
"grad_norm": 2.2454817325259446,
"learning_rate": 9.996891663915955e-06,
"loss": 0.4181,
"step": 120
},
{
"epoch": 0.34,
"grad_norm": 2.3729752688467634,
"learning_rate": 9.993424445916923e-06,
"loss": 0.4239,
"step": 125
},
{
"epoch": 0.36,
"grad_norm": 2.5044100058846284,
"learning_rate": 9.98867437523228e-06,
"loss": 0.412,
"step": 130
},
{
"epoch": 0.37,
"grad_norm": 2.3958686207860445,
"learning_rate": 9.982642672195093e-06,
"loss": 0.4169,
"step": 135
},
{
"epoch": 0.39,
"grad_norm": 2.4954881466652776,
"learning_rate": 9.975330886400531e-06,
"loss": 0.4156,
"step": 140
},
{
"epoch": 0.4,
"grad_norm": 2.242375638494122,
"learning_rate": 9.966740896307791e-06,
"loss": 0.4197,
"step": 145
},
{
"epoch": 0.41,
"grad_norm": 2.1178977307464684,
"learning_rate": 9.956874908757482e-06,
"loss": 0.4049,
"step": 150
},
{
"epoch": 0.43,
"grad_norm": 2.328213390988304,
"learning_rate": 9.945735458404681e-06,
"loss": 0.4079,
"step": 155
},
{
"epoch": 0.44,
"grad_norm": 2.3991070068709703,
"learning_rate": 9.93332540706776e-06,
"loss": 0.3932,
"step": 160
},
{
"epoch": 0.45,
"grad_norm": 2.1330210872114685,
"learning_rate": 9.91964794299315e-06,
"loss": 0.4151,
"step": 165
},
{
"epoch": 0.47,
"grad_norm": 2.339763249719483,
"learning_rate": 9.904706580036265e-06,
"loss": 0.3991,
"step": 170
},
{
"epoch": 0.48,
"grad_norm": 2.253034148149308,
"learning_rate": 9.888505156758758e-06,
"loss": 0.3894,
"step": 175
},
{
"epoch": 0.5,
"grad_norm": 2.592051451054994,
"learning_rate": 9.871047835442365e-06,
"loss": 0.3926,
"step": 180
},
{
"epoch": 0.51,
"grad_norm": 2.3384829460370664,
"learning_rate": 9.852339101019574e-06,
"loss": 0.3778,
"step": 185
},
{
"epoch": 0.52,
"grad_norm": 2.3237193171596315,
"learning_rate": 9.832383759921415e-06,
"loss": 0.3876,
"step": 190
},
{
"epoch": 0.54,
"grad_norm": 2.1577325540917682,
"learning_rate": 9.811186938842645e-06,
"loss": 0.3931,
"step": 195
},
{
"epoch": 0.55,
"grad_norm": 2.4399307510769512,
"learning_rate": 9.788754083424654e-06,
"loss": 0.4016,
"step": 200
},
{
"epoch": 0.56,
"grad_norm": 2.2221384901823797,
"learning_rate": 9.765090956856437e-06,
"loss": 0.3729,
"step": 205
},
{
"epoch": 0.58,
"grad_norm": 2.308949316156903,
"learning_rate": 9.740203638393984e-06,
"loss": 0.3726,
"step": 210
},
{
"epoch": 0.59,
"grad_norm": 2.2045141554131935,
"learning_rate": 9.714098521798466e-06,
"loss": 0.3877,
"step": 215
},
{
"epoch": 0.61,
"grad_norm": 2.2837872858373736,
"learning_rate": 9.686782313693622e-06,
"loss": 0.3674,
"step": 220
},
{
"epoch": 0.62,
"grad_norm": 2.43720762592657,
"learning_rate": 9.658262031842772e-06,
"loss": 0.3739,
"step": 225
},
{
"epoch": 0.63,
"grad_norm": 2.3353813140909994,
"learning_rate": 9.6285450033459e-06,
"loss": 0.3674,
"step": 230
},
{
"epoch": 0.65,
"grad_norm": 2.261420264375102,
"learning_rate": 9.597638862757255e-06,
"loss": 0.3607,
"step": 235
},
{
"epoch": 0.66,
"grad_norm": 2.2416634782061906,
"learning_rate": 9.565551550123967e-06,
"loss": 0.3742,
"step": 240
},
{
"epoch": 0.67,
"grad_norm": 2.2997316285411475,
"learning_rate": 9.532291308946191e-06,
"loss": 0.3665,
"step": 245
},
{
"epoch": 0.69,
"grad_norm": 2.4184869571843506,
"learning_rate": 9.497866684059278e-06,
"loss": 0.3561,
"step": 250
},
{
"epoch": 0.7,
"grad_norm": 2.708823081570206,
"learning_rate": 9.462286519438531e-06,
"loss": 0.3538,
"step": 255
},
{
"epoch": 0.72,
"grad_norm": 2.251486751483044,
"learning_rate": 9.425559955927118e-06,
"loss": 0.361,
"step": 260
},
{
"epoch": 0.73,
"grad_norm": 2.3764896592186475,
"learning_rate": 9.387696428887715e-06,
"loss": 0.3432,
"step": 265
},
{
"epoch": 0.74,
"grad_norm": 2.4138174075052947,
"learning_rate": 9.348705665778479e-06,
"loss": 0.3485,
"step": 270
},
{
"epoch": 0.76,
"grad_norm": 5.080374687572826,
"learning_rate": 9.308597683653976e-06,
"loss": 0.3609,
"step": 275
},
{
"epoch": 0.77,
"grad_norm": 2.741760384833186,
"learning_rate": 9.26738278659173e-06,
"loss": 0.3494,
"step": 280
},
{
"epoch": 0.79,
"grad_norm": 2.8966878345447284,
"learning_rate": 9.225071563045007e-06,
"loss": 0.3772,
"step": 285
},
{
"epoch": 0.8,
"grad_norm": 2.57787692140152,
"learning_rate": 9.181674883122554e-06,
"loss": 0.3492,
"step": 290
},
{
"epoch": 0.81,
"grad_norm": 2.3491613484458758,
"learning_rate": 9.137203895795983e-06,
"loss": 0.3483,
"step": 295
},
{
"epoch": 0.83,
"grad_norm": 2.4035469933770925,
"learning_rate": 9.0916700260355e-06,
"loss": 0.3385,
"step": 300
},
{
"epoch": 0.84,
"grad_norm": 2.5217983017859287,
"learning_rate": 9.045084971874738e-06,
"loss": 0.3363,
"step": 305
},
{
"epoch": 0.85,
"grad_norm": 2.408467182342914,
"learning_rate": 8.997460701405431e-06,
"loss": 0.3434,
"step": 310
},
{
"epoch": 0.87,
"grad_norm": 2.4535113461612044,
"learning_rate": 8.948809449702712e-06,
"loss": 0.339,
"step": 315
},
{
"epoch": 0.88,
"grad_norm": 2.6840892962294594,
"learning_rate": 8.899143715681822e-06,
"loss": 0.3382,
"step": 320
},
{
"epoch": 0.9,
"grad_norm": 2.514776491838317,
"learning_rate": 8.84847625888703e-06,
"loss": 0.3415,
"step": 325
},
{
"epoch": 0.91,
"grad_norm": 15.50105108482074,
"learning_rate": 8.7968200962136e-06,
"loss": 0.3571,
"step": 330
},
{
"epoch": 0.92,
"grad_norm": 2.585883974467356,
"learning_rate": 8.74418849856364e-06,
"loss": 0.3446,
"step": 335
},
{
"epoch": 0.94,
"grad_norm": 2.7223008451894115,
"learning_rate": 8.690594987436705e-06,
"loss": 0.3422,
"step": 340
},
{
"epoch": 0.95,
"grad_norm": 2.5622958284447828,
"learning_rate": 8.636053331455986e-06,
"loss": 0.3261,
"step": 345
},
{
"epoch": 0.96,
"grad_norm": 8.344613597434167,
"learning_rate": 8.580577542831072e-06,
"loss": 0.3294,
"step": 350
},
{
"epoch": 0.98,
"grad_norm": 27.593051888264625,
"learning_rate": 8.52418187375806e-06,
"loss": 0.3289,
"step": 355
},
{
"epoch": 0.99,
"grad_norm": 3.1854259696014,
"learning_rate": 8.466880812758064e-06,
"loss": 0.3262,
"step": 360
},
{
"epoch": 1.0,
"eval_loss": 0.318780779838562,
"eval_runtime": 0.7082,
"eval_samples_per_second": 29.652,
"eval_steps_per_second": 1.412,
"step": 363
},
{
"epoch": 1.01,
"grad_norm": 2.5412468443821186,
"learning_rate": 8.408689080954997e-06,
"loss": 0.3094,
"step": 365
},
{
"epoch": 1.02,
"grad_norm": 2.5960718641491933,
"learning_rate": 8.349621628293578e-06,
"loss": 0.2823,
"step": 370
},
{
"epoch": 1.03,
"grad_norm": 2.730052799805784,
"learning_rate": 8.289693629698564e-06,
"loss": 0.2918,
"step": 375
},
{
"epoch": 1.05,
"grad_norm": 2.673767069351067,
"learning_rate": 8.228920481176202e-06,
"loss": 0.3068,
"step": 380
},
{
"epoch": 1.06,
"grad_norm": 2.586082350335276,
"learning_rate": 8.16731779585885e-06,
"loss": 0.2872,
"step": 385
},
{
"epoch": 1.07,
"grad_norm": 2.746929683803303,
"learning_rate": 8.104901399993837e-06,
"loss": 0.2738,
"step": 390
},
{
"epoch": 1.09,
"grad_norm": 2.7651080564632036,
"learning_rate": 8.041687328877566e-06,
"loss": 0.2826,
"step": 395
},
{
"epoch": 1.1,
"grad_norm": 2.471378457694663,
"learning_rate": 7.977691822735914e-06,
"loss": 0.2802,
"step": 400
},
{
"epoch": 1.12,
"grad_norm": 2.525112042533388,
"learning_rate": 7.912931322551981e-06,
"loss": 0.2912,
"step": 405
},
{
"epoch": 1.13,
"grad_norm": 2.326287031654939,
"learning_rate": 7.84742246584226e-06,
"loss": 0.2798,
"step": 410
},
{
"epoch": 1.14,
"grad_norm": 2.5727601459857183,
"learning_rate": 7.781182082382325e-06,
"loss": 0.2667,
"step": 415
},
{
"epoch": 1.16,
"grad_norm": 2.4087608944116408,
"learning_rate": 7.714227189883112e-06,
"loss": 0.2864,
"step": 420
},
{
"epoch": 1.17,
"grad_norm": 2.317260745652955,
"learning_rate": 7.646574989618938e-06,
"loss": 0.2722,
"step": 425
},
{
"epoch": 1.18,
"grad_norm": 3.054972113137932,
"learning_rate": 7.578242862008336e-06,
"loss": 0.2654,
"step": 430
},
{
"epoch": 1.2,
"grad_norm": 2.3952394787945535,
"learning_rate": 7.509248362148889e-06,
"loss": 0.2624,
"step": 435
},
{
"epoch": 1.21,
"grad_norm": 2.3580949206062285,
"learning_rate": 7.439609215307173e-06,
"loss": 0.2724,
"step": 440
},
{
"epoch": 1.23,
"grad_norm": 2.359989598212989,
"learning_rate": 7.369343312364994e-06,
"loss": 0.2803,
"step": 445
},
{
"epoch": 1.24,
"grad_norm": 2.3241522880939245,
"learning_rate": 7.2984687052230585e-06,
"loss": 0.2722,
"step": 450
},
{
"epoch": 1.25,
"grad_norm": 2.7180879142331573,
"learning_rate": 7.227003602163296e-06,
"loss": 0.2735,
"step": 455
},
{
"epoch": 1.27,
"grad_norm": 2.3639669980317244,
"learning_rate": 7.154966363171003e-06,
"loss": 0.2707,
"step": 460
},
{
"epoch": 1.28,
"grad_norm": 2.476880995232887,
"learning_rate": 7.082375495217996e-06,
"loss": 0.2642,
"step": 465
},
{
"epoch": 1.29,
"grad_norm": 2.394174321663528,
"learning_rate": 7.009249647508028e-06,
"loss": 0.2633,
"step": 470
},
{
"epoch": 1.31,
"grad_norm": 2.3149517217992206,
"learning_rate": 6.935607606685642e-06,
"loss": 0.2646,
"step": 475
},
{
"epoch": 1.32,
"grad_norm": 2.362938067207863,
"learning_rate": 6.8614682920097265e-06,
"loss": 0.2722,
"step": 480
},
{
"epoch": 1.34,
"grad_norm": 2.221862832028165,
"learning_rate": 6.786850750493006e-06,
"loss": 0.2731,
"step": 485
},
{
"epoch": 1.35,
"grad_norm": 2.480585954865694,
"learning_rate": 6.71177415200869e-06,
"loss": 0.2647,
"step": 490
},
{
"epoch": 1.36,
"grad_norm": 2.601383750536699,
"learning_rate": 6.636257784365585e-06,
"loss": 0.2651,
"step": 495
},
{
"epoch": 1.38,
"grad_norm": 2.365789134129327,
"learning_rate": 6.5603210483528864e-06,
"loss": 0.2558,
"step": 500
},
{
"epoch": 1.39,
"grad_norm": 2.3600890606849756,
"learning_rate": 6.483983452755953e-06,
"loss": 0.2599,
"step": 505
},
{
"epoch": 1.4,
"grad_norm": 2.2597199615645995,
"learning_rate": 6.407264609344344e-06,
"loss": 0.257,
"step": 510
},
{
"epoch": 1.42,
"grad_norm": 2.6801929299706195,
"learning_rate": 6.330184227833376e-06,
"loss": 0.2578,
"step": 515
},
{
"epoch": 1.43,
"grad_norm": 2.5972482010235245,
"learning_rate": 6.252762110820548e-06,
"loss": 0.2609,
"step": 520
},
{
"epoch": 1.45,
"grad_norm": 2.18407067589225,
"learning_rate": 6.175018148698077e-06,
"loss": 0.2557,
"step": 525
},
{
"epoch": 1.46,
"grad_norm": 2.3098952297720805,
"learning_rate": 6.096972314542889e-06,
"loss": 0.2529,
"step": 530
},
{
"epoch": 1.47,
"grad_norm": 2.286804305301903,
"learning_rate": 6.018644658985378e-06,
"loss": 0.2506,
"step": 535
},
{
"epoch": 1.49,
"grad_norm": 2.3719247145054565,
"learning_rate": 5.940055305058219e-06,
"loss": 0.2887,
"step": 540
},
{
"epoch": 1.5,
"grad_norm": 2.192651511655319,
"learning_rate": 5.861224443026595e-06,
"loss": 0.2512,
"step": 545
},
{
"epoch": 1.52,
"grad_norm": 2.319247712566637,
"learning_rate": 5.782172325201155e-06,
"loss": 0.2559,
"step": 550
},
{
"epoch": 1.53,
"grad_norm": 2.2722313275015438,
"learning_rate": 5.702919260735015e-06,
"loss": 0.258,
"step": 555
},
{
"epoch": 1.54,
"grad_norm": 2.212042996156513,
"learning_rate": 5.623485610406174e-06,
"loss": 0.2551,
"step": 560
},
{
"epoch": 1.56,
"grad_norm": 2.628919297520483,
"learning_rate": 5.543891781386655e-06,
"loss": 0.2594,
"step": 565
},
{
"epoch": 1.57,
"grad_norm": 2.2295725919671834,
"learning_rate": 5.464158221999731e-06,
"loss": 0.252,
"step": 570
},
{
"epoch": 1.58,
"grad_norm": 2.346371911738126,
"learning_rate": 5.384305416466584e-06,
"loss": 0.2492,
"step": 575
},
{
"epoch": 1.6,
"grad_norm": 2.128952621835011,
"learning_rate": 5.304353879643727e-06,
"loss": 0.2498,
"step": 580
},
{
"epoch": 1.61,
"grad_norm": 2.309582792426568,
"learning_rate": 5.224324151752575e-06,
"loss": 0.2407,
"step": 585
},
{
"epoch": 1.63,
"grad_norm": 2.186774543447298,
"learning_rate": 5.144236793102485e-06,
"loss": 0.2534,
"step": 590
},
{
"epoch": 1.64,
"grad_norm": 2.285906133102669,
"learning_rate": 5.064112378808636e-06,
"loss": 0.2504,
"step": 595
},
{
"epoch": 1.65,
"grad_norm": 2.282750925627709,
"learning_rate": 4.9839714935061215e-06,
"loss": 0.2564,
"step": 600
},
{
"epoch": 1.67,
"grad_norm": 2.434230750765326,
"learning_rate": 4.903834726061565e-06,
"loss": 0.2487,
"step": 605
},
{
"epoch": 1.68,
"grad_norm": 2.243187765312758,
"learning_rate": 4.823722664283684e-06,
"loss": 0.2445,
"step": 610
},
{
"epoch": 1.69,
"grad_norm": 2.238918307385941,
"learning_rate": 4.743655889634105e-06,
"loss": 0.2434,
"step": 615
},
{
"epoch": 1.71,
"grad_norm": 2.2787450399048716,
"learning_rate": 4.663654971939802e-06,
"loss": 0.2628,
"step": 620
},
{
"epoch": 1.72,
"grad_norm": 18.429833465031482,
"learning_rate": 4.583740464108554e-06,
"loss": 0.2463,
"step": 625
},
{
"epoch": 1.74,
"grad_norm": 2.345262438447504,
"learning_rate": 4.503932896848713e-06,
"loss": 0.2586,
"step": 630
},
{
"epoch": 1.75,
"grad_norm": 3.7488286873573946,
"learning_rate": 4.424252773394704e-06,
"loss": 0.2604,
"step": 635
},
{
"epoch": 1.76,
"grad_norm": 2.238980042562648,
"learning_rate": 4.344720564239567e-06,
"loss": 0.2363,
"step": 640
},
{
"epoch": 1.78,
"grad_norm": 5.301475765556174,
"learning_rate": 4.265356701875911e-06,
"loss": 0.2814,
"step": 645
},
{
"epoch": 1.79,
"grad_norm": 3.186554344112934,
"learning_rate": 4.186181575546651e-06,
"loss": 0.2941,
"step": 650
},
{
"epoch": 1.8,
"grad_norm": 3.0561821381799956,
"learning_rate": 4.107215526006818e-06,
"loss": 0.2807,
"step": 655
},
{
"epoch": 1.82,
"grad_norm": 2.9363395014272036,
"learning_rate": 4.028478840297867e-06,
"loss": 0.2794,
"step": 660
},
{
"epoch": 1.83,
"grad_norm": 2.7904354679575696,
"learning_rate": 3.949991746535753e-06,
"loss": 0.2677,
"step": 665
},
{
"epoch": 1.85,
"grad_norm": 3.69243985318358,
"learning_rate": 3.87177440871417e-06,
"loss": 0.2864,
"step": 670
},
{
"epoch": 1.86,
"grad_norm": 2.5403744359066645,
"learning_rate": 3.7938469215242374e-06,
"loss": 0.2523,
"step": 675
},
{
"epoch": 1.87,
"grad_norm": 2.448473174262864,
"learning_rate": 3.7162293051920185e-06,
"loss": 0.2619,
"step": 680
},
{
"epoch": 1.89,
"grad_norm": 2.374198457162273,
"learning_rate": 3.638941500335145e-06,
"loss": 0.2553,
"step": 685
},
{
"epoch": 1.9,
"grad_norm": 2.3149772372185256,
"learning_rate": 3.562003362839914e-06,
"loss": 0.2658,
"step": 690
},
{
"epoch": 1.91,
"grad_norm": 2.3535700882688677,
"learning_rate": 3.48543465876014e-06,
"loss": 0.2552,
"step": 695
},
{
"epoch": 1.93,
"grad_norm": 2.409924919926454,
"learning_rate": 3.409255059239086e-06,
"loss": 0.2454,
"step": 700
},
{
"epoch": 1.94,
"grad_norm": 2.3087285185101223,
"learning_rate": 3.3334841354557923e-06,
"loss": 0.247,
"step": 705
},
{
"epoch": 1.96,
"grad_norm": 2.097974702680957,
"learning_rate": 3.2581413535970597e-06,
"loss": 0.259,
"step": 710
},
{
"epoch": 1.97,
"grad_norm": 2.311282696059444,
"learning_rate": 3.183246069856443e-06,
"loss": 0.249,
"step": 715
},
{
"epoch": 1.98,
"grad_norm": 2.216797263554876,
"learning_rate": 3.1088175254614616e-06,
"loss": 0.2534,
"step": 720
},
{
"epoch": 2.0,
"grad_norm": 2.2157421181674852,
"learning_rate": 3.0348748417303826e-06,
"loss": 0.2462,
"step": 725
},
{
"epoch": 2.0,
"eval_loss": 0.27171120047569275,
"eval_runtime": 0.6983,
"eval_samples_per_second": 30.074,
"eval_steps_per_second": 1.432,
"step": 726
},
{
"epoch": 2.01,
"grad_norm": 2.300404841110118,
"learning_rate": 2.9614370151597837e-06,
"loss": 0.2101,
"step": 730
},
{
"epoch": 2.02,
"grad_norm": 2.5741196666096893,
"learning_rate": 2.8885229125442022e-06,
"loss": 0.2098,
"step": 735
},
{
"epoch": 2.04,
"grad_norm": 2.279909101731186,
"learning_rate": 2.8161512661290847e-06,
"loss": 0.1947,
"step": 740
},
{
"epoch": 2.05,
"grad_norm": 2.3938996196040314,
"learning_rate": 2.7443406687983267e-06,
"loss": 0.1984,
"step": 745
},
{
"epoch": 2.07,
"grad_norm": 2.446216710229245,
"learning_rate": 2.6731095692976073e-06,
"loss": 0.2088,
"step": 750
},
{
"epoch": 2.08,
"grad_norm": 2.598313354457395,
"learning_rate": 2.6024762674947313e-06,
"loss": 0.2054,
"step": 755
},
{
"epoch": 2.09,
"grad_norm": 2.1333746091625634,
"learning_rate": 2.532458909678266e-06,
"loss": 0.2033,
"step": 760
},
{
"epoch": 2.11,
"grad_norm": 2.338496094538223,
"learning_rate": 2.46307548389559e-06,
"loss": 0.1953,
"step": 765
},
{
"epoch": 2.12,
"grad_norm": 2.1648483458390633,
"learning_rate": 2.394343815331616e-06,
"loss": 0.1979,
"step": 770
},
{
"epoch": 2.13,
"grad_norm": 2.218648225464966,
"learning_rate": 2.3262815617293517e-06,
"loss": 0.2072,
"step": 775
},
{
"epoch": 2.15,
"grad_norm": 2.22344979938061,
"learning_rate": 2.2589062088534837e-06,
"loss": 0.1922,
"step": 780
},
{
"epoch": 2.16,
"grad_norm": 2.1944872952593935,
"learning_rate": 2.1922350659981262e-06,
"loss": 0.1899,
"step": 785
},
{
"epoch": 2.18,
"grad_norm": 2.53207350471496,
"learning_rate": 2.126285261539926e-06,
"loss": 0.1988,
"step": 790
},
{
"epoch": 2.19,
"grad_norm": 2.2231414178470863,
"learning_rate": 2.061073738537635e-06,
"loss": 0.203,
"step": 795
},
{
"epoch": 2.2,
"grad_norm": 2.299191912683916,
"learning_rate": 1.9966172503792986e-06,
"loss": 0.196,
"step": 800
},
{
"epoch": 2.22,
"grad_norm": 2.150078968393613,
"learning_rate": 1.932932356478168e-06,
"loss": 0.2077,
"step": 805
},
{
"epoch": 2.23,
"grad_norm": 2.1937351268393606,
"learning_rate": 1.8700354180184465e-06,
"loss": 0.1884,
"step": 810
},
{
"epoch": 2.25,
"grad_norm": 2.1569756817442887,
"learning_rate": 1.8079425937519729e-06,
"loss": 0.2072,
"step": 815
},
{
"epoch": 2.26,
"grad_norm": 2.314737346791992,
"learning_rate": 1.7466698358468825e-06,
"loss": 0.1954,
"step": 820
},
{
"epoch": 2.27,
"grad_norm": 2.17714632935505,
"learning_rate": 1.6862328857893856e-06,
"loss": 0.2047,
"step": 825
},
{
"epoch": 2.29,
"grad_norm": 2.1882270574947076,
"learning_rate": 1.6266472703396286e-06,
"loss": 0.2201,
"step": 830
},
{
"epoch": 2.3,
"grad_norm": 3.110550765065634,
"learning_rate": 1.567928297542749e-06,
"loss": 0.1928,
"step": 835
},
{
"epoch": 2.31,
"grad_norm": 2.1365808529854586,
"learning_rate": 1.510091052796105e-06,
"loss": 0.1918,
"step": 840
},
{
"epoch": 2.33,
"grad_norm": 2.388612603639893,
"learning_rate": 1.4531503949737107e-06,
"loss": 0.1977,
"step": 845
},
{
"epoch": 2.34,
"grad_norm": 2.191971884772306,
"learning_rate": 1.3971209526088764e-06,
"loss": 0.1845,
"step": 850
},
{
"epoch": 2.36,
"grad_norm": 2.2009119225291354,
"learning_rate": 1.3420171201359933e-06,
"loss": 0.1904,
"step": 855
},
{
"epoch": 2.37,
"grad_norm": 2.2802253001639694,
"learning_rate": 1.2878530541925077e-06,
"loss": 0.187,
"step": 860
},
{
"epoch": 2.38,
"grad_norm": 8.310094573105028,
"learning_rate": 1.234642669981946e-06,
"loss": 0.1953,
"step": 865
},
{
"epoch": 2.4,
"grad_norm": 2.007390735385814,
"learning_rate": 1.1823996376989849e-06,
"loss": 0.2004,
"step": 870
},
{
"epoch": 2.41,
"grad_norm": 2.1635128901100047,
"learning_rate": 1.1311373790174656e-06,
"loss": 0.1912,
"step": 875
},
{
"epoch": 2.42,
"grad_norm": 2.0539344550751313,
"learning_rate": 1.0808690636422587e-06,
"loss": 0.1916,
"step": 880
},
{
"epoch": 2.44,
"grad_norm": 2.0611679807811507,
"learning_rate": 1.031607605925839e-06,
"loss": 0.1955,
"step": 885
},
{
"epoch": 2.45,
"grad_norm": 2.013017015156521,
"learning_rate": 9.833656615504978e-07,
"loss": 0.1972,
"step": 890
},
{
"epoch": 2.47,
"grad_norm": 2.177386578340672,
"learning_rate": 9.361556242769871e-07,
"loss": 0.1914,
"step": 895
},
{
"epoch": 2.48,
"grad_norm": 2.0667008735778043,
"learning_rate": 8.899896227604509e-07,
"loss": 0.1932,
"step": 900
},
{
"epoch": 2.49,
"grad_norm": 2.3033918528237467,
"learning_rate": 8.448795174344803e-07,
"loss": 0.1954,
"step": 905
},
{
"epoch": 2.51,
"grad_norm": 2.000031770097792,
"learning_rate": 8.008368974640634e-07,
"loss": 0.1848,
"step": 910
},
{
"epoch": 2.52,
"grad_norm": 2.0956889327082022,
"learning_rate": 7.578730777682386e-07,
"loss": 0.2009,
"step": 915
},
{
"epoch": 2.53,
"grad_norm": 2.091926612910402,
"learning_rate": 7.159990961131818e-07,
"loss": 0.1941,
"step": 920
},
{
"epoch": 2.55,
"grad_norm": 2.0429730704960356,
"learning_rate": 6.752257102765325e-07,
"loss": 0.1928,
"step": 925
},
{
"epoch": 2.56,
"grad_norm": 2.154324400516288,
"learning_rate": 6.355633952836115e-07,
"loss": 0.1951,
"step": 930
},
{
"epoch": 2.58,
"grad_norm": 2.067265269052674,
"learning_rate": 5.9702234071631e-07,
"loss": 0.207,
"step": 935
},
{
"epoch": 2.59,
"grad_norm": 1.948503840598503,
"learning_rate": 5.596124480952975e-07,
"loss": 0.1929,
"step": 940
},
{
"epoch": 2.6,
"grad_norm": 2.098095576485382,
"learning_rate": 5.233433283362349e-07,
"loss": 0.1886,
"step": 945
},
{
"epoch": 2.62,
"grad_norm": 2.032706968996429,
"learning_rate": 4.882242992806546e-07,
"loss": 0.1846,
"step": 950
},
{
"epoch": 2.63,
"grad_norm": 2.1026476881879748,
"learning_rate": 4.542643833021254e-07,
"loss": 0.1986,
"step": 955
},
{
"epoch": 2.64,
"grad_norm": 2.8329709057520835,
"learning_rate": 4.214723049883307e-07,
"loss": 0.1965,
"step": 960
},
{
"epoch": 2.66,
"grad_norm": 2.0453374464135026,
"learning_rate": 3.8985648889964755e-07,
"loss": 0.2001,
"step": 965
},
{
"epoch": 2.67,
"grad_norm": 2.0157451646489895,
"learning_rate": 3.5942505740480583e-07,
"loss": 0.1854,
"step": 970
},
{
"epoch": 2.69,
"grad_norm": 2.081338811663644,
"learning_rate": 3.301858285941845e-07,
"loss": 0.1905,
"step": 975
},
{
"epoch": 2.7,
"grad_norm": 2.0777703480609295,
"learning_rate": 3.0214631427127883e-07,
"loss": 0.1871,
"step": 980
},
{
"epoch": 2.71,
"grad_norm": 2.0259999411667535,
"learning_rate": 2.7531371802285436e-07,
"loss": 0.1924,
"step": 985
},
{
"epoch": 2.73,
"grad_norm": 2.048118180121309,
"learning_rate": 2.4969493336828353e-07,
"loss": 0.1957,
"step": 990
},
{
"epoch": 2.74,
"grad_norm": 2.1100138033455376,
"learning_rate": 2.2529654198854834e-07,
"loss": 0.204,
"step": 995
},
{
"epoch": 2.75,
"grad_norm": 2.3890567881386358,
"learning_rate": 2.0212481203534083e-07,
"loss": 0.1882,
"step": 1000
},
{
"epoch": 2.77,
"grad_norm": 2.0181369601611188,
"learning_rate": 1.801856965207338e-07,
"loss": 0.1922,
"step": 1005
},
{
"epoch": 2.78,
"grad_norm": 2.0899720091201925,
"learning_rate": 1.594848317877934e-07,
"loss": 0.1858,
"step": 1010
},
{
"epoch": 2.8,
"grad_norm": 2.0181925238394096,
"learning_rate": 1.4002753606256082e-07,
"loss": 0.1892,
"step": 1015
},
{
"epoch": 2.81,
"grad_norm": 2.1440090885906304,
"learning_rate": 1.2181880808775026e-07,
"loss": 0.1989,
"step": 1020
},
{
"epoch": 2.82,
"grad_norm": 2.1388600966336044,
"learning_rate": 1.0486332583853565e-07,
"loss": 0.1896,
"step": 1025
},
{
"epoch": 2.84,
"grad_norm": 2.0742239070550323,
"learning_rate": 8.916544532073413e-08,
"loss": 0.1944,
"step": 1030
},
{
"epoch": 2.85,
"grad_norm": 2.011520486971434,
"learning_rate": 7.47291994517163e-08,
"loss": 0.1921,
"step": 1035
},
{
"epoch": 2.87,
"grad_norm": 1.9887158152528648,
"learning_rate": 6.15582970243117e-08,
"loss": 0.1969,
"step": 1040
},
{
"epoch": 2.88,
"grad_norm": 1.980526039735843,
"learning_rate": 4.9656121753990924e-08,
"loss": 0.2015,
"step": 1045
},
{
"epoch": 2.89,
"grad_norm": 2.0507087687769125,
"learning_rate": 3.902573140956101e-08,
"loss": 0.1899,
"step": 1050
},
{
"epoch": 2.91,
"grad_norm": 2.101435409729603,
"learning_rate": 2.966985702759828e-08,
"loss": 0.1852,
"step": 1055
},
{
"epoch": 2.92,
"grad_norm": 2.0278350613575484,
"learning_rate": 2.159090221082294e-08,
"loss": 0.185,
"step": 1060
},
{
"epoch": 2.93,
"grad_norm": 2.0276084512238635,
"learning_rate": 1.4790942510590767e-08,
"loss": 0.1892,
"step": 1065
},
{
"epoch": 2.95,
"grad_norm": 2.125689035287332,
"learning_rate": 9.27172489366912e-09,
"loss": 0.1986,
"step": 1070
},
{
"epoch": 2.96,
"grad_norm": 2.015031215767539,
"learning_rate": 5.034667293427053e-09,
"loss": 0.1915,
"step": 1075
},
{
"epoch": 2.98,
"grad_norm": 1.9990729935456388,
"learning_rate": 2.0808582455528194e-09,
"loss": 0.1949,
"step": 1080
},
{
"epoch": 2.99,
"grad_norm": 3.674390036726276,
"learning_rate": 4.1105660840368154e-10,
"loss": 0.2012,
"step": 1085
},
{
"epoch": 3.0,
"eval_loss": 0.27156856656074524,
"eval_runtime": 0.7081,
"eval_samples_per_second": 29.656,
"eval_steps_per_second": 1.412,
"step": 1089
},
{
"epoch": 3.0,
"step": 1089,
"total_flos": 227909755207680.0,
"train_loss": 0.3156235526196994,
"train_runtime": 6236.2054,
"train_samples_per_second": 11.165,
"train_steps_per_second": 0.175
}
],
"logging_steps": 5,
"max_steps": 1089,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 400,
"total_flos": 227909755207680.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
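
The JSON above is a standard Hugging Face `Trainer` state file. As a minimal sketch (assuming the file is saved locally as `trainer_state.json`; the filename and script are illustrative, not part of the original repo), the logged training and evaluation losses in `log_history` could be pulled out like this:

```python
import json

# Load the trainer state produced by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training-loss entries carry a "loss" key; eval entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"logged train steps: {len(train_logs)}")
print(f"last logged train loss: {train_logs[-1]['loss']} at step {train_logs[-1]['step']}")

# Per-epoch evaluation losses (here: steps 363, 726, 1089).
for e in eval_logs:
    print(f"epoch {e['epoch']}: eval_loss = {e['eval_loss']}")
```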