quip-4k-qwen / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.971724787935909,
"eval_steps": 500,
"global_step": 792,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03770028275212064,
"grad_norm": 2.404820680618286,
"learning_rate": 4.9995083170283816e-05,
"loss": 4.2968,
"num_input_tokens_seen": 51872,
"step": 5
},
{
"epoch": 0.07540056550424128,
"grad_norm": 1.080649971961975,
"learning_rate": 4.998033461515242e-05,
"loss": 3.9725,
"num_input_tokens_seen": 107024,
"step": 10
},
{
"epoch": 0.11310084825636192,
"grad_norm": 2.0493686199188232,
"learning_rate": 4.9955760135896534e-05,
"loss": 3.8314,
"num_input_tokens_seen": 160080,
"step": 15
},
{
"epoch": 0.15080113100848255,
"grad_norm": 0.927135169506073,
"learning_rate": 4.992136939879856e-05,
"loss": 3.7,
"num_input_tokens_seen": 208816,
"step": 20
},
{
"epoch": 0.1885014137606032,
"grad_norm": 0.9066221714019775,
"learning_rate": 4.9877175931330346e-05,
"loss": 3.6029,
"num_input_tokens_seen": 262448,
"step": 25
},
{
"epoch": 0.22620169651272384,
"grad_norm": 1.361427903175354,
"learning_rate": 4.982319711683221e-05,
"loss": 3.4859,
"num_input_tokens_seen": 315552,
"step": 30
},
{
"epoch": 0.2639019792648445,
"grad_norm": 6.180077075958252,
"learning_rate": 4.975945418767529e-05,
"loss": 3.4653,
"num_input_tokens_seen": 366992,
"step": 35
},
{
"epoch": 0.3016022620169651,
"grad_norm": 1.7034149169921875,
"learning_rate": 4.968597221690986e-05,
"loss": 3.485,
"num_input_tokens_seen": 418816,
"step": 40
},
{
"epoch": 0.3393025447690858,
"grad_norm": 2.64639949798584,
"learning_rate": 4.96027801084029e-05,
"loss": 3.4304,
"num_input_tokens_seen": 469936,
"step": 45
},
{
"epoch": 0.3770028275212064,
"grad_norm": 1.302827000617981,
"learning_rate": 4.950991058546893e-05,
"loss": 3.4635,
"num_input_tokens_seen": 525120,
"step": 50
},
{
"epoch": 0.41470311027332707,
"grad_norm": 1.5340975522994995,
"learning_rate": 4.940740017799833e-05,
"loss": 3.3626,
"num_input_tokens_seen": 576944,
"step": 55
},
{
"epoch": 0.4524033930254477,
"grad_norm": 1.2329437732696533,
"learning_rate": 4.929528920808854e-05,
"loss": 3.3347,
"num_input_tokens_seen": 628544,
"step": 60
},
{
"epoch": 0.49010367577756836,
"grad_norm": 1.7281228303909302,
"learning_rate": 4.917362177418342e-05,
"loss": 3.3347,
"num_input_tokens_seen": 681392,
"step": 65
},
{
"epoch": 0.527803958529689,
"grad_norm": 1.3272786140441895,
"learning_rate": 4.904244573372733e-05,
"loss": 3.3173,
"num_input_tokens_seen": 734928,
"step": 70
},
{
"epoch": 0.5655042412818096,
"grad_norm": 1.3057860136032104,
"learning_rate": 4.8901812684340564e-05,
"loss": 3.3863,
"num_input_tokens_seen": 785840,
"step": 75
},
{
"epoch": 0.6032045240339302,
"grad_norm": 1.3130522966384888,
"learning_rate": 4.8751777943523634e-05,
"loss": 3.2669,
"num_input_tokens_seen": 838112,
"step": 80
},
{
"epoch": 0.6409048067860509,
"grad_norm": 1.4002560377120972,
"learning_rate": 4.8592400526898314e-05,
"loss": 3.249,
"num_input_tokens_seen": 886784,
"step": 85
},
{
"epoch": 0.6786050895381716,
"grad_norm": 1.5897698402404785,
"learning_rate": 4.842374312499405e-05,
"loss": 3.3248,
"num_input_tokens_seen": 936880,
"step": 90
},
{
"epoch": 0.7163053722902922,
"grad_norm": 2.1904706954956055,
"learning_rate": 4.824587207858888e-05,
"loss": 3.1471,
"num_input_tokens_seen": 988864,
"step": 95
},
{
"epoch": 0.7540056550424128,
"grad_norm": 1.859395980834961,
"learning_rate": 4.805885735261454e-05,
"loss": 3.2406,
"num_input_tokens_seen": 1044400,
"step": 100
},
{
"epoch": 0.7917059377945335,
"grad_norm": 1.6112860441207886,
"learning_rate": 4.786277250863599e-05,
"loss": 3.1418,
"num_input_tokens_seen": 1097376,
"step": 105
},
{
"epoch": 0.8294062205466541,
"grad_norm": 1.3354580402374268,
"learning_rate": 4.765769467591625e-05,
"loss": 3.1452,
"num_input_tokens_seen": 1153456,
"step": 110
},
{
"epoch": 0.8671065032987747,
"grad_norm": 1.9464856386184692,
"learning_rate": 4.744370452107789e-05,
"loss": 3.2019,
"num_input_tokens_seen": 1205536,
"step": 115
},
{
"epoch": 0.9048067860508954,
"grad_norm": 1.3771345615386963,
"learning_rate": 4.722088621637309e-05,
"loss": 3.2528,
"num_input_tokens_seen": 1256080,
"step": 120
},
{
"epoch": 0.942507068803016,
"grad_norm": 1.5059542655944824,
"learning_rate": 4.698932740657479e-05,
"loss": 3.0824,
"num_input_tokens_seen": 1308592,
"step": 125
},
{
"epoch": 0.9802073515551367,
"grad_norm": 1.4657851457595825,
"learning_rate": 4.6749119174501975e-05,
"loss": 3.2487,
"num_input_tokens_seen": 1355792,
"step": 130
},
{
"epoch": 1.0179076343072573,
"grad_norm": 1.5458085536956787,
"learning_rate": 4.6500356005192514e-05,
"loss": 3.12,
"num_input_tokens_seen": 1402560,
"step": 135
},
{
"epoch": 1.055607917059378,
"grad_norm": 1.4856982231140137,
"learning_rate": 4.6243135748737864e-05,
"loss": 3.1617,
"num_input_tokens_seen": 1453152,
"step": 140
},
{
"epoch": 1.0933081998114986,
"grad_norm": 1.728014588356018,
"learning_rate": 4.597755958179406e-05,
"loss": 3.0912,
"num_input_tokens_seen": 1505744,
"step": 145
},
{
"epoch": 1.1310084825636193,
"grad_norm": 1.727301836013794,
"learning_rate": 4.570373196778427e-05,
"loss": 2.9448,
"num_input_tokens_seen": 1559264,
"step": 150
},
{
"epoch": 1.1687087653157398,
"grad_norm": 1.8649897575378418,
"learning_rate": 4.5421760615808474e-05,
"loss": 3.1383,
"num_input_tokens_seen": 1604368,
"step": 155
},
{
"epoch": 1.2064090480678604,
"grad_norm": 1.8682725429534912,
"learning_rate": 4.513175643827647e-05,
"loss": 3.0526,
"num_input_tokens_seen": 1657168,
"step": 160
},
{
"epoch": 1.244109330819981,
"grad_norm": 1.723254680633545,
"learning_rate": 4.4833833507280884e-05,
"loss": 3.0386,
"num_input_tokens_seen": 1704960,
"step": 165
},
{
"epoch": 1.2818096135721018,
"grad_norm": 1.6712509393692017,
"learning_rate": 4.4528109009727336e-05,
"loss": 2.9411,
"num_input_tokens_seen": 1756400,
"step": 170
},
{
"epoch": 1.3195098963242224,
"grad_norm": 2.0004794597625732,
"learning_rate": 4.42147032012394e-05,
"loss": 3.1312,
"num_input_tokens_seen": 1806976,
"step": 175
},
{
"epoch": 1.3572101790763431,
"grad_norm": 1.872934103012085,
"learning_rate": 4.389373935885646e-05,
"loss": 3.0785,
"num_input_tokens_seen": 1861264,
"step": 180
},
{
"epoch": 1.3949104618284638,
"grad_norm": 1.8472355604171753,
"learning_rate": 4.356534373254316e-05,
"loss": 3.0567,
"num_input_tokens_seen": 1917872,
"step": 185
},
{
"epoch": 1.4326107445805842,
"grad_norm": 2.1023237705230713,
"learning_rate": 4.322964549552943e-05,
"loss": 3.0526,
"num_input_tokens_seen": 1971888,
"step": 190
},
{
"epoch": 1.4703110273327051,
"grad_norm": 1.9663615226745605,
"learning_rate": 4.288677669350066e-05,
"loss": 3.0147,
"num_input_tokens_seen": 2022112,
"step": 195
},
{
"epoch": 1.5080113100848256,
"grad_norm": 2.509061574935913,
"learning_rate": 4.2536872192658036e-05,
"loss": 3.0681,
"num_input_tokens_seen": 2073088,
"step": 200
},
{
"epoch": 1.5457115928369463,
"grad_norm": 1.9898158311843872,
"learning_rate": 4.218006962666934e-05,
"loss": 2.9832,
"num_input_tokens_seen": 2124080,
"step": 205
},
{
"epoch": 1.583411875589067,
"grad_norm": 1.9694849252700806,
"learning_rate": 4.181650934253132e-05,
"loss": 2.9811,
"num_input_tokens_seen": 2178784,
"step": 210
},
{
"epoch": 1.6211121583411876,
"grad_norm": 2.2799909114837646,
"learning_rate": 4.144633434536467e-05,
"loss": 2.9752,
"num_input_tokens_seen": 2228928,
"step": 215
},
{
"epoch": 1.6588124410933083,
"grad_norm": 2.281668186187744,
"learning_rate": 4.1069690242163484e-05,
"loss": 3.1081,
"num_input_tokens_seen": 2279536,
"step": 220
},
{
"epoch": 1.6965127238454287,
"grad_norm": 2.2970738410949707,
"learning_rate": 4.06867251845213e-05,
"loss": 2.9904,
"num_input_tokens_seen": 2338832,
"step": 225
},
{
"epoch": 1.7342130065975496,
"grad_norm": 2.1776630878448486,
"learning_rate": 4.0297589810356165e-05,
"loss": 3.0209,
"num_input_tokens_seen": 2392784,
"step": 230
},
{
"epoch": 1.77191328934967,
"grad_norm": 2.3412859439849854,
"learning_rate": 3.9902437184657784e-05,
"loss": 2.9835,
"num_input_tokens_seen": 2449088,
"step": 235
},
{
"epoch": 1.8096135721017907,
"grad_norm": 2.2461934089660645,
"learning_rate": 3.9501422739279956e-05,
"loss": 3.0256,
"num_input_tokens_seen": 2503872,
"step": 240
},
{
"epoch": 1.8473138548539114,
"grad_norm": 1.9846028089523315,
"learning_rate": 3.909470421180201e-05,
"loss": 2.9455,
"num_input_tokens_seen": 2556896,
"step": 245
},
{
"epoch": 1.885014137606032,
"grad_norm": 2.456713914871216,
"learning_rate": 3.8682441583483314e-05,
"loss": 2.9629,
"num_input_tokens_seen": 2607712,
"step": 250
},
{
"epoch": 1.9227144203581528,
"grad_norm": 2.048579454421997,
"learning_rate": 3.8264797016335205e-05,
"loss": 3.0255,
"num_input_tokens_seen": 2661696,
"step": 255
},
{
"epoch": 1.9604147031102732,
"grad_norm": 2.1914753913879395,
"learning_rate": 3.7841934789335164e-05,
"loss": 2.9352,
"num_input_tokens_seen": 2711664,
"step": 260
},
{
"epoch": 1.998114985862394,
"grad_norm": 2.7857398986816406,
"learning_rate": 3.741402123380828e-05,
"loss": 3.0592,
"num_input_tokens_seen": 2766656,
"step": 265
},
{
"epoch": 2.0358152686145146,
"grad_norm": 2.3272783756256104,
"learning_rate": 3.6981224668001424e-05,
"loss": 2.8064,
"num_input_tokens_seen": 2816736,
"step": 270
},
{
"epoch": 2.0735155513666355,
"grad_norm": 2.9153804779052734,
"learning_rate": 3.654371533087586e-05,
"loss": 2.6955,
"num_input_tokens_seen": 2871728,
"step": 275
},
{
"epoch": 2.111215834118756,
"grad_norm": 2.69919490814209,
"learning_rate": 3.610166531514436e-05,
"loss": 2.8221,
"num_input_tokens_seen": 2914880,
"step": 280
},
{
"epoch": 2.1489161168708764,
"grad_norm": 2.708343267440796,
"learning_rate": 3.565524849957921e-05,
"loss": 2.8134,
"num_input_tokens_seen": 2966144,
"step": 285
},
{
"epoch": 2.1866163996229973,
"grad_norm": 2.9772026538848877,
"learning_rate": 3.520464048061758e-05,
"loss": 2.8455,
"num_input_tokens_seen": 3017856,
"step": 290
},
{
"epoch": 2.2243166823751177,
"grad_norm": 3.0390701293945312,
"learning_rate": 3.47500185032913e-05,
"loss": 2.8067,
"num_input_tokens_seen": 3069232,
"step": 295
},
{
"epoch": 2.2620169651272386,
"grad_norm": 3.013737678527832,
"learning_rate": 3.4291561391508185e-05,
"loss": 2.8081,
"num_input_tokens_seen": 3120768,
"step": 300
},
{
"epoch": 2.299717247879359,
"grad_norm": 3.2235286235809326,
"learning_rate": 3.3829449477712324e-05,
"loss": 2.7738,
"num_input_tokens_seen": 3177328,
"step": 305
},
{
"epoch": 2.3374175306314795,
"grad_norm": 2.9949076175689697,
"learning_rate": 3.336386453195088e-05,
"loss": 2.8478,
"num_input_tokens_seen": 3232688,
"step": 310
},
{
"epoch": 2.3751178133836004,
"grad_norm": 3.145488977432251,
"learning_rate": 3.2894989690375626e-05,
"loss": 2.7571,
"num_input_tokens_seen": 3288048,
"step": 315
},
{
"epoch": 2.412818096135721,
"grad_norm": 3.5241146087646484,
"learning_rate": 3.2423009383206876e-05,
"loss": 2.7997,
"num_input_tokens_seen": 3338128,
"step": 320
},
{
"epoch": 2.4505183788878417,
"grad_norm": 3.1561355590820312,
"learning_rate": 3.194810926218861e-05,
"loss": 2.7538,
"num_input_tokens_seen": 3390848,
"step": 325
},
{
"epoch": 2.488218661639962,
"grad_norm": 4.00498104095459,
"learning_rate": 3.147047612756302e-05,
"loss": 2.7098,
"num_input_tokens_seen": 3441776,
"step": 330
},
{
"epoch": 2.525918944392083,
"grad_norm": 3.1730833053588867,
"learning_rate": 3.099029785459328e-05,
"loss": 2.8332,
"num_input_tokens_seen": 3490976,
"step": 335
},
{
"epoch": 2.5636192271442035,
"grad_norm": 2.972092390060425,
"learning_rate": 3.0507763319663517e-05,
"loss": 2.8147,
"num_input_tokens_seen": 3544848,
"step": 340
},
{
"epoch": 2.6013195098963244,
"grad_norm": 3.334397554397583,
"learning_rate": 3.002306232598497e-05,
"loss": 2.7288,
"num_input_tokens_seen": 3597536,
"step": 345
},
{
"epoch": 2.639019792648445,
"grad_norm": 3.4970219135284424,
"learning_rate": 2.9536385528937567e-05,
"loss": 2.6969,
"num_input_tokens_seen": 3649760,
"step": 350
},
{
"epoch": 2.6767200754005653,
"grad_norm": 3.345592498779297,
"learning_rate": 2.9047924361076345e-05,
"loss": 2.7767,
"num_input_tokens_seen": 3704160,
"step": 355
},
{
"epoch": 2.7144203581526862,
"grad_norm": 3.0420963764190674,
"learning_rate": 2.8557870956832132e-05,
"loss": 2.6182,
"num_input_tokens_seen": 3751280,
"step": 360
},
{
"epoch": 2.7521206409048067,
"grad_norm": 3.9190845489501953,
"learning_rate": 2.8066418076936167e-05,
"loss": 2.7498,
"num_input_tokens_seen": 3801904,
"step": 365
},
{
"epoch": 2.7898209236569276,
"grad_norm": 3.697990655899048,
"learning_rate": 2.7573759032598366e-05,
"loss": 2.7674,
"num_input_tokens_seen": 3858528,
"step": 370
},
{
"epoch": 2.827521206409048,
"grad_norm": 3.192492723464966,
"learning_rate": 2.7080087609469062e-05,
"loss": 2.7692,
"num_input_tokens_seen": 3909216,
"step": 375
},
{
"epoch": 2.8652214891611685,
"grad_norm": 3.132563829421997,
"learning_rate": 2.6585597991414114e-05,
"loss": 2.6428,
"num_input_tokens_seen": 3963040,
"step": 380
},
{
"epoch": 2.9029217719132894,
"grad_norm": 3.4705281257629395,
"learning_rate": 2.6090484684133404e-05,
"loss": 2.6906,
"num_input_tokens_seen": 4009728,
"step": 385
},
{
"epoch": 2.9406220546654103,
"grad_norm": 3.432950973510742,
"learning_rate": 2.5594942438652688e-05,
"loss": 2.7791,
"num_input_tokens_seen": 4069024,
"step": 390
},
{
"epoch": 2.9783223374175307,
"grad_norm": 3.901036262512207,
"learning_rate": 2.509916617471903e-05,
"loss": 2.8693,
"num_input_tokens_seen": 4123440,
"step": 395
},
{
"epoch": 3.016022620169651,
"grad_norm": 3.282733917236328,
"learning_rate": 2.46033509041298e-05,
"loss": 2.6128,
"num_input_tokens_seen": 4175200,
"step": 400
},
{
"epoch": 3.053722902921772,
"grad_norm": 3.502382516860962,
"learning_rate": 2.410769165402549e-05,
"loss": 2.5518,
"num_input_tokens_seen": 4231536,
"step": 405
},
{
"epoch": 3.0914231856738925,
"grad_norm": 4.178504467010498,
"learning_rate": 2.3612383390176503e-05,
"loss": 2.5435,
"num_input_tokens_seen": 4283696,
"step": 410
},
{
"epoch": 3.1291234684260134,
"grad_norm": 3.8624277114868164,
"learning_rate": 2.3117620940294048e-05,
"loss": 2.5171,
"num_input_tokens_seen": 4340368,
"step": 415
},
{
"epoch": 3.166823751178134,
"grad_norm": 4.073604583740234,
"learning_rate": 2.2623598917395438e-05,
"loss": 2.4978,
"num_input_tokens_seen": 4394352,
"step": 420
},
{
"epoch": 3.2045240339302543,
"grad_norm": 4.057010650634766,
"learning_rate": 2.213051164325366e-05,
"loss": 2.5332,
"num_input_tokens_seen": 4440992,
"step": 425
},
{
"epoch": 3.242224316682375,
"grad_norm": 4.001201152801514,
"learning_rate": 2.1638553071961708e-05,
"loss": 2.5037,
"num_input_tokens_seen": 4485120,
"step": 430
},
{
"epoch": 3.2799245994344957,
"grad_norm": 4.139381408691406,
"learning_rate": 2.1147916713641367e-05,
"loss": 2.5358,
"num_input_tokens_seen": 4537456,
"step": 435
},
{
"epoch": 3.3176248821866166,
"grad_norm": 4.426379203796387,
"learning_rate": 2.0658795558326743e-05,
"loss": 2.5274,
"num_input_tokens_seen": 4587680,
"step": 440
},
{
"epoch": 3.355325164938737,
"grad_norm": 4.528985023498535,
"learning_rate": 2.017138200005236e-05,
"loss": 2.5192,
"num_input_tokens_seen": 4644752,
"step": 445
},
{
"epoch": 3.3930254476908575,
"grad_norm": 4.443198204040527,
"learning_rate": 1.9685867761175584e-05,
"loss": 2.5902,
"num_input_tokens_seen": 4696928,
"step": 450
},
{
"epoch": 3.4307257304429783,
"grad_norm": 4.068321228027344,
"learning_rate": 1.9202443816963425e-05,
"loss": 2.5584,
"num_input_tokens_seen": 4748800,
"step": 455
},
{
"epoch": 3.468426013195099,
"grad_norm": 4.630172252655029,
"learning_rate": 1.872130032047302e-05,
"loss": 2.4829,
"num_input_tokens_seen": 4806000,
"step": 460
},
{
"epoch": 3.5061262959472197,
"grad_norm": 3.785494565963745,
"learning_rate": 1.824262652775568e-05,
"loss": 2.5967,
"num_input_tokens_seen": 4861440,
"step": 465
},
{
"epoch": 3.54382657869934,
"grad_norm": 4.5177717208862305,
"learning_rate": 1.7766610723413684e-05,
"loss": 2.4773,
"num_input_tokens_seen": 4915152,
"step": 470
},
{
"epoch": 3.581526861451461,
"grad_norm": 4.719058036804199,
"learning_rate": 1.7293440146539196e-05,
"loss": 2.591,
"num_input_tokens_seen": 4965680,
"step": 475
},
{
"epoch": 3.6192271442035815,
"grad_norm": 3.852895736694336,
"learning_rate": 1.682330091706446e-05,
"loss": 2.5478,
"num_input_tokens_seen": 5024832,
"step": 480
},
{
"epoch": 3.6569274269557024,
"grad_norm": 4.8900370597839355,
"learning_rate": 1.6356377962552238e-05,
"loss": 2.5146,
"num_input_tokens_seen": 5076112,
"step": 485
},
{
"epoch": 3.694627709707823,
"grad_norm": 4.970290660858154,
"learning_rate": 1.589285494545514e-05,
"loss": 2.523,
"num_input_tokens_seen": 5129328,
"step": 490
},
{
"epoch": 3.7323279924599433,
"grad_norm": 4.482200622558594,
"learning_rate": 1.5432914190872757e-05,
"loss": 2.4704,
"num_input_tokens_seen": 5181936,
"step": 495
},
{
"epoch": 3.770028275212064,
"grad_norm": 4.517970085144043,
"learning_rate": 1.4976736614834664e-05,
"loss": 2.4309,
"num_input_tokens_seen": 5234112,
"step": 500
},
{
"epoch": 3.8077285579641846,
"grad_norm": 4.596162796020508,
"learning_rate": 1.4524501653137787e-05,
"loss": 2.5634,
"num_input_tokens_seen": 5282256,
"step": 505
},
{
"epoch": 3.8454288407163055,
"grad_norm": 4.231651306152344,
"learning_rate": 1.4076387190766017e-05,
"loss": 2.5133,
"num_input_tokens_seen": 5334720,
"step": 510
},
{
"epoch": 3.883129123468426,
"grad_norm": 5.023342132568359,
"learning_rate": 1.363256949191972e-05,
"loss": 2.3979,
"num_input_tokens_seen": 5384608,
"step": 515
},
{
"epoch": 3.9208294062205464,
"grad_norm": 5.331748008728027,
"learning_rate": 1.3193223130682936e-05,
"loss": 2.5216,
"num_input_tokens_seen": 5434816,
"step": 520
},
{
"epoch": 3.9585296889726673,
"grad_norm": 4.532582759857178,
"learning_rate": 1.2758520922355226e-05,
"loss": 2.4333,
"num_input_tokens_seen": 5480304,
"step": 525
},
{
"epoch": 3.9962299717247878,
"grad_norm": 4.729331016540527,
"learning_rate": 1.2328633855475429e-05,
"loss": 2.4818,
"num_input_tokens_seen": 5532160,
"step": 530
},
{
"epoch": 4.033930254476909,
"grad_norm": 4.460718631744385,
"learning_rate": 1.1903731024563966e-05,
"loss": 2.2869,
"num_input_tokens_seen": 5585152,
"step": 535
},
{
"epoch": 4.071630537229029,
"grad_norm": 5.085540771484375,
"learning_rate": 1.148397956361007e-05,
"loss": 2.2979,
"num_input_tokens_seen": 5642016,
"step": 540
},
{
"epoch": 4.10933081998115,
"grad_norm": 5.158658027648926,
"learning_rate": 1.106954458033026e-05,
"loss": 2.3257,
"num_input_tokens_seen": 5698096,
"step": 545
},
{
"epoch": 4.147031102733271,
"grad_norm": 5.59428596496582,
"learning_rate": 1.0660589091223855e-05,
"loss": 2.4352,
"num_input_tokens_seen": 5747504,
"step": 550
},
{
"epoch": 4.184731385485391,
"grad_norm": 5.6425323486328125,
"learning_rate": 1.025727395745095e-05,
"loss": 2.4073,
"num_input_tokens_seen": 5796192,
"step": 555
},
{
"epoch": 4.222431668237512,
"grad_norm": 5.1205291748046875,
"learning_rate": 9.859757821558337e-06,
"loss": 2.3018,
"num_input_tokens_seen": 5850880,
"step": 560
},
{
"epoch": 4.260131950989632,
"grad_norm": 4.863905429840088,
"learning_rate": 9.468197045077976e-06,
"loss": 2.2485,
"num_input_tokens_seen": 5897680,
"step": 565
},
{
"epoch": 4.297832233741753,
"grad_norm": 5.868588924407959,
"learning_rate": 9.082745647022797e-06,
"loss": 2.3062,
"num_input_tokens_seen": 5954608,
"step": 570
},
{
"epoch": 4.335532516493874,
"grad_norm": 6.174604892730713,
"learning_rate": 8.703555243303835e-06,
"loss": 2.3681,
"num_input_tokens_seen": 6003136,
"step": 575
},
{
"epoch": 4.3732327992459945,
"grad_norm": 5.885344982147217,
"learning_rate": 8.330774987092712e-06,
"loss": 2.2854,
"num_input_tokens_seen": 6053360,
"step": 580
},
{
"epoch": 4.410933081998115,
"grad_norm": 5.063482284545898,
"learning_rate": 7.96455151015272e-06,
"loss": 2.3556,
"num_input_tokens_seen": 6104480,
"step": 585
},
{
"epoch": 4.448633364750235,
"grad_norm": 4.70927095413208,
"learning_rate": 7.605028865161809e-06,
"loss": 2.3613,
"num_input_tokens_seen": 6157456,
"step": 590
},
{
"epoch": 4.486333647502356,
"grad_norm": 5.277394771575928,
"learning_rate": 7.25234846904993e-06,
"loss": 2.3387,
"num_input_tokens_seen": 6209744,
"step": 595
},
{
"epoch": 4.524033930254477,
"grad_norm": 5.2438859939575195,
"learning_rate": 6.906649047373246e-06,
"loss": 2.3717,
"num_input_tokens_seen": 6264528,
"step": 600
},
{
"epoch": 4.561734213006598,
"grad_norm": 5.98681640625,
"learning_rate": 6.568066579746901e-06,
"loss": 2.3322,
"num_input_tokens_seen": 6311568,
"step": 605
},
{
"epoch": 4.599434495758718,
"grad_norm": 5.56313943862915,
"learning_rate": 6.2367342463579475e-06,
"loss": 2.4046,
"num_input_tokens_seen": 6361040,
"step": 610
},
{
"epoch": 4.6371347785108386,
"grad_norm": 5.7875823974609375,
"learning_rate": 5.912782375579412e-06,
"loss": 2.2987,
"num_input_tokens_seen": 6411280,
"step": 615
},
{
"epoch": 4.674835061262959,
"grad_norm": 5.74908971786499,
"learning_rate": 5.596338392706077e-06,
"loss": 2.3819,
"num_input_tokens_seen": 6462816,
"step": 620
},
{
"epoch": 4.71253534401508,
"grad_norm": 4.686108112335205,
"learning_rate": 5.2875267698322325e-06,
"loss": 2.3011,
"num_input_tokens_seen": 6514640,
"step": 625
},
{
"epoch": 4.750235626767201,
"grad_norm": 5.016228199005127,
"learning_rate": 4.986468976890993e-06,
"loss": 2.3686,
"num_input_tokens_seen": 6567824,
"step": 630
},
{
"epoch": 4.787935909519321,
"grad_norm": 5.1654253005981445,
"learning_rate": 4.693283433874565e-06,
"loss": 2.3264,
"num_input_tokens_seen": 6616992,
"step": 635
},
{
"epoch": 4.825636192271442,
"grad_norm": 5.4306640625,
"learning_rate": 4.408085464254183e-06,
"loss": 2.4168,
"num_input_tokens_seen": 6665056,
"step": 640
},
{
"epoch": 4.863336475023563,
"grad_norm": 5.239274978637695,
"learning_rate": 4.130987249617993e-06,
"loss": 2.3583,
"num_input_tokens_seen": 6720512,
"step": 645
},
{
"epoch": 4.9010367577756835,
"grad_norm": 4.968624114990234,
"learning_rate": 3.8620977855448935e-06,
"loss": 2.3542,
"num_input_tokens_seen": 6777888,
"step": 650
},
{
"epoch": 4.938737040527804,
"grad_norm": 5.927698612213135,
"learning_rate": 3.601522838731461e-06,
"loss": 2.3247,
"num_input_tokens_seen": 6832688,
"step": 655
},
{
"epoch": 4.976437323279924,
"grad_norm": 5.6173624992370605,
"learning_rate": 3.3493649053890326e-06,
"loss": 2.4144,
"num_input_tokens_seen": 6885296,
"step": 660
},
{
"epoch": 5.014137606032045,
"grad_norm": 5.255261421203613,
"learning_rate": 3.1057231709272077e-06,
"loss": 2.3183,
"num_input_tokens_seen": 6938896,
"step": 665
},
{
"epoch": 5.051837888784166,
"grad_norm": 5.603339672088623,
"learning_rate": 2.8706934709395892e-06,
"loss": 2.3131,
"num_input_tokens_seen": 6989504,
"step": 670
},
{
"epoch": 5.089538171536287,
"grad_norm": 5.814265251159668,
"learning_rate": 2.6443682535072177e-06,
"loss": 2.1683,
"num_input_tokens_seen": 7037856,
"step": 675
},
{
"epoch": 5.127238454288407,
"grad_norm": 5.432703971862793,
"learning_rate": 2.4268365428344736e-06,
"loss": 2.2366,
"num_input_tokens_seen": 7091952,
"step": 680
},
{
"epoch": 5.1649387370405275,
"grad_norm": 6.781660556793213,
"learning_rate": 2.21818390423168e-06,
"loss": 2.1666,
"num_input_tokens_seen": 7138112,
"step": 685
},
{
"epoch": 5.202639019792649,
"grad_norm": 5.423711776733398,
"learning_rate": 2.0184924104583613e-06,
"loss": 2.2275,
"num_input_tokens_seen": 7183936,
"step": 690
},
{
"epoch": 5.240339302544769,
"grad_norm": 11.971770286560059,
"learning_rate": 1.8278406094401623e-06,
"loss": 2.2126,
"num_input_tokens_seen": 7231504,
"step": 695
},
{
"epoch": 5.27803958529689,
"grad_norm": 6.030401706695557,
"learning_rate": 1.6463034933723337e-06,
"loss": 2.3274,
"num_input_tokens_seen": 7280928,
"step": 700
},
{
"epoch": 5.31573986804901,
"grad_norm": 5.718194961547852,
"learning_rate": 1.4739524692218314e-06,
"loss": 2.2847,
"num_input_tokens_seen": 7329424,
"step": 705
},
{
"epoch": 5.353440150801131,
"grad_norm": 6.101733684539795,
"learning_rate": 1.3108553306396265e-06,
"loss": 2.3183,
"num_input_tokens_seen": 7383920,
"step": 710
},
{
"epoch": 5.391140433553252,
"grad_norm": 5.965092658996582,
"learning_rate": 1.1570762312943295e-06,
"loss": 2.2681,
"num_input_tokens_seen": 7438480,
"step": 715
},
{
"epoch": 5.4288407163053725,
"grad_norm": 5.805166244506836,
"learning_rate": 1.0126756596375686e-06,
"loss": 2.1764,
"num_input_tokens_seen": 7488160,
"step": 720
},
{
"epoch": 5.466540999057493,
"grad_norm": 5.904021739959717,
"learning_rate": 8.777104151110826e-07,
"loss": 2.2726,
"num_input_tokens_seen": 7548144,
"step": 725
},
{
"epoch": 5.504241281809613,
"grad_norm": 6.18209171295166,
"learning_rate": 7.522335858048707e-07,
"loss": 2.2841,
"num_input_tokens_seen": 7605536,
"step": 730
},
{
"epoch": 5.541941564561734,
"grad_norm": 5.971505641937256,
"learning_rate": 6.362945275751736e-07,
"loss": 2.2894,
"num_input_tokens_seen": 7655408,
"step": 735
},
{
"epoch": 5.579641847313855,
"grad_norm": 5.907384872436523,
"learning_rate": 5.299388446305343e-07,
"loss": 2.3057,
"num_input_tokens_seen": 7706016,
"step": 740
},
{
"epoch": 5.617342130065976,
"grad_norm": 5.055915355682373,
"learning_rate": 4.3320837159353813e-07,
"loss": 2.2593,
"num_input_tokens_seen": 7761520,
"step": 745
},
{
"epoch": 5.655042412818096,
"grad_norm": 4.980125904083252,
"learning_rate": 3.4614115704533767e-07,
"loss": 2.2211,
"num_input_tokens_seen": 7818592,
"step": 750
},
{
"epoch": 5.6927426955702165,
"grad_norm": 6.198908805847168,
"learning_rate": 2.687714485593462e-07,
"loss": 2.3253,
"num_input_tokens_seen": 7869808,
"step": 755
},
{
"epoch": 5.730442978322337,
"grad_norm": 5.739902496337891,
"learning_rate": 2.011296792301165e-07,
"loss": 2.354,
"num_input_tokens_seen": 7926288,
"step": 760
},
{
"epoch": 5.768143261074458,
"grad_norm": 5.519392490386963,
"learning_rate": 1.4324245570256633e-07,
"loss": 2.2959,
"num_input_tokens_seen": 7978000,
"step": 765
},
{
"epoch": 5.805843543826579,
"grad_norm": 5.5738115310668945,
"learning_rate": 9.513254770636137e-08,
"loss": 2.305,
"num_input_tokens_seen": 8026688,
"step": 770
},
{
"epoch": 5.843543826578699,
"grad_norm": 5.609658718109131,
"learning_rate": 5.681887909952388e-08,
"loss": 2.321,
"num_input_tokens_seen": 8080832,
"step": 775
},
{
"epoch": 5.88124410933082,
"grad_norm": 5.990361213684082,
"learning_rate": 2.831652042480093e-08,
"loss": 2.2447,
"num_input_tokens_seen": 8132832,
"step": 780
},
{
"epoch": 5.918944392082941,
"grad_norm": 5.642332553863525,
"learning_rate": 9.636682981720158e-09,
"loss": 2.2446,
"num_input_tokens_seen": 8186208,
"step": 785
},
{
"epoch": 5.956644674835061,
"grad_norm": 6.089615345001221,
"learning_rate": 7.867144166728846e-10,
"loss": 2.2856,
"num_input_tokens_seen": 8240448,
"step": 790
},
{
"epoch": 5.971724787935909,
"num_input_tokens_seen": 8259920,
"step": 792,
"total_flos": 3.514168539537736e+17,
"train_loss": 2.724412232336372,
"train_runtime": 4977.287,
"train_samples_per_second": 5.116,
"train_steps_per_second": 0.159
}
],
"logging_steps": 5,
"max_steps": 792,
"num_input_tokens_seen": 8259920,
"num_train_epochs": 6,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.514168539537736e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
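
A minimal sketch for inspecting the state above, assuming Python with matplotlib installed and that the file is saved locally as trainer_state.json: it loads the Trainer's log_history, plots the recorded loss curve and learning-rate schedule, and prints the overall token throughput implied by the summary entry. The script and its file names are illustrative, not part of this repo.

# Hypothetical helper, not shipped with this repo: load the trainer state
# and visualize the per-step logs recorded by the Hugging Face Trainer.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only entries that carry a per-step "loss"; the final entry is the
# end-of-training summary (train_runtime, train_loss, ...) without one.
logs = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in logs]
losses = [e["loss"] for e in logs]
lrs = [e["learning_rate"] for e in logs]

fig, (ax_loss, ax_lr) = plt.subplots(2, 1, sharex=True)
ax_loss.plot(steps, losses)
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_ylabel("learning rate")
ax_lr.set_xlabel("global step")
fig.tight_layout()
fig.savefig("training_curves.png")

# Throughput implied by the summary: ~8.26M tokens over ~4977 s, i.e. about
# 1.66k tokens/sec.
summary = state["log_history"][-1]
print(f"tokens/sec ~ {state['num_input_tokens_seen'] / summary['train_runtime']:.0f}")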