{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 819,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.001221001221001221,
"grad_norm": 2.845627588694297,
"learning_rate": 1.2195121951219513e-05,
"loss": 1.1248,
"step": 1
},
{
"epoch": 0.006105006105006105,
"grad_norm": 1.2124247668960009,
"learning_rate": 6.097560975609756e-05,
"loss": 1.1247,
"step": 5
},
{
"epoch": 0.01221001221001221,
"grad_norm": 0.8188382612691208,
"learning_rate": 0.00012195121951219512,
"loss": 1.0075,
"step": 10
},
{
"epoch": 0.018315018315018316,
"grad_norm": 0.3132433883914639,
"learning_rate": 0.00018292682926829268,
"loss": 0.9023,
"step": 15
},
{
"epoch": 0.02442002442002442,
"grad_norm": 0.7034747119264958,
"learning_rate": 0.00024390243902439024,
"loss": 0.8799,
"step": 20
},
{
"epoch": 0.030525030525030524,
"grad_norm": 0.23298545709273827,
"learning_rate": 0.0003048780487804878,
"loss": 0.8544,
"step": 25
},
{
"epoch": 0.03663003663003663,
"grad_norm": 0.19778335914070672,
"learning_rate": 0.00036585365853658537,
"loss": 0.8282,
"step": 30
},
{
"epoch": 0.042735042735042736,
"grad_norm": 0.19545935471697304,
"learning_rate": 0.0004268292682926829,
"loss": 0.8225,
"step": 35
},
{
"epoch": 0.04884004884004884,
"grad_norm": 0.5607940032058928,
"learning_rate": 0.0004878048780487805,
"loss": 0.8194,
"step": 40
},
{
"epoch": 0.054945054945054944,
"grad_norm": 0.1722073432319662,
"learning_rate": 0.0005487804878048781,
"loss": 0.8075,
"step": 45
},
{
"epoch": 0.06105006105006105,
"grad_norm": 0.24733599032712683,
"learning_rate": 0.0006097560975609756,
"loss": 0.8103,
"step": 50
},
{
"epoch": 0.06715506715506715,
"grad_norm": 0.2752427480573236,
"learning_rate": 0.0006707317073170732,
"loss": 0.8077,
"step": 55
},
{
"epoch": 0.07326007326007326,
"grad_norm": 0.17362880515878137,
"learning_rate": 0.0007317073170731707,
"loss": 0.8101,
"step": 60
},
{
"epoch": 0.07936507936507936,
"grad_norm": 0.19431322941155885,
"learning_rate": 0.0007926829268292683,
"loss": 0.8125,
"step": 65
},
{
"epoch": 0.08547008547008547,
"grad_norm": 0.2701548935901204,
"learning_rate": 0.0008536585365853659,
"loss": 0.8093,
"step": 70
},
{
"epoch": 0.09157509157509157,
"grad_norm": 0.16715603547837118,
"learning_rate": 0.0009146341463414635,
"loss": 0.8168,
"step": 75
},
{
"epoch": 0.09768009768009768,
"grad_norm": 0.319658532139757,
"learning_rate": 0.000975609756097561,
"loss": 0.8444,
"step": 80
},
{
"epoch": 0.10378510378510379,
"grad_norm": 0.3158267896656623,
"learning_rate": 0.000999959117130623,
"loss": 0.8357,
"step": 85
},
{
"epoch": 0.10989010989010989,
"grad_norm": 0.1920760211810322,
"learning_rate": 0.000999709301584265,
"loss": 0.826,
"step": 90
},
{
"epoch": 0.115995115995116,
"grad_norm": 0.21082439943619266,
"learning_rate": 0.0009992324965361792,
"loss": 0.8268,
"step": 95
},
{
"epoch": 0.1221001221001221,
"grad_norm": 0.2261943689235946,
"learning_rate": 0.0009985289185717684,
"loss": 0.8231,
"step": 100
},
{
"epoch": 0.1282051282051282,
"grad_norm": 0.15827441273364565,
"learning_rate": 0.000997598887286467,
"loss": 0.8246,
"step": 105
},
{
"epoch": 0.1343101343101343,
"grad_norm": 0.17232376165069538,
"learning_rate": 0.000996442825140569,
"loss": 0.8176,
"step": 110
},
{
"epoch": 0.14041514041514042,
"grad_norm": 0.21743975582559072,
"learning_rate": 0.0009950612572673255,
"loss": 0.8237,
"step": 115
},
{
"epoch": 0.14652014652014653,
"grad_norm": 0.4311991632319824,
"learning_rate": 0.0009934548112344088,
"loss": 0.8518,
"step": 120
},
{
"epoch": 0.15262515262515264,
"grad_norm": 0.6756959037582919,
"learning_rate": 0.0009916242167588433,
"loss": 0.8606,
"step": 125
},
{
"epoch": 0.15873015873015872,
"grad_norm": 0.25239211955256813,
"learning_rate": 0.0009895703053755364,
"loss": 0.8377,
"step": 130
},
{
"epoch": 0.16483516483516483,
"grad_norm": 0.18668998061461328,
"learning_rate": 0.0009872940100595598,
"loss": 0.8286,
"step": 135
},
{
"epoch": 0.17094017094017094,
"grad_norm": 0.21053747787681398,
"learning_rate": 0.0009847963648023522,
"loss": 0.8226,
"step": 140
},
{
"epoch": 0.17704517704517705,
"grad_norm": 0.1517465419574654,
"learning_rate": 0.000982078504142035,
"loss": 0.8106,
"step": 145
},
{
"epoch": 0.18315018315018314,
"grad_norm": 0.35173293057083965,
"learning_rate": 0.000979141662648057,
"loss": 0.8144,
"step": 150
},
{
"epoch": 0.18925518925518925,
"grad_norm": 0.1752442164371026,
"learning_rate": 0.0009759871743604004,
"loss": 0.8125,
"step": 155
},
{
"epoch": 0.19536019536019536,
"grad_norm": 0.18801248773726562,
"learning_rate": 0.0009726164721835996,
"loss": 0.8066,
"step": 160
},
{
"epoch": 0.20146520146520147,
"grad_norm": 0.1692284228731214,
"learning_rate": 0.0009690310872358572,
"loss": 0.8168,
"step": 165
},
{
"epoch": 0.20757020757020758,
"grad_norm": 0.14813568110706887,
"learning_rate": 0.0009652326481535434,
"loss": 0.8077,
"step": 170
},
{
"epoch": 0.21367521367521367,
"grad_norm": 0.1607678270523033,
"learning_rate": 0.0009612228803513976,
"loss": 0.8073,
"step": 175
},
{
"epoch": 0.21978021978021978,
"grad_norm": 0.17512791958206225,
"learning_rate": 0.0009570036052387725,
"loss": 0.8034,
"step": 180
},
{
"epoch": 0.2258852258852259,
"grad_norm": 0.13910989295755694,
"learning_rate": 0.0009525767393922706,
"loss": 0.7942,
"step": 185
},
{
"epoch": 0.231990231990232,
"grad_norm": 0.19359539972508316,
"learning_rate": 0.0009479442936851526,
"loss": 0.796,
"step": 190
},
{
"epoch": 0.23809523809523808,
"grad_norm": 0.17836389937285715,
"learning_rate": 0.0009431083723739124,
"loss": 0.7868,
"step": 195
},
{
"epoch": 0.2442002442002442,
"grad_norm": 0.265659142625784,
"learning_rate": 0.0009380711721424326,
"loss": 0.7906,
"step": 200
},
{
"epoch": 0.2503052503052503,
"grad_norm": 0.1678961190593195,
"learning_rate": 0.0009328349811041565,
"loss": 0.7918,
"step": 205
},
{
"epoch": 0.2564102564102564,
"grad_norm": 0.14919322708050076,
"learning_rate": 0.0009274021777627277,
"loss": 0.797,
"step": 210
},
{
"epoch": 0.2625152625152625,
"grad_norm": 0.1310511078441202,
"learning_rate": 0.0009217752299315725,
"loss": 0.7793,
"step": 215
},
{
"epoch": 0.2686202686202686,
"grad_norm": 0.14167730442710622,
"learning_rate": 0.0009159566936129111,
"loss": 0.7981,
"step": 220
},
{
"epoch": 0.27472527472527475,
"grad_norm": 0.1274513554504715,
"learning_rate": 0.0009099492118367123,
"loss": 0.7921,
"step": 225
},
{
"epoch": 0.28083028083028083,
"grad_norm": 0.13255405073600673,
"learning_rate": 0.0009037555134601149,
"loss": 0.7788,
"step": 230
},
{
"epoch": 0.2869352869352869,
"grad_norm": 0.11938377579036098,
"learning_rate": 0.000897378411927864,
"loss": 0.7841,
"step": 235
},
{
"epoch": 0.29304029304029305,
"grad_norm": 0.1435917602219082,
"learning_rate": 0.0008908208039943213,
"loss": 0.7748,
"step": 240
},
{
"epoch": 0.29914529914529914,
"grad_norm": 0.16003339862967003,
"learning_rate": 0.0008840856684076366,
"loss": 0.7673,
"step": 245
},
{
"epoch": 0.3052503052503053,
"grad_norm": 0.15924908240550437,
"learning_rate": 0.0008771760645566706,
"loss": 0.7759,
"step": 250
},
{
"epoch": 0.31135531135531136,
"grad_norm": 0.5483787394777416,
"learning_rate": 0.000870095131081289,
"loss": 0.7801,
"step": 255
},
{
"epoch": 0.31746031746031744,
"grad_norm": 0.15293992644666654,
"learning_rate": 0.0008628460844466573,
"loss": 0.77,
"step": 260
},
{
"epoch": 0.3235653235653236,
"grad_norm": 1.0110508213385356,
"learning_rate": 0.0008554322174821833,
"loss": 0.7761,
"step": 265
},
{
"epoch": 0.32967032967032966,
"grad_norm": 0.1515180292435856,
"learning_rate": 0.0008478568978857722,
"loss": 0.7775,
"step": 270
},
{
"epoch": 0.33577533577533575,
"grad_norm": 0.12505503848472327,
"learning_rate": 0.0008401235666940728,
"loss": 0.7767,
"step": 275
},
{
"epoch": 0.3418803418803419,
"grad_norm": 0.12893898857610978,
"learning_rate": 0.0008322357367194109,
"loss": 0.7687,
"step": 280
},
{
"epoch": 0.34798534798534797,
"grad_norm": 0.1405546608422591,
"learning_rate": 0.0008241969909541184,
"loss": 0.7642,
"step": 285
},
{
"epoch": 0.3540903540903541,
"grad_norm": 0.21931778442943825,
"learning_rate": 0.0008160109809429835,
"loss": 0.7718,
"step": 290
},
{
"epoch": 0.3601953601953602,
"grad_norm": 0.30317755956190323,
"learning_rate": 0.0008076814251245613,
"loss": 0.7667,
"step": 295
},
{
"epoch": 0.3663003663003663,
"grad_norm": 0.13657129292159587,
"learning_rate": 0.0007992121071421001,
"loss": 0.7767,
"step": 300
},
{
"epoch": 0.3724053724053724,
"grad_norm": 0.1468628940058117,
"learning_rate": 0.0007906068741248461,
"loss": 0.7629,
"step": 305
},
{
"epoch": 0.3785103785103785,
"grad_norm": 0.12694351711402102,
"learning_rate": 0.0007818696349405123,
"loss": 0.7671,
"step": 310
},
{
"epoch": 0.38461538461538464,
"grad_norm": 0.13102709342976568,
"learning_rate": 0.0007730043584197021,
"loss": 0.7615,
"step": 315
},
{
"epoch": 0.3907203907203907,
"grad_norm": 0.1245294881062342,
"learning_rate": 0.0007640150715530953,
"loss": 0.7518,
"step": 320
},
{
"epoch": 0.3968253968253968,
"grad_norm": 0.11835672269374947,
"learning_rate": 0.0007549058576622157,
"loss": 0.7724,
"step": 325
},
{
"epoch": 0.40293040293040294,
"grad_norm": 0.14015089006294798,
"learning_rate": 0.0007456808545446102,
"loss": 0.7561,
"step": 330
},
{
"epoch": 0.409035409035409,
"grad_norm": 0.1492561229489302,
"learning_rate": 0.0007363442525942826,
"loss": 0.7532,
"step": 335
},
{
"epoch": 0.41514041514041516,
"grad_norm": 0.13334234292600997,
"learning_rate": 0.0007269002928982366,
"loss": 0.7535,
"step": 340
},
{
"epoch": 0.42124542124542125,
"grad_norm": 0.13516173225053815,
"learning_rate": 0.0007173532653099911,
"loss": 0.7475,
"step": 345
},
{
"epoch": 0.42735042735042733,
"grad_norm": 0.1511324488816498,
"learning_rate": 0.0007077075065009433,
"loss": 0.7635,
"step": 350
},
{
"epoch": 0.43345543345543347,
"grad_norm": 0.13880830617201192,
"learning_rate": 0.0006979673979904665,
"loss": 0.7543,
"step": 355
},
{
"epoch": 0.43956043956043955,
"grad_norm": 0.13003038195812766,
"learning_rate": 0.0006881373641556346,
"loss": 0.7404,
"step": 360
},
{
"epoch": 0.4456654456654457,
"grad_norm": 0.11533542815769274,
"learning_rate": 0.0006782218702214797,
"loss": 0.7431,
"step": 365
},
{
"epoch": 0.4517704517704518,
"grad_norm": 0.10828760782447848,
"learning_rate": 0.000668225420232694,
"loss": 0.7386,
"step": 370
},
{
"epoch": 0.45787545787545786,
"grad_norm": 0.11844972793162856,
"learning_rate": 0.0006581525550076989,
"loss": 0.741,
"step": 375
},
{
"epoch": 0.463980463980464,
"grad_norm": 0.13335911956871846,
"learning_rate": 0.0006480078500760096,
"loss": 0.7466,
"step": 380
},
{
"epoch": 0.4700854700854701,
"grad_norm": 0.12386816519541291,
"learning_rate": 0.0006377959135998322,
"loss": 0.7401,
"step": 385
},
{
"epoch": 0.47619047619047616,
"grad_norm": 0.12984729252751007,
"learning_rate": 0.0006275213842808383,
"loss": 0.7321,
"step": 390
},
{
"epoch": 0.4822954822954823,
"grad_norm": 0.36584117580730136,
"learning_rate": 0.0006171889292530655,
"loss": 0.734,
"step": 395
},
{
"epoch": 0.4884004884004884,
"grad_norm": 0.1189163212437526,
"learning_rate": 0.0006068032419629059,
"loss": 0.7412,
"step": 400
},
{
"epoch": 0.4945054945054945,
"grad_norm": 0.10221301095027906,
"learning_rate": 0.0005963690400371386,
"loss": 0.7325,
"step": 405
},
{
"epoch": 0.5006105006105006,
"grad_norm": 0.1280470363734395,
"learning_rate": 0.0005858910631399817,
"loss": 0.7342,
"step": 410
},
{
"epoch": 0.5067155067155067,
"grad_norm": 0.12964643726522185,
"learning_rate": 0.0005753740708201315,
"loss": 0.7277,
"step": 415
},
{
"epoch": 0.5128205128205128,
"grad_norm": 0.12869511222529517,
"learning_rate": 0.0005648228403487712,
"loss": 0.7298,
"step": 420
},
{
"epoch": 0.518925518925519,
"grad_norm": 0.13085330751064783,
"learning_rate": 0.0005542421645495279,
"loss": 0.731,
"step": 425
},
{
"epoch": 0.525030525030525,
"grad_norm": 0.1187750571018603,
"learning_rate": 0.0005436368496213656,
"loss": 0.7163,
"step": 430
},
{
"epoch": 0.5311355311355311,
"grad_norm": 0.1052076485298473,
"learning_rate": 0.0005330117129554028,
"loss": 0.7225,
"step": 435
},
{
"epoch": 0.5372405372405372,
"grad_norm": 0.10433507372996674,
"learning_rate": 0.0005223715809466454,
"loss": 0.7229,
"step": 440
},
{
"epoch": 0.5433455433455433,
"grad_norm": 0.11656415183843066,
"learning_rate": 0.0005117212868016303,
"loss": 0.7192,
"step": 445
},
{
"epoch": 0.5494505494505495,
"grad_norm": 0.10652338347188516,
"learning_rate": 0.0005010656683429746,
"loss": 0.7216,
"step": 450
},
{
"epoch": 0.5555555555555556,
"grad_norm": 0.1005738909094789,
"learning_rate": 0.0004904095658118283,
"loss": 0.7099,
"step": 455
},
{
"epoch": 0.5616605616605617,
"grad_norm": 0.11153866886950109,
"learning_rate": 0.0004797578196692281,
"loss": 0.7187,
"step": 460
},
{
"epoch": 0.5677655677655677,
"grad_norm": 0.1144504129034415,
"learning_rate": 0.00046911526839735093,
"loss": 0.7221,
"step": 465
},
{
"epoch": 0.5738705738705738,
"grad_norm": 0.11821028818177344,
"learning_rate": 0.0004584867463016671,
"loss": 0.7147,
"step": 470
},
{
"epoch": 0.57997557997558,
"grad_norm": 0.10863161336720586,
"learning_rate": 0.00044787708131499104,
"loss": 0.7039,
"step": 475
},
{
"epoch": 0.5860805860805861,
"grad_norm": 0.1283853192980776,
"learning_rate": 0.0004372910928044249,
"loss": 0.7082,
"step": 480
},
{
"epoch": 0.5921855921855922,
"grad_norm": 0.10840234459597066,
"learning_rate": 0.00042673358938219544,
"loss": 0.7066,
"step": 485
},
{
"epoch": 0.5982905982905983,
"grad_norm": 0.1039543838216163,
"learning_rate": 0.00041620936672137393,
"loss": 0.7059,
"step": 490
},
{
"epoch": 0.6043956043956044,
"grad_norm": 0.11740716367044565,
"learning_rate": 0.00040572320537747656,
"loss": 0.7037,
"step": 495
},
{
"epoch": 0.6105006105006106,
"grad_norm": 0.10875226172213023,
"learning_rate": 0.0003952798686169279,
"loss": 0.7172,
"step": 500
},
{
"epoch": 0.6166056166056166,
"grad_norm": 0.10778168900235421,
"learning_rate": 0.00038488410025338133,
"loss": 0.7066,
"step": 505
},
{
"epoch": 0.6227106227106227,
"grad_norm": 0.13912280488293668,
"learning_rate": 0.00037454062249287477,
"loss": 0.7042,
"step": 510
},
{
"epoch": 0.6288156288156288,
"grad_norm": 0.09955262254043926,
"learning_rate": 0.0003642541337887999,
"loss": 0.7058,
"step": 515
},
{
"epoch": 0.6349206349206349,
"grad_norm": 0.1062306858321646,
"learning_rate": 0.00035402930670766296,
"loss": 0.6914,
"step": 520
},
{
"epoch": 0.6410256410256411,
"grad_norm": 0.12999926182410393,
"learning_rate": 0.00034387078580660346,
"loss": 0.6945,
"step": 525
},
{
"epoch": 0.6471306471306472,
"grad_norm": 0.09855411173072333,
"learning_rate": 0.00033378318552363664,
"loss": 0.694,
"step": 530
},
{
"epoch": 0.6532356532356532,
"grad_norm": 0.11806337807522205,
"learning_rate": 0.0003237710880815756,
"loss": 0.6999,
"step": 535
},
{
"epoch": 0.6593406593406593,
"grad_norm": 0.10757598864289536,
"learning_rate": 0.00031383904140658986,
"loss": 0.6875,
"step": 540
},
{
"epoch": 0.6654456654456654,
"grad_norm": 0.1072081233564741,
"learning_rate": 0.0003039915570623396,
"loss": 0.695,
"step": 545
},
{
"epoch": 0.6715506715506715,
"grad_norm": 0.10979416858173498,
"learning_rate": 0.0002942331082006308,
"loss": 0.6868,
"step": 550
},
{
"epoch": 0.6776556776556777,
"grad_norm": 0.8819457273201341,
"learning_rate": 0.00028456812752951485,
"loss": 0.7007,
"step": 555
},
{
"epoch": 0.6837606837606838,
"grad_norm": 0.11243672341550275,
"learning_rate": 0.0002750010052997635,
"loss": 0.6865,
"step": 560
},
{
"epoch": 0.6898656898656899,
"grad_norm": 0.10293192092961795,
"learning_rate": 0.00026553608731062604,
"loss": 0.6895,
"step": 565
},
{
"epoch": 0.6959706959706959,
"grad_norm": 0.1182585426603602,
"learning_rate": 0.00025617767293578176,
"loss": 0.6803,
"step": 570
},
{
"epoch": 0.702075702075702,
"grad_norm": 0.10806065271563285,
"learning_rate": 0.0002469300131703773,
"loss": 0.6862,
"step": 575
},
{
"epoch": 0.7081807081807082,
"grad_norm": 0.12112895844817373,
"learning_rate": 0.00023779730870004235,
"loss": 0.6756,
"step": 580
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.10280660717512566,
"learning_rate": 0.00022878370799275777,
"loss": 0.6864,
"step": 585
},
{
"epoch": 0.7203907203907204,
"grad_norm": 0.09742652551259846,
"learning_rate": 0.0002198933054144414,
"loss": 0.6697,
"step": 590
},
{
"epoch": 0.7264957264957265,
"grad_norm": 0.10276863533595788,
"learning_rate": 0.00021113013936911113,
"loss": 0.6773,
"step": 595
},
{
"epoch": 0.7326007326007326,
"grad_norm": 0.09405375108971262,
"learning_rate": 0.00020249819046446837,
"loss": 0.6744,
"step": 600
},
{
"epoch": 0.7387057387057387,
"grad_norm": 0.09943799218174577,
"learning_rate": 0.00019400137970373356,
"loss": 0.6798,
"step": 605
},
{
"epoch": 0.7448107448107448,
"grad_norm": 0.09345899241802126,
"learning_rate": 0.00018564356670455767,
"loss": 0.6796,
"step": 610
},
{
"epoch": 0.7509157509157509,
"grad_norm": 0.09653784373074349,
"learning_rate": 0.00017742854794581785,
"loss": 0.6824,
"step": 615
},
{
"epoch": 0.757020757020757,
"grad_norm": 0.09280543861775854,
"learning_rate": 0.00016936005504309342,
"loss": 0.6779,
"step": 620
},
{
"epoch": 0.7631257631257631,
"grad_norm": 0.09345715836472011,
"learning_rate": 0.0001614417530536042,
"loss": 0.6759,
"step": 625
},
{
"epoch": 0.7692307692307693,
"grad_norm": 0.09678676699978526,
"learning_rate": 0.00015367723881138434,
"loss": 0.6783,
"step": 630
},
{
"epoch": 0.7753357753357754,
"grad_norm": 0.10103798904615227,
"learning_rate": 0.00014607003929344492,
"loss": 0.6737,
"step": 635
},
{
"epoch": 0.7814407814407814,
"grad_norm": 0.10782510789122983,
"learning_rate": 0.00013862361001766972,
"loss": 0.6663,
"step": 640
},
{
"epoch": 0.7875457875457875,
"grad_norm": 0.09404405358524387,
"learning_rate": 0.00013134133347316885,
"loss": 0.6623,
"step": 645
},
{
"epoch": 0.7936507936507936,
"grad_norm": 0.09304236582140996,
"learning_rate": 0.0001242265175838072,
"loss": 0.6629,
"step": 650
},
{
"epoch": 0.7997557997557998,
"grad_norm": 0.10119415327352758,
"learning_rate": 0.00011728239420560316,
"loss": 0.6674,
"step": 655
},
{
"epoch": 0.8058608058608059,
"grad_norm": 0.09622079862749633,
"learning_rate": 0.0001105121176586793,
"loss": 0.6662,
"step": 660
},
{
"epoch": 0.811965811965812,
"grad_norm": 0.10162594693939093,
"learning_rate": 0.00010391876329443534,
"loss": 0.661,
"step": 665
},
{
"epoch": 0.818070818070818,
"grad_norm": 0.10210286806288393,
"learning_rate": 9.750532609858991e-05,
"loss": 0.6657,
"step": 670
},
{
"epoch": 0.8241758241758241,
"grad_norm": 0.09880466873137764,
"learning_rate": 9.127471933073007e-05,
"loss": 0.6672,
"step": 675
},
{
"epoch": 0.8302808302808303,
"grad_norm": 0.10918550954379612,
"learning_rate": 8.522977320098224e-05,
"loss": 0.6589,
"step": 680
},
{
"epoch": 0.8363858363858364,
"grad_norm": 0.08978860640915999,
"learning_rate": 7.937323358440934e-05,
"loss": 0.6623,
"step": 685
},
{
"epoch": 0.8424908424908425,
"grad_norm": 0.10525052691451267,
"learning_rate": 7.370776077371622e-05,
"loss": 0.656,
"step": 690
},
{
"epoch": 0.8485958485958486,
"grad_norm": 0.08820292667299327,
"learning_rate": 6.82359282708292e-05,
"loss": 0.6634,
"step": 695
},
{
"epoch": 0.8547008547008547,
"grad_norm": 0.10304057992300253,
"learning_rate": 6.296022161790149e-05,
"loss": 0.6619,
"step": 700
},
{
"epoch": 0.8608058608058609,
"grad_norm": 0.09268680805627066,
"learning_rate": 5.78830372682721e-05,
"loss": 0.6606,
"step": 705
},
{
"epoch": 0.8669108669108669,
"grad_norm": 0.11269852110384801,
"learning_rate": 5.300668149789417e-05,
"loss": 0.66,
"step": 710
},
{
"epoch": 0.873015873015873,
"grad_norm": 0.09810783125187136,
"learning_rate": 4.833336935772442e-05,
"loss": 0.6561,
"step": 715
},
{
"epoch": 0.8791208791208791,
"grad_norm": 0.09466046226072769,
"learning_rate": 4.386522366755169e-05,
"loss": 0.6612,
"step": 720
},
{
"epoch": 0.8852258852258852,
"grad_norm": 0.10630959016804076,
"learning_rate": 3.960427405172079e-05,
"loss": 0.659,
"step": 725
},
{
"epoch": 0.8913308913308914,
"grad_norm": 0.10763420167495608,
"learning_rate": 3.5552456017189926e-05,
"loss": 0.6603,
"step": 730
},
{
"epoch": 0.8974358974358975,
"grad_norm": 0.0939541631473488,
"learning_rate": 3.171161007433937e-05,
"loss": 0.6565,
"step": 735
},
{
"epoch": 0.9035409035409036,
"grad_norm": 0.09013917837701788,
"learning_rate": 2.808348090093277e-05,
"loss": 0.6609,
"step": 740
},
{
"epoch": 0.9096459096459096,
"grad_norm": 0.0940662793486537,
"learning_rate": 2.466971654960931e-05,
"loss": 0.6597,
"step": 745
},
{
"epoch": 0.9157509157509157,
"grad_norm": 0.09483824579567894,
"learning_rate": 2.147186769926712e-05,
"loss": 0.6539,
"step": 750
},
{
"epoch": 0.9218559218559218,
"grad_norm": 0.08386874161410003,
"learning_rate": 1.8491386950677812e-05,
"loss": 0.6548,
"step": 755
},
{
"epoch": 0.927960927960928,
"grad_norm": 0.09329690783399783,
"learning_rate": 1.572962816665302e-05,
"loss": 0.6578,
"step": 760
},
{
"epoch": 0.9340659340659341,
"grad_norm": 0.09440009819249363,
"learning_rate": 1.3187845857061508e-05,
"loss": 0.6486,
"step": 765
},
{
"epoch": 0.9401709401709402,
"grad_norm": 0.0993077201527517,
"learning_rate": 1.0867194608976228e-05,
"loss": 0.6666,
"step": 770
},
{
"epoch": 0.9462759462759462,
"grad_norm": 0.08833153825443844,
"learning_rate": 8.768728562211947e-06,
"loss": 0.6576,
"step": 775
},
{
"epoch": 0.9523809523809523,
"grad_norm": 0.09203712131267434,
"learning_rate": 6.893400930488569e-06,
"loss": 0.6585,
"step": 780
},
{
"epoch": 0.9584859584859585,
"grad_norm": 0.0951382321616532,
"learning_rate": 5.242063568441313e-06,
"loss": 0.6576,
"step": 785
},
{
"epoch": 0.9645909645909646,
"grad_norm": 0.08565887455728097,
"learning_rate": 3.815466584670746e-06,
"loss": 0.6503,
"step": 790
},
{
"epoch": 0.9706959706959707,
"grad_norm": 0.09652420113897765,
"learning_rate": 2.6142580010117823e-06,
"loss": 0.656,
"step": 795
},
{
"epoch": 0.9768009768009768,
"grad_norm": 0.08859484215258381,
"learning_rate": 1.6389834581739814e-06,
"loss": 0.6564,
"step": 800
},
{
"epoch": 0.9829059829059829,
"grad_norm": 0.08947783751139311,
"learning_rate": 8.900859678879769e-07,
"loss": 0.6573,
"step": 805
},
{
"epoch": 0.989010989010989,
"grad_norm": 0.09210226203889316,
"learning_rate": 3.6790571167061305e-07,
"loss": 0.6579,
"step": 810
},
{
"epoch": 0.9951159951159951,
"grad_norm": 0.09419192800150224,
"learning_rate": 7.26798862996092e-08,
"loss": 0.6504,
"step": 815
},
{
"epoch": 1.0,
"eval_loss": 1.0153491497039795,
"eval_runtime": 101.6617,
"eval_samples_per_second": 206.489,
"eval_steps_per_second": 6.453,
"step": 819
},
{
"epoch": 1.0,
"step": 819,
"total_flos": 129231539404800.0,
"train_loss": 0.7394485170879062,
"train_runtime": 1679.86,
"train_samples_per_second": 62.397,
"train_steps_per_second": 0.488
}
],
"logging_steps": 5,
"max_steps": 819,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 129231539404800.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}