{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0001,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1e-07,
"grad_norm": 0.6030768752098083,
"learning_rate": 0.0001,
"loss": 1.8578,
"step": 1
},
{
"epoch": 2e-07,
"grad_norm": 0.6584610939025879,
"learning_rate": 0.0002,
"loss": 1.6344,
"step": 2
},
{
"epoch": 3e-07,
"grad_norm": 0.6526930928230286,
"learning_rate": 0.000199999979999996,
"loss": 1.6128,
"step": 3
},
{
"epoch": 4e-07,
"grad_norm": 0.4974237084388733,
"learning_rate": 0.000199999959999992,
"loss": 1.575,
"step": 4
},
{
"epoch": 5e-07,
"grad_norm": 0.5527949333190918,
"learning_rate": 0.000199999939999988,
"loss": 1.5657,
"step": 5
},
{
"epoch": 6e-07,
"grad_norm": 0.49546897411346436,
"learning_rate": 0.000199999919999984,
"loss": 1.544,
"step": 6
},
{
"epoch": 7e-07,
"grad_norm": 0.5431241989135742,
"learning_rate": 0.00019999989999998,
"loss": 1.6721,
"step": 7
},
{
"epoch": 8e-07,
"grad_norm": 0.5824649930000305,
"learning_rate": 0.000199999879999976,
"loss": 1.8802,
"step": 8
},
{
"epoch": 9e-07,
"grad_norm": 0.5251922607421875,
"learning_rate": 0.00019999985999997203,
"loss": 1.4523,
"step": 9
},
{
"epoch": 1e-06,
"grad_norm": 0.8404814600944519,
"learning_rate": 0.00019999983999996801,
"loss": 1.9409,
"step": 10
},
{
"epoch": 1.1e-06,
"grad_norm": 0.8073492646217346,
"learning_rate": 0.000199999819999964,
"loss": 1.6832,
"step": 11
},
{
"epoch": 1.2e-06,
"grad_norm": 0.7752310633659363,
"learning_rate": 0.00019999979999996002,
"loss": 1.7134,
"step": 12
},
{
"epoch": 1.3e-06,
"grad_norm": 0.6018164157867432,
"learning_rate": 0.000199999779999956,
"loss": 1.4571,
"step": 13
},
{
"epoch": 1.4e-06,
"grad_norm": 2.5733439922332764,
"learning_rate": 0.00019999975999995202,
"loss": 1.8977,
"step": 14
},
{
"epoch": 1.5e-06,
"grad_norm": 1.044441819190979,
"learning_rate": 0.000199999739999948,
"loss": 1.9061,
"step": 15
},
{
"epoch": 1.6e-06,
"grad_norm": 1.0037944316864014,
"learning_rate": 0.000199999719999944,
"loss": 2.0744,
"step": 16
},
{
"epoch": 1.7e-06,
"grad_norm": 0.6178597211837769,
"learning_rate": 0.00019999969999994,
"loss": 1.6495,
"step": 17
},
{
"epoch": 1.8e-06,
"grad_norm": 0.9781101942062378,
"learning_rate": 0.00019999967999993602,
"loss": 2.1303,
"step": 18
},
{
"epoch": 1.9e-06,
"grad_norm": 1.0162076950073242,
"learning_rate": 0.000199999659999932,
"loss": 1.8215,
"step": 19
},
{
"epoch": 2e-06,
"grad_norm": 0.7903466820716858,
"learning_rate": 0.00019999963999992802,
"loss": 1.6967,
"step": 20
},
{
"epoch": 2.1e-06,
"grad_norm": 0.6429514288902283,
"learning_rate": 0.000199999619999924,
"loss": 1.6124,
"step": 21
},
{
"epoch": 2.2e-06,
"grad_norm": 0.7452751398086548,
"learning_rate": 0.00019999959999992002,
"loss": 1.6571,
"step": 22
},
{
"epoch": 2.3e-06,
"grad_norm": 0.7010871767997742,
"learning_rate": 0.000199999579999916,
"loss": 1.6899,
"step": 23
},
{
"epoch": 2.4e-06,
"grad_norm": 0.9412943720817566,
"learning_rate": 0.000199999559999912,
"loss": 1.7723,
"step": 24
},
{
"epoch": 2.5e-06,
"grad_norm": 0.9647365808486938,
"learning_rate": 0.000199999539999908,
"loss": 1.6435,
"step": 25
},
{
"epoch": 2.6e-06,
"grad_norm": 0.4499894976615906,
"learning_rate": 0.000199999519999904,
"loss": 1.5171,
"step": 26
},
{
"epoch": 2.7e-06,
"grad_norm": 0.7129512429237366,
"learning_rate": 0.0001999994999999,
"loss": 1.8055,
"step": 27
},
{
"epoch": 2.8e-06,
"grad_norm": 0.7464190125465393,
"learning_rate": 0.000199999479999896,
"loss": 1.7082,
"step": 28
},
{
"epoch": 2.9e-06,
"grad_norm": 0.8202065825462341,
"learning_rate": 0.000199999459999892,
"loss": 1.6817,
"step": 29
},
{
"epoch": 3e-06,
"grad_norm": 0.8208650350570679,
"learning_rate": 0.00019999943999988803,
"loss": 2.1718,
"step": 30
},
{
"epoch": 3.1e-06,
"grad_norm": 0.6060669422149658,
"learning_rate": 0.00019999941999988401,
"loss": 1.5962,
"step": 31
},
{
"epoch": 3.2e-06,
"grad_norm": 1.2536108493804932,
"learning_rate": 0.00019999939999988,
"loss": 1.6303,
"step": 32
},
{
"epoch": 3.3e-06,
"grad_norm": 0.7007343173027039,
"learning_rate": 0.00019999937999987601,
"loss": 1.7731,
"step": 33
},
{
"epoch": 3.4e-06,
"grad_norm": 0.9848162531852722,
"learning_rate": 0.000199999359999872,
"loss": 2.3275,
"step": 34
},
{
"epoch": 3.5e-06,
"grad_norm": 0.8210628032684326,
"learning_rate": 0.00019999933999986802,
"loss": 1.6669,
"step": 35
},
{
"epoch": 3.6e-06,
"grad_norm": 0.7030662298202515,
"learning_rate": 0.000199999319999864,
"loss": 1.8048,
"step": 36
},
{
"epoch": 3.7e-06,
"grad_norm": 0.7612553238868713,
"learning_rate": 0.00019999929999986,
"loss": 1.7174,
"step": 37
},
{
"epoch": 3.8e-06,
"grad_norm": 0.6783046722412109,
"learning_rate": 0.000199999279999856,
"loss": 1.9285,
"step": 38
},
{
"epoch": 3.9e-06,
"grad_norm": 0.5745652914047241,
"learning_rate": 0.00019999925999985202,
"loss": 1.6992,
"step": 39
},
{
"epoch": 4e-06,
"grad_norm": 0.6361237168312073,
"learning_rate": 0.00019999923999984803,
"loss": 1.9848,
"step": 40
},
{
"epoch": 4.1e-06,
"grad_norm": 0.5350402593612671,
"learning_rate": 0.00019999921999984402,
"loss": 1.3948,
"step": 41
},
{
"epoch": 4.2e-06,
"grad_norm": 0.6011056303977966,
"learning_rate": 0.00019999919999984,
"loss": 1.7458,
"step": 42
},
{
"epoch": 4.3e-06,
"grad_norm": 0.7886531949043274,
"learning_rate": 0.00019999917999983602,
"loss": 1.9866,
"step": 43
},
{
"epoch": 4.4e-06,
"grad_norm": 0.9603063464164734,
"learning_rate": 0.000199999159999832,
"loss": 1.8797,
"step": 44
},
{
"epoch": 4.5e-06,
"grad_norm": 0.5431351661682129,
"learning_rate": 0.000199999139999828,
"loss": 1.524,
"step": 45
},
{
"epoch": 4.6e-06,
"grad_norm": 0.6948148608207703,
"learning_rate": 0.000199999119999824,
"loss": 1.3813,
"step": 46
},
{
"epoch": 4.7e-06,
"grad_norm": 0.7161701917648315,
"learning_rate": 0.00019999909999982,
"loss": 1.5055,
"step": 47
},
{
"epoch": 4.8e-06,
"grad_norm": 0.5436611175537109,
"learning_rate": 0.000199999079999816,
"loss": 1.6389,
"step": 48
},
{
"epoch": 4.9e-06,
"grad_norm": 0.6155924201011658,
"learning_rate": 0.000199999059999812,
"loss": 1.5841,
"step": 49
},
{
"epoch": 5e-06,
"grad_norm": 0.6030799150466919,
"learning_rate": 0.000199999039999808,
"loss": 1.7588,
"step": 50
},
{
"epoch": 5.1e-06,
"grad_norm": 0.5269575715065002,
"learning_rate": 0.00019999901999980403,
"loss": 1.7043,
"step": 51
},
{
"epoch": 5.2e-06,
"grad_norm": 0.4011719822883606,
"learning_rate": 0.0001999989999998,
"loss": 1.5657,
"step": 52
},
{
"epoch": 5.3e-06,
"grad_norm": 0.48203980922698975,
"learning_rate": 0.000199998979999796,
"loss": 1.4895,
"step": 53
},
{
"epoch": 5.4e-06,
"grad_norm": 0.37792739272117615,
"learning_rate": 0.00019999895999979201,
"loss": 1.4533,
"step": 54
},
{
"epoch": 5.5e-06,
"grad_norm": 0.4255346655845642,
"learning_rate": 0.000199998939999788,
"loss": 1.714,
"step": 55
},
{
"epoch": 5.6e-06,
"grad_norm": 0.47425952553749084,
"learning_rate": 0.00019999891999978402,
"loss": 1.746,
"step": 56
},
{
"epoch": 5.7e-06,
"grad_norm": 0.5287901759147644,
"learning_rate": 0.00019999889999978,
"loss": 1.8376,
"step": 57
},
{
"epoch": 5.8e-06,
"grad_norm": 0.3943078815937042,
"learning_rate": 0.000199998879999776,
"loss": 1.4741,
"step": 58
},
{
"epoch": 5.9e-06,
"grad_norm": 0.53409743309021,
"learning_rate": 0.000199998859999772,
"loss": 1.6481,
"step": 59
},
{
"epoch": 6e-06,
"grad_norm": 0.7237704992294312,
"learning_rate": 0.00019999883999976802,
"loss": 1.5401,
"step": 60
},
{
"epoch": 6.1e-06,
"grad_norm": 0.5388357043266296,
"learning_rate": 0.00019999881999976403,
"loss": 1.8668,
"step": 61
},
{
"epoch": 6.2e-06,
"grad_norm": 0.8964146375656128,
"learning_rate": 0.00019999879999976002,
"loss": 1.9183,
"step": 62
},
{
"epoch": 6.3e-06,
"grad_norm": 0.44148045778274536,
"learning_rate": 0.000199998779999756,
"loss": 1.6199,
"step": 63
},
{
"epoch": 6.4e-06,
"grad_norm": 0.4926050305366516,
"learning_rate": 0.00019999875999975202,
"loss": 1.8648,
"step": 64
},
{
"epoch": 6.5e-06,
"grad_norm": 0.45091211795806885,
"learning_rate": 0.000199998739999748,
"loss": 1.9774,
"step": 65
},
{
"epoch": 6.6e-06,
"grad_norm": 0.5831663608551025,
"learning_rate": 0.000199998719999744,
"loss": 1.8903,
"step": 66
},
{
"epoch": 6.7e-06,
"grad_norm": 0.561790406703949,
"learning_rate": 0.00019999869999974,
"loss": 1.8142,
"step": 67
},
{
"epoch": 6.8e-06,
"grad_norm": 0.5125352740287781,
"learning_rate": 0.000199998679999736,
"loss": 1.7682,
"step": 68
},
{
"epoch": 6.9e-06,
"grad_norm": 0.3399497866630554,
"learning_rate": 0.000199998659999732,
"loss": 1.733,
"step": 69
},
{
"epoch": 7e-06,
"grad_norm": 0.6615362763404846,
"learning_rate": 0.00019999863999972802,
"loss": 1.7978,
"step": 70
},
{
"epoch": 7.1e-06,
"grad_norm": 0.4135492146015167,
"learning_rate": 0.000199998619999724,
"loss": 1.3721,
"step": 71
},
{
"epoch": 7.2e-06,
"grad_norm": 0.4415687322616577,
"learning_rate": 0.00019999859999972002,
"loss": 1.9491,
"step": 72
},
{
"epoch": 7.3e-06,
"grad_norm": 0.6152645349502563,
"learning_rate": 0.000199998579999716,
"loss": 1.9222,
"step": 73
},
{
"epoch": 7.4e-06,
"grad_norm": 0.47358042001724243,
"learning_rate": 0.000199998559999712,
"loss": 1.9554,
"step": 74
},
{
"epoch": 7.5e-06,
"grad_norm": 0.5570238828659058,
"learning_rate": 0.000199998539999708,
"loss": 1.8312,
"step": 75
},
{
"epoch": 7.6e-06,
"grad_norm": 0.663379967212677,
"learning_rate": 0.000199998519999704,
"loss": 1.8923,
"step": 76
},
{
"epoch": 7.7e-06,
"grad_norm": 0.614033043384552,
"learning_rate": 0.00019999849999970001,
"loss": 2.2436,
"step": 77
},
{
"epoch": 7.8e-06,
"grad_norm": 0.6010253429412842,
"learning_rate": 0.000199998479999696,
"loss": 1.9391,
"step": 78
},
{
"epoch": 7.9e-06,
"grad_norm": 1.2682664394378662,
"learning_rate": 0.000199998459999692,
"loss": 2.1508,
"step": 79
},
{
"epoch": 8e-06,
"grad_norm": 0.6418437361717224,
"learning_rate": 0.000199998439999688,
"loss": 1.6548,
"step": 80
},
{
"epoch": 8.1e-06,
"grad_norm": 0.4999440610408783,
"learning_rate": 0.00019999841999968402,
"loss": 1.621,
"step": 81
},
{
"epoch": 8.2e-06,
"grad_norm": 0.7112942934036255,
"learning_rate": 0.00019999839999968003,
"loss": 1.8116,
"step": 82
},
{
"epoch": 8.3e-06,
"grad_norm": 0.6015899181365967,
"learning_rate": 0.00019999837999967602,
"loss": 1.8636,
"step": 83
},
{
"epoch": 8.4e-06,
"grad_norm": 0.5443506240844727,
"learning_rate": 0.000199998359999672,
"loss": 1.7018,
"step": 84
},
{
"epoch": 8.5e-06,
"grad_norm": 0.6934968829154968,
"learning_rate": 0.00019999833999966802,
"loss": 1.9206,
"step": 85
},
{
"epoch": 8.6e-06,
"grad_norm": 0.650553822517395,
"learning_rate": 0.000199998319999664,
"loss": 1.8815,
"step": 86
},
{
"epoch": 8.7e-06,
"grad_norm": 0.41978561878204346,
"learning_rate": 0.00019999829999966,
"loss": 1.7672,
"step": 87
},
{
"epoch": 8.8e-06,
"grad_norm": 0.4585091471672058,
"learning_rate": 0.000199998279999656,
"loss": 1.7288,
"step": 88
},
{
"epoch": 8.9e-06,
"grad_norm": 0.8017829656600952,
"learning_rate": 0.000199998259999652,
"loss": 1.903,
"step": 89
},
{
"epoch": 9e-06,
"grad_norm": 0.6802563071250916,
"learning_rate": 0.000199998239999648,
"loss": 1.8178,
"step": 90
},
{
"epoch": 9.1e-06,
"grad_norm": 0.392862468957901,
"learning_rate": 0.00019999821999964402,
"loss": 1.6134,
"step": 91
},
{
"epoch": 9.2e-06,
"grad_norm": 0.4774057865142822,
"learning_rate": 0.00019999819999964,
"loss": 1.8251,
"step": 92
},
{
"epoch": 9.3e-06,
"grad_norm": 0.6621251702308655,
"learning_rate": 0.00019999817999963602,
"loss": 1.868,
"step": 93
},
{
"epoch": 9.4e-06,
"grad_norm": 0.43166202306747437,
"learning_rate": 0.000199998159999632,
"loss": 1.8325,
"step": 94
},
{
"epoch": 9.5e-06,
"grad_norm": 0.4113704562187195,
"learning_rate": 0.00019999813999962803,
"loss": 1.6396,
"step": 95
},
{
"epoch": 9.6e-06,
"grad_norm": 0.6392448544502258,
"learning_rate": 0.000199998119999624,
"loss": 1.8363,
"step": 96
},
{
"epoch": 9.7e-06,
"grad_norm": 0.8083016276359558,
"learning_rate": 0.00019999809999962,
"loss": 2.1199,
"step": 97
},
{
"epoch": 9.8e-06,
"grad_norm": 0.5524376630783081,
"learning_rate": 0.000199998079999616,
"loss": 1.615,
"step": 98
},
{
"epoch": 9.9e-06,
"grad_norm": 0.7018817067146301,
"learning_rate": 0.000199998059999612,
"loss": 1.8829,
"step": 99
},
{
"epoch": 1e-05,
"grad_norm": 0.7241093516349792,
"learning_rate": 0.000199998039999608,
"loss": 1.8137,
"step": 100
},
{
"epoch": 1.01e-05,
"grad_norm": 0.6216440796852112,
"learning_rate": 0.000199998019999604,
"loss": 1.8201,
"step": 101
},
{
"epoch": 1.02e-05,
"grad_norm": 0.6082159876823425,
"learning_rate": 0.00019999799999960002,
"loss": 2.0963,
"step": 102
},
{
"epoch": 1.03e-05,
"grad_norm": 0.5349624156951904,
"learning_rate": 0.00019999797999959603,
"loss": 1.6248,
"step": 103
},
{
"epoch": 1.04e-05,
"grad_norm": 0.7082953453063965,
"learning_rate": 0.00019999795999959202,
"loss": 1.8749,
"step": 104
},
{
"epoch": 1.05e-05,
"grad_norm": 0.5373268127441406,
"learning_rate": 0.000199997939999588,
"loss": 2.0572,
"step": 105
},
{
"epoch": 1.06e-05,
"grad_norm": 0.7016363739967346,
"learning_rate": 0.00019999791999958402,
"loss": 1.8279,
"step": 106
},
{
"epoch": 1.07e-05,
"grad_norm": 0.6221460700035095,
"learning_rate": 0.00019999789999958,
"loss": 2.1174,
"step": 107
},
{
"epoch": 1.08e-05,
"grad_norm": 0.6501404047012329,
"learning_rate": 0.000199997879999576,
"loss": 1.9551,
"step": 108
},
{
"epoch": 1.09e-05,
"grad_norm": 0.9046558141708374,
"learning_rate": 0.000199997859999572,
"loss": 1.7935,
"step": 109
},
{
"epoch": 1.1e-05,
"grad_norm": 0.6367177963256836,
"learning_rate": 0.000199997839999568,
"loss": 1.8313,
"step": 110
},
{
"epoch": 1.11e-05,
"grad_norm": 0.5869525671005249,
"learning_rate": 0.000199997819999564,
"loss": 1.6034,
"step": 111
},
{
"epoch": 1.12e-05,
"grad_norm": 0.5856000781059265,
"learning_rate": 0.00019999779999956002,
"loss": 1.7663,
"step": 112
},
{
"epoch": 1.13e-05,
"grad_norm": 0.5486036539077759,
"learning_rate": 0.000199997779999556,
"loss": 1.7331,
"step": 113
},
{
"epoch": 1.14e-05,
"grad_norm": 0.5195130109786987,
"learning_rate": 0.00019999775999955202,
"loss": 1.5346,
"step": 114
},
{
"epoch": 1.15e-05,
"grad_norm": 0.8694611191749573,
"learning_rate": 0.000199997739999548,
"loss": 1.8483,
"step": 115
},
{
"epoch": 1.16e-05,
"grad_norm": 0.5632051825523376,
"learning_rate": 0.00019999771999954402,
"loss": 1.6481,
"step": 116
},
{
"epoch": 1.17e-05,
"grad_norm": 0.5927804708480835,
"learning_rate": 0.00019999769999954,
"loss": 1.8621,
"step": 117
},
{
"epoch": 1.18e-05,
"grad_norm": 0.5696079730987549,
"learning_rate": 0.000199997679999536,
"loss": 1.8153,
"step": 118
},
{
"epoch": 1.19e-05,
"grad_norm": 0.478731632232666,
"learning_rate": 0.000199997659999532,
"loss": 1.6562,
"step": 119
},
{
"epoch": 1.2e-05,
"grad_norm": 0.46577003598213196,
"learning_rate": 0.000199997639999528,
"loss": 1.4231,
"step": 120
},
{
"epoch": 1.21e-05,
"grad_norm": 0.4282207489013672,
"learning_rate": 0.000199997619999524,
"loss": 1.6877,
"step": 121
},
{
"epoch": 1.22e-05,
"grad_norm": 0.9066228270530701,
"learning_rate": 0.00019999759999952,
"loss": 1.84,
"step": 122
},
{
"epoch": 1.23e-05,
"grad_norm": 0.5513137578964233,
"learning_rate": 0.00019999757999951602,
"loss": 1.7048,
"step": 123
},
{
"epoch": 1.24e-05,
"grad_norm": 0.472076416015625,
"learning_rate": 0.00019999755999951203,
"loss": 1.7235,
"step": 124
},
{
"epoch": 1.25e-05,
"grad_norm": 0.4584066569805145,
"learning_rate": 0.00019999753999950802,
"loss": 1.784,
"step": 125
},
{
"epoch": 1.26e-05,
"grad_norm": 0.6278989315032959,
"learning_rate": 0.000199997519999504,
"loss": 1.4582,
"step": 126
},
{
"epoch": 1.27e-05,
"grad_norm": 0.5184012651443481,
"learning_rate": 0.00019999749999950002,
"loss": 1.9244,
"step": 127
},
{
"epoch": 1.28e-05,
"grad_norm": 0.5831007957458496,
"learning_rate": 0.000199997479999496,
"loss": 1.5218,
"step": 128
},
{
"epoch": 1.29e-05,
"grad_norm": 0.40411731600761414,
"learning_rate": 0.000199997459999492,
"loss": 1.3143,
"step": 129
},
{
"epoch": 1.3e-05,
"grad_norm": 0.596390426158905,
"learning_rate": 0.000199997439999488,
"loss": 1.693,
"step": 130
},
{
"epoch": 1.31e-05,
"grad_norm": 0.7649241089820862,
"learning_rate": 0.000199997419999484,
"loss": 2.0808,
"step": 131
},
{
"epoch": 1.32e-05,
"grad_norm": 0.7482056021690369,
"learning_rate": 0.00019999739999948,
"loss": 1.9266,
"step": 132
},
{
"epoch": 1.33e-05,
"grad_norm": 0.5742607116699219,
"learning_rate": 0.00019999737999947602,
"loss": 1.9895,
"step": 133
},
{
"epoch": 1.34e-05,
"grad_norm": 0.5262996554374695,
"learning_rate": 0.000199997359999472,
"loss": 2.0739,
"step": 134
},
{
"epoch": 1.35e-05,
"grad_norm": 0.5321303009986877,
"learning_rate": 0.00019999733999946802,
"loss": 2.2675,
"step": 135
},
{
"epoch": 1.36e-05,
"grad_norm": 0.37136197090148926,
"learning_rate": 0.000199997319999464,
"loss": 1.5382,
"step": 136
},
{
"epoch": 1.37e-05,
"grad_norm": 0.3858487010002136,
"learning_rate": 0.00019999729999946002,
"loss": 1.6801,
"step": 137
},
{
"epoch": 1.38e-05,
"grad_norm": 0.4758774936199188,
"learning_rate": 0.000199997279999456,
"loss": 1.6721,
"step": 138
},
{
"epoch": 1.39e-05,
"grad_norm": 0.4500921666622162,
"learning_rate": 0.000199997259999452,
"loss": 1.5869,
"step": 139
},
{
"epoch": 1.4e-05,
"grad_norm": 0.8483019471168518,
"learning_rate": 0.000199997239999448,
"loss": 1.8523,
"step": 140
},
{
"epoch": 1.41e-05,
"grad_norm": 0.5695202946662903,
"learning_rate": 0.000199997219999444,
"loss": 1.7305,
"step": 141
},
{
"epoch": 1.42e-05,
"grad_norm": 0.44855567812919617,
"learning_rate": 0.00019999719999943999,
"loss": 1.5317,
"step": 142
},
{
"epoch": 1.43e-05,
"grad_norm": 0.406058132648468,
"learning_rate": 0.000199997179999436,
"loss": 1.5769,
"step": 143
},
{
"epoch": 1.44e-05,
"grad_norm": 0.4420010447502136,
"learning_rate": 0.00019999715999943201,
"loss": 1.5033,
"step": 144
},
{
"epoch": 1.45e-05,
"grad_norm": 0.47307848930358887,
"learning_rate": 0.00019999713999942803,
"loss": 1.3258,
"step": 145
},
{
"epoch": 1.46e-05,
"grad_norm": 0.3859032690525055,
"learning_rate": 0.00019999711999942402,
"loss": 1.4687,
"step": 146
},
{
"epoch": 1.47e-05,
"grad_norm": 0.416414737701416,
"learning_rate": 0.00019999709999942,
"loss": 1.5518,
"step": 147
},
{
"epoch": 1.48e-05,
"grad_norm": 0.433167040348053,
"learning_rate": 0.00019999707999941602,
"loss": 1.9201,
"step": 148
},
{
"epoch": 1.49e-05,
"grad_norm": 0.4099215865135193,
"learning_rate": 0.000199997059999412,
"loss": 1.4254,
"step": 149
},
{
"epoch": 1.5e-05,
"grad_norm": 0.5384925007820129,
"learning_rate": 0.00019999703999940802,
"loss": 1.8626,
"step": 150
},
{
"epoch": 1.51e-05,
"grad_norm": 0.37440967559814453,
"learning_rate": 0.000199997019999404,
"loss": 1.5261,
"step": 151
},
{
"epoch": 1.52e-05,
"grad_norm": 0.44074127078056335,
"learning_rate": 0.0001999969999994,
"loss": 1.7549,
"step": 152
},
{
"epoch": 1.53e-05,
"grad_norm": 0.546076774597168,
"learning_rate": 0.000199996979999396,
"loss": 1.8012,
"step": 153
},
{
"epoch": 1.54e-05,
"grad_norm": 0.4423200488090515,
"learning_rate": 0.00019999695999939202,
"loss": 1.68,
"step": 154
},
{
"epoch": 1.55e-05,
"grad_norm": 0.41590616106987,
"learning_rate": 0.000199996939999388,
"loss": 1.6104,
"step": 155
},
{
"epoch": 1.56e-05,
"grad_norm": 0.3735498785972595,
"learning_rate": 0.00019999691999938402,
"loss": 1.3661,
"step": 156
},
{
"epoch": 1.57e-05,
"grad_norm": 0.49392393231391907,
"learning_rate": 0.00019999689999938,
"loss": 2.1605,
"step": 157
},
{
"epoch": 1.58e-05,
"grad_norm": 0.6458868384361267,
"learning_rate": 0.00019999687999937602,
"loss": 1.9343,
"step": 158
},
{
"epoch": 1.59e-05,
"grad_norm": 0.4576835334300995,
"learning_rate": 0.000199996859999372,
"loss": 1.9118,
"step": 159
},
{
"epoch": 1.6e-05,
"grad_norm": 0.5450042486190796,
"learning_rate": 0.000199996839999368,
"loss": 2.2885,
"step": 160
},
{
"epoch": 1.61e-05,
"grad_norm": 0.6322759985923767,
"learning_rate": 0.000199996819999364,
"loss": 1.9177,
"step": 161
},
{
"epoch": 1.62e-05,
"grad_norm": 0.9141877889633179,
"learning_rate": 0.00019999679999936,
"loss": 2.0124,
"step": 162
},
{
"epoch": 1.63e-05,
"grad_norm": 0.7055004835128784,
"learning_rate": 0.000199996779999356,
"loss": 2.1958,
"step": 163
},
{
"epoch": 1.64e-05,
"grad_norm": 0.4048287868499756,
"learning_rate": 0.000199996759999352,
"loss": 1.6366,
"step": 164
},
{
"epoch": 1.65e-05,
"grad_norm": 0.5366451144218445,
"learning_rate": 0.000199996739999348,
"loss": 1.9019,
"step": 165
},
{
"epoch": 1.66e-05,
"grad_norm": 0.45625025033950806,
"learning_rate": 0.00019999671999934403,
"loss": 1.669,
"step": 166
},
{
"epoch": 1.67e-05,
"grad_norm": 0.4085591733455658,
"learning_rate": 0.00019999669999934001,
"loss": 1.456,
"step": 167
},
{
"epoch": 1.68e-05,
"grad_norm": 0.46340250968933105,
"learning_rate": 0.000199996679999336,
"loss": 1.4021,
"step": 168
},
{
"epoch": 1.69e-05,
"grad_norm": 0.40409213304519653,
"learning_rate": 0.00019999665999933202,
"loss": 1.715,
"step": 169
},
{
"epoch": 1.7e-05,
"grad_norm": 0.3457713723182678,
"learning_rate": 0.000199996639999328,
"loss": 1.5341,
"step": 170
},
{
"epoch": 1.71e-05,
"grad_norm": 0.5006639361381531,
"learning_rate": 0.00019999661999932402,
"loss": 1.9549,
"step": 171
},
{
"epoch": 1.72e-05,
"grad_norm": 0.48425284028053284,
"learning_rate": 0.00019999659999932,
"loss": 1.6894,
"step": 172
},
{
"epoch": 1.73e-05,
"grad_norm": 0.4876086115837097,
"learning_rate": 0.000199996579999316,
"loss": 1.6386,
"step": 173
},
{
"epoch": 1.74e-05,
"grad_norm": 0.5767083168029785,
"learning_rate": 0.000199996559999312,
"loss": 1.94,
"step": 174
},
{
"epoch": 1.75e-05,
"grad_norm": 1.863520622253418,
"learning_rate": 0.00019999653999930802,
"loss": 1.6294,
"step": 175
},
{
"epoch": 1.76e-05,
"grad_norm": 0.7980679869651794,
"learning_rate": 0.000199996519999304,
"loss": 1.8792,
"step": 176
},
{
"epoch": 1.77e-05,
"grad_norm": 0.3906029164791107,
"learning_rate": 0.00019999649999930002,
"loss": 1.7023,
"step": 177
},
{
"epoch": 1.78e-05,
"grad_norm": 0.4237198531627655,
"learning_rate": 0.000199996479999296,
"loss": 1.5053,
"step": 178
},
{
"epoch": 1.79e-05,
"grad_norm": 0.4453522562980652,
"learning_rate": 0.00019999645999929202,
"loss": 1.663,
"step": 179
},
{
"epoch": 1.8e-05,
"grad_norm": 0.46418821811676025,
"learning_rate": 0.000199996439999288,
"loss": 1.8424,
"step": 180
},
{
"epoch": 1.81e-05,
"grad_norm": 0.4990067481994629,
"learning_rate": 0.000199996419999284,
"loss": 1.8821,
"step": 181
},
{
"epoch": 1.82e-05,
"grad_norm": 0.49845150113105774,
"learning_rate": 0.00019999639999928,
"loss": 1.7954,
"step": 182
},
{
"epoch": 1.83e-05,
"grad_norm": 0.41052332520484924,
"learning_rate": 0.000199996379999276,
"loss": 1.9909,
"step": 183
},
{
"epoch": 1.84e-05,
"grad_norm": 0.6790204644203186,
"learning_rate": 0.000199996359999272,
"loss": 2.0227,
"step": 184
},
{
"epoch": 1.85e-05,
"grad_norm": 0.559575617313385,
"learning_rate": 0.00019999633999926803,
"loss": 1.8845,
"step": 185
},
{
"epoch": 1.86e-05,
"grad_norm": 0.43018248677253723,
"learning_rate": 0.000199996319999264,
"loss": 1.3121,
"step": 186
},
{
"epoch": 1.87e-05,
"grad_norm": 0.49696406722068787,
"learning_rate": 0.00019999629999926003,
"loss": 1.9635,
"step": 187
},
{
"epoch": 1.88e-05,
"grad_norm": 0.5610666275024414,
"learning_rate": 0.00019999627999925601,
"loss": 1.5446,
"step": 188
},
{
"epoch": 1.89e-05,
"grad_norm": 0.3752838373184204,
"learning_rate": 0.000199996259999252,
"loss": 1.5967,
"step": 189
},
{
"epoch": 1.9e-05,
"grad_norm": 0.5093499422073364,
"learning_rate": 0.00019999623999924801,
"loss": 1.7468,
"step": 190
},
{
"epoch": 1.91e-05,
"grad_norm": 0.5607715845108032,
"learning_rate": 0.000199996219999244,
"loss": 1.7748,
"step": 191
},
{
"epoch": 1.92e-05,
"grad_norm": 0.5026123523712158,
"learning_rate": 0.00019999619999924002,
"loss": 1.6418,
"step": 192
},
{
"epoch": 1.93e-05,
"grad_norm": 0.9300629496574402,
"learning_rate": 0.000199996179999236,
"loss": 1.9604,
"step": 193
},
{
"epoch": 1.94e-05,
"grad_norm": 0.4116390347480774,
"learning_rate": 0.000199996159999232,
"loss": 1.7273,
"step": 194
},
{
"epoch": 1.95e-05,
"grad_norm": 0.7066006660461426,
"learning_rate": 0.000199996139999228,
"loss": 1.8577,
"step": 195
},
{
"epoch": 1.96e-05,
"grad_norm": 0.3522622585296631,
"learning_rate": 0.00019999611999922402,
"loss": 1.6297,
"step": 196
},
{
"epoch": 1.97e-05,
"grad_norm": 0.5760959386825562,
"learning_rate": 0.00019999609999922,
"loss": 1.7793,
"step": 197
},
{
"epoch": 1.98e-05,
"grad_norm": 0.5428193211555481,
"learning_rate": 0.00019999607999921602,
"loss": 1.6331,
"step": 198
},
{
"epoch": 1.99e-05,
"grad_norm": 0.6514245867729187,
"learning_rate": 0.000199996059999212,
"loss": 2.0183,
"step": 199
},
{
"epoch": 2e-05,
"grad_norm": 0.8064345121383667,
"learning_rate": 0.00019999603999920802,
"loss": 2.1794,
"step": 200
},
{
"epoch": 2.01e-05,
"grad_norm": 0.9006212949752808,
"learning_rate": 0.000199996019999204,
"loss": 2.4607,
"step": 201
},
{
"epoch": 2.02e-05,
"grad_norm": 0.5918715000152588,
"learning_rate": 0.0001999959999992,
"loss": 2.1049,
"step": 202
},
{
"epoch": 2.03e-05,
"grad_norm": 0.6828485131263733,
"learning_rate": 0.000199995979999196,
"loss": 2.5226,
"step": 203
},
{
"epoch": 2.04e-05,
"grad_norm": 0.6373446583747864,
"learning_rate": 0.000199995959999192,
"loss": 2.4249,
"step": 204
},
{
"epoch": 2.05e-05,
"grad_norm": 0.7441200613975525,
"learning_rate": 0.000199995939999188,
"loss": 2.4339,
"step": 205
},
{
"epoch": 2.06e-05,
"grad_norm": 0.8439544439315796,
"learning_rate": 0.00019999591999918402,
"loss": 1.9476,
"step": 206
},
{
"epoch": 2.07e-05,
"grad_norm": 0.48914116621017456,
"learning_rate": 0.00019999589999918,
"loss": 1.5295,
"step": 207
},
{
"epoch": 2.08e-05,
"grad_norm": 0.4834679961204529,
"learning_rate": 0.00019999587999917603,
"loss": 1.6255,
"step": 208
},
{
"epoch": 2.09e-05,
"grad_norm": 0.5508498549461365,
"learning_rate": 0.000199995859999172,
"loss": 1.551,
"step": 209
},
{
"epoch": 2.1e-05,
"grad_norm": 0.5926271677017212,
"learning_rate": 0.000199995839999168,
"loss": 1.699,
"step": 210
},
{
"epoch": 2.11e-05,
"grad_norm": 0.7590788006782532,
"learning_rate": 0.00019999581999916401,
"loss": 1.763,
"step": 211
},
{
"epoch": 2.12e-05,
"grad_norm": 0.4198217988014221,
"learning_rate": 0.00019999579999916,
"loss": 1.7187,
"step": 212
},
{
"epoch": 2.13e-05,
"grad_norm": 0.4620197117328644,
"learning_rate": 0.00019999577999915602,
"loss": 1.5359,
"step": 213
},
{
"epoch": 2.14e-05,
"grad_norm": 0.44593310356140137,
"learning_rate": 0.000199995759999152,
"loss": 1.6022,
"step": 214
},
{
"epoch": 2.15e-05,
"grad_norm": 0.3533499538898468,
"learning_rate": 0.000199995739999148,
"loss": 1.6444,
"step": 215
},
{
"epoch": 2.16e-05,
"grad_norm": 0.543205976486206,
"learning_rate": 0.000199995719999144,
"loss": 1.6438,
"step": 216
},
{
"epoch": 2.17e-05,
"grad_norm": 0.703269898891449,
"learning_rate": 0.00019999569999914002,
"loss": 1.8974,
"step": 217
},
{
"epoch": 2.18e-05,
"grad_norm": 0.8203062415122986,
"learning_rate": 0.000199995679999136,
"loss": 1.6767,
"step": 218
},
{
"epoch": 2.19e-05,
"grad_norm": 0.7072187066078186,
"learning_rate": 0.00019999565999913202,
"loss": 2.0079,
"step": 219
},
{
"epoch": 2.2e-05,
"grad_norm": 0.5376666188240051,
"learning_rate": 0.000199995639999128,
"loss": 1.9399,
"step": 220
},
{
"epoch": 2.21e-05,
"grad_norm": 0.6364527940750122,
"learning_rate": 0.00019999561999912402,
"loss": 1.8431,
"step": 221
},
{
"epoch": 2.22e-05,
"grad_norm": 0.6043407917022705,
"learning_rate": 0.00019999559999912,
"loss": 1.8711,
"step": 222
},
{
"epoch": 2.23e-05,
"grad_norm": 0.7499778866767883,
"learning_rate": 0.000199995579999116,
"loss": 1.6933,
"step": 223
},
{
"epoch": 2.24e-05,
"grad_norm": 0.4400179088115692,
"learning_rate": 0.000199995559999112,
"loss": 1.5813,
"step": 224
},
{
"epoch": 2.25e-05,
"grad_norm": 0.5211970806121826,
"learning_rate": 0.000199995539999108,
"loss": 1.6015,
"step": 225
},
{
"epoch": 2.26e-05,
"grad_norm": 0.43454399704933167,
"learning_rate": 0.000199995519999104,
"loss": 1.5191,
"step": 226
},
{
"epoch": 2.27e-05,
"grad_norm": 0.4297579824924469,
"learning_rate": 0.00019999549999910002,
"loss": 1.6515,
"step": 227
},
{
"epoch": 2.28e-05,
"grad_norm": 0.5561093091964722,
"learning_rate": 0.000199995479999096,
"loss": 1.7018,
"step": 228
},
{
"epoch": 2.29e-05,
"grad_norm": 0.6111759543418884,
"learning_rate": 0.00019999545999909203,
"loss": 1.4623,
"step": 229
},
{
"epoch": 2.3e-05,
"grad_norm": 0.7900436520576477,
"learning_rate": 0.000199995439999088,
"loss": 1.9266,
"step": 230
},
{
"epoch": 2.31e-05,
"grad_norm": 0.6187627911567688,
"learning_rate": 0.000199995419999084,
"loss": 2.226,
"step": 231
},
{
"epoch": 2.32e-05,
"grad_norm": 0.5691580176353455,
"learning_rate": 0.00019999539999908,
"loss": 1.9283,
"step": 232
},
{
"epoch": 2.33e-05,
"grad_norm": 0.5886045694351196,
"learning_rate": 0.000199995379999076,
"loss": 2.121,
"step": 233
},
{
"epoch": 2.34e-05,
"grad_norm": 0.6269411444664001,
"learning_rate": 0.00019999535999907201,
"loss": 2.0709,
"step": 234
},
{
"epoch": 2.35e-05,
"grad_norm": 0.49698230624198914,
"learning_rate": 0.000199995339999068,
"loss": 2.0256,
"step": 235
},
{
"epoch": 2.36e-05,
"grad_norm": 0.39077070355415344,
"learning_rate": 0.000199995319999064,
"loss": 1.659,
"step": 236
},
{
"epoch": 2.37e-05,
"grad_norm": 0.5250963568687439,
"learning_rate": 0.00019999529999906,
"loss": 1.6387,
"step": 237
},
{
"epoch": 2.38e-05,
"grad_norm": 0.6859326362609863,
"learning_rate": 0.00019999527999905602,
"loss": 1.6328,
"step": 238
},
{
"epoch": 2.39e-05,
"grad_norm": 0.5848241448402405,
"learning_rate": 0.00019999525999905203,
"loss": 1.6842,
"step": 239
},
{
"epoch": 2.4e-05,
"grad_norm": 0.5376678109169006,
"learning_rate": 0.00019999523999904802,
"loss": 1.9328,
"step": 240
},
{
"epoch": 2.41e-05,
"grad_norm": 0.4557065963745117,
"learning_rate": 0.000199995219999044,
"loss": 1.58,
"step": 241
},
{
"epoch": 2.42e-05,
"grad_norm": 0.47288385033607483,
"learning_rate": 0.00019999519999904002,
"loss": 1.6694,
"step": 242
},
{
"epoch": 2.43e-05,
"grad_norm": 0.5947932600975037,
"learning_rate": 0.000199995179999036,
"loss": 1.8366,
"step": 243
},
{
"epoch": 2.44e-05,
"grad_norm": 0.6255089044570923,
"learning_rate": 0.000199995159999032,
"loss": 1.903,
"step": 244
},
{
"epoch": 2.45e-05,
"grad_norm": 0.5417413711547852,
"learning_rate": 0.000199995139999028,
"loss": 1.8091,
"step": 245
},
{
"epoch": 2.46e-05,
"grad_norm": 0.8538123369216919,
"learning_rate": 0.000199995119999024,
"loss": 2.2769,
"step": 246
},
{
"epoch": 2.47e-05,
"grad_norm": 0.5323597192764282,
"learning_rate": 0.00019999509999902,
"loss": 2.0454,
"step": 247
},
{
"epoch": 2.48e-05,
"grad_norm": 0.5675183534622192,
"learning_rate": 0.00019999507999901602,
"loss": 2.0094,
"step": 248
},
{
"epoch": 2.49e-05,
"grad_norm": 0.4544978439807892,
"learning_rate": 0.000199995059999012,
"loss": 1.5711,
"step": 249
},
{
"epoch": 2.5e-05,
"grad_norm": 0.5580434799194336,
"learning_rate": 0.00019999503999900802,
"loss": 1.6857,
"step": 250
},
{
"epoch": 2.51e-05,
"grad_norm": 0.42421337962150574,
"learning_rate": 0.000199995019999004,
"loss": 1.9094,
"step": 251
},
{
"epoch": 2.52e-05,
"grad_norm": 0.44504305720329285,
"learning_rate": 0.000199994999999,
"loss": 1.9155,
"step": 252
},
{
"epoch": 2.53e-05,
"grad_norm": 0.6089363694190979,
"learning_rate": 0.000199994979998996,
"loss": 2.1466,
"step": 253
},
{
"epoch": 2.54e-05,
"grad_norm": 0.7319867014884949,
"learning_rate": 0.000199994959998992,
"loss": 1.8415,
"step": 254
},
{
"epoch": 2.55e-05,
"grad_norm": 0.5788962841033936,
"learning_rate": 0.00019999493999898801,
"loss": 1.723,
"step": 255
},
{
"epoch": 2.56e-05,
"grad_norm": 0.45342084765434265,
"learning_rate": 0.000199994919998984,
"loss": 1.8237,
"step": 256
},
{
"epoch": 2.57e-05,
"grad_norm": 0.7103757858276367,
"learning_rate": 0.00019999489999898,
"loss": 2.0791,
"step": 257
},
{
"epoch": 2.58e-05,
"grad_norm": 0.4426691234111786,
"learning_rate": 0.000199994879998976,
"loss": 1.8145,
"step": 258
},
{
"epoch": 2.59e-05,
"grad_norm": 0.43514561653137207,
"learning_rate": 0.00019999485999897202,
"loss": 1.8114,
"step": 259
},
{
"epoch": 2.6e-05,
"grad_norm": 0.6240082383155823,
"learning_rate": 0.00019999483999896803,
"loss": 1.8466,
"step": 260
},
{
"epoch": 2.61e-05,
"grad_norm": 0.7914692163467407,
"learning_rate": 0.00019999481999896402,
"loss": 2.0891,
"step": 261
},
{
"epoch": 2.62e-05,
"grad_norm": 0.9623270034790039,
"learning_rate": 0.00019999479999896,
"loss": 2.587,
"step": 262
},
{
"epoch": 2.63e-05,
"grad_norm": 0.7898182272911072,
"learning_rate": 0.00019999477999895602,
"loss": 2.3259,
"step": 263
},
{
"epoch": 2.64e-05,
"grad_norm": 0.7883146405220032,
"learning_rate": 0.000199994759998952,
"loss": 2.2352,
"step": 264
},
{
"epoch": 2.65e-05,
"grad_norm": 0.6351277828216553,
"learning_rate": 0.000199994739998948,
"loss": 2.2981,
"step": 265
},
{
"epoch": 2.66e-05,
"grad_norm": 0.5381727814674377,
"learning_rate": 0.000199994719998944,
"loss": 2.6191,
"step": 266
},
{
"epoch": 2.67e-05,
"grad_norm": 0.5770056247711182,
"learning_rate": 0.00019999469999894,
"loss": 2.284,
"step": 267
},
{
"epoch": 2.68e-05,
"grad_norm": 0.4391525387763977,
"learning_rate": 0.000199994679998936,
"loss": 2.2969,
"step": 268
},
{
"epoch": 2.69e-05,
"grad_norm": 0.4670884907245636,
"learning_rate": 0.00019999465999893202,
"loss": 2.521,
"step": 269
},
{
"epoch": 2.7e-05,
"grad_norm": 0.5235867500305176,
"learning_rate": 0.000199994639998928,
"loss": 2.4748,
"step": 270
},
{
"epoch": 2.71e-05,
"grad_norm": 0.6667335629463196,
"learning_rate": 0.00019999461999892402,
"loss": 2.3679,
"step": 271
},
{
"epoch": 2.72e-05,
"grad_norm": 0.3543158769607544,
"learning_rate": 0.00019999459999892,
"loss": 2.2826,
"step": 272
},
{
"epoch": 2.73e-05,
"grad_norm": 0.5188414454460144,
"learning_rate": 0.000199994579998916,
"loss": 1.7956,
"step": 273
},
{
"epoch": 2.74e-05,
"grad_norm": 0.8030866980552673,
"learning_rate": 0.000199994559998912,
"loss": 2.1592,
"step": 274
},
{
"epoch": 2.75e-05,
"grad_norm": 0.4300987720489502,
"learning_rate": 0.000199994539998908,
"loss": 1.6752,
"step": 275
},
{
"epoch": 2.76e-05,
"grad_norm": 0.4736029803752899,
"learning_rate": 0.000199994519998904,
"loss": 1.7071,
"step": 276
},
{
"epoch": 2.77e-05,
"grad_norm": 0.5338713526725769,
"learning_rate": 0.0001999944999989,
"loss": 1.9712,
"step": 277
},
{
"epoch": 2.78e-05,
"grad_norm": 0.4187946617603302,
"learning_rate": 0.00019999447999889601,
"loss": 1.4615,
"step": 278
},
{
"epoch": 2.79e-05,
"grad_norm": 0.46406739950180054,
"learning_rate": 0.000199994459998892,
"loss": 1.6076,
"step": 279
},
{
"epoch": 2.8e-05,
"grad_norm": 0.5043464303016663,
"learning_rate": 0.00019999443999888802,
"loss": 1.5358,
"step": 280
},
{
"epoch": 2.81e-05,
"grad_norm": 0.48431020975112915,
"learning_rate": 0.00019999441999888403,
"loss": 1.6123,
"step": 281
},
{
"epoch": 2.82e-05,
"grad_norm": 0.49477505683898926,
"learning_rate": 0.00019999439999888002,
"loss": 1.7142,
"step": 282
},
{
"epoch": 2.83e-05,
"grad_norm": 0.5439501404762268,
"learning_rate": 0.000199994379998876,
"loss": 1.7144,
"step": 283
},
{
"epoch": 2.84e-05,
"grad_norm": 0.9285573363304138,
"learning_rate": 0.00019999435999887202,
"loss": 1.8587,
"step": 284
},
{
"epoch": 2.85e-05,
"grad_norm": 0.5427992343902588,
"learning_rate": 0.000199994339998868,
"loss": 1.6225,
"step": 285
},
{
"epoch": 2.86e-05,
"grad_norm": 0.5165106058120728,
"learning_rate": 0.000199994319998864,
"loss": 1.6448,
"step": 286
},
{
"epoch": 2.87e-05,
"grad_norm": 0.4794739782810211,
"learning_rate": 0.00019999429999886,
"loss": 1.5984,
"step": 287
},
{
"epoch": 2.88e-05,
"grad_norm": 0.5224581956863403,
"learning_rate": 0.000199994279998856,
"loss": 1.6081,
"step": 288
},
{
"epoch": 2.89e-05,
"grad_norm": 0.3491154909133911,
"learning_rate": 0.000199994259998852,
"loss": 1.6606,
"step": 289
},
{
"epoch": 2.9e-05,
"grad_norm": 0.34161049127578735,
"learning_rate": 0.00019999423999884802,
"loss": 1.5991,
"step": 290
},
{
"epoch": 2.91e-05,
"grad_norm": 0.38104647397994995,
"learning_rate": 0.000199994219998844,
"loss": 1.6891,
"step": 291
},
{
"epoch": 2.92e-05,
"grad_norm": 0.4426082372665405,
"learning_rate": 0.00019999419999884002,
"loss": 1.7436,
"step": 292
},
{
"epoch": 2.93e-05,
"grad_norm": 0.4086962938308716,
"learning_rate": 0.000199994179998836,
"loss": 1.4521,
"step": 293
},
{
"epoch": 2.94e-05,
"grad_norm": 0.36440199613571167,
"learning_rate": 0.00019999415999883202,
"loss": 1.3778,
"step": 294
},
{
"epoch": 2.95e-05,
"grad_norm": 0.36885395646095276,
"learning_rate": 0.000199994139998828,
"loss": 1.5455,
"step": 295
},
{
"epoch": 2.96e-05,
"grad_norm": 0.4338560700416565,
"learning_rate": 0.000199994119998824,
"loss": 1.7012,
"step": 296
},
{
"epoch": 2.97e-05,
"grad_norm": 0.33639904856681824,
"learning_rate": 0.00019999409999882,
"loss": 1.6867,
"step": 297
},
{
"epoch": 2.98e-05,
"grad_norm": 0.4115736484527588,
"learning_rate": 0.000199994079998816,
"loss": 1.7148,
"step": 298
},
{
"epoch": 2.99e-05,
"grad_norm": 0.3798239231109619,
"learning_rate": 0.000199994059998812,
"loss": 1.6553,
"step": 299
},
{
"epoch": 3e-05,
"grad_norm": 0.4842982590198517,
"learning_rate": 0.000199994039998808,
"loss": 1.8699,
"step": 300
},
{
"epoch": 3.01e-05,
"grad_norm": 0.4711601436138153,
"learning_rate": 0.00019999401999880401,
"loss": 1.8628,
"step": 301
},
{
"epoch": 3.02e-05,
"grad_norm": 0.4568040370941162,
"learning_rate": 0.00019999399999880003,
"loss": 1.8388,
"step": 302
},
{
"epoch": 3.03e-05,
"grad_norm": 0.7439306378364563,
"learning_rate": 0.00019999397999879602,
"loss": 2.163,
"step": 303
},
{
"epoch": 3.04e-05,
"grad_norm": 0.5208331942558289,
"learning_rate": 0.000199993959998792,
"loss": 1.9997,
"step": 304
},
{
"epoch": 3.05e-05,
"grad_norm": 0.5737571120262146,
"learning_rate": 0.00019999393999878802,
"loss": 1.883,
"step": 305
},
{
"epoch": 3.06e-05,
"grad_norm": 0.39593684673309326,
"learning_rate": 0.000199993919998784,
"loss": 1.4802,
"step": 306
},
{
"epoch": 3.07e-05,
"grad_norm": 0.8919790983200073,
"learning_rate": 0.00019999389999878,
"loss": 2.4384,
"step": 307
},
{
"epoch": 3.08e-05,
"grad_norm": 0.6580540537834167,
"learning_rate": 0.000199993879998776,
"loss": 1.8365,
"step": 308
},
{
"epoch": 3.09e-05,
"grad_norm": 0.44777825474739075,
"learning_rate": 0.000199993859998772,
"loss": 1.9201,
"step": 309
},
{
"epoch": 3.1e-05,
"grad_norm": 0.4586222171783447,
"learning_rate": 0.000199993839998768,
"loss": 1.6156,
"step": 310
},
{
"epoch": 3.11e-05,
"grad_norm": 0.44421055912971497,
"learning_rate": 0.00019999381999876402,
"loss": 1.6511,
"step": 311
},
{
"epoch": 3.12e-05,
"grad_norm": 0.5122581124305725,
"learning_rate": 0.00019999379999876,
"loss": 1.8611,
"step": 312
},
{
"epoch": 3.13e-05,
"grad_norm": 0.5003589987754822,
"learning_rate": 0.00019999377999875602,
"loss": 1.867,
"step": 313
},
{
"epoch": 3.14e-05,
"grad_norm": 0.44899260997772217,
"learning_rate": 0.000199993759998752,
"loss": 1.5448,
"step": 314
},
{
"epoch": 3.15e-05,
"grad_norm": 0.5642510652542114,
"learning_rate": 0.00019999373999874802,
"loss": 1.4981,
"step": 315
},
{
"epoch": 3.16e-05,
"grad_norm": 0.383941113948822,
"learning_rate": 0.000199993719998744,
"loss": 1.5839,
"step": 316
},
{
"epoch": 3.17e-05,
"grad_norm": 0.363727331161499,
"learning_rate": 0.00019999369999874,
"loss": 1.7339,
"step": 317
},
{
"epoch": 3.18e-05,
"grad_norm": 0.33593347668647766,
"learning_rate": 0.000199993679998736,
"loss": 1.5518,
"step": 318
},
{
"epoch": 3.19e-05,
"grad_norm": 0.38592690229415894,
"learning_rate": 0.000199993659998732,
"loss": 1.5354,
"step": 319
},
{
"epoch": 3.2e-05,
"grad_norm": 0.32959306240081787,
"learning_rate": 0.000199993639998728,
"loss": 1.4933,
"step": 320
},
{
"epoch": 3.21e-05,
"grad_norm": 0.46583858132362366,
"learning_rate": 0.00019999361999872403,
"loss": 1.7337,
"step": 321
},
{
"epoch": 3.22e-05,
"grad_norm": 0.6772357821464539,
"learning_rate": 0.00019999359999872001,
"loss": 1.8055,
"step": 322
},
{
"epoch": 3.23e-05,
"grad_norm": 0.45480331778526306,
"learning_rate": 0.00019999357999871603,
"loss": 1.9445,
"step": 323
},
{
"epoch": 3.24e-05,
"grad_norm": 0.4692111313343048,
"learning_rate": 0.00019999355999871201,
"loss": 1.933,
"step": 324
},
{
"epoch": 3.25e-05,
"grad_norm": 0.41295114159584045,
"learning_rate": 0.000199993539998708,
"loss": 1.7842,
"step": 325
},
{
"epoch": 3.26e-05,
"grad_norm": 0.42672646045684814,
"learning_rate": 0.00019999351999870402,
"loss": 1.7351,
"step": 326
},
{
"epoch": 3.27e-05,
"grad_norm": 0.6787707209587097,
"learning_rate": 0.0001999934999987,
"loss": 1.7051,
"step": 327
},
{
"epoch": 3.28e-05,
"grad_norm": 0.503491222858429,
"learning_rate": 0.000199993479998696,
"loss": 1.5995,
"step": 328
},
{
"epoch": 3.29e-05,
"grad_norm": 0.45077043771743774,
"learning_rate": 0.000199993459998692,
"loss": 1.7496,
"step": 329
},
{
"epoch": 3.3e-05,
"grad_norm": 0.520510733127594,
"learning_rate": 0.000199993439998688,
"loss": 1.9546,
"step": 330
},
{
"epoch": 3.31e-05,
"grad_norm": 0.5362284183502197,
"learning_rate": 0.000199993419998684,
"loss": 1.6676,
"step": 331
},
{
"epoch": 3.32e-05,
"grad_norm": 0.31078314781188965,
"learning_rate": 0.00019999339999868002,
"loss": 1.5173,
"step": 332
},
{
"epoch": 3.33e-05,
"grad_norm": 0.4486532509326935,
"learning_rate": 0.000199993379998676,
"loss": 1.6869,
"step": 333
},
{
"epoch": 3.34e-05,
"grad_norm": 0.44027021527290344,
"learning_rate": 0.00019999335999867202,
"loss": 1.6374,
"step": 334
},
{
"epoch": 3.35e-05,
"grad_norm": 0.41162291169166565,
"learning_rate": 0.000199993339998668,
"loss": 1.5459,
"step": 335
},
{
"epoch": 3.36e-05,
"grad_norm": 0.3559807240962982,
"learning_rate": 0.00019999331999866402,
"loss": 1.442,
"step": 336
},
{
"epoch": 3.37e-05,
"grad_norm": 0.4056732654571533,
"learning_rate": 0.00019999329999866,
"loss": 1.7412,
"step": 337
},
{
"epoch": 3.38e-05,
"grad_norm": 0.4403332769870758,
"learning_rate": 0.000199993279998656,
"loss": 1.741,
"step": 338
},
{
"epoch": 3.39e-05,
"grad_norm": 0.4582686126232147,
"learning_rate": 0.000199993259998652,
"loss": 1.6075,
"step": 339
},
{
"epoch": 3.4e-05,
"grad_norm": 0.3991680443286896,
"learning_rate": 0.000199993239998648,
"loss": 1.6045,
"step": 340
},
{
"epoch": 3.41e-05,
"grad_norm": 0.586495041847229,
"learning_rate": 0.000199993219998644,
"loss": 1.7699,
"step": 341
},
{
"epoch": 3.42e-05,
"grad_norm": 0.5652266144752502,
"learning_rate": 0.00019999319999864003,
"loss": 1.461,
"step": 342
},
{
"epoch": 3.43e-05,
"grad_norm": 0.6062789559364319,
"learning_rate": 0.000199993179998636,
"loss": 1.715,
"step": 343
},
{
"epoch": 3.44e-05,
"grad_norm": 0.9058301448822021,
"learning_rate": 0.00019999315999863203,
"loss": 2.1137,
"step": 344
},
{
"epoch": 3.45e-05,
"grad_norm": 0.46400541067123413,
"learning_rate": 0.00019999313999862801,
"loss": 1.4979,
"step": 345
},
{
"epoch": 3.46e-05,
"grad_norm": 0.3700037896633148,
"learning_rate": 0.000199993119998624,
"loss": 1.5047,
"step": 346
},
{
"epoch": 3.47e-05,
"grad_norm": 0.412404328584671,
"learning_rate": 0.00019999309999862002,
"loss": 1.7056,
"step": 347
},
{
"epoch": 3.48e-05,
"grad_norm": 0.5440813899040222,
"learning_rate": 0.000199993079998616,
"loss": 2.0077,
"step": 348
},
{
"epoch": 3.49e-05,
"grad_norm": 0.6518210768699646,
"learning_rate": 0.00019999305999861202,
"loss": 1.5044,
"step": 349
},
{
"epoch": 3.5e-05,
"grad_norm": 0.4178387224674225,
"learning_rate": 0.000199993039998608,
"loss": 1.423,
"step": 350
},
{
"epoch": 3.51e-05,
"grad_norm": 0.3856907784938812,
"learning_rate": 0.000199993019998604,
"loss": 1.4126,
"step": 351
},
{
"epoch": 3.52e-05,
"grad_norm": 0.5440114140510559,
"learning_rate": 0.0001999929999986,
"loss": 1.7295,
"step": 352
},
{
"epoch": 3.53e-05,
"grad_norm": 0.5093787312507629,
"learning_rate": 0.00019999297999859602,
"loss": 1.7646,
"step": 353
},
{
"epoch": 3.54e-05,
"grad_norm": 0.4566439986228943,
"learning_rate": 0.000199992959998592,
"loss": 2.0013,
"step": 354
},
{
"epoch": 3.55e-05,
"grad_norm": 0.5688943862915039,
"learning_rate": 0.00019999293999858802,
"loss": 1.7562,
"step": 355
},
{
"epoch": 3.56e-05,
"grad_norm": 0.8719181418418884,
"learning_rate": 0.000199992919998584,
"loss": 2.0401,
"step": 356
},
{
"epoch": 3.57e-05,
"grad_norm": 1.0500630140304565,
"learning_rate": 0.00019999289999858002,
"loss": 1.8875,
"step": 357
},
{
"epoch": 3.58e-05,
"grad_norm": 1.3272708654403687,
"learning_rate": 0.000199992879998576,
"loss": 1.9512,
"step": 358
},
{
"epoch": 3.59e-05,
"grad_norm": 1.5037835836410522,
"learning_rate": 0.000199992859998572,
"loss": 2.3478,
"step": 359
},
{
"epoch": 3.6e-05,
"grad_norm": 0.420180082321167,
"learning_rate": 0.000199992839998568,
"loss": 1.4584,
"step": 360
},
{
"epoch": 3.61e-05,
"grad_norm": 0.5174924731254578,
"learning_rate": 0.000199992819998564,
"loss": 1.6607,
"step": 361
},
{
"epoch": 3.62e-05,
"grad_norm": 0.6072699427604675,
"learning_rate": 0.00019999279999856,
"loss": 1.6475,
"step": 362
},
{
"epoch": 3.63e-05,
"grad_norm": 0.48657774925231934,
"learning_rate": 0.00019999277999855602,
"loss": 1.709,
"step": 363
},
{
"epoch": 3.64e-05,
"grad_norm": 0.5174648761749268,
"learning_rate": 0.000199992759998552,
"loss": 2.0207,
"step": 364
},
{
"epoch": 3.65e-05,
"grad_norm": 0.4686170220375061,
"learning_rate": 0.00019999273999854803,
"loss": 1.8379,
"step": 365
},
{
"epoch": 3.66e-05,
"grad_norm": 0.5319675803184509,
"learning_rate": 0.000199992719998544,
"loss": 1.7686,
"step": 366
},
{
"epoch": 3.67e-05,
"grad_norm": 0.5765033960342407,
"learning_rate": 0.00019999269999854,
"loss": 1.7377,
"step": 367
},
{
"epoch": 3.68e-05,
"grad_norm": 0.6665096879005432,
"learning_rate": 0.00019999267999853601,
"loss": 1.812,
"step": 368
},
{
"epoch": 3.69e-05,
"grad_norm": 0.46088990569114685,
"learning_rate": 0.000199992659998532,
"loss": 1.9321,
"step": 369
},
{
"epoch": 3.7e-05,
"grad_norm": 0.523918867111206,
"learning_rate": 0.00019999263999852802,
"loss": 1.5505,
"step": 370
},
{
"epoch": 3.71e-05,
"grad_norm": 0.4749225378036499,
"learning_rate": 0.000199992619998524,
"loss": 1.857,
"step": 371
},
{
"epoch": 3.72e-05,
"grad_norm": 0.47414734959602356,
"learning_rate": 0.00019999259999852,
"loss": 1.6442,
"step": 372
},
{
"epoch": 3.73e-05,
"grad_norm": 1.0153627395629883,
"learning_rate": 0.000199992579998516,
"loss": 2.0091,
"step": 373
},
{
"epoch": 3.74e-05,
"grad_norm": 0.43973875045776367,
"learning_rate": 0.00019999255999851202,
"loss": 1.3698,
"step": 374
},
{
"epoch": 3.75e-05,
"grad_norm": 0.657660961151123,
"learning_rate": 0.000199992539998508,
"loss": 1.9903,
"step": 375
},
{
"epoch": 3.76e-05,
"grad_norm": 0.44873693585395813,
"learning_rate": 0.00019999251999850402,
"loss": 1.5217,
"step": 376
},
{
"epoch": 3.77e-05,
"grad_norm": 0.5388695001602173,
"learning_rate": 0.0001999924999985,
"loss": 1.827,
"step": 377
},
{
"epoch": 3.78e-05,
"grad_norm": 0.540785551071167,
"learning_rate": 0.00019999247999849602,
"loss": 1.742,
"step": 378
},
{
"epoch": 3.79e-05,
"grad_norm": 0.7967843413352966,
"learning_rate": 0.000199992459998492,
"loss": 1.9857,
"step": 379
},
{
"epoch": 3.8e-05,
"grad_norm": 0.5176407694816589,
"learning_rate": 0.000199992439998488,
"loss": 1.9847,
"step": 380
},
{
"epoch": 3.81e-05,
"grad_norm": 0.5027253031730652,
"learning_rate": 0.000199992419998484,
"loss": 1.9902,
"step": 381
},
{
"epoch": 3.82e-05,
"grad_norm": 0.5308915376663208,
"learning_rate": 0.00019999239999848,
"loss": 2.0047,
"step": 382
},
{
"epoch": 3.83e-05,
"grad_norm": 0.5136021971702576,
"learning_rate": 0.000199992379998476,
"loss": 1.7114,
"step": 383
},
{
"epoch": 3.84e-05,
"grad_norm": 0.4838967025279999,
"learning_rate": 0.00019999235999847202,
"loss": 1.6508,
"step": 384
},
{
"epoch": 3.85e-05,
"grad_norm": 0.5344421863555908,
"learning_rate": 0.000199992339998468,
"loss": 1.5912,
"step": 385
},
{
"epoch": 3.86e-05,
"grad_norm": 0.46266481280326843,
"learning_rate": 0.00019999231999846403,
"loss": 1.6242,
"step": 386
},
{
"epoch": 3.87e-05,
"grad_norm": 0.5061294436454773,
"learning_rate": 0.00019999229999846,
"loss": 1.761,
"step": 387
},
{
"epoch": 3.88e-05,
"grad_norm": 0.5162082314491272,
"learning_rate": 0.000199992279998456,
"loss": 1.9192,
"step": 388
},
{
"epoch": 3.89e-05,
"grad_norm": 0.527224063873291,
"learning_rate": 0.000199992259998452,
"loss": 1.77,
"step": 389
},
{
"epoch": 3.9e-05,
"grad_norm": 0.5944364070892334,
"learning_rate": 0.000199992239998448,
"loss": 1.9018,
"step": 390
},
{
"epoch": 3.91e-05,
"grad_norm": 0.4631739556789398,
"learning_rate": 0.00019999221999844401,
"loss": 1.7068,
"step": 391
},
{
"epoch": 3.92e-05,
"grad_norm": 0.5427398085594177,
"learning_rate": 0.00019999219999844,
"loss": 1.9163,
"step": 392
},
{
"epoch": 3.93e-05,
"grad_norm": 0.384926974773407,
"learning_rate": 0.000199992179998436,
"loss": 1.642,
"step": 393
},
{
"epoch": 3.94e-05,
"grad_norm": 0.5050017237663269,
"learning_rate": 0.000199992159998432,
"loss": 1.5671,
"step": 394
},
{
"epoch": 3.95e-05,
"grad_norm": 0.37314701080322266,
"learning_rate": 0.00019999213999842802,
"loss": 1.575,
"step": 395
},
{
"epoch": 3.96e-05,
"grad_norm": 0.4661215543746948,
"learning_rate": 0.000199992119998424,
"loss": 2.0183,
"step": 396
},
{
"epoch": 3.97e-05,
"grad_norm": 0.5529941916465759,
"learning_rate": 0.00019999209999842002,
"loss": 1.7248,
"step": 397
},
{
"epoch": 3.98e-05,
"grad_norm": 0.5972985029220581,
"learning_rate": 0.000199992079998416,
"loss": 2.0641,
"step": 398
},
{
"epoch": 3.99e-05,
"grad_norm": 0.5913353562355042,
"learning_rate": 0.00019999205999841202,
"loss": 1.944,
"step": 399
},
{
"epoch": 4e-05,
"grad_norm": 0.3237490653991699,
"learning_rate": 0.000199992039998408,
"loss": 1.5714,
"step": 400
},
{
"epoch": 4.01e-05,
"grad_norm": 0.36518394947052,
"learning_rate": 0.000199992019998404,
"loss": 1.4421,
"step": 401
},
{
"epoch": 4.02e-05,
"grad_norm": 0.45599955320358276,
"learning_rate": 0.0001999919999984,
"loss": 1.502,
"step": 402
},
{
"epoch": 4.03e-05,
"grad_norm": 0.5273477435112,
"learning_rate": 0.000199991979998396,
"loss": 1.7517,
"step": 403
},
{
"epoch": 4.04e-05,
"grad_norm": 0.8294169306755066,
"learning_rate": 0.000199991959998392,
"loss": 1.8472,
"step": 404
},
{
"epoch": 4.05e-05,
"grad_norm": 0.4928412437438965,
"learning_rate": 0.00019999193999838802,
"loss": 1.7974,
"step": 405
},
{
"epoch": 4.06e-05,
"grad_norm": 0.5215554237365723,
"learning_rate": 0.000199991919998384,
"loss": 1.9876,
"step": 406
},
{
"epoch": 4.07e-05,
"grad_norm": 0.47070685029029846,
"learning_rate": 0.00019999189999838002,
"loss": 1.7061,
"step": 407
},
{
"epoch": 4.08e-05,
"grad_norm": 0.3851932883262634,
"learning_rate": 0.000199991879998376,
"loss": 1.7855,
"step": 408
},
{
"epoch": 4.09e-05,
"grad_norm": 0.3901963233947754,
"learning_rate": 0.000199991859998372,
"loss": 1.7579,
"step": 409
},
{
"epoch": 4.1e-05,
"grad_norm": 0.42012375593185425,
"learning_rate": 0.000199991839998368,
"loss": 1.7395,
"step": 410
},
{
"epoch": 4.11e-05,
"grad_norm": 0.43087467551231384,
"learning_rate": 0.000199991819998364,
"loss": 1.8467,
"step": 411
},
{
"epoch": 4.12e-05,
"grad_norm": 0.36192286014556885,
"learning_rate": 0.00019999179999836001,
"loss": 1.5076,
"step": 412
},
{
"epoch": 4.13e-05,
"grad_norm": 0.442259818315506,
"learning_rate": 0.000199991779998356,
"loss": 1.7476,
"step": 413
},
{
"epoch": 4.14e-05,
"grad_norm": 0.513245701789856,
"learning_rate": 0.00019999175999835202,
"loss": 1.9794,
"step": 414
},
{
"epoch": 4.15e-05,
"grad_norm": 0.4502357840538025,
"learning_rate": 0.000199991739998348,
"loss": 1.5555,
"step": 415
},
{
"epoch": 4.16e-05,
"grad_norm": 0.40502068400382996,
"learning_rate": 0.00019999171999834402,
"loss": 1.7587,
"step": 416
},
{
"epoch": 4.17e-05,
"grad_norm": 0.4134822487831116,
"learning_rate": 0.00019999169999834,
"loss": 1.4992,
"step": 417
},
{
"epoch": 4.18e-05,
"grad_norm": 0.5051032900810242,
"learning_rate": 0.00019999167999833602,
"loss": 1.7131,
"step": 418
},
{
"epoch": 4.19e-05,
"grad_norm": 0.4787388741970062,
"learning_rate": 0.000199991659998332,
"loss": 1.5989,
"step": 419
},
{
"epoch": 4.2e-05,
"grad_norm": 0.7127793431282043,
"learning_rate": 0.00019999163999832802,
"loss": 1.7414,
"step": 420
},
{
"epoch": 4.21e-05,
"grad_norm": 0.5218737125396729,
"learning_rate": 0.000199991619998324,
"loss": 1.7214,
"step": 421
},
{
"epoch": 4.22e-05,
"grad_norm": 0.49243026971817017,
"learning_rate": 0.00019999159999832,
"loss": 1.5933,
"step": 422
},
{
"epoch": 4.23e-05,
"grad_norm": 0.5783970355987549,
"learning_rate": 0.000199991579998316,
"loss": 1.7337,
"step": 423
},
{
"epoch": 4.24e-05,
"grad_norm": 0.520795464515686,
"learning_rate": 0.000199991559998312,
"loss": 1.784,
"step": 424
},
{
"epoch": 4.25e-05,
"grad_norm": 0.6008274555206299,
"learning_rate": 0.000199991539998308,
"loss": 1.8683,
"step": 425
},
{
"epoch": 4.26e-05,
"grad_norm": 0.3708783686161041,
"learning_rate": 0.00019999151999830402,
"loss": 1.3375,
"step": 426
},
{
"epoch": 4.27e-05,
"grad_norm": 0.7564141154289246,
"learning_rate": 0.0001999914999983,
"loss": 1.7041,
"step": 427
},
{
"epoch": 4.28e-05,
"grad_norm": 0.45716744661331177,
"learning_rate": 0.00019999147999829602,
"loss": 1.4543,
"step": 428
},
{
"epoch": 4.29e-05,
"grad_norm": 0.5028320550918579,
"learning_rate": 0.000199991459998292,
"loss": 1.5647,
"step": 429
},
{
"epoch": 4.3e-05,
"grad_norm": 0.44311386346817017,
"learning_rate": 0.000199991439998288,
"loss": 1.4007,
"step": 430
},
{
"epoch": 4.31e-05,
"grad_norm": 0.36131751537323,
"learning_rate": 0.000199991419998284,
"loss": 1.5601,
"step": 431
},
{
"epoch": 4.32e-05,
"grad_norm": 0.46772250533103943,
"learning_rate": 0.00019999139999828,
"loss": 1.7792,
"step": 432
},
{
"epoch": 4.33e-05,
"grad_norm": 0.47078242897987366,
"learning_rate": 0.000199991379998276,
"loss": 1.395,
"step": 433
},
{
"epoch": 4.34e-05,
"grad_norm": 0.6773596405982971,
"learning_rate": 0.000199991359998272,
"loss": 1.975,
"step": 434
},
{
"epoch": 4.35e-05,
"grad_norm": 0.495720237493515,
"learning_rate": 0.00019999133999826801,
"loss": 1.7673,
"step": 435
},
{
"epoch": 4.36e-05,
"grad_norm": 0.5966281294822693,
"learning_rate": 0.00019999131999826403,
"loss": 1.7842,
"step": 436
},
{
"epoch": 4.37e-05,
"grad_norm": 0.4410156011581421,
"learning_rate": 0.00019999129999826002,
"loss": 1.6935,
"step": 437
},
{
"epoch": 4.38e-05,
"grad_norm": 0.5413877964019775,
"learning_rate": 0.000199991279998256,
"loss": 1.8937,
"step": 438
},
{
"epoch": 4.39e-05,
"grad_norm": 0.5021318197250366,
"learning_rate": 0.00019999125999825202,
"loss": 1.9982,
"step": 439
},
{
"epoch": 4.4e-05,
"grad_norm": 0.5858921408653259,
"learning_rate": 0.000199991239998248,
"loss": 1.9119,
"step": 440
},
{
"epoch": 4.41e-05,
"grad_norm": 0.4997514486312866,
"learning_rate": 0.00019999121999824402,
"loss": 1.6518,
"step": 441
},
{
"epoch": 4.42e-05,
"grad_norm": 0.5777234435081482,
"learning_rate": 0.00019999119999824,
"loss": 1.817,
"step": 442
},
{
"epoch": 4.43e-05,
"grad_norm": 0.4161803126335144,
"learning_rate": 0.000199991179998236,
"loss": 1.8506,
"step": 443
},
{
"epoch": 4.44e-05,
"grad_norm": 0.7515570521354675,
"learning_rate": 0.000199991159998232,
"loss": 1.5829,
"step": 444
},
{
"epoch": 4.45e-05,
"grad_norm": 0.4300539493560791,
"learning_rate": 0.000199991139998228,
"loss": 1.688,
"step": 445
},
{
"epoch": 4.46e-05,
"grad_norm": 0.4746590554714203,
"learning_rate": 0.000199991119998224,
"loss": 1.5601,
"step": 446
},
{
"epoch": 4.47e-05,
"grad_norm": 0.47511962056159973,
"learning_rate": 0.00019999109999822002,
"loss": 1.8611,
"step": 447
},
{
"epoch": 4.48e-05,
"grad_norm": 0.6381555199623108,
"learning_rate": 0.000199991079998216,
"loss": 1.7195,
"step": 448
},
{
"epoch": 4.49e-05,
"grad_norm": 0.4788546562194824,
"learning_rate": 0.00019999105999821202,
"loss": 1.629,
"step": 449
},
{
"epoch": 4.5e-05,
"grad_norm": 0.6829419732093811,
"learning_rate": 0.000199991039998208,
"loss": 1.8349,
"step": 450
},
{
"epoch": 4.51e-05,
"grad_norm": 0.43366751074790955,
"learning_rate": 0.000199991019998204,
"loss": 1.896,
"step": 451
},
{
"epoch": 4.52e-05,
"grad_norm": 0.4899962842464447,
"learning_rate": 0.0001999909999982,
"loss": 1.6235,
"step": 452
},
{
"epoch": 4.53e-05,
"grad_norm": 0.3090633749961853,
"learning_rate": 0.000199990979998196,
"loss": 1.5011,
"step": 453
},
{
"epoch": 4.54e-05,
"grad_norm": 0.5154569745063782,
"learning_rate": 0.000199990959998192,
"loss": 1.5772,
"step": 454
},
{
"epoch": 4.55e-05,
"grad_norm": 0.4062784016132355,
"learning_rate": 0.000199990939998188,
"loss": 1.8449,
"step": 455
},
{
"epoch": 4.56e-05,
"grad_norm": 0.5207509994506836,
"learning_rate": 0.000199990919998184,
"loss": 1.4527,
"step": 456
},
{
"epoch": 4.57e-05,
"grad_norm": 0.52181476354599,
"learning_rate": 0.00019999089999818003,
"loss": 1.4873,
"step": 457
},
{
"epoch": 4.58e-05,
"grad_norm": 0.46334174275398254,
"learning_rate": 0.00019999087999817601,
"loss": 1.7768,
"step": 458
},
{
"epoch": 4.59e-05,
"grad_norm": 0.4093489646911621,
"learning_rate": 0.00019999085999817203,
"loss": 1.7924,
"step": 459
},
{
"epoch": 4.6e-05,
"grad_norm": 0.4195363521575928,
"learning_rate": 0.00019999083999816802,
"loss": 1.7812,
"step": 460
},
{
"epoch": 4.61e-05,
"grad_norm": 0.5941916704177856,
"learning_rate": 0.000199990819998164,
"loss": 2.058,
"step": 461
},
{
"epoch": 4.62e-05,
"grad_norm": 0.4739120304584503,
"learning_rate": 0.00019999079999816002,
"loss": 1.8039,
"step": 462
},
{
"epoch": 4.63e-05,
"grad_norm": 0.653116762638092,
"learning_rate": 0.000199990779998156,
"loss": 2.2873,
"step": 463
},
{
"epoch": 4.64e-05,
"grad_norm": 0.7166501879692078,
"learning_rate": 0.000199990759998152,
"loss": 2.5231,
"step": 464
},
{
"epoch": 4.65e-05,
"grad_norm": 0.7239299416542053,
"learning_rate": 0.000199990739998148,
"loss": 2.39,
"step": 465
},
{
"epoch": 4.66e-05,
"grad_norm": 0.4797520339488983,
"learning_rate": 0.000199990719998144,
"loss": 1.947,
"step": 466
},
{
"epoch": 4.67e-05,
"grad_norm": 0.4120495021343231,
"learning_rate": 0.00019999069999814,
"loss": 1.7155,
"step": 467
},
{
"epoch": 4.68e-05,
"grad_norm": 0.44701510667800903,
"learning_rate": 0.00019999067999813602,
"loss": 1.8377,
"step": 468
},
{
"epoch": 4.69e-05,
"grad_norm": 0.3953056335449219,
"learning_rate": 0.000199990659998132,
"loss": 1.6366,
"step": 469
},
{
"epoch": 4.7e-05,
"grad_norm": 0.5183536410331726,
"learning_rate": 0.00019999063999812802,
"loss": 1.4199,
"step": 470
},
{
"epoch": 4.71e-05,
"grad_norm": 0.5872283577919006,
"learning_rate": 0.000199990619998124,
"loss": 1.7674,
"step": 471
},
{
"epoch": 4.72e-05,
"grad_norm": 0.5009239315986633,
"learning_rate": 0.00019999059999812,
"loss": 1.9779,
"step": 472
},
{
"epoch": 4.73e-05,
"grad_norm": 0.5798078179359436,
"learning_rate": 0.000199990579998116,
"loss": 1.9061,
"step": 473
},
{
"epoch": 4.74e-05,
"grad_norm": 0.5016769170761108,
"learning_rate": 0.000199990559998112,
"loss": 1.5461,
"step": 474
},
{
"epoch": 4.75e-05,
"grad_norm": 0.7677909731864929,
"learning_rate": 0.000199990539998108,
"loss": 1.9737,
"step": 475
},
{
"epoch": 4.76e-05,
"grad_norm": 0.7846194505691528,
"learning_rate": 0.000199990519998104,
"loss": 1.7413,
"step": 476
},
{
"epoch": 4.77e-05,
"grad_norm": 0.4583892524242401,
"learning_rate": 0.0001999904999981,
"loss": 1.7405,
"step": 477
},
{
"epoch": 4.78e-05,
"grad_norm": 0.7079900503158569,
"learning_rate": 0.00019999047999809603,
"loss": 2.1876,
"step": 478
},
{
"epoch": 4.79e-05,
"grad_norm": 0.4428488314151764,
"learning_rate": 0.00019999045999809201,
"loss": 1.7024,
"step": 479
},
{
"epoch": 4.8e-05,
"grad_norm": 0.6131700873374939,
"learning_rate": 0.00019999043999808803,
"loss": 1.8886,
"step": 480
},
{
"epoch": 4.81e-05,
"grad_norm": 0.44003427028656006,
"learning_rate": 0.00019999041999808401,
"loss": 1.9647,
"step": 481
},
{
"epoch": 4.82e-05,
"grad_norm": 0.63946533203125,
"learning_rate": 0.00019999039999808,
"loss": 1.8513,
"step": 482
},
{
"epoch": 4.83e-05,
"grad_norm": 0.559574544429779,
"learning_rate": 0.00019999037999807602,
"loss": 1.6653,
"step": 483
},
{
"epoch": 4.84e-05,
"grad_norm": 0.46520495414733887,
"learning_rate": 0.000199990359998072,
"loss": 1.7211,
"step": 484
},
{
"epoch": 4.85e-05,
"grad_norm": 0.38688644766807556,
"learning_rate": 0.000199990339998068,
"loss": 1.5398,
"step": 485
},
{
"epoch": 4.86e-05,
"grad_norm": 0.6553531289100647,
"learning_rate": 0.000199990319998064,
"loss": 1.6478,
"step": 486
},
{
"epoch": 4.87e-05,
"grad_norm": 0.47065457701683044,
"learning_rate": 0.00019999029999806,
"loss": 1.7527,
"step": 487
},
{
"epoch": 4.88e-05,
"grad_norm": 0.3773510456085205,
"learning_rate": 0.000199990279998056,
"loss": 1.6088,
"step": 488
},
{
"epoch": 4.89e-05,
"grad_norm": 0.5014131665229797,
"learning_rate": 0.00019999025999805202,
"loss": 1.8127,
"step": 489
},
{
"epoch": 4.9e-05,
"grad_norm": 0.4235721528530121,
"learning_rate": 0.000199990239998048,
"loss": 1.6993,
"step": 490
},
{
"epoch": 4.91e-05,
"grad_norm": 0.4877326190471649,
"learning_rate": 0.00019999021999804402,
"loss": 1.9371,
"step": 491
},
{
"epoch": 4.92e-05,
"grad_norm": 0.4448818564414978,
"learning_rate": 0.00019999019999804,
"loss": 1.6973,
"step": 492
},
{
"epoch": 4.93e-05,
"grad_norm": 0.7788869142532349,
"learning_rate": 0.00019999017999803602,
"loss": 1.6327,
"step": 493
},
{
"epoch": 4.94e-05,
"grad_norm": 0.612545907497406,
"learning_rate": 0.000199990159998032,
"loss": 1.9169,
"step": 494
},
{
"epoch": 4.95e-05,
"grad_norm": 0.5722140669822693,
"learning_rate": 0.000199990139998028,
"loss": 1.6466,
"step": 495
},
{
"epoch": 4.96e-05,
"grad_norm": 0.5448535680770874,
"learning_rate": 0.000199990119998024,
"loss": 1.7833,
"step": 496
},
{
"epoch": 4.97e-05,
"grad_norm": 0.3963779807090759,
"learning_rate": 0.00019999009999802,
"loss": 1.8032,
"step": 497
},
{
"epoch": 4.98e-05,
"grad_norm": 0.34618857502937317,
"learning_rate": 0.000199990079998016,
"loss": 1.494,
"step": 498
},
{
"epoch": 4.99e-05,
"grad_norm": 0.3954801857471466,
"learning_rate": 0.00019999005999801203,
"loss": 1.4898,
"step": 499
},
{
"epoch": 5e-05,
"grad_norm": 0.40954142808914185,
"learning_rate": 0.000199990039998008,
"loss": 1.9802,
"step": 500
},
{
"epoch": 5.01e-05,
"grad_norm": 0.45834434032440186,
"learning_rate": 0.00019999001999800403,
"loss": 1.6985,
"step": 501
},
{
"epoch": 5.02e-05,
"grad_norm": 0.43741267919540405,
"learning_rate": 0.00019998999999800001,
"loss": 1.482,
"step": 502
},
{
"epoch": 5.03e-05,
"grad_norm": 0.4866531491279602,
"learning_rate": 0.000199989979997996,
"loss": 1.8653,
"step": 503
},
{
"epoch": 5.04e-05,
"grad_norm": 0.48899972438812256,
"learning_rate": 0.00019998995999799202,
"loss": 1.3746,
"step": 504
},
{
"epoch": 5.05e-05,
"grad_norm": 0.575947105884552,
"learning_rate": 0.000199989939997988,
"loss": 1.8655,
"step": 505
},
{
"epoch": 5.06e-05,
"grad_norm": 0.47064775228500366,
"learning_rate": 0.000199989919997984,
"loss": 1.6095,
"step": 506
},
{
"epoch": 5.07e-05,
"grad_norm": 0.38576826453208923,
"learning_rate": 0.00019998989999798,
"loss": 1.7178,
"step": 507
},
{
"epoch": 5.08e-05,
"grad_norm": 0.454904168844223,
"learning_rate": 0.000199989879997976,
"loss": 1.6923,
"step": 508
},
{
"epoch": 5.09e-05,
"grad_norm": 0.49647441506385803,
"learning_rate": 0.000199989859997972,
"loss": 1.554,
"step": 509
},
{
"epoch": 5.1e-05,
"grad_norm": 0.4623638689517975,
"learning_rate": 0.00019998983999796802,
"loss": 1.3255,
"step": 510
},
{
"epoch": 5.11e-05,
"grad_norm": 0.4332015812397003,
"learning_rate": 0.000199989819997964,
"loss": 1.5285,
"step": 511
},
{
"epoch": 5.12e-05,
"grad_norm": 0.444069504737854,
"learning_rate": 0.00019998979999796002,
"loss": 1.3793,
"step": 512
},
{
"epoch": 5.13e-05,
"grad_norm": 0.4705671966075897,
"learning_rate": 0.000199989779997956,
"loss": 1.3994,
"step": 513
},
{
"epoch": 5.14e-05,
"grad_norm": 0.38149315118789673,
"learning_rate": 0.00019998975999795202,
"loss": 1.7004,
"step": 514
},
{
"epoch": 5.15e-05,
"grad_norm": 0.42478975653648376,
"learning_rate": 0.000199989739997948,
"loss": 1.912,
"step": 515
},
{
"epoch": 5.16e-05,
"grad_norm": 0.49340444803237915,
"learning_rate": 0.000199989719997944,
"loss": 1.7983,
"step": 516
},
{
"epoch": 5.17e-05,
"grad_norm": 0.5461348295211792,
"learning_rate": 0.00019998969999794,
"loss": 1.8409,
"step": 517
},
{
"epoch": 5.18e-05,
"grad_norm": 0.4065783619880676,
"learning_rate": 0.000199989679997936,
"loss": 1.6614,
"step": 518
},
{
"epoch": 5.19e-05,
"grad_norm": 0.562455415725708,
"learning_rate": 0.000199989659997932,
"loss": 1.6747,
"step": 519
},
{
"epoch": 5.2e-05,
"grad_norm": 0.5079939365386963,
"learning_rate": 0.00019998963999792803,
"loss": 1.6947,
"step": 520
},
{
"epoch": 5.21e-05,
"grad_norm": 0.5305806398391724,
"learning_rate": 0.000199989619997924,
"loss": 1.7818,
"step": 521
},
{
"epoch": 5.22e-05,
"grad_norm": 0.5093033313751221,
"learning_rate": 0.00019998959999792003,
"loss": 1.6435,
"step": 522
},
{
"epoch": 5.23e-05,
"grad_norm": 0.5598170757293701,
"learning_rate": 0.000199989579997916,
"loss": 1.5383,
"step": 523
},
{
"epoch": 5.24e-05,
"grad_norm": 0.3814893066883087,
"learning_rate": 0.000199989559997912,
"loss": 1.7024,
"step": 524
},
{
"epoch": 5.25e-05,
"grad_norm": 0.5464659333229065,
"learning_rate": 0.00019998953999790801,
"loss": 1.6669,
"step": 525
},
{
"epoch": 5.26e-05,
"grad_norm": 0.4405536651611328,
"learning_rate": 0.000199989519997904,
"loss": 1.7269,
"step": 526
},
{
"epoch": 5.27e-05,
"grad_norm": 0.46354395151138306,
"learning_rate": 0.0001999894999979,
"loss": 1.8811,
"step": 527
},
{
"epoch": 5.28e-05,
"grad_norm": 0.4659210443496704,
"learning_rate": 0.000199989479997896,
"loss": 1.7979,
"step": 528
},
{
"epoch": 5.29e-05,
"grad_norm": 0.41204634308815,
"learning_rate": 0.00019998945999789202,
"loss": 1.6806,
"step": 529
},
{
"epoch": 5.3e-05,
"grad_norm": 0.9784532189369202,
"learning_rate": 0.000199989439997888,
"loss": 2.2657,
"step": 530
},
{
"epoch": 5.31e-05,
"grad_norm": 0.4294663667678833,
"learning_rate": 0.00019998941999788402,
"loss": 1.665,
"step": 531
},
{
"epoch": 5.32e-05,
"grad_norm": 0.6002557873725891,
"learning_rate": 0.00019998939999788,
"loss": 1.7575,
"step": 532
},
{
"epoch": 5.33e-05,
"grad_norm": 0.5490646362304688,
"learning_rate": 0.00019998937999787602,
"loss": 1.5325,
"step": 533
},
{
"epoch": 5.34e-05,
"grad_norm": 0.5014079213142395,
"learning_rate": 0.000199989359997872,
"loss": 1.7163,
"step": 534
},
{
"epoch": 5.35e-05,
"grad_norm": 0.5703453421592712,
"learning_rate": 0.00019998933999786802,
"loss": 1.698,
"step": 535
},
{
"epoch": 5.36e-05,
"grad_norm": 0.6075358986854553,
"learning_rate": 0.000199989319997864,
"loss": 1.7913,
"step": 536
},
{
"epoch": 5.37e-05,
"grad_norm": 0.4881259799003601,
"learning_rate": 0.00019998929999786,
"loss": 2.113,
"step": 537
},
{
"epoch": 5.38e-05,
"grad_norm": 0.510300874710083,
"learning_rate": 0.000199989279997856,
"loss": 2.2238,
"step": 538
},
{
"epoch": 5.39e-05,
"grad_norm": 0.39274975657463074,
"learning_rate": 0.000199989259997852,
"loss": 1.3532,
"step": 539
},
{
"epoch": 5.4e-05,
"grad_norm": 0.44840964674949646,
"learning_rate": 0.000199989239997848,
"loss": 1.7173,
"step": 540
},
{
"epoch": 5.41e-05,
"grad_norm": 0.44919291138648987,
"learning_rate": 0.00019998921999784402,
"loss": 1.6348,
"step": 541
},
{
"epoch": 5.42e-05,
"grad_norm": 0.42541933059692383,
"learning_rate": 0.00019998919999784,
"loss": 1.7341,
"step": 542
},
{
"epoch": 5.43e-05,
"grad_norm": 0.5008085370063782,
"learning_rate": 0.00019998917999783603,
"loss": 1.5201,
"step": 543
},
{
"epoch": 5.44e-05,
"grad_norm": 0.4793260991573334,
"learning_rate": 0.000199989159997832,
"loss": 1.7609,
"step": 544
},
{
"epoch": 5.45e-05,
"grad_norm": 0.401962548494339,
"learning_rate": 0.000199989139997828,
"loss": 1.7412,
"step": 545
},
{
"epoch": 5.46e-05,
"grad_norm": 0.37203165888786316,
"learning_rate": 0.00019998911999782401,
"loss": 1.6765,
"step": 546
},
{
"epoch": 5.47e-05,
"grad_norm": 0.4987531006336212,
"learning_rate": 0.00019998909999782,
"loss": 1.6213,
"step": 547
},
{
"epoch": 5.48e-05,
"grad_norm": 0.33126118779182434,
"learning_rate": 0.00019998907999781601,
"loss": 1.4363,
"step": 548
},
{
"epoch": 5.49e-05,
"grad_norm": 0.38644975423812866,
"learning_rate": 0.000199989059997812,
"loss": 1.7057,
"step": 549
},
{
"epoch": 5.5e-05,
"grad_norm": 0.5224719643592834,
"learning_rate": 0.00019998903999780802,
"loss": 1.832,
"step": 550
},
{
"epoch": 5.51e-05,
"grad_norm": 0.48069778084754944,
"learning_rate": 0.00019998901999780403,
"loss": 1.5211,
"step": 551
},
{
"epoch": 5.52e-05,
"grad_norm": 0.523922860622406,
"learning_rate": 0.00019998899999780002,
"loss": 1.882,
"step": 552
},
{
"epoch": 5.53e-05,
"grad_norm": 0.4732465147972107,
"learning_rate": 0.000199988979997796,
"loss": 1.8234,
"step": 553
},
{
"epoch": 5.54e-05,
"grad_norm": 0.5047708749771118,
"learning_rate": 0.00019998895999779202,
"loss": 1.7325,
"step": 554
},
{
"epoch": 5.55e-05,
"grad_norm": 0.4322643280029297,
"learning_rate": 0.000199988939997788,
"loss": 1.9404,
"step": 555
},
{
"epoch": 5.56e-05,
"grad_norm": 0.32925716042518616,
"learning_rate": 0.00019998891999778402,
"loss": 1.5556,
"step": 556
},
{
"epoch": 5.57e-05,
"grad_norm": 0.6177994608879089,
"learning_rate": 0.00019998889999778,
"loss": 1.9714,
"step": 557
},
{
"epoch": 5.58e-05,
"grad_norm": 0.5117059946060181,
"learning_rate": 0.000199988879997776,
"loss": 1.6378,
"step": 558
},
{
"epoch": 5.59e-05,
"grad_norm": 0.38223034143447876,
"learning_rate": 0.000199988859997772,
"loss": 1.7231,
"step": 559
},
{
"epoch": 5.6e-05,
"grad_norm": 0.4473375380039215,
"learning_rate": 0.000199988839997768,
"loss": 1.3054,
"step": 560
},
{
"epoch": 5.61e-05,
"grad_norm": 0.42548301815986633,
"learning_rate": 0.000199988819997764,
"loss": 1.464,
"step": 561
},
{
"epoch": 5.62e-05,
"grad_norm": 0.44777560234069824,
"learning_rate": 0.00019998879999776002,
"loss": 1.6101,
"step": 562
},
{
"epoch": 5.63e-05,
"grad_norm": 0.523910641670227,
"learning_rate": 0.000199988779997756,
"loss": 1.5159,
"step": 563
},
{
"epoch": 5.64e-05,
"grad_norm": 0.36971428990364075,
"learning_rate": 0.00019998875999775202,
"loss": 1.3634,
"step": 564
},
{
"epoch": 5.65e-05,
"grad_norm": 0.34093400835990906,
"learning_rate": 0.000199988739997748,
"loss": 1.5296,
"step": 565
},
{
"epoch": 5.66e-05,
"grad_norm": 0.3805979788303375,
"learning_rate": 0.000199988719997744,
"loss": 1.5763,
"step": 566
},
{
"epoch": 5.67e-05,
"grad_norm": 0.38377007842063904,
"learning_rate": 0.00019998869999774,
"loss": 1.6956,
"step": 567
},
{
"epoch": 5.68e-05,
"grad_norm": 0.3472450375556946,
"learning_rate": 0.000199988679997736,
"loss": 1.5444,
"step": 568
},
{
"epoch": 5.69e-05,
"grad_norm": 0.48055920004844666,
"learning_rate": 0.00019998865999773201,
"loss": 1.8195,
"step": 569
},
{
"epoch": 5.7e-05,
"grad_norm": 0.3129830062389374,
"learning_rate": 0.000199988639997728,
"loss": 1.3913,
"step": 570
},
{
"epoch": 5.71e-05,
"grad_norm": 0.6258849501609802,
"learning_rate": 0.00019998861999772402,
"loss": 1.9391,
"step": 571
},
{
"epoch": 5.72e-05,
"grad_norm": 0.49116870760917664,
"learning_rate": 0.00019998859999772003,
"loss": 1.6893,
"step": 572
},
{
"epoch": 5.73e-05,
"grad_norm": 0.5946299433708191,
"learning_rate": 0.00019998857999771602,
"loss": 1.8386,
"step": 573
},
{
"epoch": 5.74e-05,
"grad_norm": 0.6036151647567749,
"learning_rate": 0.000199988559997712,
"loss": 1.9863,
"step": 574
},
{
"epoch": 5.75e-05,
"grad_norm": 0.4285214841365814,
"learning_rate": 0.00019998853999770802,
"loss": 1.6547,
"step": 575
},
{
"epoch": 5.76e-05,
"grad_norm": 0.500227153301239,
"learning_rate": 0.000199988519997704,
"loss": 1.9506,
"step": 576
},
{
"epoch": 5.77e-05,
"grad_norm": 0.7017431855201721,
"learning_rate": 0.00019998849999770002,
"loss": 1.5819,
"step": 577
},
{
"epoch": 5.78e-05,
"grad_norm": 0.5523765683174133,
"learning_rate": 0.000199988479997696,
"loss": 1.506,
"step": 578
},
{
"epoch": 5.79e-05,
"grad_norm": 0.3622891306877136,
"learning_rate": 0.000199988459997692,
"loss": 1.3593,
"step": 579
},
{
"epoch": 5.8e-05,
"grad_norm": 0.34565019607543945,
"learning_rate": 0.000199988439997688,
"loss": 1.6033,
"step": 580
},
{
"epoch": 5.81e-05,
"grad_norm": 0.5420109629631042,
"learning_rate": 0.000199988419997684,
"loss": 2.0507,
"step": 581
},
{
"epoch": 5.82e-05,
"grad_norm": 0.5639861822128296,
"learning_rate": 0.00019998839999768,
"loss": 1.7713,
"step": 582
},
{
"epoch": 5.83e-05,
"grad_norm": 0.6982139945030212,
"learning_rate": 0.00019998837999767602,
"loss": 1.8072,
"step": 583
},
{
"epoch": 5.84e-05,
"grad_norm": 0.7252458333969116,
"learning_rate": 0.000199988359997672,
"loss": 2.1882,
"step": 584
},
{
"epoch": 5.85e-05,
"grad_norm": 0.8567319512367249,
"learning_rate": 0.00019998833999766802,
"loss": 1.8691,
"step": 585
},
{
"epoch": 5.86e-05,
"grad_norm": 1.0040030479431152,
"learning_rate": 0.000199988319997664,
"loss": 1.9881,
"step": 586
},
{
"epoch": 5.87e-05,
"grad_norm": 0.8487244248390198,
"learning_rate": 0.00019998829999766,
"loss": 1.6833,
"step": 587
},
{
"epoch": 5.88e-05,
"grad_norm": 1.3802292346954346,
"learning_rate": 0.000199988279997656,
"loss": 2.0359,
"step": 588
},
{
"epoch": 5.89e-05,
"grad_norm": 0.41328075528144836,
"learning_rate": 0.000199988259997652,
"loss": 1.7928,
"step": 589
},
{
"epoch": 5.9e-05,
"grad_norm": 0.5135270953178406,
"learning_rate": 0.000199988239997648,
"loss": 2.0794,
"step": 590
},
{
"epoch": 5.91e-05,
"grad_norm": 0.4181782603263855,
"learning_rate": 0.000199988219997644,
"loss": 1.8075,
"step": 591
},
{
"epoch": 5.92e-05,
"grad_norm": 0.4290955066680908,
"learning_rate": 0.00019998819999764001,
"loss": 1.7813,
"step": 592
},
{
"epoch": 5.93e-05,
"grad_norm": 0.4388153553009033,
"learning_rate": 0.00019998817999763603,
"loss": 1.6148,
"step": 593
},
{
"epoch": 5.94e-05,
"grad_norm": 0.5249912738800049,
"learning_rate": 0.00019998815999763202,
"loss": 1.7352,
"step": 594
},
{
"epoch": 5.95e-05,
"grad_norm": 0.5201463103294373,
"learning_rate": 0.000199988139997628,
"loss": 1.7824,
"step": 595
},
{
"epoch": 5.96e-05,
"grad_norm": 0.4420567750930786,
"learning_rate": 0.00019998811999762402,
"loss": 1.6909,
"step": 596
},
{
"epoch": 5.97e-05,
"grad_norm": 0.5250062942504883,
"learning_rate": 0.00019998809999762,
"loss": 1.5819,
"step": 597
},
{
"epoch": 5.98e-05,
"grad_norm": 0.3808429539203644,
"learning_rate": 0.00019998807999761602,
"loss": 1.5204,
"step": 598
},
{
"epoch": 5.99e-05,
"grad_norm": 0.5340396761894226,
"learning_rate": 0.000199988059997612,
"loss": 1.7607,
"step": 599
},
{
"epoch": 6e-05,
"grad_norm": 0.41092732548713684,
"learning_rate": 0.000199988039997608,
"loss": 1.9763,
"step": 600
},
{
"epoch": 6.01e-05,
"grad_norm": 0.49032190442085266,
"learning_rate": 0.000199988019997604,
"loss": 1.931,
"step": 601
},
{
"epoch": 6.02e-05,
"grad_norm": 0.7396040558815002,
"learning_rate": 0.0001999879999976,
"loss": 2.1648,
"step": 602
},
{
"epoch": 6.03e-05,
"grad_norm": 0.6198503375053406,
"learning_rate": 0.000199987979997596,
"loss": 1.6999,
"step": 603
},
{
"epoch": 6.04e-05,
"grad_norm": 0.6124745011329651,
"learning_rate": 0.00019998795999759202,
"loss": 1.7087,
"step": 604
},
{
"epoch": 6.05e-05,
"grad_norm": 0.5189010500907898,
"learning_rate": 0.000199987939997588,
"loss": 1.6049,
"step": 605
},
{
"epoch": 6.06e-05,
"grad_norm": 0.5214333534240723,
"learning_rate": 0.00019998791999758402,
"loss": 1.6779,
"step": 606
},
{
"epoch": 6.07e-05,
"grad_norm": 0.43613508343696594,
"learning_rate": 0.00019998789999758,
"loss": 1.546,
"step": 607
},
{
"epoch": 6.08e-05,
"grad_norm": 0.5446900725364685,
"learning_rate": 0.000199987879997576,
"loss": 1.6044,
"step": 608
},
{
"epoch": 6.09e-05,
"grad_norm": 0.7355135083198547,
"learning_rate": 0.000199987859997572,
"loss": 1.8087,
"step": 609
},
{
"epoch": 6.1e-05,
"grad_norm": 0.37349310517311096,
"learning_rate": 0.000199987839997568,
"loss": 1.6505,
"step": 610
},
{
"epoch": 6.11e-05,
"grad_norm": 0.6389155983924866,
"learning_rate": 0.000199987819997564,
"loss": 2.5499,
"step": 611
},
{
"epoch": 6.12e-05,
"grad_norm": 0.6529825329780579,
"learning_rate": 0.00019998779999756,
"loss": 1.9657,
"step": 612
},
{
"epoch": 6.13e-05,
"grad_norm": 0.40945208072662354,
"learning_rate": 0.00019998777999755601,
"loss": 1.7877,
"step": 613
},
{
"epoch": 6.14e-05,
"grad_norm": 0.8343349099159241,
"learning_rate": 0.00019998775999755203,
"loss": 1.8748,
"step": 614
},
{
"epoch": 6.15e-05,
"grad_norm": 0.5249513983726501,
"learning_rate": 0.00019998773999754801,
"loss": 2.1268,
"step": 615
},
{
"epoch": 6.16e-05,
"grad_norm": 0.6493553519248962,
"learning_rate": 0.000199987719997544,
"loss": 2.0341,
"step": 616
},
{
"epoch": 6.17e-05,
"grad_norm": 0.37419742345809937,
"learning_rate": 0.00019998769999754002,
"loss": 1.7567,
"step": 617
},
{
"epoch": 6.18e-05,
"grad_norm": 0.47593483328819275,
"learning_rate": 0.000199987679997536,
"loss": 1.666,
"step": 618
},
{
"epoch": 6.19e-05,
"grad_norm": 0.5198250412940979,
"learning_rate": 0.00019998765999753202,
"loss": 1.8843,
"step": 619
},
{
"epoch": 6.2e-05,
"grad_norm": 0.5427200198173523,
"learning_rate": 0.000199987639997528,
"loss": 1.9366,
"step": 620
},
{
"epoch": 6.21e-05,
"grad_norm": 0.6414089798927307,
"learning_rate": 0.000199987619997524,
"loss": 2.5078,
"step": 621
},
{
"epoch": 6.22e-05,
"grad_norm": 0.7620255947113037,
"learning_rate": 0.00019998759999752,
"loss": 2.2601,
"step": 622
},
{
"epoch": 6.23e-05,
"grad_norm": 0.5621963739395142,
"learning_rate": 0.000199987579997516,
"loss": 2.2027,
"step": 623
},
{
"epoch": 6.24e-05,
"grad_norm": 0.6137848496437073,
"learning_rate": 0.000199987559997512,
"loss": 2.4129,
"step": 624
},
{
"epoch": 6.25e-05,
"grad_norm": 0.5061240792274475,
"learning_rate": 0.00019998753999750802,
"loss": 2.0332,
"step": 625
},
{
"epoch": 6.26e-05,
"grad_norm": 0.5945548415184021,
"learning_rate": 0.000199987519997504,
"loss": 1.9248,
"step": 626
},
{
"epoch": 6.27e-05,
"grad_norm": 0.7097086906433105,
"learning_rate": 0.00019998749999750002,
"loss": 2.0192,
"step": 627
},
{
"epoch": 6.28e-05,
"grad_norm": 0.6447247862815857,
"learning_rate": 0.000199987479997496,
"loss": 1.6857,
"step": 628
},
{
"epoch": 6.29e-05,
"grad_norm": 0.6555777788162231,
"learning_rate": 0.000199987459997492,
"loss": 2.2241,
"step": 629
},
{
"epoch": 6.3e-05,
"grad_norm": 0.7811471223831177,
"learning_rate": 0.000199987439997488,
"loss": 1.9206,
"step": 630
},
{
"epoch": 6.31e-05,
"grad_norm": 0.494417279958725,
"learning_rate": 0.000199987419997484,
"loss": 1.696,
"step": 631
},
{
"epoch": 6.32e-05,
"grad_norm": 0.6267716288566589,
"learning_rate": 0.00019998739999748,
"loss": 1.9402,
"step": 632
},
{
"epoch": 6.33e-05,
"grad_norm": 0.48158010840415955,
"learning_rate": 0.000199987379997476,
"loss": 1.6654,
"step": 633
},
{
"epoch": 6.34e-05,
"grad_norm": 0.5128390789031982,
"learning_rate": 0.000199987359997472,
"loss": 1.8172,
"step": 634
},
{
"epoch": 6.35e-05,
"grad_norm": 0.5641897320747375,
"learning_rate": 0.00019998733999746803,
"loss": 1.6595,
"step": 635
},
{
"epoch": 6.36e-05,
"grad_norm": 0.5541167259216309,
"learning_rate": 0.00019998731999746401,
"loss": 1.5466,
"step": 636
},
{
"epoch": 6.37e-05,
"grad_norm": 0.4185241162776947,
"learning_rate": 0.00019998729999746,
"loss": 1.6125,
"step": 637
},
{
"epoch": 6.38e-05,
"grad_norm": 0.39442434906959534,
"learning_rate": 0.00019998727999745602,
"loss": 1.6312,
"step": 638
},
{
"epoch": 6.39e-05,
"grad_norm": 0.4594479203224182,
"learning_rate": 0.000199987259997452,
"loss": 1.7502,
"step": 639
},
{
"epoch": 6.4e-05,
"grad_norm": 0.4056912660598755,
"learning_rate": 0.00019998723999744802,
"loss": 1.8075,
"step": 640
},
{
"epoch": 6.41e-05,
"grad_norm": 0.41331806778907776,
"learning_rate": 0.000199987219997444,
"loss": 1.5923,
"step": 641
},
{
"epoch": 6.42e-05,
"grad_norm": 0.5517099499702454,
"learning_rate": 0.00019998719999744,
"loss": 1.7828,
"step": 642
},
{
"epoch": 6.43e-05,
"grad_norm": 0.4295980632305145,
"learning_rate": 0.000199987179997436,
"loss": 1.6051,
"step": 643
},
{
"epoch": 6.44e-05,
"grad_norm": 0.4832804203033447,
"learning_rate": 0.00019998715999743202,
"loss": 1.5881,
"step": 644
},
{
"epoch": 6.45e-05,
"grad_norm": 0.4688960611820221,
"learning_rate": 0.000199987139997428,
"loss": 1.7298,
"step": 645
},
{
"epoch": 6.46e-05,
"grad_norm": 0.46499601006507874,
"learning_rate": 0.00019998711999742402,
"loss": 1.6955,
"step": 646
},
{
"epoch": 6.47e-05,
"grad_norm": 0.47443389892578125,
"learning_rate": 0.00019998709999742,
"loss": 1.6418,
"step": 647
},
{
"epoch": 6.48e-05,
"grad_norm": 0.3848339319229126,
"learning_rate": 0.00019998707999741602,
"loss": 1.682,
"step": 648
},
{
"epoch": 6.49e-05,
"grad_norm": 0.5372732877731323,
"learning_rate": 0.000199987059997412,
"loss": 1.9316,
"step": 649
},
{
"epoch": 6.5e-05,
"grad_norm": 0.5197330713272095,
"learning_rate": 0.000199987039997408,
"loss": 1.8002,
"step": 650
},
{
"epoch": 6.51e-05,
"grad_norm": 0.44663047790527344,
"learning_rate": 0.000199987019997404,
"loss": 1.7142,
"step": 651
},
{
"epoch": 6.52e-05,
"grad_norm": 0.6336218118667603,
"learning_rate": 0.0001999869999974,
"loss": 2.0031,
"step": 652
},
{
"epoch": 6.53e-05,
"grad_norm": 0.3979768455028534,
"learning_rate": 0.000199986979997396,
"loss": 1.6116,
"step": 653
},
{
"epoch": 6.54e-05,
"grad_norm": 0.6072180271148682,
"learning_rate": 0.000199986959997392,
"loss": 1.844,
"step": 654
},
{
"epoch": 6.55e-05,
"grad_norm": 0.37806108593940735,
"learning_rate": 0.000199986939997388,
"loss": 1.8738,
"step": 655
},
{
"epoch": 6.56e-05,
"grad_norm": 0.4649452269077301,
"learning_rate": 0.00019998691999738403,
"loss": 1.6399,
"step": 656
},
{
"epoch": 6.57e-05,
"grad_norm": 0.39098310470581055,
"learning_rate": 0.00019998689999738,
"loss": 1.5222,
"step": 657
},
{
"epoch": 6.58e-05,
"grad_norm": 0.38826289772987366,
"learning_rate": 0.00019998687999737603,
"loss": 1.4876,
"step": 658
},
{
"epoch": 6.59e-05,
"grad_norm": 0.501055121421814,
"learning_rate": 0.00019998685999737201,
"loss": 1.6178,
"step": 659
},
{
"epoch": 6.6e-05,
"grad_norm": 0.3379994034767151,
"learning_rate": 0.000199986839997368,
"loss": 1.3138,
"step": 660
},
{
"epoch": 6.61e-05,
"grad_norm": 0.6309155225753784,
"learning_rate": 0.00019998681999736402,
"loss": 1.566,
"step": 661
},
{
"epoch": 6.62e-05,
"grad_norm": 0.8135642409324646,
"learning_rate": 0.00019998679999736,
"loss": 1.6626,
"step": 662
},
{
"epoch": 6.63e-05,
"grad_norm": 0.6718431115150452,
"learning_rate": 0.000199986779997356,
"loss": 1.7325,
"step": 663
},
{
"epoch": 6.64e-05,
"grad_norm": 0.41403815150260925,
"learning_rate": 0.000199986759997352,
"loss": 1.5701,
"step": 664
},
{
"epoch": 6.65e-05,
"grad_norm": 0.4888553321361542,
"learning_rate": 0.00019998673999734802,
"loss": 1.8185,
"step": 665
},
{
"epoch": 6.66e-05,
"grad_norm": 0.5423339605331421,
"learning_rate": 0.00019998671999734403,
"loss": 1.7844,
"step": 666
},
{
"epoch": 6.67e-05,
"grad_norm": 0.6097223162651062,
"learning_rate": 0.00019998669999734002,
"loss": 1.8125,
"step": 667
},
{
"epoch": 6.68e-05,
"grad_norm": 0.4173910319805145,
"learning_rate": 0.000199986679997336,
"loss": 1.8131,
"step": 668
},
{
"epoch": 6.69e-05,
"grad_norm": 0.39703911542892456,
"learning_rate": 0.00019998665999733202,
"loss": 1.6145,
"step": 669
},
{
"epoch": 6.7e-05,
"grad_norm": 0.6763572692871094,
"learning_rate": 0.000199986639997328,
"loss": 2.1873,
"step": 670
},
{
"epoch": 6.71e-05,
"grad_norm": 0.7136011123657227,
"learning_rate": 0.000199986619997324,
"loss": 1.877,
"step": 671
},
{
"epoch": 6.72e-05,
"grad_norm": 0.43068549036979675,
"learning_rate": 0.00019998659999732,
"loss": 1.4075,
"step": 672
},
{
"epoch": 6.73e-05,
"grad_norm": 0.35947272181510925,
"learning_rate": 0.000199986579997316,
"loss": 1.3672,
"step": 673
},
{
"epoch": 6.74e-05,
"grad_norm": 0.47769075632095337,
"learning_rate": 0.000199986559997312,
"loss": 1.7204,
"step": 674
},
{
"epoch": 6.75e-05,
"grad_norm": 0.4047739803791046,
"learning_rate": 0.000199986539997308,
"loss": 1.7409,
"step": 675
},
{
"epoch": 6.76e-05,
"grad_norm": 0.4852672517299652,
"learning_rate": 0.000199986519997304,
"loss": 1.6483,
"step": 676
},
{
"epoch": 6.77e-05,
"grad_norm": 0.43307265639305115,
"learning_rate": 0.00019998649999730003,
"loss": 1.4541,
"step": 677
},
{
"epoch": 6.78e-05,
"grad_norm": 0.3534131646156311,
"learning_rate": 0.000199986479997296,
"loss": 1.4279,
"step": 678
},
{
"epoch": 6.79e-05,
"grad_norm": 0.5243763327598572,
"learning_rate": 0.00019998645999729203,
"loss": 1.6531,
"step": 679
},
{
"epoch": 6.8e-05,
"grad_norm": 0.42719149589538574,
"learning_rate": 0.000199986439997288,
"loss": 1.6113,
"step": 680
},
{
"epoch": 6.81e-05,
"grad_norm": 0.34318026900291443,
"learning_rate": 0.000199986419997284,
"loss": 1.7228,
"step": 681
},
{
"epoch": 6.82e-05,
"grad_norm": 0.44339466094970703,
"learning_rate": 0.00019998639999728001,
"loss": 1.6673,
"step": 682
},
{
"epoch": 6.83e-05,
"grad_norm": 0.4398897588253021,
"learning_rate": 0.000199986379997276,
"loss": 1.5628,
"step": 683
},
{
"epoch": 6.84e-05,
"grad_norm": 0.34138771891593933,
"learning_rate": 0.000199986359997272,
"loss": 1.3863,
"step": 684
},
{
"epoch": 6.85e-05,
"grad_norm": 0.37642526626586914,
"learning_rate": 0.000199986339997268,
"loss": 1.5187,
"step": 685
},
{
"epoch": 6.86e-05,
"grad_norm": 0.34787964820861816,
"learning_rate": 0.00019998631999726402,
"loss": 1.5789,
"step": 686
},
{
"epoch": 6.87e-05,
"grad_norm": 0.608262300491333,
"learning_rate": 0.00019998629999726003,
"loss": 1.9227,
"step": 687
},
{
"epoch": 6.88e-05,
"grad_norm": 0.4408338963985443,
"learning_rate": 0.00019998627999725602,
"loss": 1.5578,
"step": 688
},
{
"epoch": 6.89e-05,
"grad_norm": 0.43292123079299927,
"learning_rate": 0.000199986259997252,
"loss": 1.4985,
"step": 689
},
{
"epoch": 6.9e-05,
"grad_norm": 0.44983357191085815,
"learning_rate": 0.00019998623999724802,
"loss": 1.6885,
"step": 690
},
{
"epoch": 6.91e-05,
"grad_norm": 0.45934993028640747,
"learning_rate": 0.000199986219997244,
"loss": 1.7836,
"step": 691
},
{
"epoch": 6.92e-05,
"grad_norm": 0.748002290725708,
"learning_rate": 0.00019998619999724,
"loss": 1.8561,
"step": 692
},
{
"epoch": 6.93e-05,
"grad_norm": 0.424216091632843,
"learning_rate": 0.000199986179997236,
"loss": 1.5345,
"step": 693
},
{
"epoch": 6.94e-05,
"grad_norm": 0.4493589699268341,
"learning_rate": 0.000199986159997232,
"loss": 1.895,
"step": 694
},
{
"epoch": 6.95e-05,
"grad_norm": 0.4744150936603546,
"learning_rate": 0.000199986139997228,
"loss": 1.6292,
"step": 695
},
{
"epoch": 6.96e-05,
"grad_norm": 0.5223432183265686,
"learning_rate": 0.000199986119997224,
"loss": 1.8238,
"step": 696
},
{
"epoch": 6.97e-05,
"grad_norm": 0.808218240737915,
"learning_rate": 0.00019998609999722,
"loss": 1.7626,
"step": 697
},
{
"epoch": 6.98e-05,
"grad_norm": 0.3832715153694153,
"learning_rate": 0.00019998607999721602,
"loss": 1.5161,
"step": 698
},
{
"epoch": 6.99e-05,
"grad_norm": 0.3905687630176544,
"learning_rate": 0.000199986059997212,
"loss": 1.6365,
"step": 699
},
{
"epoch": 7e-05,
"grad_norm": 0.5448684096336365,
"learning_rate": 0.00019998603999720803,
"loss": 1.6622,
"step": 700
},
{
"epoch": 7.01e-05,
"grad_norm": 0.5400844812393188,
"learning_rate": 0.000199986019997204,
"loss": 1.7099,
"step": 701
},
{
"epoch": 7.02e-05,
"grad_norm": 0.5657634735107422,
"learning_rate": 0.0001999859999972,
"loss": 1.6923,
"step": 702
},
{
"epoch": 7.03e-05,
"grad_norm": 0.5056957006454468,
"learning_rate": 0.00019998597999719601,
"loss": 1.9012,
"step": 703
},
{
"epoch": 7.04e-05,
"grad_norm": 0.342732697725296,
"learning_rate": 0.000199985959997192,
"loss": 1.5076,
"step": 704
},
{
"epoch": 7.05e-05,
"grad_norm": 0.7074832916259766,
"learning_rate": 0.000199985939997188,
"loss": 1.6404,
"step": 705
},
{
"epoch": 7.06e-05,
"grad_norm": 0.4222736060619354,
"learning_rate": 0.000199985919997184,
"loss": 1.5784,
"step": 706
},
{
"epoch": 7.07e-05,
"grad_norm": 0.48748013377189636,
"learning_rate": 0.00019998589999718002,
"loss": 1.6454,
"step": 707
},
{
"epoch": 7.08e-05,
"grad_norm": 0.5379446744918823,
"learning_rate": 0.00019998587999717603,
"loss": 2.0898,
"step": 708
},
{
"epoch": 7.09e-05,
"grad_norm": 0.520378589630127,
"learning_rate": 0.00019998585999717202,
"loss": 2.1026,
"step": 709
},
{
"epoch": 7.1e-05,
"grad_norm": 0.6118019819259644,
"learning_rate": 0.000199985839997168,
"loss": 1.7686,
"step": 710
},
{
"epoch": 7.11e-05,
"grad_norm": 0.3723047077655792,
"learning_rate": 0.00019998581999716402,
"loss": 1.5874,
"step": 711
},
{
"epoch": 7.12e-05,
"grad_norm": 0.5237134099006653,
"learning_rate": 0.00019998579999716,
"loss": 1.7566,
"step": 712
},
{
"epoch": 7.13e-05,
"grad_norm": 0.6304356455802917,
"learning_rate": 0.00019998577999715602,
"loss": 1.8531,
"step": 713
},
{
"epoch": 7.14e-05,
"grad_norm": 0.601813554763794,
"learning_rate": 0.000199985759997152,
"loss": 1.6686,
"step": 714
},
{
"epoch": 7.15e-05,
"grad_norm": 0.8765939474105835,
"learning_rate": 0.000199985739997148,
"loss": 1.8965,
"step": 715
},
{
"epoch": 7.16e-05,
"grad_norm": 0.5739784240722656,
"learning_rate": 0.000199985719997144,
"loss": 2.0969,
"step": 716
},
{
"epoch": 7.17e-05,
"grad_norm": 0.5800229907035828,
"learning_rate": 0.00019998569999714,
"loss": 1.6845,
"step": 717
},
{
"epoch": 7.18e-05,
"grad_norm": 0.5827582478523254,
"learning_rate": 0.000199985679997136,
"loss": 1.5745,
"step": 718
},
{
"epoch": 7.19e-05,
"grad_norm": 0.4680020809173584,
"learning_rate": 0.00019998565999713202,
"loss": 1.7667,
"step": 719
},
{
"epoch": 7.2e-05,
"grad_norm": 0.48157379031181335,
"learning_rate": 0.000199985639997128,
"loss": 1.691,
"step": 720
},
{
"epoch": 7.21e-05,
"grad_norm": 0.6326285004615784,
"learning_rate": 0.00019998561999712402,
"loss": 1.773,
"step": 721
},
{
"epoch": 7.22e-05,
"grad_norm": 0.9741144180297852,
"learning_rate": 0.00019998559999712,
"loss": 1.8203,
"step": 722
},
{
"epoch": 7.23e-05,
"grad_norm": 0.5455628633499146,
"learning_rate": 0.000199985579997116,
"loss": 1.8255,
"step": 723
},
{
"epoch": 7.24e-05,
"grad_norm": 0.4121125638484955,
"learning_rate": 0.000199985559997112,
"loss": 1.5954,
"step": 724
},
{
"epoch": 7.25e-05,
"grad_norm": 0.4517988860607147,
"learning_rate": 0.000199985539997108,
"loss": 1.5003,
"step": 725
},
{
"epoch": 7.26e-05,
"grad_norm": 0.6520750522613525,
"learning_rate": 0.000199985519997104,
"loss": 1.7305,
"step": 726
},
{
"epoch": 7.27e-05,
"grad_norm": 0.5598487854003906,
"learning_rate": 0.0001999854999971,
"loss": 1.9097,
"step": 727
},
{
"epoch": 7.28e-05,
"grad_norm": 0.5791051983833313,
"learning_rate": 0.00019998547999709602,
"loss": 1.9114,
"step": 728
},
{
"epoch": 7.29e-05,
"grad_norm": 0.4858875274658203,
"learning_rate": 0.00019998545999709203,
"loss": 1.7664,
"step": 729
},
{
"epoch": 7.3e-05,
"grad_norm": 0.44866469502449036,
"learning_rate": 0.00019998543999708802,
"loss": 1.6479,
"step": 730
},
{
"epoch": 7.31e-05,
"grad_norm": 0.43170586228370667,
"learning_rate": 0.000199985419997084,
"loss": 1.6068,
"step": 731
},
{
"epoch": 7.32e-05,
"grad_norm": 0.5810086727142334,
"learning_rate": 0.00019998539999708002,
"loss": 1.5655,
"step": 732
},
{
"epoch": 7.33e-05,
"grad_norm": 0.7025956511497498,
"learning_rate": 0.000199985379997076,
"loss": 1.7895,
"step": 733
},
{
"epoch": 7.34e-05,
"grad_norm": 0.6398160457611084,
"learning_rate": 0.00019998535999707202,
"loss": 1.6792,
"step": 734
},
{
"epoch": 7.35e-05,
"grad_norm": 0.5608841180801392,
"learning_rate": 0.000199985339997068,
"loss": 1.5725,
"step": 735
},
{
"epoch": 7.36e-05,
"grad_norm": 0.40653395652770996,
"learning_rate": 0.000199985319997064,
"loss": 1.5799,
"step": 736
},
{
"epoch": 7.37e-05,
"grad_norm": 0.5680859684944153,
"learning_rate": 0.00019998529999706,
"loss": 1.8978,
"step": 737
},
{
"epoch": 7.38e-05,
"grad_norm": 0.4924467206001282,
"learning_rate": 0.000199985279997056,
"loss": 1.8382,
"step": 738
},
{
"epoch": 7.39e-05,
"grad_norm": 0.46362969279289246,
"learning_rate": 0.000199985259997052,
"loss": 1.6736,
"step": 739
},
{
"epoch": 7.4e-05,
"grad_norm": 0.3772338926792145,
"learning_rate": 0.00019998523999704802,
"loss": 1.5918,
"step": 740
},
{
"epoch": 7.41e-05,
"grad_norm": 0.4587782025337219,
"learning_rate": 0.000199985219997044,
"loss": 1.6029,
"step": 741
},
{
"epoch": 7.42e-05,
"grad_norm": 0.8824099898338318,
"learning_rate": 0.00019998519999704002,
"loss": 2.5752,
"step": 742
},
{
"epoch": 7.43e-05,
"grad_norm": 0.5362225770950317,
"learning_rate": 0.000199985179997036,
"loss": 2.0711,
"step": 743
},
{
"epoch": 7.44e-05,
"grad_norm": 0.6724733114242554,
"learning_rate": 0.000199985159997032,
"loss": 1.8741,
"step": 744
},
{
"epoch": 7.45e-05,
"grad_norm": 1.0224779844284058,
"learning_rate": 0.000199985139997028,
"loss": 1.7158,
"step": 745
},
{
"epoch": 7.46e-05,
"grad_norm": 0.7625367045402527,
"learning_rate": 0.000199985119997024,
"loss": 2.1522,
"step": 746
},
{
"epoch": 7.47e-05,
"grad_norm": 0.5652865171432495,
"learning_rate": 0.00019998509999702,
"loss": 1.8304,
"step": 747
},
{
"epoch": 7.48e-05,
"grad_norm": 0.502132773399353,
"learning_rate": 0.000199985079997016,
"loss": 1.5777,
"step": 748
},
{
"epoch": 7.49e-05,
"grad_norm": 0.5017869472503662,
"learning_rate": 0.00019998505999701201,
"loss": 1.8774,
"step": 749
},
{
"epoch": 7.5e-05,
"grad_norm": 0.4578314423561096,
"learning_rate": 0.00019998503999700803,
"loss": 1.7524,
"step": 750
},
{
"epoch": 7.51e-05,
"grad_norm": 0.5851650834083557,
"learning_rate": 0.00019998501999700402,
"loss": 1.8602,
"step": 751
},
{
"epoch": 7.52e-05,
"grad_norm": 0.5075413584709167,
"learning_rate": 0.000199984999997,
"loss": 1.8023,
"step": 752
},
{
"epoch": 7.53e-05,
"grad_norm": 0.6435492038726807,
"learning_rate": 0.00019998497999699602,
"loss": 2.4565,
"step": 753
},
{
"epoch": 7.54e-05,
"grad_norm": 0.4106220602989197,
"learning_rate": 0.000199984959996992,
"loss": 1.6566,
"step": 754
},
{
"epoch": 7.55e-05,
"grad_norm": 0.5346071124076843,
"learning_rate": 0.00019998493999698802,
"loss": 1.3896,
"step": 755
},
{
"epoch": 7.56e-05,
"grad_norm": 0.4541730582714081,
"learning_rate": 0.000199984919996984,
"loss": 1.5428,
"step": 756
},
{
"epoch": 7.57e-05,
"grad_norm": 0.44926509261131287,
"learning_rate": 0.00019998489999698,
"loss": 1.6749,
"step": 757
},
{
"epoch": 7.58e-05,
"grad_norm": 0.4825266897678375,
"learning_rate": 0.000199984879996976,
"loss": 1.6849,
"step": 758
},
{
"epoch": 7.59e-05,
"grad_norm": 0.6303689479827881,
"learning_rate": 0.00019998485999697202,
"loss": 1.3428,
"step": 759
},
{
"epoch": 7.6e-05,
"grad_norm": 0.4438778758049011,
"learning_rate": 0.000199984839996968,
"loss": 1.4608,
"step": 760
},
{
"epoch": 7.61e-05,
"grad_norm": 0.4618997871875763,
"learning_rate": 0.00019998481999696402,
"loss": 1.6723,
"step": 761
},
{
"epoch": 7.62e-05,
"grad_norm": 0.4038149118423462,
"learning_rate": 0.00019998479999696,
"loss": 1.6377,
"step": 762
},
{
"epoch": 7.63e-05,
"grad_norm": 0.39794233441352844,
"learning_rate": 0.00019998477999695602,
"loss": 1.3978,
"step": 763
},
{
"epoch": 7.64e-05,
"grad_norm": 0.5095017552375793,
"learning_rate": 0.000199984759996952,
"loss": 1.7386,
"step": 764
},
{
"epoch": 7.65e-05,
"grad_norm": 0.4217730760574341,
"learning_rate": 0.000199984739996948,
"loss": 1.5239,
"step": 765
},
{
"epoch": 7.66e-05,
"grad_norm": 0.4295751452445984,
"learning_rate": 0.000199984719996944,
"loss": 1.7376,
"step": 766
},
{
"epoch": 7.67e-05,
"grad_norm": 0.560659646987915,
"learning_rate": 0.00019998469999694,
"loss": 1.7936,
"step": 767
},
{
"epoch": 7.68e-05,
"grad_norm": 0.47901651263237,
"learning_rate": 0.000199984679996936,
"loss": 1.9015,
"step": 768
},
{
"epoch": 7.69e-05,
"grad_norm": 0.5986664891242981,
"learning_rate": 0.000199984659996932,
"loss": 1.5583,
"step": 769
},
{
"epoch": 7.7e-05,
"grad_norm": 0.5027921795845032,
"learning_rate": 0.00019998463999692801,
"loss": 2.1018,
"step": 770
},
{
"epoch": 7.71e-05,
"grad_norm": 0.5863832831382751,
"learning_rate": 0.00019998461999692403,
"loss": 1.7039,
"step": 771
},
{
"epoch": 7.72e-05,
"grad_norm": 0.6483323574066162,
"learning_rate": 0.00019998459999692001,
"loss": 1.9763,
"step": 772
},
{
"epoch": 7.73e-05,
"grad_norm": 0.5346373915672302,
"learning_rate": 0.000199984579996916,
"loss": 1.6734,
"step": 773
},
{
"epoch": 7.74e-05,
"grad_norm": 0.4236164391040802,
"learning_rate": 0.00019998455999691202,
"loss": 1.6433,
"step": 774
},
{
"epoch": 7.75e-05,
"grad_norm": 0.46211886405944824,
"learning_rate": 0.000199984539996908,
"loss": 1.6719,
"step": 775
},
{
"epoch": 7.76e-05,
"grad_norm": 0.39084675908088684,
"learning_rate": 0.00019998451999690402,
"loss": 1.38,
"step": 776
},
{
"epoch": 7.77e-05,
"grad_norm": 0.5234657526016235,
"learning_rate": 0.0001999844999969,
"loss": 1.7214,
"step": 777
},
{
"epoch": 7.78e-05,
"grad_norm": 0.45347291231155396,
"learning_rate": 0.000199984479996896,
"loss": 1.6419,
"step": 778
},
{
"epoch": 7.79e-05,
"grad_norm": 0.42318612337112427,
"learning_rate": 0.000199984459996892,
"loss": 1.5282,
"step": 779
},
{
"epoch": 7.8e-05,
"grad_norm": 0.3687291145324707,
"learning_rate": 0.00019998443999688802,
"loss": 1.6526,
"step": 780
},
{
"epoch": 7.81e-05,
"grad_norm": 0.6593069434165955,
"learning_rate": 0.000199984419996884,
"loss": 1.7376,
"step": 781
},
{
"epoch": 7.82e-05,
"grad_norm": 0.5323063135147095,
"learning_rate": 0.00019998439999688002,
"loss": 1.6746,
"step": 782
},
{
"epoch": 7.83e-05,
"grad_norm": 0.535099983215332,
"learning_rate": 0.000199984379996876,
"loss": 1.7769,
"step": 783
},
{
"epoch": 7.84e-05,
"grad_norm": 0.49146050214767456,
"learning_rate": 0.00019998435999687202,
"loss": 1.7294,
"step": 784
},
{
"epoch": 7.85e-05,
"grad_norm": 0.3785257935523987,
"learning_rate": 0.000199984339996868,
"loss": 1.4755,
"step": 785
},
{
"epoch": 7.86e-05,
"grad_norm": 0.3635449707508087,
"learning_rate": 0.000199984319996864,
"loss": 1.5614,
"step": 786
},
{
"epoch": 7.87e-05,
"grad_norm": 0.44353702664375305,
"learning_rate": 0.00019998429999686,
"loss": 1.6669,
"step": 787
},
{
"epoch": 7.88e-05,
"grad_norm": 0.5304941534996033,
"learning_rate": 0.000199984279996856,
"loss": 1.3839,
"step": 788
},
{
"epoch": 7.89e-05,
"grad_norm": 0.4795816242694855,
"learning_rate": 0.000199984259996852,
"loss": 1.507,
"step": 789
},
{
"epoch": 7.9e-05,
"grad_norm": 0.4223780035972595,
"learning_rate": 0.000199984239996848,
"loss": 1.7837,
"step": 790
},
{
"epoch": 7.91e-05,
"grad_norm": 0.4522766172885895,
"learning_rate": 0.000199984219996844,
"loss": 1.628,
"step": 791
},
{
"epoch": 7.92e-05,
"grad_norm": 0.49307534098625183,
"learning_rate": 0.00019998419999684003,
"loss": 1.6543,
"step": 792
},
{
"epoch": 7.93e-05,
"grad_norm": 0.42226704955101013,
"learning_rate": 0.00019998417999683601,
"loss": 1.6927,
"step": 793
},
{
"epoch": 7.94e-05,
"grad_norm": 0.5577994585037231,
"learning_rate": 0.000199984159996832,
"loss": 2.0899,
"step": 794
},
{
"epoch": 7.95e-05,
"grad_norm": 0.3516179621219635,
"learning_rate": 0.00019998413999682802,
"loss": 1.4305,
"step": 795
},
{
"epoch": 7.96e-05,
"grad_norm": 0.297841340303421,
"learning_rate": 0.000199984119996824,
"loss": 1.4874,
"step": 796
},
{
"epoch": 7.97e-05,
"grad_norm": 0.3547735810279846,
"learning_rate": 0.00019998409999682002,
"loss": 1.6441,
"step": 797
},
{
"epoch": 7.98e-05,
"grad_norm": 0.49142611026763916,
"learning_rate": 0.000199984079996816,
"loss": 1.697,
"step": 798
},
{
"epoch": 7.99e-05,
"grad_norm": 0.38856208324432373,
"learning_rate": 0.000199984059996812,
"loss": 1.4103,
"step": 799
},
{
"epoch": 8e-05,
"grad_norm": 0.36545267701148987,
"learning_rate": 0.000199984039996808,
"loss": 1.3663,
"step": 800
},
{
"epoch": 8.01e-05,
"grad_norm": 0.44038134813308716,
"learning_rate": 0.00019998401999680402,
"loss": 1.5611,
"step": 801
},
{
"epoch": 8.02e-05,
"grad_norm": 0.7710018754005432,
"learning_rate": 0.00019998399999680003,
"loss": 1.6782,
"step": 802
},
{
"epoch": 8.03e-05,
"grad_norm": 0.7384598255157471,
"learning_rate": 0.00019998397999679602,
"loss": 1.8299,
"step": 803
},
{
"epoch": 8.04e-05,
"grad_norm": 0.5734308958053589,
"learning_rate": 0.000199983959996792,
"loss": 1.9115,
"step": 804
},
{
"epoch": 8.05e-05,
"grad_norm": 0.772005021572113,
"learning_rate": 0.00019998393999678802,
"loss": 2.0859,
"step": 805
},
{
"epoch": 8.06e-05,
"grad_norm": 0.5809271931648254,
"learning_rate": 0.000199983919996784,
"loss": 1.8312,
"step": 806
},
{
"epoch": 8.07e-05,
"grad_norm": 0.45767372846603394,
"learning_rate": 0.00019998389999678,
"loss": 2.1267,
"step": 807
},
{
"epoch": 8.08e-05,
"grad_norm": 0.5599010586738586,
"learning_rate": 0.000199983879996776,
"loss": 1.7318,
"step": 808
},
{
"epoch": 8.09e-05,
"grad_norm": 0.3303239941596985,
"learning_rate": 0.000199983859996772,
"loss": 1.6692,
"step": 809
},
{
"epoch": 8.1e-05,
"grad_norm": 0.5346744060516357,
"learning_rate": 0.000199983839996768,
"loss": 1.7508,
"step": 810
},
{
"epoch": 8.11e-05,
"grad_norm": 0.5153371095657349,
"learning_rate": 0.000199983819996764,
"loss": 1.5302,
"step": 811
},
{
"epoch": 8.12e-05,
"grad_norm": 0.5351496934890747,
"learning_rate": 0.00019998379999676,
"loss": 1.5098,
"step": 812
},
{
"epoch": 8.13e-05,
"grad_norm": 0.4825340509414673,
"learning_rate": 0.00019998377999675603,
"loss": 1.5487,
"step": 813
},
{
"epoch": 8.14e-05,
"grad_norm": 0.34448960423469543,
"learning_rate": 0.000199983759996752,
"loss": 1.3178,
"step": 814
},
{
"epoch": 8.15e-05,
"grad_norm": 0.4659689664840698,
"learning_rate": 0.000199983739996748,
"loss": 1.902,
"step": 815
},
{
"epoch": 8.16e-05,
"grad_norm": 0.4726375341415405,
"learning_rate": 0.00019998371999674401,
"loss": 1.5405,
"step": 816
},
{
"epoch": 8.17e-05,
"grad_norm": 0.43636080622673035,
"learning_rate": 0.00019998369999674,
"loss": 1.3874,
"step": 817
},
{
"epoch": 8.18e-05,
"grad_norm": 0.33488962054252625,
"learning_rate": 0.00019998367999673602,
"loss": 1.5379,
"step": 818
},
{
"epoch": 8.19e-05,
"grad_norm": 0.3343623876571655,
"learning_rate": 0.000199983659996732,
"loss": 1.3486,
"step": 819
},
{
"epoch": 8.2e-05,
"grad_norm": 0.38095343112945557,
"learning_rate": 0.000199983639996728,
"loss": 1.4693,
"step": 820
},
{
"epoch": 8.21e-05,
"grad_norm": 0.43350541591644287,
"learning_rate": 0.000199983619996724,
"loss": 1.4636,
"step": 821
},
{
"epoch": 8.22e-05,
"grad_norm": 0.3597382605075836,
"learning_rate": 0.00019998359999672002,
"loss": 1.4867,
"step": 822
},
{
"epoch": 8.23e-05,
"grad_norm": 0.3515518009662628,
"learning_rate": 0.00019998357999671603,
"loss": 1.4884,
"step": 823
},
{
"epoch": 8.24e-05,
"grad_norm": 0.3218376040458679,
"learning_rate": 0.00019998355999671202,
"loss": 1.5593,
"step": 824
},
{
"epoch": 8.25e-05,
"grad_norm": 0.36228442192077637,
"learning_rate": 0.000199983539996708,
"loss": 1.4859,
"step": 825
},
{
"epoch": 8.26e-05,
"grad_norm": 0.40394237637519836,
"learning_rate": 0.00019998351999670402,
"loss": 1.5118,
"step": 826
},
{
"epoch": 8.27e-05,
"grad_norm": 0.38098976016044617,
"learning_rate": 0.0001999834999967,
"loss": 1.6447,
"step": 827
},
{
"epoch": 8.28e-05,
"grad_norm": 0.34835758805274963,
"learning_rate": 0.000199983479996696,
"loss": 1.7321,
"step": 828
},
{
"epoch": 8.29e-05,
"grad_norm": 0.5062150359153748,
"learning_rate": 0.000199983459996692,
"loss": 1.5976,
"step": 829
},
{
"epoch": 8.3e-05,
"grad_norm": 0.32351887226104736,
"learning_rate": 0.000199983439996688,
"loss": 1.4824,
"step": 830
},
{
"epoch": 8.31e-05,
"grad_norm": 0.3703930377960205,
"learning_rate": 0.000199983419996684,
"loss": 1.5016,
"step": 831
},
{
"epoch": 8.32e-05,
"grad_norm": 0.3913329243659973,
"learning_rate": 0.00019998339999668,
"loss": 1.7568,
"step": 832
},
{
"epoch": 8.33e-05,
"grad_norm": 0.42426303029060364,
"learning_rate": 0.000199983379996676,
"loss": 1.8039,
"step": 833
},
{
"epoch": 8.34e-05,
"grad_norm": 0.471743106842041,
"learning_rate": 0.00019998335999667203,
"loss": 1.5872,
"step": 834
},
{
"epoch": 8.35e-05,
"grad_norm": 0.45813998579978943,
"learning_rate": 0.000199983339996668,
"loss": 1.567,
"step": 835
},
{
"epoch": 8.36e-05,
"grad_norm": 0.5768526196479797,
"learning_rate": 0.000199983319996664,
"loss": 1.8451,
"step": 836
},
{
"epoch": 8.37e-05,
"grad_norm": 0.50163733959198,
"learning_rate": 0.00019998329999666001,
"loss": 1.7022,
"step": 837
},
{
"epoch": 8.38e-05,
"grad_norm": 0.48361361026763916,
"learning_rate": 0.000199983279996656,
"loss": 1.6652,
"step": 838
},
{
"epoch": 8.39e-05,
"grad_norm": 0.4683377146720886,
"learning_rate": 0.00019998325999665201,
"loss": 1.7222,
"step": 839
},
{
"epoch": 8.4e-05,
"grad_norm": 0.4751068651676178,
"learning_rate": 0.000199983239996648,
"loss": 1.5728,
"step": 840
},
{
"epoch": 8.41e-05,
"grad_norm": 0.3111203610897064,
"learning_rate": 0.000199983219996644,
"loss": 1.5302,
"step": 841
},
{
"epoch": 8.42e-05,
"grad_norm": 0.5216323733329773,
"learning_rate": 0.00019998319999664,
"loss": 2.0252,
"step": 842
},
{
"epoch": 8.43e-05,
"grad_norm": 0.5294687151908875,
"learning_rate": 0.00019998317999663602,
"loss": 1.6372,
"step": 843
},
{
"epoch": 8.44e-05,
"grad_norm": 0.4909975528717041,
"learning_rate": 0.00019998315999663203,
"loss": 1.7134,
"step": 844
},
{
"epoch": 8.45e-05,
"grad_norm": 0.6667630672454834,
"learning_rate": 0.00019998313999662802,
"loss": 1.9076,
"step": 845
},
{
"epoch": 8.46e-05,
"grad_norm": 0.41149306297302246,
"learning_rate": 0.000199983119996624,
"loss": 1.8221,
"step": 846
},
{
"epoch": 8.47e-05,
"grad_norm": 0.5140305757522583,
"learning_rate": 0.00019998309999662002,
"loss": 1.5774,
"step": 847
},
{
"epoch": 8.48e-05,
"grad_norm": 0.4842669665813446,
"learning_rate": 0.000199983079996616,
"loss": 1.5759,
"step": 848
},
{
"epoch": 8.49e-05,
"grad_norm": 0.7274694442749023,
"learning_rate": 0.000199983059996612,
"loss": 1.8425,
"step": 849
},
{
"epoch": 8.5e-05,
"grad_norm": 0.4844256639480591,
"learning_rate": 0.000199983039996608,
"loss": 1.6749,
"step": 850
},
{
"epoch": 8.51e-05,
"grad_norm": 0.5181903839111328,
"learning_rate": 0.000199983019996604,
"loss": 1.5679,
"step": 851
},
{
"epoch": 8.52e-05,
"grad_norm": 0.5008674263954163,
"learning_rate": 0.0001999829999966,
"loss": 1.7772,
"step": 852
},
{
"epoch": 8.53e-05,
"grad_norm": 0.41132569313049316,
"learning_rate": 0.000199982979996596,
"loss": 1.3939,
"step": 853
},
{
"epoch": 8.54e-05,
"grad_norm": 0.5511699914932251,
"learning_rate": 0.000199982959996592,
"loss": 1.773,
"step": 854
},
{
"epoch": 8.55e-05,
"grad_norm": 0.612603485584259,
"learning_rate": 0.00019998293999658802,
"loss": 1.7383,
"step": 855
},
{
"epoch": 8.56e-05,
"grad_norm": 0.3903108239173889,
"learning_rate": 0.000199982919996584,
"loss": 1.6671,
"step": 856
},
{
"epoch": 8.57e-05,
"grad_norm": 0.36239323019981384,
"learning_rate": 0.00019998289999658003,
"loss": 1.4149,
"step": 857
},
{
"epoch": 8.58e-05,
"grad_norm": 0.401780903339386,
"learning_rate": 0.000199982879996576,
"loss": 1.5716,
"step": 858
},
{
"epoch": 8.59e-05,
"grad_norm": 0.38875460624694824,
"learning_rate": 0.000199982859996572,
"loss": 1.3998,
"step": 859
},
{
"epoch": 8.6e-05,
"grad_norm": 0.509942352771759,
"learning_rate": 0.00019998283999656801,
"loss": 1.6135,
"step": 860
},
{
"epoch": 8.61e-05,
"grad_norm": 0.7268967628479004,
"learning_rate": 0.000199982819996564,
"loss": 1.8168,
"step": 861
},
{
"epoch": 8.62e-05,
"grad_norm": 0.5064080357551575,
"learning_rate": 0.00019998279999656,
"loss": 1.7311,
"step": 862
},
{
"epoch": 8.63e-05,
"grad_norm": 0.45123475790023804,
"learning_rate": 0.000199982779996556,
"loss": 1.6807,
"step": 863
},
{
"epoch": 8.64e-05,
"grad_norm": 0.7958595156669617,
"learning_rate": 0.00019998275999655202,
"loss": 2.1956,
"step": 864
},
{
"epoch": 8.65e-05,
"grad_norm": 0.7676238417625427,
"learning_rate": 0.00019998273999654803,
"loss": 2.3658,
"step": 865
},
{
"epoch": 8.66e-05,
"grad_norm": 0.790037989616394,
"learning_rate": 0.00019998271999654402,
"loss": 2.3108,
"step": 866
},
{
"epoch": 8.67e-05,
"grad_norm": 0.6146484613418579,
"learning_rate": 0.00019998269999654,
"loss": 2.3599,
"step": 867
},
{
"epoch": 8.68e-05,
"grad_norm": 0.46311700344085693,
"learning_rate": 0.00019998267999653602,
"loss": 1.6645,
"step": 868
},
{
"epoch": 8.69e-05,
"grad_norm": 1.0113927125930786,
"learning_rate": 0.000199982659996532,
"loss": 1.7853,
"step": 869
},
{
"epoch": 8.7e-05,
"grad_norm": 0.3932603895664215,
"learning_rate": 0.000199982639996528,
"loss": 1.9129,
"step": 870
},
{
"epoch": 8.71e-05,
"grad_norm": 0.4138290584087372,
"learning_rate": 0.000199982619996524,
"loss": 1.7463,
"step": 871
},
{
"epoch": 8.72e-05,
"grad_norm": 0.9434233903884888,
"learning_rate": 0.00019998259999652,
"loss": 1.8279,
"step": 872
},
{
"epoch": 8.73e-05,
"grad_norm": 0.41251885890960693,
"learning_rate": 0.000199982579996516,
"loss": 1.7345,
"step": 873
},
{
"epoch": 8.74e-05,
"grad_norm": 0.4774368703365326,
"learning_rate": 0.000199982559996512,
"loss": 1.5806,
"step": 874
},
{
"epoch": 8.75e-05,
"grad_norm": 0.6286354660987854,
"learning_rate": 0.000199982539996508,
"loss": 1.81,
"step": 875
},
{
"epoch": 8.76e-05,
"grad_norm": 0.4657753109931946,
"learning_rate": 0.00019998251999650402,
"loss": 1.4438,
"step": 876
},
{
"epoch": 8.77e-05,
"grad_norm": 0.4174712598323822,
"learning_rate": 0.0001999824999965,
"loss": 1.8712,
"step": 877
},
{
"epoch": 8.78e-05,
"grad_norm": 0.482683002948761,
"learning_rate": 0.00019998247999649602,
"loss": 1.9388,
"step": 878
},
{
"epoch": 8.79e-05,
"grad_norm": 0.4749564826488495,
"learning_rate": 0.000199982459996492,
"loss": 1.5209,
"step": 879
},
{
"epoch": 8.8e-05,
"grad_norm": 0.5331429839134216,
"learning_rate": 0.000199982439996488,
"loss": 1.7297,
"step": 880
},
{
"epoch": 8.81e-05,
"grad_norm": 0.5091459155082703,
"learning_rate": 0.000199982419996484,
"loss": 1.7435,
"step": 881
},
{
"epoch": 8.82e-05,
"grad_norm": 0.48349353671073914,
"learning_rate": 0.00019998239999648,
"loss": 1.9255,
"step": 882
},
{
"epoch": 8.83e-05,
"grad_norm": 0.6858983039855957,
"learning_rate": 0.000199982379996476,
"loss": 1.7395,
"step": 883
},
{
"epoch": 8.84e-05,
"grad_norm": 0.6871190667152405,
"learning_rate": 0.000199982359996472,
"loss": 2.0112,
"step": 884
},
{
"epoch": 8.85e-05,
"grad_norm": 0.46113747358322144,
"learning_rate": 0.00019998233999646802,
"loss": 1.5549,
"step": 885
},
{
"epoch": 8.86e-05,
"grad_norm": 0.46943891048431396,
"learning_rate": 0.00019998231999646403,
"loss": 1.9307,
"step": 886
},
{
"epoch": 8.87e-05,
"grad_norm": 0.38158735632896423,
"learning_rate": 0.00019998229999646002,
"loss": 1.6363,
"step": 887
},
{
"epoch": 8.88e-05,
"grad_norm": 0.927079975605011,
"learning_rate": 0.000199982279996456,
"loss": 2.0591,
"step": 888
},
{
"epoch": 8.89e-05,
"grad_norm": 0.6291943192481995,
"learning_rate": 0.00019998225999645202,
"loss": 2.2651,
"step": 889
},
{
"epoch": 8.9e-05,
"grad_norm": 0.8447884321212769,
"learning_rate": 0.000199982239996448,
"loss": 2.5662,
"step": 890
},
{
"epoch": 8.91e-05,
"grad_norm": 0.5097450017929077,
"learning_rate": 0.000199982219996444,
"loss": 2.1692,
"step": 891
},
{
"epoch": 8.92e-05,
"grad_norm": 0.39223480224609375,
"learning_rate": 0.00019998219999644,
"loss": 1.5118,
"step": 892
},
{
"epoch": 8.93e-05,
"grad_norm": 0.37001553177833557,
"learning_rate": 0.000199982179996436,
"loss": 1.6449,
"step": 893
},
{
"epoch": 8.94e-05,
"grad_norm": 0.4028787910938263,
"learning_rate": 0.000199982159996432,
"loss": 1.6948,
"step": 894
},
{
"epoch": 8.95e-05,
"grad_norm": 0.4419090449810028,
"learning_rate": 0.00019998213999642802,
"loss": 1.768,
"step": 895
},
{
"epoch": 8.96e-05,
"grad_norm": 0.6315431594848633,
"learning_rate": 0.000199982119996424,
"loss": 1.8281,
"step": 896
},
{
"epoch": 8.97e-05,
"grad_norm": 0.48787543177604675,
"learning_rate": 0.00019998209999642002,
"loss": 2.0626,
"step": 897
},
{
"epoch": 8.98e-05,
"grad_norm": 0.4552463889122009,
"learning_rate": 0.000199982079996416,
"loss": 2.0431,
"step": 898
},
{
"epoch": 8.99e-05,
"grad_norm": 0.3850920498371124,
"learning_rate": 0.00019998205999641202,
"loss": 1.6186,
"step": 899
},
{
"epoch": 9e-05,
"grad_norm": 0.4540216326713562,
"learning_rate": 0.000199982039996408,
"loss": 1.4792,
"step": 900
},
{
"epoch": 9.01e-05,
"grad_norm": 0.45766547322273254,
"learning_rate": 0.000199982019996404,
"loss": 1.7071,
"step": 901
},
{
"epoch": 9.02e-05,
"grad_norm": 0.41368016600608826,
"learning_rate": 0.0001999819999964,
"loss": 1.5803,
"step": 902
},
{
"epoch": 9.03e-05,
"grad_norm": 0.578170895576477,
"learning_rate": 0.000199981979996396,
"loss": 1.8071,
"step": 903
},
{
"epoch": 9.04e-05,
"grad_norm": 0.48246729373931885,
"learning_rate": 0.00019998195999639199,
"loss": 1.9709,
"step": 904
},
{
"epoch": 9.05e-05,
"grad_norm": 0.5108683109283447,
"learning_rate": 0.000199981939996388,
"loss": 1.9579,
"step": 905
},
{
"epoch": 9.06e-05,
"grad_norm": 0.39800968766212463,
"learning_rate": 0.00019998191999638401,
"loss": 1.4963,
"step": 906
},
{
"epoch": 9.07e-05,
"grad_norm": 0.5096713304519653,
"learning_rate": 0.00019998189999638003,
"loss": 1.754,
"step": 907
},
{
"epoch": 9.08e-05,
"grad_norm": 0.4445015788078308,
"learning_rate": 0.00019998187999637602,
"loss": 1.4638,
"step": 908
},
{
"epoch": 9.09e-05,
"grad_norm": 0.614779531955719,
"learning_rate": 0.000199981859996372,
"loss": 2.0578,
"step": 909
},
{
"epoch": 9.1e-05,
"grad_norm": 0.8481956720352173,
"learning_rate": 0.00019998183999636802,
"loss": 2.0232,
"step": 910
},
{
"epoch": 9.11e-05,
"grad_norm": 0.567804753780365,
"learning_rate": 0.000199981819996364,
"loss": 1.6839,
"step": 911
},
{
"epoch": 9.12e-05,
"grad_norm": 0.3584451377391815,
"learning_rate": 0.00019998179999636002,
"loss": 1.4782,
"step": 912
},
{
"epoch": 9.13e-05,
"grad_norm": 0.5719729065895081,
"learning_rate": 0.000199981779996356,
"loss": 1.7138,
"step": 913
},
{
"epoch": 9.14e-05,
"grad_norm": 0.4246422350406647,
"learning_rate": 0.000199981759996352,
"loss": 1.5059,
"step": 914
},
{
"epoch": 9.15e-05,
"grad_norm": 0.4706515967845917,
"learning_rate": 0.000199981739996348,
"loss": 1.6565,
"step": 915
},
{
"epoch": 9.16e-05,
"grad_norm": 0.42243319749832153,
"learning_rate": 0.00019998171999634402,
"loss": 1.7737,
"step": 916
},
{
"epoch": 9.17e-05,
"grad_norm": 0.4608387351036072,
"learning_rate": 0.00019998169999634,
"loss": 1.638,
"step": 917
},
{
"epoch": 9.18e-05,
"grad_norm": 0.42476144433021545,
"learning_rate": 0.00019998167999633602,
"loss": 1.3934,
"step": 918
},
{
"epoch": 9.19e-05,
"grad_norm": 0.3917334973812103,
"learning_rate": 0.000199981659996332,
"loss": 1.5301,
"step": 919
},
{
"epoch": 9.2e-05,
"grad_norm": 0.5390530824661255,
"learning_rate": 0.00019998163999632802,
"loss": 1.9693,
"step": 920
},
{
"epoch": 9.21e-05,
"grad_norm": 0.5685298442840576,
"learning_rate": 0.000199981619996324,
"loss": 1.8769,
"step": 921
},
{
"epoch": 9.22e-05,
"grad_norm": 0.512224018573761,
"learning_rate": 0.00019998159999632,
"loss": 1.7904,
"step": 922
},
{
"epoch": 9.23e-05,
"grad_norm": 0.5401352047920227,
"learning_rate": 0.000199981579996316,
"loss": 1.9239,
"step": 923
},
{
"epoch": 9.24e-05,
"grad_norm": 0.518602192401886,
"learning_rate": 0.000199981559996312,
"loss": 1.8672,
"step": 924
},
{
"epoch": 9.25e-05,
"grad_norm": 0.45904114842414856,
"learning_rate": 0.00019998153999630799,
"loss": 1.9455,
"step": 925
},
{
"epoch": 9.26e-05,
"grad_norm": 0.4305168390274048,
"learning_rate": 0.000199981519996304,
"loss": 1.5336,
"step": 926
},
{
"epoch": 9.27e-05,
"grad_norm": 0.47211557626724243,
"learning_rate": 0.00019998149999630001,
"loss": 1.6302,
"step": 927
},
{
"epoch": 9.28e-05,
"grad_norm": 0.4305603802204132,
"learning_rate": 0.00019998147999629603,
"loss": 1.9242,
"step": 928
},
{
"epoch": 9.29e-05,
"grad_norm": 0.3603437840938568,
"learning_rate": 0.00019998145999629202,
"loss": 1.7635,
"step": 929
},
{
"epoch": 9.3e-05,
"grad_norm": 0.3755441904067993,
"learning_rate": 0.000199981439996288,
"loss": 1.4896,
"step": 930
},
{
"epoch": 9.31e-05,
"grad_norm": 0.4417133629322052,
"learning_rate": 0.00019998141999628402,
"loss": 1.668,
"step": 931
},
{
"epoch": 9.32e-05,
"grad_norm": 0.5889492034912109,
"learning_rate": 0.00019998139999628,
"loss": 1.8547,
"step": 932
},
{
"epoch": 9.33e-05,
"grad_norm": 0.557550311088562,
"learning_rate": 0.00019998137999627602,
"loss": 1.8981,
"step": 933
},
{
"epoch": 9.34e-05,
"grad_norm": 0.6934526562690735,
"learning_rate": 0.000199981359996272,
"loss": 1.9611,
"step": 934
},
{
"epoch": 9.35e-05,
"grad_norm": 0.5407524704933167,
"learning_rate": 0.000199981339996268,
"loss": 1.8305,
"step": 935
},
{
"epoch": 9.36e-05,
"grad_norm": 0.37491288781166077,
"learning_rate": 0.000199981319996264,
"loss": 1.5298,
"step": 936
},
{
"epoch": 9.37e-05,
"grad_norm": 0.44688770174980164,
"learning_rate": 0.00019998129999626002,
"loss": 1.5508,
"step": 937
},
{
"epoch": 9.38e-05,
"grad_norm": 0.4605537950992584,
"learning_rate": 0.000199981279996256,
"loss": 1.5681,
"step": 938
},
{
"epoch": 9.39e-05,
"grad_norm": 0.5424385070800781,
"learning_rate": 0.00019998125999625202,
"loss": 1.4363,
"step": 939
},
{
"epoch": 9.4e-05,
"grad_norm": 0.47160372138023376,
"learning_rate": 0.000199981239996248,
"loss": 1.5312,
"step": 940
},
{
"epoch": 9.41e-05,
"grad_norm": 0.3767258822917938,
"learning_rate": 0.00019998121999624402,
"loss": 1.6312,
"step": 941
},
{
"epoch": 9.42e-05,
"grad_norm": 0.34652721881866455,
"learning_rate": 0.00019998119999624,
"loss": 1.4286,
"step": 942
},
{
"epoch": 9.43e-05,
"grad_norm": 0.4472532868385315,
"learning_rate": 0.000199981179996236,
"loss": 1.4302,
"step": 943
},
{
"epoch": 9.44e-05,
"grad_norm": 0.4367288053035736,
"learning_rate": 0.000199981159996232,
"loss": 1.4906,
"step": 944
},
{
"epoch": 9.45e-05,
"grad_norm": 0.4123404622077942,
"learning_rate": 0.000199981139996228,
"loss": 1.617,
"step": 945
},
{
"epoch": 9.46e-05,
"grad_norm": 0.3625604510307312,
"learning_rate": 0.00019998111999622398,
"loss": 1.8216,
"step": 946
},
{
"epoch": 9.47e-05,
"grad_norm": 0.5087842345237732,
"learning_rate": 0.00019998109999622,
"loss": 1.5865,
"step": 947
},
{
"epoch": 9.48e-05,
"grad_norm": 0.4849156141281128,
"learning_rate": 0.000199981079996216,
"loss": 1.9806,
"step": 948
},
{
"epoch": 9.49e-05,
"grad_norm": 0.4333500862121582,
"learning_rate": 0.00019998105999621203,
"loss": 1.9192,
"step": 949
},
{
"epoch": 9.5e-05,
"grad_norm": 0.41762951016426086,
"learning_rate": 0.00019998103999620801,
"loss": 1.4463,
"step": 950
},
{
"epoch": 9.51e-05,
"grad_norm": 0.7506889700889587,
"learning_rate": 0.000199981019996204,
"loss": 1.9247,
"step": 951
},
{
"epoch": 9.52e-05,
"grad_norm": 0.44507715106010437,
"learning_rate": 0.00019998099999620002,
"loss": 1.6508,
"step": 952
},
{
"epoch": 9.53e-05,
"grad_norm": 0.508630096912384,
"learning_rate": 0.000199980979996196,
"loss": 1.7669,
"step": 953
},
{
"epoch": 9.54e-05,
"grad_norm": 0.4745084047317505,
"learning_rate": 0.00019998095999619202,
"loss": 1.731,
"step": 954
},
{
"epoch": 9.55e-05,
"grad_norm": 0.525655210018158,
"learning_rate": 0.000199980939996188,
"loss": 1.5893,
"step": 955
},
{
"epoch": 9.56e-05,
"grad_norm": 0.5069213509559631,
"learning_rate": 0.000199980919996184,
"loss": 1.7417,
"step": 956
},
{
"epoch": 9.57e-05,
"grad_norm": 0.6264841556549072,
"learning_rate": 0.00019998089999618,
"loss": 1.9531,
"step": 957
},
{
"epoch": 9.58e-05,
"grad_norm": 0.37628957629203796,
"learning_rate": 0.00019998087999617602,
"loss": 1.8718,
"step": 958
},
{
"epoch": 9.59e-05,
"grad_norm": 0.5268929600715637,
"learning_rate": 0.000199980859996172,
"loss": 1.9058,
"step": 959
},
{
"epoch": 9.6e-05,
"grad_norm": 0.4998350739479065,
"learning_rate": 0.00019998083999616802,
"loss": 2.0525,
"step": 960
},
{
"epoch": 9.61e-05,
"grad_norm": 0.5017493963241577,
"learning_rate": 0.000199980819996164,
"loss": 1.8498,
"step": 961
},
{
"epoch": 9.62e-05,
"grad_norm": 0.6015520691871643,
"learning_rate": 0.00019998079999616002,
"loss": 1.7788,
"step": 962
},
{
"epoch": 9.63e-05,
"grad_norm": 0.43826499581336975,
"learning_rate": 0.000199980779996156,
"loss": 1.5677,
"step": 963
},
{
"epoch": 9.64e-05,
"grad_norm": 0.5457808375358582,
"learning_rate": 0.000199980759996152,
"loss": 1.8116,
"step": 964
},
{
"epoch": 9.65e-05,
"grad_norm": 0.6199738383293152,
"learning_rate": 0.000199980739996148,
"loss": 1.9218,
"step": 965
},
{
"epoch": 9.66e-05,
"grad_norm": 0.4498477280139923,
"learning_rate": 0.000199980719996144,
"loss": 1.6316,
"step": 966
},
{
"epoch": 9.67e-05,
"grad_norm": 0.9297443628311157,
"learning_rate": 0.00019998069999614,
"loss": 2.3163,
"step": 967
},
{
"epoch": 9.68e-05,
"grad_norm": 0.43629059195518494,
"learning_rate": 0.000199980679996136,
"loss": 1.7666,
"step": 968
},
{
"epoch": 9.69e-05,
"grad_norm": 0.6103280782699585,
"learning_rate": 0.000199980659996132,
"loss": 1.7923,
"step": 969
},
{
"epoch": 9.7e-05,
"grad_norm": 0.5318375825881958,
"learning_rate": 0.00019998063999612803,
"loss": 1.3645,
"step": 970
},
{
"epoch": 9.71e-05,
"grad_norm": 0.7005080580711365,
"learning_rate": 0.000199980619996124,
"loss": 1.8196,
"step": 971
},
{
"epoch": 9.72e-05,
"grad_norm": 0.3979056179523468,
"learning_rate": 0.00019998059999612,
"loss": 1.4849,
"step": 972
},
{
"epoch": 9.73e-05,
"grad_norm": 0.41085705161094666,
"learning_rate": 0.00019998057999611601,
"loss": 1.9099,
"step": 973
},
{
"epoch": 9.74e-05,
"grad_norm": 0.48656830191612244,
"learning_rate": 0.000199980559996112,
"loss": 1.968,
"step": 974
},
{
"epoch": 9.75e-05,
"grad_norm": 0.42308491468429565,
"learning_rate": 0.00019998053999610802,
"loss": 1.7212,
"step": 975
},
{
"epoch": 9.76e-05,
"grad_norm": 0.6442344784736633,
"learning_rate": 0.000199980519996104,
"loss": 1.912,
"step": 976
},
{
"epoch": 9.77e-05,
"grad_norm": 0.49754881858825684,
"learning_rate": 0.0001999804999961,
"loss": 1.5851,
"step": 977
},
{
"epoch": 9.78e-05,
"grad_norm": 0.6168963313102722,
"learning_rate": 0.000199980479996096,
"loss": 1.6406,
"step": 978
},
{
"epoch": 9.79e-05,
"grad_norm": 0.5659937858581543,
"learning_rate": 0.00019998045999609202,
"loss": 1.6565,
"step": 979
},
{
"epoch": 9.8e-05,
"grad_norm": 0.9934679865837097,
"learning_rate": 0.000199980439996088,
"loss": 2.0027,
"step": 980
},
{
"epoch": 9.81e-05,
"grad_norm": 0.4356830418109894,
"learning_rate": 0.00019998041999608402,
"loss": 1.5286,
"step": 981
},
{
"epoch": 9.82e-05,
"grad_norm": 0.7636138796806335,
"learning_rate": 0.00019998039999608,
"loss": 1.7567,
"step": 982
},
{
"epoch": 9.83e-05,
"grad_norm": 0.6769589185714722,
"learning_rate": 0.00019998037999607602,
"loss": 2.175,
"step": 983
},
{
"epoch": 9.84e-05,
"grad_norm": 0.6455519199371338,
"learning_rate": 0.000199980359996072,
"loss": 2.3659,
"step": 984
},
{
"epoch": 9.85e-05,
"grad_norm": 0.6768479347229004,
"learning_rate": 0.000199980339996068,
"loss": 2.1482,
"step": 985
},
{
"epoch": 9.86e-05,
"grad_norm": 0.47377318143844604,
"learning_rate": 0.000199980319996064,
"loss": 2.3847,
"step": 986
},
{
"epoch": 9.87e-05,
"grad_norm": 0.5339648723602295,
"learning_rate": 0.00019998029999606,
"loss": 2.0964,
"step": 987
},
{
"epoch": 9.88e-05,
"grad_norm": 0.45153555274009705,
"learning_rate": 0.000199980279996056,
"loss": 1.5722,
"step": 988
},
{
"epoch": 9.89e-05,
"grad_norm": 0.475788414478302,
"learning_rate": 0.000199980259996052,
"loss": 1.5339,
"step": 989
},
{
"epoch": 9.9e-05,
"grad_norm": 0.4350284934043884,
"learning_rate": 0.000199980239996048,
"loss": 1.4488,
"step": 990
},
{
"epoch": 9.91e-05,
"grad_norm": 0.39541056752204895,
"learning_rate": 0.00019998021999604403,
"loss": 1.4495,
"step": 991
},
{
"epoch": 9.92e-05,
"grad_norm": 0.6306726336479187,
"learning_rate": 0.00019998019999604,
"loss": 1.9772,
"step": 992
},
{
"epoch": 9.93e-05,
"grad_norm": 0.6301535964012146,
"learning_rate": 0.000199980179996036,
"loss": 1.9076,
"step": 993
},
{
"epoch": 9.94e-05,
"grad_norm": 0.3772251009941101,
"learning_rate": 0.00019998015999603201,
"loss": 1.5053,
"step": 994
},
{
"epoch": 9.95e-05,
"grad_norm": 0.3819882869720459,
"learning_rate": 0.000199980139996028,
"loss": 1.4731,
"step": 995
},
{
"epoch": 9.96e-05,
"grad_norm": 0.5102061033248901,
"learning_rate": 0.00019998011999602402,
"loss": 1.8263,
"step": 996
},
{
"epoch": 9.97e-05,
"grad_norm": 0.4919547140598297,
"learning_rate": 0.00019998009999602,
"loss": 1.5359,
"step": 997
},
{
"epoch": 9.98e-05,
"grad_norm": 0.7509588003158569,
"learning_rate": 0.000199980079996016,
"loss": 1.7521,
"step": 998
},
{
"epoch": 9.99e-05,
"grad_norm": 0.6362186074256897,
"learning_rate": 0.000199980059996012,
"loss": 1.7225,
"step": 999
},
{
"epoch": 0.0001,
"grad_norm": 0.4658675789833069,
"learning_rate": 0.00019998003999600802,
"loss": 1.8965,
"step": 1000
}
],
"logging_steps": 1,
"max_steps": 10000000,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.681998672096051e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}