|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 0.01616285694659388, |
|
"eval_steps": 125, |
|
"global_step": 500, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 3.232571389318776e-05, |
|
"eval_loss": null,
|
"eval_runtime": 577.2541, |
|
"eval_samples_per_second": 45.129, |
|
"eval_steps_per_second": 22.565, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 9.697714167956328e-05, |
|
"grad_norm": null,
|
"learning_rate": 3e-05, |
|
"loss": 0.0, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.00019395428335912656, |
|
"grad_norm": null,
|
"learning_rate": 6e-05, |
|
"loss": 0.0, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.0002909314250386898, |
|
"grad_norm": null,
|
"learning_rate": 9e-05, |
|
"loss": 0.0, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.0003879085667182531, |
|
"grad_norm": null,
|
"learning_rate": 9.999588943391597e-05, |
|
"loss": 0.0, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.0004848857083978164, |
|
"grad_norm": null,
|
"learning_rate": 9.99743108100344e-05, |
|
"loss": 0.0, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.0005818628500773796, |
|
"grad_norm": null,
|
"learning_rate": 9.993424445916923e-05, |
|
"loss": 0.0, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.0006788399917569429, |
|
"grad_norm": null,
|
"learning_rate": 9.987570520365104e-05, |
|
"loss": 0.0, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.0007758171334365062, |
|
"grad_norm": null,
|
"learning_rate": 9.979871469976196e-05, |
|
"loss": 0.0, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.0008727942751160696, |
|
"grad_norm": null,
|
"learning_rate": 9.970330142972401e-05, |
|
"loss": 0.0, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.0009697714167956328, |
|
"grad_norm": null,
|
"learning_rate": 9.95895006911623e-05, |
|
"loss": 0.0, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.0010667485584751962, |
|
"grad_norm": null,
|
"learning_rate": 9.945735458404681e-05, |
|
"loss": 0.0, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.0011637257001547593, |
|
"grad_norm": null,
|
"learning_rate": 9.930691199511775e-05, |
|
"loss": 0.0, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.0012607028418343226, |
|
"grad_norm": null,
|
"learning_rate": 9.91382285798002e-05, |
|
"loss": 0.0, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.0013576799835138859, |
|
"grad_norm": null,
|
"learning_rate": 9.895136674161465e-05, |
|
"loss": 0.0, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.0014546571251934492, |
|
"grad_norm": null,
|
"learning_rate": 9.874639560909117e-05, |
|
"loss": 0.0, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.0015516342668730125, |
|
"grad_norm": null,
|
"learning_rate": 9.852339101019574e-05, |
|
"loss": 0.0, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.0016486114085525758, |
|
"grad_norm": null,
|
"learning_rate": 9.828243544427796e-05, |
|
"loss": 0.0, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.001745588550232139, |
|
"grad_norm": null,
|
"learning_rate": 9.802361805155097e-05, |
|
"loss": 0.0, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.0018425656919117024, |
|
"grad_norm": null,
|
"learning_rate": 9.774703458011453e-05, |
|
"loss": 0.0, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.0019395428335912655, |
|
"grad_norm": null,
|
"learning_rate": 9.745278735053343e-05, |
|
"loss": 0.0, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.002036519975270829, |
|
"grad_norm": null,
|
"learning_rate": 9.714098521798465e-05, |
|
"loss": 0.0, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.0021334971169503923, |
|
"grad_norm": null,
|
"learning_rate": 9.681174353198687e-05, |
|
"loss": 0.0, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.0022304742586299556, |
|
"grad_norm": null,
|
"learning_rate": 9.64651840937276e-05, |
|
"loss": 0.0, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.0023274514003095185, |
|
"grad_norm": null,
|
"learning_rate": 9.610143511100354e-05, |
|
"loss": 0.0, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.002424428541989082, |
|
"grad_norm": null,
|
"learning_rate": 9.572063115079063e-05, |
|
"loss": 0.0, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.002521405683668645, |
|
"grad_norm": null,
|
"learning_rate": 9.53229130894619e-05, |
|
"loss": 0.0, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.0026183828253482084, |
|
"grad_norm": null,
|
"learning_rate": 9.490842806067095e-05, |
|
"loss": 0.0, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 0.0027153599670277717, |
|
"grad_norm": null,
|
"learning_rate": 9.44773294009206e-05, |
|
"loss": 0.0, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.002812337108707335, |
|
"grad_norm": null,
|
"learning_rate": 9.40297765928369e-05, |
|
"loss": 0.0, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 0.0029093142503868984, |
|
"grad_norm": null,
|
"learning_rate": 9.356593520616948e-05, |
|
"loss": 0.0, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.0030062913920664617, |
|
"grad_norm": null,
|
"learning_rate": 9.308597683653975e-05, |
|
"loss": 0.0, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 0.003103268533746025, |
|
"grad_norm": null,
|
"learning_rate": 9.259007904196023e-05, |
|
"loss": 0.0, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 0.0032002456754255883, |
|
"grad_norm": null,
|
"learning_rate": 9.207842527714767e-05, |
|
"loss": 0.0, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 0.0032972228171051516, |
|
"grad_norm": null,
|
"learning_rate": 9.155120482565521e-05, |
|
"loss": 0.0, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 0.003394199958784715, |
|
"grad_norm": null,
|
"learning_rate": 9.10086127298478e-05, |
|
"loss": 0.0, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.003491177100464278, |
|
"grad_norm": null,
|
"learning_rate": 9.045084971874738e-05, |
|
"loss": 0.0, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 0.0035881542421438415, |
|
"grad_norm": null,
|
"learning_rate": 8.987812213377424e-05, |
|
"loss": 0.0, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 0.003685131383823405, |
|
"grad_norm": null,
|
"learning_rate": 8.929064185241213e-05, |
|
"loss": 0.0, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 0.0037821085255029677, |
|
"grad_norm": null,
|
"learning_rate": 8.868862620982534e-05, |
|
"loss": 0.0, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 0.003879085667182531, |
|
"grad_norm": null,
|
"learning_rate": 8.807229791845673e-05, |
|
"loss": 0.0, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.003976062808862094, |
|
"grad_norm": null,
|
"learning_rate": 8.744188498563641e-05, |
|
"loss": 0.0, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 0.00404071423664847, |
|
"eval_loss": null,
|
"eval_runtime": 574.6926, |
|
"eval_samples_per_second": 45.33, |
|
"eval_steps_per_second": 22.666, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.004073039950541658, |
|
"grad_norm": null,
|
"learning_rate": 8.679762062923175e-05, |
|
"loss": 0.0, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 0.004170017092221221, |
|
"grad_norm": null,
|
"learning_rate": 8.613974319136958e-05, |
|
"loss": 0.0, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 0.004266994233900785, |
|
"grad_norm": null,
|
"learning_rate": 8.54684960502629e-05, |
|
"loss": 0.0, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 0.0043639713755803475, |
|
"grad_norm": null,
|
"learning_rate": 8.478412753017433e-05, |
|
"loss": 0.0, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.004460948517259911, |
|
"grad_norm": null,
|
"learning_rate": 8.408689080954998e-05, |
|
"loss": 0.0, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 0.004557925658939474, |
|
"grad_norm": null,
|
"learning_rate": 8.33770438273574e-05, |
|
"loss": 0.0, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 0.004654902800619037, |
|
"grad_norm": null,
|
"learning_rate": 8.265484918766243e-05, |
|
"loss": 0.0, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 0.004751879942298601, |
|
"grad_norm": null,
|
"learning_rate": 8.192057406248028e-05, |
|
"loss": 0.0, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 0.004848857083978164, |
|
"grad_norm": null,
|
"learning_rate": 8.117449009293668e-05, |
|
"loss": 0.0, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.004945834225657727, |
|
"grad_norm": null,
|
"learning_rate": 8.041687328877567e-05, |
|
"loss": 0.0, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 0.00504281136733729, |
|
"grad_norm": null,
|
"learning_rate": 7.964800392625129e-05, |
|
"loss": 0.0, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 0.005139788509016854, |
|
"grad_norm": null,
|
"learning_rate": 7.886816644444098e-05, |
|
"loss": 0.0, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 0.005236765650696417, |
|
"grad_norm": null,
|
"learning_rate": 7.807764934001874e-05, |
|
"loss": 0.0, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 0.005333742792375981, |
|
"grad_norm": null,
|
"learning_rate": 7.727674506052743e-05, |
|
"loss": 0.0, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.0054307199340555435, |
|
"grad_norm": null,
|
"learning_rate": 7.646574989618938e-05, |
|
"loss": 0.0, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 0.005527697075735107, |
|
"grad_norm": null,
|
"learning_rate": 7.564496387029532e-05, |
|
"loss": 0.0, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 0.00562467421741467, |
|
"grad_norm": null,
|
"learning_rate": 7.481469062821252e-05, |
|
"loss": 0.0, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 0.005721651359094234, |
|
"grad_norm": null,
|
"learning_rate": 7.39752373250527e-05, |
|
"loss": 0.0, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 0.005818628500773797, |
|
"grad_norm": null,
|
"learning_rate": 7.312691451204178e-05, |
|
"loss": 0.0, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.0059156056424533605, |
|
"grad_norm": null,
|
"learning_rate": 7.227003602163295e-05, |
|
"loss": 0.0, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 0.006012582784132923, |
|
"grad_norm": null,
|
"learning_rate": 7.14049188514063e-05, |
|
"loss": 0.0, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 0.006109559925812486, |
|
"grad_norm": null,
|
"learning_rate": 7.05318830467969e-05, |
|
"loss": 0.0, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 0.00620653706749205, |
|
"grad_norm": null,
|
"learning_rate": 6.965125158269619e-05, |
|
"loss": 0.0, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 0.006303514209171613, |
|
"grad_norm": null,
|
"learning_rate": 6.876335024396872e-05, |
|
"loss": 0.0, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.006400491350851177, |
|
"grad_norm": null,
|
"learning_rate": 6.786850750493006e-05, |
|
"loss": 0.0, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 0.0064974684925307395, |
|
"grad_norm": null,
|
"learning_rate": 6.696705440782938e-05, |
|
"loss": 0.0, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 0.006594445634210303, |
|
"grad_norm": null,
|
"learning_rate": 6.605932444038229e-05, |
|
"loss": 0.0, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 0.006691422775889866, |
|
"grad_norm": null,
|
"learning_rate": 6.514565341239861e-05, |
|
"loss": 0.0, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 0.00678839991756943, |
|
"grad_norm": null,
|
"learning_rate": 6.422637933155162e-05, |
|
"loss": 0.0, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.006885377059248993, |
|
"grad_norm": null,
|
"learning_rate": 6.330184227833376e-05, |
|
"loss": 0.0, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 0.006982354200928556, |
|
"grad_norm": null,
|
"learning_rate": 6.237238428024572e-05, |
|
"loss": 0.0, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 0.007079331342608119, |
|
"grad_norm": null,
|
"learning_rate": 6.143834918526527e-05, |
|
"loss": 0.0, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 0.007176308484287683, |
|
"grad_norm": null,
|
"learning_rate": 6.0500082534642464e-05, |
|
"loss": 0.0, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 0.007273285625967246, |
|
"grad_norm": null,
|
"learning_rate": 5.955793143506863e-05, |
|
"loss": 0.0, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.00737026276764681, |
|
"grad_norm": null,
|
"learning_rate": 5.861224443026595e-05, |
|
"loss": 0.0, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 0.0074672399093263725, |
|
"grad_norm": null,
|
"learning_rate": 5.766337137204579e-05, |
|
"loss": 0.0, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 0.007564217051005935, |
|
"grad_norm": null,
|
"learning_rate": 5.6711663290882776e-05, |
|
"loss": 0.0, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 0.007661194192685499, |
|
"grad_norm": null,
|
"learning_rate": 5.575747226605298e-05, |
|
"loss": 0.0, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 0.007758171334365062, |
|
"grad_norm": null,
|
"learning_rate": 5.480115129538409e-05, |
|
"loss": 0.0, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.007855148476044625, |
|
"grad_norm": null,
|
"learning_rate": 5.384305416466584e-05, |
|
"loss": 0.0, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 0.007952125617724189, |
|
"grad_norm": null,
|
"learning_rate": 5.288353531676873e-05, |
|
"loss": 0.0, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 0.008049102759403752, |
|
"grad_norm": null,
|
"learning_rate": 5.192294972051992e-05, |
|
"loss": 0.0, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 0.00808142847329694, |
|
"eval_loss": null,
|
"eval_runtime": 575.6933, |
|
"eval_samples_per_second": 45.252, |
|
"eval_steps_per_second": 22.627, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.008146079901083316, |
|
"grad_norm": null,
|
"learning_rate": 5.0961652739384356e-05, |
|
"loss": 0.0, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 0.008243057042762878, |
|
"grad_norm": null,
|
"learning_rate": 5e-05, |
|
"loss": 0.0, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.008340034184442442, |
|
"grad_norm": null,
|
"learning_rate": 4.903834726061565e-05, |
|
"loss": 0.0, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 0.008437011326122006, |
|
"grad_norm": null,
|
"learning_rate": 4.807705027948008e-05, |
|
"loss": 0.0, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 0.00853398846780157, |
|
"grad_norm": null,
|
"learning_rate": 4.711646468323129e-05, |
|
"loss": 0.0, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 0.008630965609481131, |
|
"grad_norm": null,
|
"learning_rate": 4.6156945835334184e-05, |
|
"loss": 0.0, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 0.008727942751160695, |
|
"grad_norm": null,
|
"learning_rate": 4.5198848704615914e-05, |
|
"loss": 0.0, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.008824919892840259, |
|
"grad_norm": null,
|
"learning_rate": 4.424252773394704e-05, |
|
"loss": 0.0, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 0.008921897034519823, |
|
"grad_norm": null,
|
"learning_rate": 4.328833670911724e-05, |
|
"loss": 0.0, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 0.009018874176199385, |
|
"grad_norm": null,
|
"learning_rate": 4.23366286279542e-05, |
|
"loss": 0.0, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 0.009115851317878948, |
|
"grad_norm": null,
|
"learning_rate": 4.138775556973406e-05, |
|
"loss": 0.0, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 0.009212828459558512, |
|
"grad_norm": null,
|
"learning_rate": 4.04420685649314e-05, |
|
"loss": 0.0, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.009309805601238074, |
|
"grad_norm": null,
|
"learning_rate": 3.9499917465357534e-05, |
|
"loss": 0.0, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 0.009406782742917638, |
|
"grad_norm": null,
|
"learning_rate": 3.856165081473474e-05, |
|
"loss": 0.0, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 0.009503759884597202, |
|
"grad_norm": null,
|
"learning_rate": 3.762761571975429e-05, |
|
"loss": 0.0, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 0.009600737026276765, |
|
"grad_norm": null,
|
"learning_rate": 3.6698157721666246e-05, |
|
"loss": 0.0, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 0.009697714167956327, |
|
"grad_norm": null,
|
"learning_rate": 3.5773620668448384e-05, |
|
"loss": 0.0, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.009794691309635891, |
|
"grad_norm": null,
|
"learning_rate": 3.48543465876014e-05, |
|
"loss": 0.0, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 0.009891668451315455, |
|
"grad_norm": null,
|
"learning_rate": 3.3940675559617724e-05, |
|
"loss": 0.0, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 0.009988645592995019, |
|
"grad_norm": null,
|
"learning_rate": 3.303294559217063e-05, |
|
"loss": 0.0, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 0.01008562273467458, |
|
"grad_norm": null,
|
"learning_rate": 3.213149249506997e-05, |
|
"loss": 0.0, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 0.010182599876354144, |
|
"grad_norm": null,
|
"learning_rate": 3.12366497560313e-05, |
|
"loss": 0.0, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 0.010279577018033708, |
|
"grad_norm": null,
|
"learning_rate": 3.0348748417303823e-05, |
|
"loss": 0.0, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 0.010376554159713272, |
|
"grad_norm": null,
|
"learning_rate": 2.9468116953203107e-05, |
|
"loss": 0.0, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 0.010473531301392834, |
|
"grad_norm": null,
|
"learning_rate": 2.8595081148593738e-05, |
|
"loss": 0.0, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 0.010570508443072398, |
|
"grad_norm": null,
|
"learning_rate": 2.772996397836704e-05, |
|
"loss": 0.0, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 0.010667485584751961, |
|
"grad_norm": null,
|
"learning_rate": 2.687308548795825e-05, |
|
"loss": 0.0, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.010764462726431523, |
|
"grad_norm": null,
|
"learning_rate": 2.6024762674947313e-05, |
|
"loss": 0.0, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 0.010861439868111087, |
|
"grad_norm": null,
|
"learning_rate": 2.5185309371787513e-05, |
|
"loss": 0.0, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 0.01095841700979065, |
|
"grad_norm": null,
|
"learning_rate": 2.43550361297047e-05, |
|
"loss": 0.0, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 0.011055394151470214, |
|
"grad_norm": null,
|
"learning_rate": 2.353425010381063e-05, |
|
"loss": 0.0, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 0.011152371293149776, |
|
"grad_norm": null,
|
"learning_rate": 2.272325493947257e-05, |
|
"loss": 0.0, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 0.01124934843482934, |
|
"grad_norm": null,
|
"learning_rate": 2.192235065998126e-05, |
|
"loss": 0.0, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 0.011346325576508904, |
|
"grad_norm": null,
|
"learning_rate": 2.1131833555559037e-05, |
|
"loss": 0.0, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 0.011443302718188468, |
|
"grad_norm": null,
|
"learning_rate": 2.0351996073748713e-05, |
|
"loss": 0.0, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 0.01154027985986803, |
|
"grad_norm": null,
|
"learning_rate": 1.9583126711224343e-05, |
|
"loss": 0.0, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 0.011637257001547593, |
|
"grad_norm": null,
|
"learning_rate": 1.8825509907063327e-05, |
|
"loss": 0.0, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.011734234143227157, |
|
"grad_norm": null,
|
"learning_rate": 1.807942593751973e-05, |
|
"loss": 0.0, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 0.011831211284906721, |
|
"grad_norm": null,
|
"learning_rate": 1.7345150812337564e-05, |
|
"loss": 0.0, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 0.011928188426586283, |
|
"grad_norm": null,
|
"learning_rate": 1.66229561726426e-05, |
|
"loss": 0.0, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 0.012025165568265847, |
|
"grad_norm": null,
|
"learning_rate": 1.5913109190450032e-05, |
|
"loss": 0.0, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 0.01212214270994541, |
|
"grad_norm": null,
|
"learning_rate": 1.5215872469825682e-05, |
|
"loss": 0.0, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 0.01212214270994541, |
|
"eval_loss": null,
|
"eval_runtime": 573.9041, |
|
"eval_samples_per_second": 45.393, |
|
"eval_steps_per_second": 22.697, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 0.012219119851624972, |
|
"grad_norm": null,
|
"learning_rate": 1.4531503949737108e-05, |
|
"loss": 0.0, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 0.012316096993304536, |
|
"grad_norm": null,
|
"learning_rate": 1.3860256808630428e-05, |
|
"loss": 0.0, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 0.0124130741349841, |
|
"grad_norm": null,
|
"learning_rate": 1.3202379370768252e-05, |
|
"loss": 0.0, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 0.012510051276663664, |
|
"grad_norm": null,
|
"learning_rate": 1.2558115014363592e-05, |
|
"loss": 0.0, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 0.012607028418343226, |
|
"grad_norm": null,
|
"learning_rate": 1.1927702081543279e-05, |
|
"loss": 0.0, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.01270400556002279, |
|
"grad_norm": null,
|
"learning_rate": 1.1311373790174657e-05, |
|
"loss": 0.0, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 0.012800982701702353, |
|
"grad_norm": null,
|
"learning_rate": 1.0709358147587884e-05, |
|
"loss": 0.0, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 0.012897959843381917, |
|
"grad_norm": null,
|
"learning_rate": 1.0121877866225781e-05, |
|
"loss": 0.0, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 0.012994936985061479, |
|
"grad_norm": null,
|
"learning_rate": 9.549150281252633e-06, |
|
"loss": 0.0, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 0.013091914126741043, |
|
"grad_norm": null,
|
"learning_rate": 8.991387270152201e-06, |
|
"loss": 0.0, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 0.013188891268420606, |
|
"grad_norm": null,
|
"learning_rate": 8.448795174344804e-06, |
|
"loss": 0.0, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 0.01328586841010017, |
|
"grad_norm": null,
|
"learning_rate": 7.921574722852343e-06, |
|
"loss": 0.0, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 0.013382845551779732, |
|
"grad_norm": null,
|
"learning_rate": 7.409920958039795e-06, |
|
"loss": 0.0, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 0.013479822693459296, |
|
"grad_norm": null,
|
"learning_rate": 6.9140231634602485e-06, |
|
"loss": 0.0, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 0.01357679983513886, |
|
"grad_norm": null,
|
"learning_rate": 6.43406479383053e-06, |
|
"loss": 0.0, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.013673776976818422, |
|
"grad_norm": null,
|
"learning_rate": 5.9702234071631e-06, |
|
"loss": 0.0, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 0.013770754118497985, |
|
"grad_norm": null,
|
"learning_rate": 5.5226705990794155e-06, |
|
"loss": 0.0, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 0.013867731260177549, |
|
"grad_norm": null,
|
"learning_rate": 5.091571939329048e-06, |
|
"loss": 0.0, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 0.013964708401857113, |
|
"grad_norm": null,
|
"learning_rate": 4.677086910538092e-06, |
|
"loss": 0.0, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 0.014061685543536675, |
|
"grad_norm": null,
|
"learning_rate": 4.279368849209381e-06, |
|
"loss": 0.0, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 0.014158662685216239, |
|
"grad_norm": null,
|
"learning_rate": 3.898564888996476e-06, |
|
"loss": 0.0, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 0.014255639826895802, |
|
"grad_norm": null,
|
"learning_rate": 3.534815906272404e-06, |
|
"loss": 0.0, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 0.014352616968575366, |
|
"grad_norm": null,
|
"learning_rate": 3.18825646801314e-06, |
|
"loss": 0.0, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 0.014449594110254928, |
|
"grad_norm": null,
|
"learning_rate": 2.8590147820153513e-06, |
|
"loss": 0.0, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 0.014546571251934492, |
|
"grad_norm": null,
|
"learning_rate": 2.547212649466568e-06, |
|
"loss": 0.0, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.014643548393614056, |
|
"grad_norm": null,
|
"learning_rate": 2.2529654198854835e-06, |
|
"loss": 0.0, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 0.01474052553529362, |
|
"grad_norm": null,
|
"learning_rate": 1.9763819484490355e-06, |
|
"loss": 0.0, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 0.014837502676973181, |
|
"grad_norm": null,
|
"learning_rate": 1.7175645557220566e-06, |
|
"loss": 0.0, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 0.014934479818652745, |
|
"grad_norm": null,
|
"learning_rate": 1.4766089898042678e-06, |
|
"loss": 0.0, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 0.015031456960332309, |
|
"grad_norm": null,
|
"learning_rate": 1.2536043909088191e-06, |
|
"loss": 0.0, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 0.01512843410201187, |
|
"grad_norm": null,
|
"learning_rate": 1.0486332583853563e-06, |
|
"loss": 0.0, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 0.015225411243691435, |
|
"grad_norm": null,
|
"learning_rate": 8.617714201998084e-07, |
|
"loss": 0.0, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 0.015322388385370998, |
|
"grad_norm": null,
|
"learning_rate": 6.93088004882253e-07, |
|
"loss": 0.0, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 0.015419365527050562, |
|
"grad_norm": null,
|
"learning_rate": 5.426454159531913e-07, |
|
"loss": 0.0, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 0.015516342668730124, |
|
"grad_norm": null,
|
"learning_rate": 4.104993088376974e-07, |
|
"loss": 0.0, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.015613319810409688, |
|
"grad_norm": null,
|
"learning_rate": 2.966985702759828e-07, |
|
"loss": 0.0, |
|
"step": 483 |
|
}, |
|
{ |
|
"epoch": 0.01571029695208925, |
|
"grad_norm": null,
|
"learning_rate": 2.012853002380466e-07, |
|
"loss": 0.0, |
|
"step": 486 |
|
}, |
|
{ |
|
"epoch": 0.015807274093768815, |
|
"grad_norm": null,
|
"learning_rate": 1.2429479634897267e-07, |
|
"loss": 0.0, |
|
"step": 489 |
|
}, |
|
{ |
|
"epoch": 0.015904251235448377, |
|
"grad_norm": null,
|
"learning_rate": 6.575554083078084e-08, |
|
"loss": 0.0, |
|
"step": 492 |
|
}, |
|
{ |
|
"epoch": 0.016001228377127943, |
|
"grad_norm": null,
|
"learning_rate": 2.568918996560532e-08, |
|
"loss": 0.0, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 0.016098205518807505, |
|
"grad_norm": null,
|
"learning_rate": 4.110566084036816e-09, |
|
"loss": 0.0, |
|
"step": 498 |
|
}, |
|
{ |
|
"epoch": 0.01616285694659388, |
|
"eval_loss": null,
|
"eval_runtime": 660.3671, |
|
"eval_samples_per_second": 39.449, |
|
"eval_steps_per_second": 19.725, |
|
"step": 500 |
|
} |
|
], |
|
"logging_steps": 3, |
|
"max_steps": 500, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 125, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 3.401809133568e+16, |
|
"train_batch_size": 2, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|