{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.9409282700421944,
"eval_steps": 30,
"global_step": 236,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
|
{ |
|
"epoch": 0.016877637130801686, |
|
"grad_norm": 0.08447265625, |
|
"learning_rate": 3e-06, |
|
"loss": 1.1751, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.016877637130801686, |
|
"eval_loss": 1.185997486114502, |
|
"eval_runtime": 72.8223, |
|
"eval_samples_per_second": 14.144, |
|
"eval_steps_per_second": 14.144, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.03375527426160337, |
|
"grad_norm": 0.08544921875, |
|
"learning_rate": 6e-06, |
|
"loss": 1.1683, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.05063291139240506, |
|
"grad_norm": 0.0830078125, |
|
"learning_rate": 9e-06, |
|
"loss": 1.1737, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.06751054852320675, |
|
"grad_norm": 0.08544921875, |
|
"learning_rate": 1.2e-05, |
|
"loss": 1.1889, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.08438818565400844, |
|
"grad_norm": 0.0849609375, |
|
"learning_rate": 1.5e-05, |
|
"loss": 1.1619, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.10126582278481013, |
|
"grad_norm": 0.0869140625, |
|
"learning_rate": 1.8e-05, |
|
"loss": 1.1815, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.11814345991561181, |
|
"grad_norm": 0.08447265625, |
|
"learning_rate": 2.1e-05, |
|
"loss": 1.1726, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.1350210970464135, |
|
"grad_norm": 0.08740234375, |
|
"learning_rate": 2.4e-05, |
|
"loss": 1.1701, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.1518987341772152, |
|
"grad_norm": 0.08642578125, |
|
"learning_rate": 2.7000000000000002e-05, |
|
"loss": 1.1818, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.16877637130801687, |
|
"grad_norm": 0.08935546875, |
|
"learning_rate": 3e-05, |
|
"loss": 1.1896, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.18565400843881857, |
|
"grad_norm": 0.0888671875, |
|
"learning_rate": 2.999855077059572e-05, |
|
"loss": 1.1873, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.20253164556962025, |
|
"grad_norm": 0.08984375, |
|
"learning_rate": 2.9994203362418313e-05, |
|
"loss": 1.1838, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.21940928270042195, |
|
"grad_norm": 0.0859375, |
|
"learning_rate": 2.998695861552002e-05, |
|
"loss": 1.1569, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.23628691983122363, |
|
"grad_norm": 0.08642578125, |
|
"learning_rate": 2.9976817929807542e-05, |
|
"loss": 1.1595, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.25316455696202533, |
|
"grad_norm": 0.078125, |
|
"learning_rate": 2.996378326477153e-05, |
|
"loss": 1.1348, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.270042194092827, |
|
"grad_norm": 0.07470703125, |
|
"learning_rate": 2.9947857139107964e-05, |
|
"loss": 1.1434, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.2869198312236287, |
|
"grad_norm": 0.140625, |
|
"learning_rate": 2.992904263023146e-05, |
|
"loss": 1.8213, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.3037974683544304, |
|
"grad_norm": 0.06787109375, |
|
"learning_rate": 2.990734337368062e-05, |
|
"loss": 1.1289, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.3206751054852321, |
|
"grad_norm": 0.0654296875, |
|
"learning_rate": 2.9882763562415518e-05, |
|
"loss": 1.1106, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.33755274261603374, |
|
"grad_norm": 0.06640625, |
|
"learning_rate": 2.9855307946007532e-05, |
|
"loss": 1.1381, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.35443037974683544, |
|
"grad_norm": 0.064453125, |
|
"learning_rate": 2.982498182972154e-05, |
|
"loss": 1.1192, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.37130801687763715, |
|
"grad_norm": 0.0615234375, |
|
"learning_rate": 2.9791791073490795e-05, |
|
"loss": 1.1105, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.3881856540084388, |
|
"grad_norm": 0.061767578125, |
|
"learning_rate": 2.9755742090784617e-05, |
|
"loss": 1.1207, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.4050632911392405, |
|
"grad_norm": 0.0634765625, |
|
"learning_rate": 2.9716841847369106e-05, |
|
"loss": 1.1083, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.4219409282700422, |
|
"grad_norm": 0.058349609375, |
|
"learning_rate": 2.967509785996114e-05, |
|
"loss": 1.1007, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.4388185654008439, |
|
"grad_norm": 0.0615234375, |
|
"learning_rate": 2.963051819477592e-05, |
|
"loss": 1.0842, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.45569620253164556, |
|
"grad_norm": 0.0625, |
|
"learning_rate": 2.958311146596833e-05, |
|
"loss": 1.0961, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.47257383966244726, |
|
"grad_norm": 0.05908203125, |
|
"learning_rate": 2.953288683396841e-05, |
|
"loss": 1.109, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.48945147679324896, |
|
"grad_norm": 0.061767578125, |
|
"learning_rate": 2.9479854003711298e-05, |
|
"loss": 1.0789, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.5063291139240507, |
|
"grad_norm": 0.06103515625, |
|
"learning_rate": 2.9424023222761938e-05, |
|
"loss": 1.1007, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.5063291139240507, |
|
"eval_loss": 1.091182827949524, |
|
"eval_runtime": 74.0135, |
|
"eval_samples_per_second": 13.916, |
|
"eval_steps_per_second": 13.916, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.5232067510548524, |
|
"grad_norm": 0.059814453125, |
|
"learning_rate": 2.9365405279334904e-05, |
|
"loss": 1.0756, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.540084388185654, |
|
"grad_norm": 0.056396484375, |
|
"learning_rate": 2.930401150020983e-05, |
|
"loss": 1.0939, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.5569620253164557, |
|
"grad_norm": 0.05712890625, |
|
"learning_rate": 2.9239853748542717e-05, |
|
"loss": 1.0901, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.5738396624472574, |
|
"grad_norm": 0.054931640625, |
|
"learning_rate": 2.9172944421573587e-05, |
|
"loss": 1.0873, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.5907172995780591, |
|
"grad_norm": 0.0546875, |
|
"learning_rate": 2.9103296448230986e-05, |
|
"loss": 1.0584, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.6075949367088608, |
|
"grad_norm": 0.0556640625, |
|
"learning_rate": 2.9030923286633703e-05, |
|
"loss": 1.0692, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.6244725738396625, |
|
"grad_norm": 0.16015625, |
|
"learning_rate": 2.8955838921490252e-05, |
|
"loss": 1.782, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.6413502109704642, |
|
"grad_norm": 0.052001953125, |
|
"learning_rate": 2.8878057861396606e-05, |
|
"loss": 1.0667, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.6582278481012658, |
|
"grad_norm": 0.052001953125, |
|
"learning_rate": 2.8797595136032675e-05, |
|
"loss": 1.0656, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.6751054852320675, |
|
"grad_norm": 0.052734375, |
|
"learning_rate": 2.8714466293258142e-05, |
|
"loss": 1.0736, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.6919831223628692, |
|
"grad_norm": 0.053955078125, |
|
"learning_rate": 2.8628687396108107e-05, |
|
"loss": 1.0638, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.7088607594936709, |
|
"grad_norm": 0.05224609375, |
|
"learning_rate": 2.8540275019689237e-05, |
|
"loss": 1.0746, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.7257383966244726, |
|
"grad_norm": 0.051513671875, |
|
"learning_rate": 2.8449246247976947e-05, |
|
"loss": 1.0608, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.7426160337552743, |
|
"grad_norm": 0.052001953125, |
|
"learning_rate": 2.835561867051426e-05, |
|
"loss": 1.0619, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.759493670886076, |
|
"grad_norm": 0.051025390625, |
|
"learning_rate": 2.825941037901294e-05, |
|
"loss": 1.048, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.7763713080168776, |
|
"grad_norm": 0.05078125, |
|
"learning_rate": 2.816063996385765e-05, |
|
"loss": 1.0761, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.7932489451476793, |
|
"grad_norm": 0.0498046875, |
|
"learning_rate": 2.805932651051372e-05, |
|
"loss": 1.0443, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.810126582278481, |
|
"grad_norm": 0.051025390625, |
|
"learning_rate": 2.7955489595839228e-05, |
|
"loss": 1.0527, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.8270042194092827, |
|
"grad_norm": 0.052734375, |
|
"learning_rate": 2.784914928430218e-05, |
|
"loss": 1.0498, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.8438818565400844, |
|
"grad_norm": 0.049560546875, |
|
"learning_rate": 2.7740326124103416e-05, |
|
"loss": 1.0537, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.8607594936708861, |
|
"grad_norm": 0.053466796875, |
|
"learning_rate": 2.762904114320609e-05, |
|
"loss": 1.0326, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.8776371308016878, |
|
"grad_norm": 0.04931640625, |
|
"learning_rate": 2.751531584527241e-05, |
|
"loss": 1.043, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.8945147679324894, |
|
"grad_norm": 0.05126953125, |
|
"learning_rate": 2.7399172205508476e-05, |
|
"loss": 1.0463, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.9113924050632911, |
|
"grad_norm": 0.0517578125, |
|
"learning_rate": 2.7280632666418013e-05, |
|
"loss": 1.0476, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.9282700421940928, |
|
"grad_norm": 0.053466796875, |
|
"learning_rate": 2.715972013346576e-05, |
|
"loss": 1.0467, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.9451476793248945, |
|
"grad_norm": 0.0498046875, |
|
"learning_rate": 2.703645797065147e-05, |
|
"loss": 1.0467, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.9620253164556962, |
|
"grad_norm": 0.05078125, |
|
"learning_rate": 2.6910869995995247e-05, |
|
"loss": 1.05, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.9789029535864979, |
|
"grad_norm": 0.053955078125, |
|
"learning_rate": 2.678298047693518e-05, |
|
"loss": 1.0453, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.9957805907172996, |
|
"grad_norm": 0.050048828125, |
|
"learning_rate": 2.6652814125638142e-05, |
|
"loss": 1.0348, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 1.0126582278481013, |
|
"grad_norm": 0.052490234375, |
|
"learning_rate": 2.652039609422463e-05, |
|
"loss": 1.0418, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 1.0126582278481013, |
|
"eval_loss": 1.0428293943405151, |
|
"eval_runtime": 74.0734, |
|
"eval_samples_per_second": 13.905, |
|
"eval_steps_per_second": 13.905, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 1.0147679324894514, |
|
"grad_norm": 0.0732421875, |
|
"learning_rate": 2.638575196990862e-05, |
|
"loss": 1.0194, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 1.0316455696202531, |
|
"grad_norm": 0.049560546875, |
|
"learning_rate": 2.624890777005332e-05, |
|
"loss": 1.0365, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 1.0485232067510548, |
|
"grad_norm": 0.05126953125, |
|
"learning_rate": 2.6109889937143828e-05, |
|
"loss": 1.0426, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 1.0654008438818565, |
|
"grad_norm": 0.05126953125, |
|
"learning_rate": 2.5968725333677628e-05, |
|
"loss": 1.043, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 1.0822784810126582, |
|
"grad_norm": 0.051513671875, |
|
"learning_rate": 2.582544123697395e-05, |
|
"loss": 1.0243, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 1.09915611814346, |
|
"grad_norm": 0.0517578125, |
|
"learning_rate": 2.568006533390295e-05, |
|
"loss": 1.0258, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 1.1160337552742616, |
|
"grad_norm": 0.050537109375, |
|
"learning_rate": 2.5532625715535733e-05, |
|
"loss": 1.0248, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 1.1329113924050633, |
|
"grad_norm": 0.0498046875, |
|
"learning_rate": 2.5383150871716342e-05, |
|
"loss": 1.0083, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 1.149789029535865, |
|
"grad_norm": 0.05224609375, |
|
"learning_rate": 2.5231669685556636e-05, |
|
"loss": 1.0207, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 1.1666666666666667, |
|
"grad_norm": 0.051513671875, |
|
"learning_rate": 2.507821142785516e-05, |
|
"loss": 1.0435, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 1.1835443037974684, |
|
"grad_norm": 0.05224609375, |
|
"learning_rate": 2.4922805751441174e-05, |
|
"loss": 1.0354, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 1.20042194092827, |
|
"grad_norm": 0.054443359375, |
|
"learning_rate": 2.4765482685444786e-05, |
|
"loss": 1.0266, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 1.2172995780590719, |
|
"grad_norm": 0.05126953125, |
|
"learning_rate": 2.460627262949443e-05, |
|
"loss": 1.0411, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 1.2341772151898733, |
|
"grad_norm": 0.05126953125, |
|
"learning_rate": 2.4445206347842714e-05, |
|
"loss": 1.0224, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 1.251054852320675, |
|
"grad_norm": 0.052978515625, |
|
"learning_rate": 2.428231496342181e-05, |
|
"loss": 1.0253, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 1.2679324894514767, |
|
"grad_norm": 0.05322265625, |
|
"learning_rate": 2.4117629951829602e-05, |
|
"loss": 1.0298, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 1.2848101265822784, |
|
"grad_norm": 0.052490234375, |
|
"learning_rate": 2.395118313524758e-05, |
|
"loss": 1.0239, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 1.3016877637130801, |
|
"grad_norm": 0.0537109375, |
|
"learning_rate": 2.3783006676291866e-05, |
|
"loss": 1.0212, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 1.3185654008438819, |
|
"grad_norm": 0.052734375, |
|
"learning_rate": 2.361313307179837e-05, |
|
"loss": 1.0371, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 1.3354430379746836, |
|
"grad_norm": 0.05126953125, |
|
"learning_rate": 2.3441595146543458e-05, |
|
"loss": 1.0314, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 1.3523206751054853, |
|
"grad_norm": 0.052001953125, |
|
"learning_rate": 2.3268426046901153e-05, |
|
"loss": 1.0195, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 1.369198312236287, |
|
"grad_norm": 0.052978515625, |
|
"learning_rate": 2.3093659234438266e-05, |
|
"loss": 1.0219, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 1.3860759493670887, |
|
"grad_norm": 0.054931640625, |
|
"learning_rate": 2.291732847944861e-05, |
|
"loss": 1.0293, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 1.4029535864978904, |
|
"grad_norm": 0.052734375, |
|
"learning_rate": 2.2739467854427512e-05, |
|
"loss": 0.9992, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 1.4198312236286919, |
|
"grad_norm": 0.05224609375, |
|
"learning_rate": 2.2560111727488e-05, |
|
"loss": 1.0254, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 1.4367088607594938, |
|
"grad_norm": 0.0546875, |
|
"learning_rate": 2.237929475571979e-05, |
|
"loss": 1.0148, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 1.4535864978902953, |
|
"grad_norm": 0.055419921875, |
|
"learning_rate": 2.219705187849254e-05, |
|
"loss": 1.0228, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 1.4704641350210972, |
|
"grad_norm": 0.05810546875, |
|
"learning_rate": 2.2013418310704422e-05, |
|
"loss": 1.021, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 1.4873417721518987, |
|
"grad_norm": 0.0537109375, |
|
"learning_rate": 2.1828429535977585e-05, |
|
"loss": 1.0352, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 1.5042194092827004, |
|
"grad_norm": 0.054931640625, |
|
"learning_rate": 2.1642121299801594e-05, |
|
"loss": 1.0105, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 1.5042194092827004, |
|
"eval_loss": 1.0232045650482178, |
|
"eval_runtime": 74.0589, |
|
"eval_samples_per_second": 13.908, |
|
"eval_steps_per_second": 13.908, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 1.521097046413502, |
|
"grad_norm": 0.054931640625, |
|
"learning_rate": 2.1454529602626336e-05, |
|
"loss": 1.0051, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 1.5379746835443038, |
|
"grad_norm": 0.0546875, |
|
"learning_rate": 2.126569069290562e-05, |
|
"loss": 1.023, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 1.5548523206751055, |
|
"grad_norm": 0.0546875, |
|
"learning_rate": 2.107564106009286e-05, |
|
"loss": 1.012, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 1.5717299578059072, |
|
"grad_norm": 0.0546875, |
|
"learning_rate": 2.0884417427590217e-05, |
|
"loss": 1.0136, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 1.5886075949367089, |
|
"grad_norm": 0.0546875, |
|
"learning_rate": 2.0692056745652483e-05, |
|
"loss": 1.0194, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 1.6054852320675106, |
|
"grad_norm": 0.0556640625, |
|
"learning_rate": 2.0498596184247196e-05, |
|
"loss": 1.0089, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 1.6223628691983123, |
|
"grad_norm": 0.054931640625, |
|
"learning_rate": 2.030407312587224e-05, |
|
"loss": 1.0226, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 1.6392405063291138, |
|
"grad_norm": 0.058349609375, |
|
"learning_rate": 2.010852515833242e-05, |
|
"loss": 1.0219, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 1.6561181434599157, |
|
"grad_norm": 0.0556640625, |
|
"learning_rate": 1.9911990067476336e-05, |
|
"loss": 1.0035, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 1.6729957805907172, |
|
"grad_norm": 0.055908203125, |
|
"learning_rate": 1.9714505829895004e-05, |
|
"loss": 1.0052, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 1.689873417721519, |
|
"grad_norm": 0.05712890625, |
|
"learning_rate": 1.951611060558363e-05, |
|
"loss": 1.0175, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 1.7067510548523206, |
|
"grad_norm": 0.056396484375, |
|
"learning_rate": 1.9316842730567902e-05, |
|
"loss": 1.0099, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 1.7236286919831225, |
|
"grad_norm": 0.05615234375, |
|
"learning_rate": 1.9116740709496334e-05, |
|
"loss": 0.998, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 1.740506329113924, |
|
"grad_norm": 0.05615234375, |
|
"learning_rate": 1.8915843208199967e-05, |
|
"loss": 0.996, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 1.7573839662447257, |
|
"grad_norm": 0.058837890625, |
|
"learning_rate": 1.8714189046220946e-05, |
|
"loss": 1.009, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 1.7742616033755274, |
|
"grad_norm": 0.059326171875, |
|
"learning_rate": 1.851181718931141e-05, |
|
"loss": 1.0127, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 1.7911392405063291, |
|
"grad_norm": 0.0556640625, |
|
"learning_rate": 1.830876674190411e-05, |
|
"loss": 1.0107, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 1.8080168776371308, |
|
"grad_norm": 0.06201171875, |
|
"learning_rate": 1.8105076939556238e-05, |
|
"loss": 1.0264, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 1.8248945147679325, |
|
"grad_norm": 0.056884765625, |
|
"learning_rate": 1.790078714136792e-05, |
|
"loss": 1.0068, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 1.8417721518987342, |
|
"grad_norm": 0.056640625, |
|
"learning_rate": 1.769593682237682e-05, |
|
"loss": 1.0094, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 1.8586497890295357, |
|
"grad_norm": 0.057861328125, |
|
"learning_rate": 1.7490565565930382e-05, |
|
"loss": 1.0135, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 1.8755274261603376, |
|
"grad_norm": 0.05859375, |
|
"learning_rate": 1.7284713056037074e-05, |
|
"loss": 0.993, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 1.8924050632911391, |
|
"grad_norm": 0.056884765625, |
|
"learning_rate": 1.7078419069698283e-05, |
|
"loss": 1.015, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 1.909282700421941, |
|
"grad_norm": 0.055908203125, |
|
"learning_rate": 1.687172346922213e-05, |
|
"loss": 1.0043, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 1.9261603375527425, |
|
"grad_norm": 0.05859375, |
|
"learning_rate": 1.6664666194520873e-05, |
|
"loss": 0.9959, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 1.9430379746835444, |
|
"grad_norm": 0.0625, |
|
"learning_rate": 1.645728725539329e-05, |
|
"loss": 1.0177, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 1.959915611814346, |
|
"grad_norm": 0.05810546875, |
|
"learning_rate": 1.6249626723793572e-05, |
|
"loss": 1.033, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 1.9767932489451476, |
|
"grad_norm": 0.059326171875, |
|
"learning_rate": 1.6041724726088187e-05, |
|
"loss": 1.0155, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 1.9936708860759493, |
|
"grad_norm": 0.060791015625, |
|
"learning_rate": 1.5833621435302247e-05, |
|
"loss": 1.0167, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 2.010548523206751, |
|
"grad_norm": 0.0595703125, |
|
"learning_rate": 1.5625357063356825e-05, |
|
"loss": 1.0082, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 2.010548523206751, |
|
"eval_loss": 1.0127062797546387, |
|
"eval_runtime": 74.0711, |
|
"eval_samples_per_second": 13.906, |
|
"eval_steps_per_second": 13.906, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 2.0147679324894514, |
|
"grad_norm": 0.1865234375, |
|
"learning_rate": 1.541697185329881e-05, |
|
"loss": 1.6592, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 2.0316455696202533, |
|
"grad_norm": 0.0595703125, |
|
"learning_rate": 1.5208506071524727e-05, |
|
"loss": 1.0041, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 2.048523206751055, |
|
"grad_norm": 0.059326171875, |
|
"learning_rate": 1.5e-05, |
|
"loss": 1.0116, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 2.0654008438818567, |
|
"grad_norm": 0.05859375, |
|
"learning_rate": 1.4791493928475275e-05, |
|
"loss": 1.0026, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 2.0822784810126582, |
|
"grad_norm": 0.05810546875, |
|
"learning_rate": 1.4583028146701191e-05, |
|
"loss": 1.0122, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 2.0991561181434597, |
|
"grad_norm": 0.06005859375, |
|
"learning_rate": 1.437464293664318e-05, |
|
"loss": 1.0024, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 2.1160337552742616, |
|
"grad_norm": 0.060546875, |
|
"learning_rate": 1.4166378564697757e-05, |
|
"loss": 1.0092, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 2.132911392405063, |
|
"grad_norm": 0.05908203125, |
|
"learning_rate": 1.3958275273911812e-05, |
|
"loss": 1.0048, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 2.149789029535865, |
|
"grad_norm": 0.058837890625, |
|
"learning_rate": 1.375037327620643e-05, |
|
"loss": 0.9921, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 2.1666666666666665, |
|
"grad_norm": 0.06005859375, |
|
"learning_rate": 1.3542712744606712e-05, |
|
"loss": 1.0117, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 2.1835443037974684, |
|
"grad_norm": 0.05859375, |
|
"learning_rate": 1.3335333805479126e-05, |
|
"loss": 0.9965, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 2.20042194092827, |
|
"grad_norm": 0.059814453125, |
|
"learning_rate": 1.3128276530777874e-05, |
|
"loss": 1.0108, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 2.217299578059072, |
|
"grad_norm": 0.060791015625, |
|
"learning_rate": 1.292158093030172e-05, |
|
"loss": 1.0029, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 2.2341772151898733, |
|
"grad_norm": 0.058349609375, |
|
"learning_rate": 1.2715286943962925e-05, |
|
"loss": 0.9958, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 2.2510548523206753, |
|
"grad_norm": 0.05908203125, |
|
"learning_rate": 1.2509434434069625e-05, |
|
"loss": 1.0054, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 2.2679324894514767, |
|
"grad_norm": 0.0576171875, |
|
"learning_rate": 1.2304063177623182e-05, |
|
"loss": 0.9837, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 2.2848101265822787, |
|
"grad_norm": 0.060302734375, |
|
"learning_rate": 1.2099212858632083e-05, |
|
"loss": 1.0014, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 2.30168776371308, |
|
"grad_norm": 0.0595703125, |
|
"learning_rate": 1.1894923060443763e-05, |
|
"loss": 0.9816, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 2.318565400843882, |
|
"grad_norm": 0.0595703125, |
|
"learning_rate": 1.169123325809589e-05, |
|
"loss": 1.0021, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 2.3354430379746836, |
|
"grad_norm": 0.05859375, |
|
"learning_rate": 1.1488182810688593e-05, |
|
"loss": 1.0005, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 2.352320675105485, |
|
"grad_norm": 0.06103515625, |
|
"learning_rate": 1.1285810953779057e-05, |
|
"loss": 1.0167, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 2.369198312236287, |
|
"grad_norm": 0.059814453125, |
|
"learning_rate": 1.1084156791800036e-05, |
|
"loss": 1.0146, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 2.3860759493670884, |
|
"grad_norm": 0.05859375, |
|
"learning_rate": 1.0883259290503665e-05, |
|
"loss": 1.0024, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 2.4029535864978904, |
|
"grad_norm": 0.0576171875, |
|
"learning_rate": 1.0683157269432097e-05, |
|
"loss": 0.9959, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 2.419831223628692, |
|
"grad_norm": 0.060546875, |
|
"learning_rate": 1.0483889394416373e-05, |
|
"loss": 1.0059, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 2.4367088607594938, |
|
"grad_norm": 0.06396484375, |
|
"learning_rate": 1.0285494170104996e-05, |
|
"loss": 0.983, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 2.4535864978902953, |
|
"grad_norm": 0.06005859375, |
|
"learning_rate": 1.0088009932523664e-05, |
|
"loss": 1.0116, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 2.470464135021097, |
|
"grad_norm": 0.061279296875, |
|
"learning_rate": 9.891474841667585e-06, |
|
"loss": 1.0136, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 2.4873417721518987, |
|
"grad_norm": 0.057861328125, |
|
"learning_rate": 9.695926874127765e-06, |
|
"loss": 0.9937, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 2.5042194092827, |
|
"grad_norm": 0.0595703125, |
|
"learning_rate": 9.501403815752813e-06, |
|
"loss": 0.9946, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 2.5042194092827, |
|
"eval_loss": 1.0074498653411865, |
|
"eval_runtime": 74.0482, |
|
"eval_samples_per_second": 13.91, |
|
"eval_steps_per_second": 13.91, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 2.521097046413502, |
|
"grad_norm": 0.060302734375, |
|
"learning_rate": 9.307943254347521e-06, |
|
"loss": 1.0043, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 2.537974683544304, |
|
"grad_norm": 0.06201171875, |
|
"learning_rate": 9.115582572409789e-06, |
|
"loss": 1.0125, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 2.5548523206751055, |
|
"grad_norm": 0.05859375, |
|
"learning_rate": 8.92435893990714e-06, |
|
"loss": 0.9753, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 2.571729957805907, |
|
"grad_norm": 0.060546875, |
|
"learning_rate": 8.734309307094382e-06, |
|
"loss": 0.9959, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 2.588607594936709, |
|
"grad_norm": 0.061279296875, |
|
"learning_rate": 8.545470397373665e-06, |
|
"loss": 1.0158, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 2.605485232067511, |
|
"grad_norm": 0.062255859375, |
|
"learning_rate": 8.357878700198407e-06, |
|
"loss": 1.0001, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 2.6223628691983123, |
|
"grad_norm": 0.060546875, |
|
"learning_rate": 8.171570464022419e-06, |
|
"loss": 0.9963, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 2.6392405063291138, |
|
"grad_norm": 0.0595703125, |
|
"learning_rate": 7.986581689295577e-06, |
|
"loss": 0.968, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 2.6561181434599157, |
|
"grad_norm": 0.059326171875, |
|
"learning_rate": 7.802948121507462e-06, |
|
"loss": 0.9872, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 2.672995780590717, |
|
"grad_norm": 0.0625, |
|
"learning_rate": 7.620705244280208e-06, |
|
"loss": 1.0181, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 2.689873417721519, |
|
"grad_norm": 0.06298828125, |
|
"learning_rate": 7.439888272512004e-06, |
|
"loss": 1.0057, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 2.7067510548523206, |
|
"grad_norm": 0.059814453125, |
|
"learning_rate": 7.260532145572487e-06, |
|
"loss": 0.9985, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 2.7236286919831225, |
|
"grad_norm": 0.059814453125, |
|
"learning_rate": 7.082671520551391e-06, |
|
"loss": 0.9973, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 2.740506329113924, |
|
"grad_norm": 0.06103515625, |
|
"learning_rate": 6.906340765561734e-06, |
|
"loss": 0.991, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 2.757383966244726, |
|
"grad_norm": 0.060546875, |
|
"learning_rate": 6.731573953098851e-06, |
|
"loss": 0.9979, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 2.7742616033755274, |
|
"grad_norm": 0.060546875, |
|
"learning_rate": 6.558404853456545e-06, |
|
"loss": 1.0096, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 2.791139240506329, |
|
"grad_norm": 0.060302734375, |
|
"learning_rate": 6.38686692820163e-06, |
|
"loss": 0.9981, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 2.808016877637131, |
|
"grad_norm": 0.0595703125, |
|
"learning_rate": 6.2169933237081386e-06, |
|
"loss": 1.0169, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 2.8248945147679327, |
|
"grad_norm": 0.0615234375, |
|
"learning_rate": 6.048816864752422e-06, |
|
"loss": 0.9934, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 2.8417721518987342, |
|
"grad_norm": 0.05859375, |
|
"learning_rate": 5.882370048170403e-06, |
|
"loss": 0.9913, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 2.8586497890295357, |
|
"grad_norm": 0.06005859375, |
|
"learning_rate": 5.71768503657819e-06, |
|
"loss": 1.0107, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 2.8755274261603376, |
|
"grad_norm": 0.060791015625, |
|
"learning_rate": 5.55479365215729e-06, |
|
"loss": 1.0098, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 2.892405063291139, |
|
"grad_norm": 0.0595703125, |
|
"learning_rate": 5.393727370505569e-06, |
|
"loss": 1.0123, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 2.909282700421941, |
|
"grad_norm": 0.060546875, |
|
"learning_rate": 5.234517314555213e-06, |
|
"loss": 0.9837, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 2.9261603375527425, |
|
"grad_norm": 0.060791015625, |
|
"learning_rate": 5.077194248558827e-06, |
|
"loss": 0.9865, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 2.9430379746835444, |
|
"grad_norm": 0.058349609375, |
|
"learning_rate": 4.921788572144841e-06, |
|
"loss": 0.995, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 2.959915611814346, |
|
"grad_norm": 0.06201171875, |
|
"learning_rate": 4.768330314443367e-06, |
|
"loss": 1.0009, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 2.976793248945148, |
|
"grad_norm": 0.060546875, |
|
"learning_rate": 4.616849128283658e-06, |
|
"loss": 1.0016, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 2.9936708860759493, |
|
"grad_norm": 0.0625, |
|
"learning_rate": 4.4673742844642716e-06, |
|
"loss": 0.9979, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 3.010548523206751, |
|
"grad_norm": 0.062255859375, |
|
"learning_rate": 4.319934666097055e-06, |
|
"loss": 0.9826, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 3.010548523206751, |
|
"eval_loss": 1.0056920051574707, |
|
"eval_runtime": 74.0404, |
|
"eval_samples_per_second": 13.911, |
|
"eval_steps_per_second": 13.911, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 3.0126582278481013, |
|
"grad_norm": 0.06396484375, |
|
"learning_rate": 4.174558763026048e-06, |
|
"loss": 1.0021, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 3.029535864978903, |
|
"grad_norm": 0.06201171875, |
|
"learning_rate": 4.031274666322372e-06, |
|
"loss": 0.9997, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 3.0464135021097047, |
|
"grad_norm": 0.0595703125, |
|
"learning_rate": 3.8901100628561755e-06, |
|
"loss": 1.0048, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 3.0632911392405062, |
|
"grad_norm": 0.060302734375, |
|
"learning_rate": 3.7510922299466818e-06, |
|
"loss": 0.9874, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 3.080168776371308, |
|
"grad_norm": 0.05810546875, |
|
"learning_rate": 3.6142480300913805e-06, |
|
"loss": 1.0054, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 3.0970464135021096, |
|
"grad_norm": 0.059814453125, |
|
"learning_rate": 3.4796039057753703e-06, |
|
"loss": 0.9991, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 3.1139240506329116, |
|
"grad_norm": 0.05859375, |
|
"learning_rate": 3.3471858743618615e-06, |
|
"loss": 0.9941, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 3.130801687763713, |
|
"grad_norm": 0.061767578125, |
|
"learning_rate": 3.217019523064825e-06, |
|
"loss": 1.0032, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 3.147679324894515, |
|
"grad_norm": 0.060791015625, |
|
"learning_rate": 3.089130004004754e-06, |
|
"loss": 0.9965, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 3.1645569620253164, |
|
"grad_norm": 0.05908203125, |
|
"learning_rate": 2.96354202934853e-06, |
|
"loss": 0.9954, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 3.181434599156118, |
|
"grad_norm": 0.0595703125, |
|
"learning_rate": 2.8402798665342412e-06, |
|
"loss": 0.9938, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 3.19831223628692, |
|
"grad_norm": 0.0615234375, |
|
"learning_rate": 2.7193673335819893e-06, |
|
"loss": 0.9967, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 3.2151898734177213, |
|
"grad_norm": 0.061279296875, |
|
"learning_rate": 2.600827794491524e-06, |
|
"loss": 1.0009, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 3.2320675105485233, |
|
"grad_norm": 0.0595703125, |
|
"learning_rate": 2.4846841547275916e-06, |
|
"loss": 0.9902, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 3.2489451476793247, |
|
"grad_norm": 0.0595703125, |
|
"learning_rate": 2.3709588567939118e-06, |
|
"loss": 0.9915, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 3.2658227848101267, |
|
"grad_norm": 0.061767578125, |
|
"learning_rate": 2.259673875896585e-06, |
|
"loss": 0.993, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 3.282700421940928, |
|
"grad_norm": 0.060302734375, |
|
"learning_rate": 2.150850715697823e-06, |
|
"loss": 1.0027, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 3.29957805907173, |
|
"grad_norm": 0.06005859375, |
|
"learning_rate": 2.044510404160774e-06, |
|
"loss": 0.9874, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 3.3164556962025316, |
|
"grad_norm": 0.1572265625, |
|
"learning_rate": 1.9406734894862848e-06, |
|
"loss": 1.6607, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 3.3333333333333335, |
|
"grad_norm": 0.0595703125, |
|
"learning_rate": 1.8393600361423534e-06, |
|
"loss": 1.002, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 3.350210970464135, |
|
"grad_norm": 0.06103515625, |
|
"learning_rate": 1.7405896209870665e-06, |
|
"loss": 1.0005, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 3.367088607594937, |
|
"grad_norm": 0.060302734375, |
|
"learning_rate": 1.6443813294857452e-06, |
|
"loss": 0.9842, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 3.3839662447257384, |
|
"grad_norm": 0.06298828125, |
|
"learning_rate": 1.550753752023053e-06, |
|
"loss": 0.9877, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 3.40084388185654, |
|
"grad_norm": 0.060546875, |
|
"learning_rate": 1.459724980310767e-06, |
|
"loss": 0.991, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 3.4177215189873418, |
|
"grad_norm": 0.060302734375, |
|
"learning_rate": 1.3713126038918977e-06, |
|
"loss": 0.9839, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 3.4345991561181437, |
|
"grad_norm": 0.060302734375, |
|
"learning_rate": 1.2855337067418576e-06, |
|
"loss": 0.9977, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 3.451476793248945, |
|
"grad_norm": 0.058837890625, |
|
"learning_rate": 1.2024048639673225e-06, |
|
"loss": 0.9981, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 3.4683544303797467, |
|
"grad_norm": 0.059814453125, |
|
"learning_rate": 1.1219421386033958e-06, |
|
"loss": 1.0007, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 3.4852320675105486, |
|
"grad_norm": 0.06298828125, |
|
"learning_rate": 1.0441610785097471e-06, |
|
"loss": 0.9977, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 3.50210970464135, |
|
"grad_norm": 0.0615234375, |
|
"learning_rate": 9.690767133662976e-07, |
|
"loss": 0.9898, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 3.50210970464135, |
|
"eval_loss": 1.0054612159729004, |
|
"eval_runtime": 74.055, |
|
"eval_samples_per_second": 13.909, |
|
"eval_steps_per_second": 13.909, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 3.518987341772152, |
|
"grad_norm": 0.06201171875, |
|
"learning_rate": 8.967035517690147e-07, |
|
"loss": 0.9895, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 3.5358649789029535, |
|
"grad_norm": 0.06005859375, |
|
"learning_rate": 8.270555784264167e-07, |
|
"loss": 1.0005, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 3.5527426160337554, |
|
"grad_norm": 0.06103515625, |
|
"learning_rate": 7.601462514572877e-07, |
|
"loss": 0.9957, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 3.569620253164557, |
|
"grad_norm": 0.060546875, |
|
"learning_rate": 6.959884997901705e-07, |
|
"loss": 1.0034, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 3.586497890295359, |
|
"grad_norm": 0.060302734375, |
|
"learning_rate": 6.345947206650981e-07, |
|
"loss": 0.9898, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 3.6033755274261603, |
|
"grad_norm": 0.060791015625, |
|
"learning_rate": 5.759767772380648e-07, |
|
"loss": 1.0207, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 3.620253164556962, |
|
"grad_norm": 0.058837890625, |
|
"learning_rate": 5.201459962886995e-07, |
|
"loss": 0.9801, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 3.6371308016877637, |
|
"grad_norm": 0.06201171875, |
|
"learning_rate": 4.6711316603159084e-07, |
|
"loss": 1.0018, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 3.6540084388185656, |
|
"grad_norm": 0.1767578125, |
|
"learning_rate": 4.1688853403167194e-07, |
|
"loss": 1.7068, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 3.670886075949367, |
|
"grad_norm": 0.060302734375, |
|
"learning_rate": 3.6948180522408006e-07, |
|
"loss": 0.9958, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 3.6877637130801686, |
|
"grad_norm": 0.06298828125, |
|
"learning_rate": 3.2490214003885963e-07, |
|
"loss": 1.016, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 3.7046413502109705, |
|
"grad_norm": 0.060302734375, |
|
"learning_rate": 2.831581526308935e-07, |
|
"loss": 1.0015, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 3.721518987341772, |
|
"grad_norm": 0.060791015625, |
|
"learning_rate": 2.4425790921538404e-07, |
|
"loss": 1.0011, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 3.738396624472574, |
|
"grad_norm": 0.060791015625, |
|
"learning_rate": 2.0820892650920686e-07, |
|
"loss": 0.9869, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 3.7552742616033754, |
|
"grad_norm": 0.0595703125, |
|
"learning_rate": 1.7501817027846255e-07, |
|
"loss": 0.9881, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 3.7721518987341773, |
|
"grad_norm": 0.061767578125, |
|
"learning_rate": 1.4469205399246844e-07, |
|
"loss": 0.9939, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 3.789029535864979, |
|
"grad_norm": 0.061279296875, |
|
"learning_rate": 1.1723643758448144e-07, |
|
"loss": 0.9719, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 3.8059071729957807, |
|
"grad_norm": 0.061279296875, |
|
"learning_rate": 9.265662631938399e-08, |
|
"loss": 1.0162, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 3.8227848101265822, |
|
"grad_norm": 0.05908203125, |
|
"learning_rate": 7.095736976853895e-08, |
|
"loss": 1.0006, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 3.8396624472573837, |
|
"grad_norm": 0.060546875, |
|
"learning_rate": 5.214286089203546e-08, |
|
"loss": 0.983, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 3.8565400843881856, |
|
"grad_norm": 0.060302734375, |
|
"learning_rate": 3.621673522847035e-08, |
|
"loss": 0.9812, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 3.8734177215189876, |
|
"grad_norm": 0.061767578125, |
|
"learning_rate": 2.3182070192460104e-08, |
|
"loss": 0.9869, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 3.890295358649789, |
|
"grad_norm": 0.06005859375, |
|
"learning_rate": 1.3041384479981489e-08, |
|
"loss": 0.9926, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 3.9071729957805905, |
|
"grad_norm": 0.06005859375, |
|
"learning_rate": 5.796637581689246e-09, |
|
"loss": 0.9996, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 3.9240506329113924, |
|
"grad_norm": 0.06201171875, |
|
"learning_rate": 1.4492294042839361e-09, |
|
"loss": 1.011, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 3.9409282700421944, |
|
"grad_norm": 0.189453125, |
|
"learning_rate": 0.0, |
|
"loss": 1.6705, |
|
"step": 236 |
|
} |
|
],
"logging_steps": 1,
"max_steps": 236,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 30,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.636388374942843e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
|