{
  "best_metric": 0.7138778567314148,
  "best_model_checkpoint": "data/Llama-31-8B_task-2_120-samples_config-3/checkpoint-242",
  "epoch": 29.0,
  "eval_steps": 500,
  "global_step": 319,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09090909090909091,
      "grad_norm": 0.7728238701820374,
      "learning_rate": 6.060606060606061e-08,
      "loss": 1.102,
      "step": 1
    },
    {
      "epoch": 0.18181818181818182,
      "grad_norm": 0.8360726237297058,
      "learning_rate": 1.2121212121212122e-07,
      "loss": 1.2412,
      "step": 2
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 0.6940823793411255,
      "learning_rate": 2.4242424242424244e-07,
      "loss": 1.0332,
      "step": 4
    },
    {
      "epoch": 0.5454545454545454,
      "grad_norm": 0.7341020107269287,
      "learning_rate": 3.6363636363636366e-07,
      "loss": 1.1054,
      "step": 6
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 0.8064824938774109,
      "learning_rate": 4.848484848484849e-07,
      "loss": 1.2037,
      "step": 8
    },
    {
      "epoch": 0.9090909090909091,
      "grad_norm": 0.7021966576576233,
      "learning_rate": 6.060606060606061e-07,
      "loss": 1.0459,
      "step": 10
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.122668743133545,
      "eval_runtime": 24.349,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 11
    },
    {
      "epoch": 1.0909090909090908,
      "grad_norm": 0.7579166293144226,
      "learning_rate": 7.272727272727273e-07,
      "loss": 1.0994,
      "step": 12
    },
    {
      "epoch": 1.2727272727272727,
      "grad_norm": 0.6994158029556274,
      "learning_rate": 8.484848484848486e-07,
      "loss": 1.1056,
      "step": 14
    },
    {
      "epoch": 1.4545454545454546,
      "grad_norm": 0.698021650314331,
      "learning_rate": 9.696969696969698e-07,
      "loss": 1.1576,
      "step": 16
    },
    {
      "epoch": 1.6363636363636362,
      "grad_norm": 0.7830127477645874,
      "learning_rate": 1.090909090909091e-06,
      "loss": 1.1701,
      "step": 18
    },
    {
      "epoch": 1.8181818181818183,
      "grad_norm": 0.749530553817749,
      "learning_rate": 1.2121212121212122e-06,
      "loss": 1.0871,
      "step": 20
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.6776620149612427,
      "learning_rate": 1.3333333333333334e-06,
      "loss": 1.0223,
      "step": 22
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.1149355173110962,
      "eval_runtime": 24.3485,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 22
    },
    {
      "epoch": 2.1818181818181817,
      "grad_norm": 0.6116415858268738,
      "learning_rate": 1.4545454545454546e-06,
      "loss": 1.0642,
      "step": 24
    },
    {
      "epoch": 2.3636363636363638,
      "grad_norm": 0.7151461839675903,
      "learning_rate": 1.5757575757575759e-06,
      "loss": 1.1885,
      "step": 26
    },
    {
      "epoch": 2.5454545454545454,
      "grad_norm": 0.7120382785797119,
      "learning_rate": 1.6969696969696973e-06,
      "loss": 1.0927,
      "step": 28
    },
    {
      "epoch": 2.7272727272727275,
      "grad_norm": 0.6961694955825806,
      "learning_rate": 1.8181818181818183e-06,
      "loss": 1.0392,
      "step": 30
    },
    {
      "epoch": 2.909090909090909,
      "grad_norm": 0.8099948167800903,
      "learning_rate": 1.9393939393939395e-06,
      "loss": 1.0795,
      "step": 32
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.1017646789550781,
      "eval_runtime": 24.3583,
      "eval_samples_per_second": 0.985,
      "eval_steps_per_second": 0.985,
      "step": 33
    },
    {
      "epoch": 3.090909090909091,
      "grad_norm": 0.7341113686561584,
      "learning_rate": 2.0606060606060607e-06,
      "loss": 1.1445,
      "step": 34
    },
    {
      "epoch": 3.2727272727272725,
      "grad_norm": 0.7002806067466736,
      "learning_rate": 2.181818181818182e-06,
      "loss": 1.0858,
      "step": 36
    },
    {
      "epoch": 3.4545454545454546,
      "grad_norm": 0.7403613328933716,
      "learning_rate": 2.303030303030303e-06,
      "loss": 1.0768,
      "step": 38
    },
    {
      "epoch": 3.6363636363636362,
      "grad_norm": 0.7391121983528137,
      "learning_rate": 2.4242424242424244e-06,
      "loss": 1.035,
      "step": 40
    },
    {
      "epoch": 3.8181818181818183,
      "grad_norm": 0.790711522102356,
      "learning_rate": 2.5454545454545456e-06,
      "loss": 1.1519,
      "step": 42
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.633929967880249,
      "learning_rate": 2.666666666666667e-06,
      "loss": 0.9982,
      "step": 44
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.078678011894226,
      "eval_runtime": 24.3517,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 44
    },
    {
      "epoch": 4.181818181818182,
      "grad_norm": 0.7980412840843201,
      "learning_rate": 2.7878787878787885e-06,
      "loss": 1.1463,
      "step": 46
    },
    {
      "epoch": 4.363636363636363,
      "grad_norm": 0.7583560347557068,
      "learning_rate": 2.9090909090909093e-06,
      "loss": 1.0093,
      "step": 48
    },
    {
      "epoch": 4.545454545454545,
      "grad_norm": 0.7219624519348145,
      "learning_rate": 3.0303030303030305e-06,
      "loss": 1.0371,
      "step": 50
    },
    {
      "epoch": 4.7272727272727275,
      "grad_norm": 0.7923340797424316,
      "learning_rate": 3.1515151515151517e-06,
      "loss": 1.1243,
      "step": 52
    },
    {
      "epoch": 4.909090909090909,
      "grad_norm": 0.6717983484268188,
      "learning_rate": 3.272727272727273e-06,
      "loss": 0.9702,
      "step": 54
    },
    {
      "epoch": 5.0,
      "eval_loss": 1.0444384813308716,
      "eval_runtime": 24.3479,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 55
    },
    {
      "epoch": 5.090909090909091,
      "grad_norm": 0.6388964653015137,
      "learning_rate": 3.3939393939393946e-06,
      "loss": 0.9427,
      "step": 56
    },
    {
      "epoch": 5.2727272727272725,
      "grad_norm": 0.8183718919754028,
      "learning_rate": 3.5151515151515154e-06,
      "loss": 1.0683,
      "step": 58
    },
    {
      "epoch": 5.454545454545454,
      "grad_norm": 0.7228857278823853,
      "learning_rate": 3.6363636363636366e-06,
      "loss": 1.0193,
      "step": 60
    },
    {
      "epoch": 5.636363636363637,
      "grad_norm": 0.6639413237571716,
      "learning_rate": 3.757575757575758e-06,
      "loss": 1.0165,
      "step": 62
    },
    {
      "epoch": 5.818181818181818,
      "grad_norm": 0.7365549802780151,
      "learning_rate": 3.878787878787879e-06,
      "loss": 1.0233,
      "step": 64
    },
    {
      "epoch": 6.0,
      "grad_norm": 0.642576277256012,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.9509,
      "step": 66
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.9990049004554749,
      "eval_runtime": 24.3532,
      "eval_samples_per_second": 0.985,
      "eval_steps_per_second": 0.985,
      "step": 66
    },
    {
      "epoch": 6.181818181818182,
      "grad_norm": 0.6555075645446777,
      "learning_rate": 4.1212121212121215e-06,
      "loss": 0.9929,
      "step": 68
    },
    {
      "epoch": 6.363636363636363,
      "grad_norm": 0.5816611647605896,
      "learning_rate": 4.242424242424243e-06,
      "loss": 0.9891,
      "step": 70
    },
    {
      "epoch": 6.545454545454545,
      "grad_norm": 0.5633305907249451,
      "learning_rate": 4.363636363636364e-06,
      "loss": 0.9197,
      "step": 72
    },
    {
      "epoch": 6.7272727272727275,
      "grad_norm": 0.5987116098403931,
      "learning_rate": 4.4848484848484855e-06,
      "loss": 0.9596,
      "step": 74
    },
    {
      "epoch": 6.909090909090909,
      "grad_norm": 0.5823273658752441,
      "learning_rate": 4.606060606060606e-06,
      "loss": 0.9573,
      "step": 76
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.9499709606170654,
      "eval_runtime": 24.3506,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 77
    },
    {
      "epoch": 7.090909090909091,
      "grad_norm": 0.5255045890808105,
      "learning_rate": 4.727272727272728e-06,
      "loss": 0.8918,
      "step": 78
    },
    {
      "epoch": 7.2727272727272725,
      "grad_norm": 0.5354146957397461,
      "learning_rate": 4.848484848484849e-06,
      "loss": 0.9815,
      "step": 80
    },
    {
      "epoch": 7.454545454545454,
      "grad_norm": 0.4640227258205414,
      "learning_rate": 4.9696969696969696e-06,
      "loss": 0.8863,
      "step": 82
    },
    {
      "epoch": 7.636363636363637,
      "grad_norm": 0.4414072334766388,
      "learning_rate": 5.090909090909091e-06,
      "loss": 0.9009,
      "step": 84
    },
    {
      "epoch": 7.818181818181818,
      "grad_norm": 0.42416912317276,
      "learning_rate": 5.212121212121213e-06,
      "loss": 0.9165,
      "step": 86
    },
    {
      "epoch": 8.0,
      "grad_norm": 0.3701015114784241,
      "learning_rate": 5.333333333333334e-06,
      "loss": 0.8624,
      "step": 88
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.9071061015129089,
      "eval_runtime": 24.3464,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 88
    },
    {
      "epoch": 8.181818181818182,
      "grad_norm": 0.3807128369808197,
      "learning_rate": 5.4545454545454545e-06,
      "loss": 0.8405,
      "step": 90
    },
    {
      "epoch": 8.363636363636363,
      "grad_norm": 0.39005354046821594,
      "learning_rate": 5.575757575757577e-06,
      "loss": 0.8492,
      "step": 92
    },
    {
      "epoch": 8.545454545454545,
      "grad_norm": 0.3477958142757416,
      "learning_rate": 5.696969696969698e-06,
      "loss": 0.8703,
      "step": 94
    },
    {
      "epoch": 8.727272727272727,
      "grad_norm": 0.34921929240226746,
      "learning_rate": 5.8181818181818185e-06,
      "loss": 0.9232,
      "step": 96
    },
    {
      "epoch": 8.909090909090908,
      "grad_norm": 0.38983580470085144,
      "learning_rate": 5.93939393939394e-06,
      "loss": 0.8804,
      "step": 98
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.8746969103813171,
      "eval_runtime": 24.3504,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 99
    },
    {
      "epoch": 9.090909090909092,
      "grad_norm": 0.3402298390865326,
      "learning_rate": 6.060606060606061e-06,
      "loss": 0.8316,
      "step": 100
    },
    {
      "epoch": 9.272727272727273,
      "grad_norm": 0.3445686399936676,
      "learning_rate": 6.181818181818182e-06,
      "loss": 0.8235,
      "step": 102
    },
    {
      "epoch": 9.454545454545455,
      "grad_norm": 0.375674307346344,
      "learning_rate": 6.303030303030303e-06,
      "loss": 0.8982,
      "step": 104
    },
    {
      "epoch": 9.636363636363637,
      "grad_norm": 0.3227982223033905,
      "learning_rate": 6.424242424242425e-06,
      "loss": 0.7633,
      "step": 106
    },
    {
      "epoch": 9.818181818181818,
      "grad_norm": 0.38300326466560364,
      "learning_rate": 6.545454545454546e-06,
      "loss": 0.852,
      "step": 108
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.32602500915527344,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.8515,
      "step": 110
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.8457457423210144,
      "eval_runtime": 24.3488,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 110
    },
    {
      "epoch": 10.181818181818182,
      "grad_norm": 0.3338332176208496,
      "learning_rate": 6.787878787878789e-06,
      "loss": 0.825,
      "step": 112
    },
    {
      "epoch": 10.363636363636363,
      "grad_norm": 0.44083863496780396,
      "learning_rate": 6.90909090909091e-06,
      "loss": 0.8433,
      "step": 114
    },
    {
      "epoch": 10.545454545454545,
      "grad_norm": 0.3659360408782959,
      "learning_rate": 7.030303030303031e-06,
      "loss": 0.8081,
      "step": 116
    },
    {
      "epoch": 10.727272727272727,
      "grad_norm": 0.35738250613212585,
      "learning_rate": 7.151515151515152e-06,
      "loss": 0.8144,
      "step": 118
    },
    {
      "epoch": 10.909090909090908,
      "grad_norm": 0.33010390400886536,
      "learning_rate": 7.272727272727273e-06,
      "loss": 0.7864,
      "step": 120
    },
    {
      "epoch": 11.0,
      "eval_loss": 0.8207831382751465,
      "eval_runtime": 24.3504,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 121
    },
    {
      "epoch": 11.090909090909092,
      "grad_norm": 0.329024076461792,
      "learning_rate": 7.393939393939395e-06,
      "loss": 0.7717,
      "step": 122
    },
    {
      "epoch": 11.272727272727273,
      "grad_norm": 0.32361382246017456,
      "learning_rate": 7.515151515151516e-06,
      "loss": 0.7642,
      "step": 124
    },
    {
      "epoch": 11.454545454545455,
      "grad_norm": 0.33380502462387085,
      "learning_rate": 7.636363636363638e-06,
      "loss": 0.7893,
      "step": 126
    },
    {
      "epoch": 11.636363636363637,
      "grad_norm": 0.3214806318283081,
      "learning_rate": 7.757575757575758e-06,
      "loss": 0.7901,
      "step": 128
    },
    {
      "epoch": 11.818181818181818,
      "grad_norm": 0.3193203806877136,
      "learning_rate": 7.87878787878788e-06,
      "loss": 0.7028,
      "step": 130
    },
    {
      "epoch": 12.0,
      "grad_norm": 0.32331448793411255,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.8648,
      "step": 132
    },
    {
      "epoch": 12.0,
      "eval_loss": 0.8018015027046204,
      "eval_runtime": 24.3516,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 132
    },
    {
      "epoch": 12.181818181818182,
      "grad_norm": 0.2892431616783142,
      "learning_rate": 8.121212121212121e-06,
      "loss": 0.764,
      "step": 134
    },
    {
      "epoch": 12.363636363636363,
      "grad_norm": 0.3037986755371094,
      "learning_rate": 8.242424242424243e-06,
      "loss": 0.7343,
      "step": 136
    },
    {
      "epoch": 12.545454545454545,
      "grad_norm": 0.3432358205318451,
      "learning_rate": 8.363636363636365e-06,
      "loss": 0.7794,
      "step": 138
    },
    {
      "epoch": 12.727272727272727,
      "grad_norm": 0.3149927258491516,
      "learning_rate": 8.484848484848486e-06,
      "loss": 0.8031,
      "step": 140
    },
    {
      "epoch": 12.909090909090908,
      "grad_norm": 0.29204079508781433,
      "learning_rate": 8.606060606060606e-06,
      "loss": 0.736,
      "step": 142
    },
    {
      "epoch": 13.0,
      "eval_loss": 0.786732017993927,
      "eval_runtime": 24.3508,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 143
    },
    {
      "epoch": 13.090909090909092,
      "grad_norm": 0.3326067626476288,
      "learning_rate": 8.727272727272728e-06,
      "loss": 0.7661,
      "step": 144
    },
    {
      "epoch": 13.272727272727273,
      "grad_norm": 0.3473084270954132,
      "learning_rate": 8.84848484848485e-06,
      "loss": 0.7727,
      "step": 146
    },
    {
      "epoch": 13.454545454545455,
      "grad_norm": 0.28269076347351074,
      "learning_rate": 8.969696969696971e-06,
      "loss": 0.7282,
      "step": 148
    },
    {
      "epoch": 13.636363636363637,
      "grad_norm": 0.32704994082450867,
      "learning_rate": 9.090909090909091e-06,
      "loss": 0.6916,
      "step": 150
    },
    {
      "epoch": 13.818181818181818,
      "grad_norm": 0.3217592239379883,
      "learning_rate": 9.212121212121213e-06,
      "loss": 0.7475,
      "step": 152
    },
    {
      "epoch": 14.0,
      "grad_norm": 0.3401748836040497,
      "learning_rate": 9.333333333333334e-06,
      "loss": 0.7882,
      "step": 154
    },
    {
      "epoch": 14.0,
      "eval_loss": 0.7727737426757812,
      "eval_runtime": 24.3475,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 154
    },
    {
      "epoch": 14.181818181818182,
      "grad_norm": 0.3393374979496002,
      "learning_rate": 9.454545454545456e-06,
      "loss": 0.6774,
      "step": 156
    },
    {
      "epoch": 14.363636363636363,
      "grad_norm": 0.28455665707588196,
      "learning_rate": 9.575757575757576e-06,
      "loss": 0.7579,
      "step": 158
    },
    {
      "epoch": 14.545454545454545,
      "grad_norm": 0.30775022506713867,
      "learning_rate": 9.696969696969698e-06,
      "loss": 0.7331,
      "step": 160
    },
    {
      "epoch": 14.727272727272727,
      "grad_norm": 0.3098331689834595,
      "learning_rate": 9.81818181818182e-06,
      "loss": 0.7255,
      "step": 162
    },
    {
      "epoch": 14.909090909090908,
      "grad_norm": 0.39052486419677734,
      "learning_rate": 9.939393939393939e-06,
      "loss": 0.7452,
      "step": 164
    },
    {
      "epoch": 15.0,
      "eval_loss": 0.7604131698608398,
      "eval_runtime": 24.3513,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 165
    },
    {
      "epoch": 15.090909090909092,
      "grad_norm": 0.31109246611595154,
      "learning_rate": 9.999988811118232e-06,
      "loss": 0.6938,
      "step": 166
    },
    {
      "epoch": 15.272727272727273,
      "grad_norm": 0.3080443739891052,
      "learning_rate": 9.999899300364534e-06,
      "loss": 0.6979,
      "step": 168
    },
    {
      "epoch": 15.454545454545455,
      "grad_norm": 0.37110060453414917,
      "learning_rate": 9.999720280459576e-06,
      "loss": 0.7866,
      "step": 170
    },
    {
      "epoch": 15.636363636363637,
      "grad_norm": 0.9138216972351074,
      "learning_rate": 9.999451754608208e-06,
      "loss": 0.683,
      "step": 172
    },
    {
      "epoch": 15.818181818181818,
      "grad_norm": 0.37108689546585083,
      "learning_rate": 9.99909372761763e-06,
      "loss": 0.7214,
      "step": 174
    },
    {
      "epoch": 16.0,
      "grad_norm": 0.32667478919029236,
      "learning_rate": 9.99864620589731e-06,
      "loss": 0.6818,
      "step": 176
    },
    {
      "epoch": 16.0,
      "eval_loss": 0.7484579086303711,
      "eval_runtime": 24.3469,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 176
    },
    {
      "epoch": 16.181818181818183,
      "grad_norm": 0.32395023107528687,
      "learning_rate": 9.998109197458865e-06,
      "loss": 0.6817,
      "step": 178
    },
    {
      "epoch": 16.363636363636363,
      "grad_norm": 0.33770957589149475,
      "learning_rate": 9.997482711915926e-06,
      "loss": 0.7282,
      "step": 180
    },
    {
      "epoch": 16.545454545454547,
      "grad_norm": 0.3693891763687134,
      "learning_rate": 9.996766760483955e-06,
      "loss": 0.6857,
      "step": 182
    },
    {
      "epoch": 16.727272727272727,
      "grad_norm": 0.32818594574928284,
      "learning_rate": 9.995961355980052e-06,
      "loss": 0.653,
      "step": 184
    },
    {
      "epoch": 16.90909090909091,
      "grad_norm": 0.3677254915237427,
      "learning_rate": 9.99506651282272e-06,
      "loss": 0.7119,
      "step": 186
    },
    {
      "epoch": 17.0,
      "eval_loss": 0.7387299537658691,
      "eval_runtime": 24.3449,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 187
    },
    {
      "epoch": 17.09090909090909,
      "grad_norm": 0.33137425780296326,
      "learning_rate": 9.994082247031613e-06,
      "loss": 0.6706,
      "step": 188
    },
    {
      "epoch": 17.272727272727273,
      "grad_norm": 0.34344998002052307,
      "learning_rate": 9.993008576227248e-06,
      "loss": 0.656,
      "step": 190
    },
    {
      "epoch": 17.454545454545453,
      "grad_norm": 0.4169725775718689,
      "learning_rate": 9.991845519630679e-06,
      "loss": 0.6647,
      "step": 192
    },
    {
      "epoch": 17.636363636363637,
      "grad_norm": 0.4314097762107849,
      "learning_rate": 9.99059309806317e-06,
      "loss": 0.6885,
      "step": 194
    },
    {
      "epoch": 17.818181818181817,
      "grad_norm": 0.3693665862083435,
      "learning_rate": 9.989251333945813e-06,
      "loss": 0.6567,
      "step": 196
    },
    {
      "epoch": 18.0,
      "grad_norm": 0.42939141392707825,
      "learning_rate": 9.987820251299121e-06,
      "loss": 0.7107,
      "step": 198
    },
    {
      "epoch": 18.0,
      "eval_loss": 0.7306709289550781,
      "eval_runtime": 24.3524,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 198
    },
    {
      "epoch": 18.181818181818183,
      "grad_norm": 0.3910067677497864,
      "learning_rate": 9.986299875742612e-06,
      "loss": 0.6819,
      "step": 200
    },
    {
      "epoch": 18.363636363636363,
      "grad_norm": 0.3679266571998596,
      "learning_rate": 9.984690234494338e-06,
      "loss": 0.6852,
      "step": 202
    },
    {
      "epoch": 18.545454545454547,
      "grad_norm": 0.36058109998703003,
      "learning_rate": 9.982991356370404e-06,
      "loss": 0.6231,
      "step": 204
    },
    {
      "epoch": 18.727272727272727,
      "grad_norm": 0.4211237132549286,
      "learning_rate": 9.98120327178445e-06,
      "loss": 0.6758,
      "step": 206
    },
    {
      "epoch": 18.90909090909091,
      "grad_norm": 0.3913659155368805,
      "learning_rate": 9.979326012747106e-06,
      "loss": 0.6405,
      "step": 208
    },
    {
      "epoch": 19.0,
      "eval_loss": 0.7237685322761536,
      "eval_runtime": 24.3475,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 209
    },
    {
      "epoch": 19.09090909090909,
      "grad_norm": 0.3846379816532135,
      "learning_rate": 9.977359612865424e-06,
      "loss": 0.5929,
      "step": 210
    },
    {
      "epoch": 19.272727272727273,
      "grad_norm": 0.44087332487106323,
      "learning_rate": 9.975304107342268e-06,
      "loss": 0.627,
      "step": 212
    },
    {
      "epoch": 19.454545454545453,
      "grad_norm": 0.467111200094223,
      "learning_rate": 9.973159532975691e-06,
      "loss": 0.6858,
      "step": 214
    },
    {
      "epoch": 19.636363636363637,
      "grad_norm": 0.43696919083595276,
      "learning_rate": 9.970925928158275e-06,
      "loss": 0.6414,
      "step": 216
    },
    {
      "epoch": 19.818181818181817,
      "grad_norm": 0.4129337966442108,
      "learning_rate": 9.968603332876435e-06,
      "loss": 0.6621,
      "step": 218
    },
    {
      "epoch": 20.0,
      "grad_norm": 0.4260455369949341,
      "learning_rate": 9.966191788709716e-06,
      "loss": 0.6075,
      "step": 220
    },
    {
      "epoch": 20.0,
      "eval_loss": 0.7188207507133484,
      "eval_runtime": 24.3505,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 220
    },
    {
      "epoch": 20.181818181818183,
      "grad_norm": 0.4215308725833893,
      "learning_rate": 9.963691338830045e-06,
      "loss": 0.611,
      "step": 222
    },
    {
      "epoch": 20.363636363636363,
      "grad_norm": 0.4364253282546997,
      "learning_rate": 9.961102028000948e-06,
      "loss": 0.5843,
      "step": 224
    },
    {
      "epoch": 20.545454545454547,
      "grad_norm": 0.45465582609176636,
      "learning_rate": 9.958423902576764e-06,
      "loss": 0.6556,
      "step": 226
    },
    {
      "epoch": 20.727272727272727,
      "grad_norm": 0.43474891781806946,
      "learning_rate": 9.955657010501807e-06,
      "loss": 0.6197,
      "step": 228
    },
    {
      "epoch": 20.90909090909091,
      "grad_norm": 0.4751872420310974,
      "learning_rate": 9.952801401309504e-06,
      "loss": 0.6323,
      "step": 230
    },
    {
      "epoch": 21.0,
      "eval_loss": 0.7152169346809387,
      "eval_runtime": 24.3508,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 231
    },
    {
      "epoch": 21.09090909090909,
      "grad_norm": 0.4626959264278412,
      "learning_rate": 9.949857126121519e-06,
      "loss": 0.624,
      "step": 232
    },
    {
      "epoch": 21.272727272727273,
      "grad_norm": 0.45061811804771423,
      "learning_rate": 9.946824237646823e-06,
      "loss": 0.6272,
      "step": 234
    },
    {
      "epoch": 21.454545454545453,
      "grad_norm": 0.4539961814880371,
      "learning_rate": 9.94370279018077e-06,
      "loss": 0.6137,
      "step": 236
    },
    {
      "epoch": 21.636363636363637,
      "grad_norm": 0.5059633255004883,
      "learning_rate": 9.940492839604103e-06,
      "loss": 0.5831,
      "step": 238
    },
    {
      "epoch": 21.818181818181817,
      "grad_norm": 0.46466246247291565,
      "learning_rate": 9.937194443381972e-06,
      "loss": 0.6131,
      "step": 240
    },
    {
      "epoch": 22.0,
      "grad_norm": 0.5081361532211304,
      "learning_rate": 9.933807660562898e-06,
      "loss": 0.557,
      "step": 242
    },
    {
      "epoch": 22.0,
      "eval_loss": 0.7138778567314148,
      "eval_runtime": 24.3506,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 242
    },
    {
      "epoch": 22.181818181818183,
      "grad_norm": 0.4696747064590454,
      "learning_rate": 9.930332551777709e-06,
      "loss": 0.5765,
      "step": 244
    },
    {
      "epoch": 22.363636363636363,
      "grad_norm": 0.4927414655685425,
      "learning_rate": 9.926769179238467e-06,
      "loss": 0.6057,
      "step": 246
    },
    {
      "epoch": 22.545454545454547,
      "grad_norm": 0.5349259376525879,
      "learning_rate": 9.923117606737347e-06,
      "loss": 0.6059,
      "step": 248
    },
    {
      "epoch": 22.727272727272727,
      "grad_norm": 0.5289928317070007,
      "learning_rate": 9.919377899645497e-06,
      "loss": 0.5736,
      "step": 250
    },
    {
      "epoch": 22.90909090909091,
      "grad_norm": 0.5489732027053833,
      "learning_rate": 9.915550124911866e-06,
      "loss": 0.5692,
      "step": 252
    },
    {
      "epoch": 23.0,
      "eval_loss": 0.7158213257789612,
      "eval_runtime": 24.36,
      "eval_samples_per_second": 0.985,
      "eval_steps_per_second": 0.985,
      "step": 253
    },
    {
      "epoch": 23.09090909090909,
      "grad_norm": 0.5354622602462769,
      "learning_rate": 9.91163435106201e-06,
      "loss": 0.5341,
      "step": 254
    },
    {
      "epoch": 23.272727272727273,
      "grad_norm": 0.488086074590683,
      "learning_rate": 9.907630648196857e-06,
      "loss": 0.593,
      "step": 256
    },
    {
      "epoch": 23.454545454545453,
      "grad_norm": 0.545102059841156,
      "learning_rate": 9.903539087991462e-06,
      "loss": 0.568,
      "step": 258
    },
    {
      "epoch": 23.636363636363637,
      "grad_norm": 0.5522411465644836,
      "learning_rate": 9.899359743693715e-06,
      "loss": 0.5498,
      "step": 260
    },
    {
      "epoch": 23.818181818181817,
      "grad_norm": 0.6323682069778442,
      "learning_rate": 9.895092690123036e-06,
      "loss": 0.5401,
      "step": 262
    },
    {
      "epoch": 24.0,
      "grad_norm": 0.5789000988006592,
      "learning_rate": 9.890738003669029e-06,
      "loss": 0.558,
      "step": 264
    },
    {
      "epoch": 24.0,
      "eval_loss": 0.719824492931366,
      "eval_runtime": 24.3548,
      "eval_samples_per_second": 0.985,
      "eval_steps_per_second": 0.985,
      "step": 264
    },
    {
      "epoch": 24.181818181818183,
      "grad_norm": 0.5536386966705322,
      "learning_rate": 9.886295762290125e-06,
      "loss": 0.5058,
      "step": 266
    },
    {
      "epoch": 24.363636363636363,
      "grad_norm": 0.6637153029441833,
      "learning_rate": 9.881766045512176e-06,
      "loss": 0.5807,
      "step": 268
    },
    {
      "epoch": 24.545454545454547,
      "grad_norm": 0.619708240032196,
      "learning_rate": 9.877148934427037e-06,
      "loss": 0.5592,
      "step": 270
    },
    {
      "epoch": 24.727272727272727,
      "grad_norm": 0.6658281087875366,
      "learning_rate": 9.872444511691108e-06,
      "loss": 0.5266,
      "step": 272
    },
    {
      "epoch": 24.90909090909091,
      "grad_norm": 0.6834419369697571,
      "learning_rate": 9.867652861523866e-06,
      "loss": 0.5153,
      "step": 274
    },
    {
      "epoch": 25.0,
      "eval_loss": 0.7296044230461121,
      "eval_runtime": 24.3588,
      "eval_samples_per_second": 0.985,
      "eval_steps_per_second": 0.985,
      "step": 275
    },
    {
      "epoch": 25.09090909090909,
      "grad_norm": 0.667942225933075,
      "learning_rate": 9.862774069706346e-06,
      "loss": 0.4993,
      "step": 276
    },
    {
      "epoch": 25.272727272727273,
      "grad_norm": 0.6705055236816406,
      "learning_rate": 9.85780822357961e-06,
      "loss": 0.5251,
      "step": 278
    },
    {
      "epoch": 25.454545454545453,
      "grad_norm": 0.7730400562286377,
      "learning_rate": 9.85275541204318e-06,
      "loss": 0.5414,
      "step": 280
    },
    {
      "epoch": 25.636363636363637,
      "grad_norm": 0.6594474911689758,
      "learning_rate": 9.847615725553457e-06,
      "loss": 0.5029,
      "step": 282
    },
    {
      "epoch": 25.818181818181817,
      "grad_norm": 0.6896001696586609,
      "learning_rate": 9.842389256122086e-06,
      "loss": 0.4927,
      "step": 284
    },
    {
      "epoch": 26.0,
      "grad_norm": 0.714022159576416,
      "learning_rate": 9.83707609731432e-06,
      "loss": 0.4964,
      "step": 286
    },
    {
      "epoch": 26.0,
      "eval_loss": 0.7366821765899658,
      "eval_runtime": 24.3662,
      "eval_samples_per_second": 0.985,
      "eval_steps_per_second": 0.985,
      "step": 286
    },
    {
      "epoch": 26.181818181818183,
      "grad_norm": 0.8182728886604309,
      "learning_rate": 9.831676344247343e-06,
      "loss": 0.4601,
      "step": 288
    },
    {
      "epoch": 26.363636363636363,
      "grad_norm": 0.8259897828102112,
      "learning_rate": 9.826190093588564e-06,
      "loss": 0.5021,
      "step": 290
    },
    {
      "epoch": 26.545454545454547,
      "grad_norm": 0.8739567995071411,
      "learning_rate": 9.820617443553889e-06,
      "loss": 0.5133,
      "step": 292
    },
    {
      "epoch": 26.727272727272727,
      "grad_norm": 0.798852801322937,
      "learning_rate": 9.814958493905962e-06,
      "loss": 0.4688,
      "step": 294
    },
    {
      "epoch": 26.90909090909091,
      "grad_norm": 0.7111189365386963,
      "learning_rate": 9.80921334595238e-06,
      "loss": 0.4713,
      "step": 296
    },
    {
      "epoch": 27.0,
      "eval_loss": 0.7402606010437012,
      "eval_runtime": 24.3554,
      "eval_samples_per_second": 0.985,
      "eval_steps_per_second": 0.985,
      "step": 297
    },
    {
      "epoch": 27.09090909090909,
      "grad_norm": 0.7679806351661682,
      "learning_rate": 9.80338210254388e-06,
      "loss": 0.4414,
      "step": 298
    },
    {
      "epoch": 27.272727272727273,
      "grad_norm": 0.7741813659667969,
      "learning_rate": 9.797464868072489e-06,
      "loss": 0.4424,
      "step": 300
    },
    {
      "epoch": 27.454545454545453,
      "grad_norm": 0.7848456501960754,
      "learning_rate": 9.791461748469669e-06,
      "loss": 0.461,
      "step": 302
    },
    {
      "epoch": 27.636363636363637,
      "grad_norm": 0.9988805055618286,
      "learning_rate": 9.785372851204415e-06,
      "loss": 0.4594,
      "step": 304
    },
    {
      "epoch": 27.818181818181817,
      "grad_norm": 1.017672061920166,
      "learning_rate": 9.779198285281326e-06,
      "loss": 0.4656,
      "step": 306
    },
    {
      "epoch": 28.0,
      "grad_norm": 0.8862157464027405,
      "learning_rate": 9.77293816123866e-06,
      "loss": 0.4144,
      "step": 308
    },
    {
      "epoch": 28.0,
      "eval_loss": 0.7620039582252502,
      "eval_runtime": 24.3481,
      "eval_samples_per_second": 0.986,
      "eval_steps_per_second": 0.986,
      "step": 308
    },
    {
      "epoch": 28.181818181818183,
      "grad_norm": 1.078522801399231,
      "learning_rate": 9.766592591146353e-06,
      "loss": 0.4126,
      "step": 310
    },
    {
      "epoch": 28.363636363636363,
      "grad_norm": 0.8740026354789734,
      "learning_rate": 9.760161688604008e-06,
      "loss": 0.4104,
      "step": 312
    },
    {
      "epoch": 28.545454545454547,
      "grad_norm": 1.2476239204406738,
      "learning_rate": 9.753645568738872e-06,
      "loss": 0.4275,
      "step": 314
    },
    {
      "epoch": 28.727272727272727,
      "grad_norm": 1.1232469081878662,
      "learning_rate": 9.747044348203766e-06,
      "loss": 0.4,
      "step": 316
    },
    {
      "epoch": 28.90909090909091,
      "grad_norm": 0.9635869860649109,
      "learning_rate": 9.740358145174999e-06,
      "loss": 0.4184,
      "step": 318
    },
    {
      "epoch": 29.0,
      "eval_loss": 0.7954289317131042,
      "eval_runtime": 24.3558,
      "eval_samples_per_second": 0.985,
      "eval_steps_per_second": 0.985,
      "step": 319
    },
    {
      "epoch": 29.0,
      "step": 319,
      "total_flos": 2.4257124139375e+17,
      "train_loss": 0.757462413613699,
      "train_runtime": 8705.8997,
      "train_samples_per_second": 1.516,
      "train_steps_per_second": 0.19
    }
  ],
  "logging_steps": 2,
  "max_steps": 1650,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 150,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 7,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.4257124139375e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}