{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.102880658436214,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00102880658436214,
      "grad_norm": 0.025219934061169624,
      "learning_rate": 1e-05,
      "loss": 10.3752,
      "step": 1
    },
    {
      "epoch": 0.00102880658436214,
      "eval_loss": 10.375638961791992,
      "eval_runtime": 3.4609,
      "eval_samples_per_second": 236.643,
      "eval_steps_per_second": 29.761,
      "step": 1
    },
    {
      "epoch": 0.00205761316872428,
      "grad_norm": 0.02738805301487446,
      "learning_rate": 2e-05,
      "loss": 10.377,
      "step": 2
    },
    {
      "epoch": 0.0030864197530864196,
      "grad_norm": 0.026469284668564796,
      "learning_rate": 3e-05,
      "loss": 10.3742,
      "step": 3
    },
    {
      "epoch": 0.00411522633744856,
      "grad_norm": 0.02698007971048355,
      "learning_rate": 4e-05,
      "loss": 10.3783,
      "step": 4
    },
    {
      "epoch": 0.0051440329218107,
      "grad_norm": 0.02839619107544422,
      "learning_rate": 5e-05,
      "loss": 10.374,
      "step": 5
    },
    {
      "epoch": 0.006172839506172839,
      "grad_norm": 0.029380450025200844,
      "learning_rate": 6e-05,
      "loss": 10.3777,
      "step": 6
    },
    {
      "epoch": 0.00720164609053498,
      "grad_norm": 0.024871516972780228,
      "learning_rate": 7e-05,
      "loss": 10.3754,
      "step": 7
    },
    {
      "epoch": 0.00823045267489712,
      "grad_norm": 0.028594117611646652,
      "learning_rate": 8e-05,
      "loss": 10.3687,
      "step": 8
    },
    {
      "epoch": 0.009259259259259259,
      "grad_norm": 0.02646900713443756,
      "learning_rate": 9e-05,
      "loss": 10.3764,
      "step": 9
    },
    {
      "epoch": 0.009259259259259259,
      "eval_loss": 10.375298500061035,
      "eval_runtime": 4.7508,
      "eval_samples_per_second": 172.393,
      "eval_steps_per_second": 21.681,
      "step": 9
    },
    {
      "epoch": 0.0102880658436214,
      "grad_norm": 0.030630815774202347,
      "learning_rate": 0.0001,
      "loss": 10.3748,
      "step": 10
    },
    {
      "epoch": 0.01131687242798354,
      "grad_norm": 0.02231612056493759,
      "learning_rate": 9.99695413509548e-05,
      "loss": 10.3753,
      "step": 11
    },
    {
      "epoch": 0.012345679012345678,
      "grad_norm": 0.03500870242714882,
      "learning_rate": 9.987820251299122e-05,
      "loss": 10.3749,
      "step": 12
    },
    {
      "epoch": 0.013374485596707819,
      "grad_norm": 0.03169140964746475,
      "learning_rate": 9.972609476841367e-05,
      "loss": 10.3728,
      "step": 13
    },
    {
      "epoch": 0.01440329218106996,
      "grad_norm": 0.033007316291332245,
      "learning_rate": 9.951340343707852e-05,
      "loss": 10.3752,
      "step": 14
    },
    {
      "epoch": 0.015432098765432098,
      "grad_norm": 0.027678297832608223,
      "learning_rate": 9.924038765061042e-05,
      "loss": 10.3716,
      "step": 15
    },
    {
      "epoch": 0.01646090534979424,
      "grad_norm": 0.028717216104269028,
      "learning_rate": 9.890738003669029e-05,
      "loss": 10.3719,
      "step": 16
    },
    {
      "epoch": 0.01748971193415638,
      "grad_norm": 0.024401096627116203,
      "learning_rate": 9.851478631379982e-05,
      "loss": 10.3713,
      "step": 17
    },
    {
      "epoch": 0.018518518518518517,
      "grad_norm": 0.03191472589969635,
      "learning_rate": 9.806308479691595e-05,
      "loss": 10.379,
      "step": 18
    },
    {
      "epoch": 0.018518518518518517,
      "eval_loss": 10.374419212341309,
      "eval_runtime": 4.6532,
      "eval_samples_per_second": 176.007,
      "eval_steps_per_second": 22.135,
      "step": 18
    },
    {
      "epoch": 0.01954732510288066,
      "grad_norm": 0.026906833052635193,
      "learning_rate": 9.755282581475769e-05,
      "loss": 10.3749,
      "step": 19
    },
    {
      "epoch": 0.0205761316872428,
      "grad_norm": 0.024135004729032516,
      "learning_rate": 9.698463103929542e-05,
      "loss": 10.3714,
      "step": 20
    },
    {
      "epoch": 0.021604938271604937,
      "grad_norm": 0.030134005472064018,
      "learning_rate": 9.635919272833938e-05,
      "loss": 10.3771,
      "step": 21
    },
    {
      "epoch": 0.02263374485596708,
      "grad_norm": 0.02962920069694519,
      "learning_rate": 9.567727288213005e-05,
      "loss": 10.3719,
      "step": 22
    },
    {
      "epoch": 0.023662551440329218,
      "grad_norm": 0.02854999154806137,
      "learning_rate": 9.493970231495835e-05,
      "loss": 10.373,
      "step": 23
    },
    {
      "epoch": 0.024691358024691357,
      "grad_norm": 0.026968492195010185,
      "learning_rate": 9.414737964294636e-05,
      "loss": 10.3742,
      "step": 24
    },
    {
      "epoch": 0.0257201646090535,
      "grad_norm": 0.03159381076693535,
      "learning_rate": 9.330127018922194e-05,
      "loss": 10.374,
      "step": 25
    },
    {
      "epoch": 0.026748971193415638,
      "grad_norm": 0.028679387643933296,
      "learning_rate": 9.24024048078213e-05,
      "loss": 10.3742,
      "step": 26
    },
    {
      "epoch": 0.027777777777777776,
      "grad_norm": 0.027246227487921715,
      "learning_rate": 9.145187862775209e-05,
      "loss": 10.373,
      "step": 27
    },
    {
      "epoch": 0.027777777777777776,
      "eval_loss": 10.373526573181152,
      "eval_runtime": 4.4408,
      "eval_samples_per_second": 184.426,
      "eval_steps_per_second": 23.194,
      "step": 27
    },
    {
      "epoch": 0.02880658436213992,
      "grad_norm": 0.02638566493988037,
      "learning_rate": 9.045084971874738e-05,
      "loss": 10.3752,
      "step": 28
    },
    {
      "epoch": 0.029835390946502057,
      "grad_norm": 0.029429133981466293,
      "learning_rate": 8.940053768033609e-05,
      "loss": 10.3752,
      "step": 29
    },
    {
      "epoch": 0.030864197530864196,
      "grad_norm": 0.028540797531604767,
      "learning_rate": 8.83022221559489e-05,
      "loss": 10.3752,
      "step": 30
    },
    {
      "epoch": 0.03189300411522634,
      "grad_norm": 0.026979394257068634,
      "learning_rate": 8.715724127386972e-05,
      "loss": 10.3744,
      "step": 31
    },
    {
      "epoch": 0.03292181069958848,
      "grad_norm": 0.03402787446975708,
      "learning_rate": 8.596699001693255e-05,
      "loss": 10.3702,
      "step": 32
    },
    {
      "epoch": 0.033950617283950615,
      "grad_norm": 0.03517807647585869,
      "learning_rate": 8.473291852294987e-05,
      "loss": 10.3777,
      "step": 33
    },
    {
      "epoch": 0.03497942386831276,
      "grad_norm": 0.028791263699531555,
      "learning_rate": 8.345653031794292e-05,
      "loss": 10.37,
      "step": 34
    },
    {
      "epoch": 0.0360082304526749,
      "grad_norm": 0.027155417948961258,
      "learning_rate": 8.213938048432697e-05,
      "loss": 10.374,
      "step": 35
    },
    {
      "epoch": 0.037037037037037035,
      "grad_norm": 0.033103760331869125,
      "learning_rate": 8.07830737662829e-05,
      "loss": 10.3683,
      "step": 36
    },
    {
      "epoch": 0.037037037037037035,
      "eval_loss": 10.372629165649414,
      "eval_runtime": 4.6687,
      "eval_samples_per_second": 175.422,
      "eval_steps_per_second": 22.062,
      "step": 36
    },
    {
      "epoch": 0.03806584362139918,
      "grad_norm": 0.03502420708537102,
      "learning_rate": 7.938926261462366e-05,
      "loss": 10.371,
      "step": 37
    },
    {
      "epoch": 0.03909465020576132,
      "grad_norm": 0.03081183321774006,
      "learning_rate": 7.795964517353735e-05,
      "loss": 10.3732,
      "step": 38
    },
    {
      "epoch": 0.040123456790123455,
      "grad_norm": 0.02784951776266098,
      "learning_rate": 7.649596321166024e-05,
      "loss": 10.3758,
      "step": 39
    },
    {
      "epoch": 0.0411522633744856,
      "grad_norm": 0.03854544833302498,
      "learning_rate": 7.500000000000001e-05,
      "loss": 10.3658,
      "step": 40
    },
    {
      "epoch": 0.04218106995884774,
      "grad_norm": 0.027156542986631393,
      "learning_rate": 7.347357813929454e-05,
      "loss": 10.3731,
      "step": 41
    },
    {
      "epoch": 0.043209876543209874,
      "grad_norm": 0.03118831478059292,
      "learning_rate": 7.191855733945387e-05,
      "loss": 10.3659,
      "step": 42
    },
    {
      "epoch": 0.044238683127572016,
      "grad_norm": 0.03406394273042679,
      "learning_rate": 7.033683215379002e-05,
      "loss": 10.3682,
      "step": 43
    },
    {
      "epoch": 0.04526748971193416,
      "grad_norm": 0.03691929578781128,
      "learning_rate": 6.873032967079561e-05,
      "loss": 10.3734,
      "step": 44
    },
    {
      "epoch": 0.046296296296296294,
      "grad_norm": 0.03526577353477478,
      "learning_rate": 6.710100716628344e-05,
      "loss": 10.3724,
      "step": 45
    },
    {
      "epoch": 0.046296296296296294,
      "eval_loss": 10.37172794342041,
      "eval_runtime": 4.6525,
      "eval_samples_per_second": 176.035,
      "eval_steps_per_second": 22.139,
      "step": 45
    },
    {
      "epoch": 0.047325102880658436,
      "grad_norm": 0.038986314088106155,
      "learning_rate": 6.545084971874738e-05,
      "loss": 10.3734,
      "step": 46
    },
    {
      "epoch": 0.04835390946502058,
      "grad_norm": 0.033386219292879105,
      "learning_rate": 6.378186779084995e-05,
      "loss": 10.3747,
      "step": 47
    },
    {
      "epoch": 0.04938271604938271,
      "grad_norm": 0.03682396188378334,
      "learning_rate": 6.209609477998338e-05,
      "loss": 10.3702,
      "step": 48
    },
    {
      "epoch": 0.050411522633744855,
      "grad_norm": 0.035510748624801636,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 10.3708,
      "step": 49
    },
    {
      "epoch": 0.051440329218107,
      "grad_norm": 0.02855323627591133,
      "learning_rate": 5.868240888334653e-05,
      "loss": 10.3743,
      "step": 50
    },
    {
      "epoch": 0.05246913580246913,
      "grad_norm": 0.033705513924360275,
      "learning_rate": 5.695865504800327e-05,
      "loss": 10.3749,
      "step": 51
    },
    {
      "epoch": 0.053497942386831275,
      "grad_norm": 0.03999943658709526,
      "learning_rate": 5.522642316338268e-05,
      "loss": 10.3671,
      "step": 52
    },
    {
      "epoch": 0.05452674897119342,
      "grad_norm": 0.043624721467494965,
      "learning_rate": 5.348782368720626e-05,
      "loss": 10.3708,
      "step": 53
    },
    {
      "epoch": 0.05555555555555555,
      "grad_norm": 0.036783572286367416,
      "learning_rate": 5.174497483512506e-05,
      "loss": 10.3684,
      "step": 54
    },
    {
      "epoch": 0.05555555555555555,
      "eval_loss": 10.370903015136719,
      "eval_runtime": 4.3048,
      "eval_samples_per_second": 190.254,
      "eval_steps_per_second": 23.927,
      "step": 54
    },
    {
      "epoch": 0.056584362139917695,
      "grad_norm": 0.036852557212114334,
      "learning_rate": 5e-05,
      "loss": 10.3715,
      "step": 55
    },
    {
      "epoch": 0.05761316872427984,
      "grad_norm": 0.036059990525245667,
      "learning_rate": 4.825502516487497e-05,
      "loss": 10.3685,
      "step": 56
    },
    {
      "epoch": 0.05864197530864197,
      "grad_norm": 0.04546878859400749,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 10.3685,
      "step": 57
    },
    {
      "epoch": 0.059670781893004114,
      "grad_norm": 0.045334216207265854,
      "learning_rate": 4.477357683661734e-05,
      "loss": 10.3677,
      "step": 58
    },
    {
      "epoch": 0.060699588477366256,
      "grad_norm": 0.046929437667131424,
      "learning_rate": 4.3041344951996746e-05,
      "loss": 10.3717,
      "step": 59
    },
    {
      "epoch": 0.06172839506172839,
      "grad_norm": 0.041340090334415436,
      "learning_rate": 4.131759111665349e-05,
      "loss": 10.3723,
      "step": 60
    },
    {
      "epoch": 0.06275720164609054,
      "grad_norm": 0.042488716542720795,
      "learning_rate": 3.960441545911204e-05,
      "loss": 10.3692,
      "step": 61
    },
    {
      "epoch": 0.06378600823045268,
      "grad_norm": 0.03854437917470932,
      "learning_rate": 3.790390522001662e-05,
      "loss": 10.3682,
      "step": 62
    },
    {
      "epoch": 0.06481481481481481,
      "grad_norm": 0.03789610043168068,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 10.3731,
      "step": 63
    },
    {
      "epoch": 0.06481481481481481,
      "eval_loss": 10.370210647583008,
      "eval_runtime": 4.6613,
      "eval_samples_per_second": 175.703,
      "eval_steps_per_second": 22.097,
      "step": 63
    },
    {
      "epoch": 0.06584362139917696,
      "grad_norm": 0.044738225638866425,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 10.3694,
      "step": 64
    },
    {
      "epoch": 0.0668724279835391,
      "grad_norm": 0.040270205587148666,
      "learning_rate": 3.289899283371657e-05,
      "loss": 10.3669,
      "step": 65
    },
    {
      "epoch": 0.06790123456790123,
      "grad_norm": 0.04272771254181862,
      "learning_rate": 3.12696703292044e-05,
      "loss": 10.3651,
      "step": 66
    },
    {
      "epoch": 0.06893004115226338,
      "grad_norm": 0.043519698083400726,
      "learning_rate": 2.9663167846209998e-05,
      "loss": 10.3693,
      "step": 67
    },
    {
      "epoch": 0.06995884773662552,
      "grad_norm": 0.03874226659536362,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 10.3727,
      "step": 68
    },
    {
      "epoch": 0.07098765432098765,
      "grad_norm": 0.05196073278784752,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 10.3709,
      "step": 69
    },
    {
      "epoch": 0.0720164609053498,
      "grad_norm": 0.04321382939815521,
      "learning_rate": 2.500000000000001e-05,
      "loss": 10.3718,
      "step": 70
    },
    {
      "epoch": 0.07304526748971193,
      "grad_norm": 0.046473391354084015,
      "learning_rate": 2.350403678833976e-05,
      "loss": 10.3715,
      "step": 71
    },
    {
      "epoch": 0.07407407407407407,
      "grad_norm": 0.04274188354611397,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 10.3679,
      "step": 72
    },
    {
      "epoch": 0.07407407407407407,
      "eval_loss": 10.369702339172363,
      "eval_runtime": 4.6514,
      "eval_samples_per_second": 176.078,
      "eval_steps_per_second": 22.144,
      "step": 72
    },
    {
      "epoch": 0.07510288065843622,
      "grad_norm": 0.03789234161376953,
      "learning_rate": 2.061073738537635e-05,
      "loss": 10.3722,
      "step": 73
    },
    {
      "epoch": 0.07613168724279835,
      "grad_norm": 0.04123846814036369,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 10.3666,
      "step": 74
    },
    {
      "epoch": 0.07716049382716049,
      "grad_norm": 0.043930958956480026,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 10.3657,
      "step": 75
    },
    {
      "epoch": 0.07818930041152264,
      "grad_norm": 0.04726821556687355,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 10.3692,
      "step": 76
    },
    {
      "epoch": 0.07921810699588477,
      "grad_norm": 0.04689066484570503,
      "learning_rate": 1.526708147705013e-05,
      "loss": 10.3695,
      "step": 77
    },
    {
      "epoch": 0.08024691358024691,
      "grad_norm": 0.05402266979217529,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 10.37,
      "step": 78
    },
    {
      "epoch": 0.08127572016460906,
      "grad_norm": 0.04493223875761032,
      "learning_rate": 1.2842758726130283e-05,
      "loss": 10.3684,
      "step": 79
    },
    {
      "epoch": 0.0823045267489712,
      "grad_norm": 0.058183249086141586,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 10.3659,
      "step": 80
    },
    {
      "epoch": 0.08333333333333333,
      "grad_norm": 0.04858890920877457,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 10.3681,
      "step": 81
    },
    {
      "epoch": 0.08333333333333333,
      "eval_loss": 10.369402885437012,
      "eval_runtime": 4.4593,
      "eval_samples_per_second": 183.661,
      "eval_steps_per_second": 23.098,
      "step": 81
    },
    {
      "epoch": 0.08436213991769548,
      "grad_norm": 0.04888492077589035,
      "learning_rate": 9.549150281252633e-06,
      "loss": 10.3731,
      "step": 82
    },
    {
      "epoch": 0.08539094650205761,
      "grad_norm": 0.04856285825371742,
      "learning_rate": 8.548121372247918e-06,
      "loss": 10.3696,
      "step": 83
    },
    {
      "epoch": 0.08641975308641975,
      "grad_norm": 0.053031668066978455,
      "learning_rate": 7.597595192178702e-06,
      "loss": 10.371,
      "step": 84
    },
    {
      "epoch": 0.0874485596707819,
      "grad_norm": 0.04873763024806976,
      "learning_rate": 6.698729810778065e-06,
      "loss": 10.3726,
      "step": 85
    },
    {
      "epoch": 0.08847736625514403,
      "grad_norm": 0.06068224832415581,
      "learning_rate": 5.852620357053651e-06,
      "loss": 10.3666,
      "step": 86
    },
    {
      "epoch": 0.08950617283950617,
      "grad_norm": 0.05208251252770424,
      "learning_rate": 5.060297685041659e-06,
      "loss": 10.3692,
      "step": 87
    },
    {
      "epoch": 0.09053497942386832,
      "grad_norm": 0.044392600655555725,
      "learning_rate": 4.322727117869951e-06,
      "loss": 10.369,
      "step": 88
    },
    {
      "epoch": 0.09156378600823045,
      "grad_norm": 0.05065758153796196,
      "learning_rate": 3.6408072716606346e-06,
      "loss": 10.3688,
      "step": 89
    },
    {
      "epoch": 0.09259259259259259,
      "grad_norm": 0.04816662147641182,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 10.3699,
      "step": 90
    },
    {
      "epoch": 0.09259259259259259,
      "eval_loss": 10.36927318572998,
      "eval_runtime": 4.7104,
      "eval_samples_per_second": 173.872,
      "eval_steps_per_second": 21.867,
      "step": 90
    },
    {
      "epoch": 0.09362139917695474,
      "grad_norm": 0.040740616619586945,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 10.3726,
      "step": 91
    },
    {
      "epoch": 0.09465020576131687,
      "grad_norm": 0.04884503409266472,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 10.3637,
      "step": 92
    },
    {
      "epoch": 0.09567901234567901,
      "grad_norm": 0.045312654227018356,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 10.3682,
      "step": 93
    },
    {
      "epoch": 0.09670781893004116,
      "grad_norm": 0.03917687013745308,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 10.3687,
      "step": 94
    },
    {
      "epoch": 0.09773662551440329,
      "grad_norm": 0.04869252070784569,
      "learning_rate": 7.596123493895991e-07,
      "loss": 10.3677,
      "step": 95
    },
    {
      "epoch": 0.09876543209876543,
      "grad_norm": 0.03939636051654816,
      "learning_rate": 4.865965629214819e-07,
      "loss": 10.3654,
      "step": 96
    },
    {
      "epoch": 0.09979423868312758,
      "grad_norm": 0.04943367838859558,
      "learning_rate": 2.7390523158633554e-07,
      "loss": 10.366,
      "step": 97
    },
    {
      "epoch": 0.10082304526748971,
      "grad_norm": 0.04062531515955925,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 10.3703,
      "step": 98
    },
    {
      "epoch": 0.10185185185185185,
      "grad_norm": 0.04285578057169914,
      "learning_rate": 3.04586490452119e-08,
      "loss": 10.3704,
      "step": 99
    },
    {
      "epoch": 0.10185185185185185,
      "eval_loss": 10.369248390197754,
      "eval_runtime": 4.5585,
      "eval_samples_per_second": 179.666,
      "eval_steps_per_second": 22.595,
      "step": 99
    },
    {
      "epoch": 0.102880658436214,
      "grad_norm": 0.04049593210220337,
      "learning_rate": 0.0,
      "loss": 10.3727,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5230244659200.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}