{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.36977120406748326,
  "eval_steps": 13,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.007395424081349665,
      "grad_norm": 507.5455017089844,
      "learning_rate": 5e-05,
      "loss": 119.865,
      "step": 1
    },
    {
      "epoch": 0.007395424081349665,
      "eval_loss": 3.9573512077331543,
      "eval_runtime": 81.2876,
      "eval_samples_per_second": 11.207,
      "eval_steps_per_second": 2.805,
      "step": 1
    },
    {
      "epoch": 0.01479084816269933,
      "grad_norm": 472.2460632324219,
      "learning_rate": 0.0001,
      "loss": 123.5993,
      "step": 2
    },
    {
      "epoch": 0.022186272244048996,
      "grad_norm": 214.4851531982422,
      "learning_rate": 9.989294616193017e-05,
      "loss": 120.4008,
      "step": 3
    },
    {
      "epoch": 0.02958169632539866,
      "grad_norm": 170.87863159179688,
      "learning_rate": 9.957224306869053e-05,
      "loss": 114.044,
      "step": 4
    },
    {
      "epoch": 0.036977120406748325,
      "grad_norm": 318.2630920410156,
      "learning_rate": 9.903926402016153e-05,
      "loss": 109.6968,
      "step": 5
    },
    {
      "epoch": 0.04437254448809799,
      "grad_norm": 166.34352111816406,
      "learning_rate": 9.829629131445342e-05,
      "loss": 105.4008,
      "step": 6
    },
    {
      "epoch": 0.05176796856944765,
      "grad_norm": 97.02327728271484,
      "learning_rate": 9.73465064747553e-05,
      "loss": 95.6925,
      "step": 7
    },
    {
      "epoch": 0.05916339265079732,
      "grad_norm": 103.01248931884766,
      "learning_rate": 9.619397662556435e-05,
      "loss": 92.4355,
      "step": 8
    },
    {
      "epoch": 0.06655881673214699,
      "grad_norm": 115.37718200683594,
      "learning_rate": 9.484363707663442e-05,
      "loss": 97.7696,
      "step": 9
    },
    {
      "epoch": 0.07395424081349665,
      "grad_norm": 74.1399917602539,
      "learning_rate": 9.330127018922194e-05,
      "loss": 88.485,
      "step": 10
    },
    {
      "epoch": 0.08134966489484631,
      "grad_norm": 107.24742126464844,
      "learning_rate": 9.157348061512727e-05,
      "loss": 90.4529,
      "step": 11
    },
    {
      "epoch": 0.08874508897619598,
      "grad_norm": 102.1383285522461,
      "learning_rate": 8.966766701456177e-05,
      "loss": 88.3525,
      "step": 12
    },
    {
      "epoch": 0.09614051305754565,
      "grad_norm": 57.68673324584961,
      "learning_rate": 8.759199037394887e-05,
      "loss": 85.8088,
      "step": 13
    },
    {
      "epoch": 0.09614051305754565,
      "eval_loss": 2.8494491577148438,
      "eval_runtime": 81.3898,
      "eval_samples_per_second": 11.193,
      "eval_steps_per_second": 2.801,
      "step": 13
    },
    {
      "epoch": 0.1035359371388953,
      "grad_norm": 69.56649017333984,
      "learning_rate": 8.535533905932738e-05,
      "loss": 86.8469,
      "step": 14
    },
    {
      "epoch": 0.11093136122024497,
      "grad_norm": 67.40845489501953,
      "learning_rate": 8.296729075500344e-05,
      "loss": 88.4199,
      "step": 15
    },
    {
      "epoch": 0.11832678530159464,
      "grad_norm": 70.2257308959961,
      "learning_rate": 8.043807145043604e-05,
      "loss": 91.2391,
      "step": 16
    },
    {
      "epoch": 0.12572220938294432,
      "grad_norm": 46.68100357055664,
      "learning_rate": 7.777851165098012e-05,
      "loss": 87.3729,
      "step": 17
    },
    {
      "epoch": 0.13311763346429398,
      "grad_norm": 102.54607391357422,
      "learning_rate": 7.500000000000001e-05,
      "loss": 82.5651,
      "step": 18
    },
    {
      "epoch": 0.14051305754564364,
      "grad_norm": 116.23051452636719,
      "learning_rate": 7.211443451095007e-05,
      "loss": 93.5197,
      "step": 19
    },
    {
      "epoch": 0.1479084816269933,
      "grad_norm": 65.93101501464844,
      "learning_rate": 6.91341716182545e-05,
      "loss": 82.7672,
      "step": 20
    },
    {
      "epoch": 0.15530390570834296,
      "grad_norm": 57.98164749145508,
      "learning_rate": 6.607197326515808e-05,
      "loss": 85.3329,
      "step": 21
    },
    {
      "epoch": 0.16269932978969262,
      "grad_norm": 60.674495697021484,
      "learning_rate": 6.294095225512603e-05,
      "loss": 84.3847,
      "step": 22
    },
    {
      "epoch": 0.17009475387104228,
      "grad_norm": 72.81175231933594,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 88.0836,
      "step": 23
    },
    {
      "epoch": 0.17749017795239197,
      "grad_norm": 57.464141845703125,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 83.9486,
      "step": 24
    },
    {
      "epoch": 0.18488560203374163,
      "grad_norm": 41.056941986083984,
      "learning_rate": 5.327015646150716e-05,
      "loss": 79.0182,
      "step": 25
    },
    {
      "epoch": 0.1922810261150913,
      "grad_norm": 55.79506301879883,
      "learning_rate": 5e-05,
      "loss": 85.1922,
      "step": 26
    },
    {
      "epoch": 0.1922810261150913,
      "eval_loss": 2.7002317905426025,
      "eval_runtime": 81.3945,
      "eval_samples_per_second": 11.192,
      "eval_steps_per_second": 2.801,
      "step": 26
    },
    {
      "epoch": 0.19967645019644095,
      "grad_norm": 56.23450469970703,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 85.5899,
      "step": 27
    },
    {
      "epoch": 0.2070718742777906,
      "grad_norm": 55.729068756103516,
      "learning_rate": 4.347369038899744e-05,
      "loss": 84.6372,
      "step": 28
    },
    {
      "epoch": 0.21446729835914027,
      "grad_norm": 44.94795608520508,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 81.2659,
      "step": 29
    },
    {
      "epoch": 0.22186272244048993,
      "grad_norm": 43.78959274291992,
      "learning_rate": 3.705904774487396e-05,
      "loss": 84.2274,
      "step": 30
    },
    {
      "epoch": 0.22925814652183962,
      "grad_norm": 40.38026809692383,
      "learning_rate": 3.392802673484193e-05,
      "loss": 80.1799,
      "step": 31
    },
    {
      "epoch": 0.23665357060318928,
      "grad_norm": 39.87312698364258,
      "learning_rate": 3.086582838174551e-05,
      "loss": 80.2665,
      "step": 32
    },
    {
      "epoch": 0.24404899468453894,
      "grad_norm": 42.08469772338867,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 81.3834,
      "step": 33
    },
    {
      "epoch": 0.25144441876588863,
      "grad_norm": 40.6140251159668,
      "learning_rate": 2.500000000000001e-05,
      "loss": 80.8223,
      "step": 34
    },
    {
      "epoch": 0.25883984284723827,
      "grad_norm": 52.44778823852539,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 85.2504,
      "step": 35
    },
    {
      "epoch": 0.26623526692858795,
      "grad_norm": 44.90240478515625,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 80.1232,
      "step": 36
    },
    {
      "epoch": 0.2736306910099376,
      "grad_norm": 44.05722427368164,
      "learning_rate": 1.703270924499656e-05,
      "loss": 85.218,
      "step": 37
    },
    {
      "epoch": 0.2810261150912873,
      "grad_norm": 34.475032806396484,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 74.1421,
      "step": 38
    },
    {
      "epoch": 0.2884215391726369,
      "grad_norm": 50.83314514160156,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 85.2003,
      "step": 39
    },
    {
      "epoch": 0.2884215391726369,
      "eval_loss": 2.658181667327881,
      "eval_runtime": 81.3831,
      "eval_samples_per_second": 11.194,
      "eval_steps_per_second": 2.802,
      "step": 39
    },
    {
      "epoch": 0.2958169632539866,
      "grad_norm": 40.7790412902832,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 80.1685,
      "step": 40
    },
    {
      "epoch": 0.3032123873353363,
      "grad_norm": 44.15685272216797,
      "learning_rate": 8.426519384872733e-06,
      "loss": 83.519,
      "step": 41
    },
    {
      "epoch": 0.3106078114166859,
      "grad_norm": 33.223167419433594,
      "learning_rate": 6.698729810778065e-06,
      "loss": 74.8312,
      "step": 42
    },
    {
      "epoch": 0.3180032354980356,
      "grad_norm": 42.13229751586914,
      "learning_rate": 5.156362923365588e-06,
      "loss": 79.9294,
      "step": 43
    },
    {
      "epoch": 0.32539865957938524,
      "grad_norm": 36.99294662475586,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 78.9502,
      "step": 44
    },
    {
      "epoch": 0.33279408366073493,
      "grad_norm": 40.4399528503418,
      "learning_rate": 2.653493525244721e-06,
      "loss": 82.6471,
      "step": 45
    },
    {
      "epoch": 0.34018950774208456,
      "grad_norm": 40.24978256225586,
      "learning_rate": 1.70370868554659e-06,
      "loss": 82.7816,
      "step": 46
    },
    {
      "epoch": 0.34758493182343425,
      "grad_norm": 42.67814254760742,
      "learning_rate": 9.607359798384785e-07,
      "loss": 80.2397,
      "step": 47
    },
    {
      "epoch": 0.35498035590478394,
      "grad_norm": 37.027217864990234,
      "learning_rate": 4.277569313094809e-07,
      "loss": 80.1654,
      "step": 48
    },
    {
      "epoch": 0.3623757799861336,
      "grad_norm": 44.58671951293945,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 79.761,
      "step": 49
    },
    {
      "epoch": 0.36977120406748326,
      "grad_norm": 44.38752746582031,
      "learning_rate": 0.0,
      "loss": 76.1375,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 13,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1250084863803392e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}