{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.124463519313305,
  "eval_steps": 50,
  "global_step": 44,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06866952789699571,
      "grad_norm": 71.51506042480469,
      "learning_rate": 5e-06,
      "loss": 28.3366,
      "step": 1
    },
    {
      "epoch": 0.06866952789699571,
      "eval_loss": 1.670872449874878,
      "eval_runtime": 6.2032,
      "eval_samples_per_second": 7.899,
      "eval_steps_per_second": 2.096,
      "step": 1
    },
    {
      "epoch": 0.13733905579399142,
      "grad_norm": 68.20951843261719,
      "learning_rate": 1e-05,
      "loss": 25.6432,
      "step": 2
    },
    {
      "epoch": 0.20600858369098712,
      "grad_norm": 80.32080078125,
      "learning_rate": 1.5e-05,
      "loss": 28.4214,
      "step": 3
    },
    {
      "epoch": 0.27467811158798283,
      "grad_norm": 68.32494354248047,
      "learning_rate": 2e-05,
      "loss": 26.3899,
      "step": 4
    },
    {
      "epoch": 0.34334763948497854,
      "grad_norm": 53.57563781738281,
      "learning_rate": 2.5e-05,
      "loss": 24.8964,
      "step": 5
    },
    {
      "epoch": 0.41201716738197425,
      "grad_norm": 41.535865783691406,
      "learning_rate": 3e-05,
      "loss": 22.6295,
      "step": 6
    },
    {
      "epoch": 0.48068669527896996,
      "grad_norm": 35.4793701171875,
      "learning_rate": 3.5e-05,
      "loss": 21.0718,
      "step": 7
    },
    {
      "epoch": 0.5493562231759657,
      "grad_norm": 27.92133140563965,
      "learning_rate": 4e-05,
      "loss": 17.8598,
      "step": 8
    },
    {
      "epoch": 0.6180257510729614,
      "grad_norm": 23.524612426757812,
      "learning_rate": 4.5e-05,
      "loss": 15.9413,
      "step": 9
    },
    {
      "epoch": 0.6866952789699571,
      "grad_norm": 21.849180221557617,
      "learning_rate": 5e-05,
      "loss": 14.0453,
      "step": 10
    },
    {
      "epoch": 0.7553648068669528,
      "grad_norm": 22.79987907409668,
      "learning_rate": 5.500000000000001e-05,
      "loss": 12.4121,
      "step": 11
    },
    {
      "epoch": 0.8240343347639485,
      "grad_norm": 28.22759246826172,
      "learning_rate": 6e-05,
      "loss": 11.8626,
      "step": 12
    },
    {
      "epoch": 0.8927038626609443,
      "grad_norm": 27.044172286987305,
      "learning_rate": 6.500000000000001e-05,
      "loss": 10.725,
      "step": 13
    },
    {
      "epoch": 0.9613733905579399,
      "grad_norm": 31.80463409423828,
      "learning_rate": 7e-05,
      "loss": 11.4271,
      "step": 14
    },
    {
      "epoch": 1.0643776824034334,
      "grad_norm": 23.66387367248535,
      "learning_rate": 7.500000000000001e-05,
      "loss": 9.8895,
      "step": 15
    },
    {
      "epoch": 1.1330472103004292,
      "grad_norm": 18.42772674560547,
      "learning_rate": 8e-05,
      "loss": 7.9848,
      "step": 16
    },
    {
      "epoch": 1.201716738197425,
      "grad_norm": 16.588830947875977,
      "learning_rate": 8.5e-05,
      "loss": 7.3708,
      "step": 17
    },
    {
      "epoch": 1.2703862660944205,
      "grad_norm": 18.946819305419922,
      "learning_rate": 9e-05,
      "loss": 6.2703,
      "step": 18
    },
    {
      "epoch": 1.3390557939914163,
      "grad_norm": 14.745619773864746,
      "learning_rate": 9.5e-05,
      "loss": 6.0982,
      "step": 19
    },
    {
      "epoch": 1.407725321888412,
      "grad_norm": 23.77857208251953,
      "learning_rate": 0.0001,
      "loss": 8.2525,
      "step": 20
    },
    {
      "epoch": 1.4763948497854078,
      "grad_norm": 17.072463989257812,
      "learning_rate": 9.957224306869053e-05,
      "loss": 5.8517,
      "step": 21
    },
    {
      "epoch": 1.5450643776824036,
      "grad_norm": 19.66710090637207,
      "learning_rate": 9.829629131445342e-05,
      "loss": 6.3888,
      "step": 22
    },
    {
      "epoch": 1.613733905579399,
      "grad_norm": 14.107242584228516,
      "learning_rate": 9.619397662556435e-05,
      "loss": 5.4112,
      "step": 23
    },
    {
      "epoch": 1.6824034334763949,
      "grad_norm": 13.895885467529297,
      "learning_rate": 9.330127018922194e-05,
      "loss": 4.5546,
      "step": 24
    },
    {
      "epoch": 1.7510729613733904,
      "grad_norm": 21.809589385986328,
      "learning_rate": 8.966766701456177e-05,
      "loss": 6.8744,
      "step": 25
    },
    {
      "epoch": 1.8197424892703862,
      "grad_norm": 14.282073974609375,
      "learning_rate": 8.535533905932738e-05,
      "loss": 4.5111,
      "step": 26
    },
    {
      "epoch": 1.888412017167382,
      "grad_norm": 18.04335594177246,
      "learning_rate": 8.043807145043604e-05,
      "loss": 4.8618,
      "step": 27
    },
    {
      "epoch": 1.9570815450643777,
      "grad_norm": 16.261669158935547,
      "learning_rate": 7.500000000000001e-05,
      "loss": 4.7066,
      "step": 28
    },
    {
      "epoch": 2.060085836909871,
      "grad_norm": 14.687187194824219,
      "learning_rate": 6.91341716182545e-05,
      "loss": 4.5166,
      "step": 29
    },
    {
      "epoch": 2.128755364806867,
      "grad_norm": 13.488051414489746,
      "learning_rate": 6.294095225512603e-05,
      "loss": 3.9256,
      "step": 30
    },
    {
      "epoch": 2.1974248927038627,
      "grad_norm": 13.153692245483398,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 3.1638,
      "step": 31
    },
    {
      "epoch": 2.2660944206008584,
      "grad_norm": 9.933245658874512,
      "learning_rate": 5e-05,
      "loss": 2.767,
      "step": 32
    },
    {
      "epoch": 2.334763948497854,
      "grad_norm": 10.760828971862793,
      "learning_rate": 4.347369038899744e-05,
      "loss": 3.6228,
      "step": 33
    },
    {
      "epoch": 2.40343347639485,
      "grad_norm": 12.465494155883789,
      "learning_rate": 3.705904774487396e-05,
      "loss": 4.5534,
      "step": 34
    },
    {
      "epoch": 2.4721030042918457,
      "grad_norm": 9.80876636505127,
      "learning_rate": 3.086582838174551e-05,
      "loss": 2.7875,
      "step": 35
    },
    {
      "epoch": 2.540772532188841,
      "grad_norm": 10.498035430908203,
      "learning_rate": 2.500000000000001e-05,
      "loss": 2.8723,
      "step": 36
    },
    {
      "epoch": 2.609442060085837,
      "grad_norm": 14.041341781616211,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 4.0086,
      "step": 37
    },
    {
      "epoch": 2.6781115879828326,
      "grad_norm": 11.766754150390625,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 3.3765,
      "step": 38
    },
    {
      "epoch": 2.7467811158798283,
      "grad_norm": 9.810280799865723,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 3.177,
      "step": 39
    },
    {
      "epoch": 2.815450643776824,
      "grad_norm": 10.37423038482666,
      "learning_rate": 6.698729810778065e-06,
      "loss": 3.3284,
      "step": 40
    },
    {
      "epoch": 2.88412017167382,
      "grad_norm": 10.795289993286133,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 2.8375,
      "step": 41
    },
    {
      "epoch": 2.9527896995708156,
      "grad_norm": 12.411961555480957,
      "learning_rate": 1.70370868554659e-06,
      "loss": 3.6438,
      "step": 42
    },
    {
      "epoch": 3.055793991416309,
      "grad_norm": 11.045405387878418,
      "learning_rate": 4.277569313094809e-07,
      "loss": 3.1393,
      "step": 43
    },
    {
      "epoch": 3.124463519313305,
      "grad_norm": 10.357391357421875,
      "learning_rate": 0.0,
      "loss": 3.022,
      "step": 44
    }
  ],
  "logging_steps": 1,
  "max_steps": 44,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.3416725626997965e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}