{
  "best_metric": 11.935344696044922,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.013632568205442803,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0002726513641088561,
      "grad_norm": 0.06683540344238281,
      "learning_rate": 5e-05,
      "loss": 11.9353,
      "step": 1
    },
    {
      "epoch": 0.0002726513641088561,
      "eval_loss": 11.939288139343262,
      "eval_runtime": 105.067,
      "eval_samples_per_second": 235.174,
      "eval_steps_per_second": 29.4,
      "step": 1
    },
    {
      "epoch": 0.0005453027282177122,
      "grad_norm": 0.05441287159919739,
      "learning_rate": 0.0001,
      "loss": 11.9452,
      "step": 2
    },
    {
      "epoch": 0.0008179540923265681,
      "grad_norm": 0.0542253702878952,
      "learning_rate": 9.989294616193017e-05,
      "loss": 11.948,
      "step": 3
    },
    {
      "epoch": 0.0010906054564354243,
      "grad_norm": 0.05546192079782486,
      "learning_rate": 9.957224306869053e-05,
      "loss": 11.9359,
      "step": 4
    },
    {
      "epoch": 0.0013632568205442803,
      "grad_norm": 0.04851478710770607,
      "learning_rate": 9.903926402016153e-05,
      "loss": 11.9374,
      "step": 5
    },
    {
      "epoch": 0.0016359081846531363,
      "grad_norm": 0.08121281117200851,
      "learning_rate": 9.829629131445342e-05,
      "loss": 11.934,
      "step": 6
    },
    {
      "epoch": 0.0019085595487619924,
      "grad_norm": 0.05987578630447388,
      "learning_rate": 9.73465064747553e-05,
      "loss": 11.9413,
      "step": 7
    },
    {
      "epoch": 0.0021812109128708486,
      "grad_norm": 0.053494490683078766,
      "learning_rate": 9.619397662556435e-05,
      "loss": 11.9387,
      "step": 8
    },
    {
      "epoch": 0.0024538622769797046,
      "grad_norm": 0.056421395391225815,
      "learning_rate": 9.484363707663442e-05,
      "loss": 11.9448,
      "step": 9
    },
    {
      "epoch": 0.0027265136410885606,
      "grad_norm": 0.06969709694385529,
      "learning_rate": 9.330127018922194e-05,
      "loss": 11.9377,
      "step": 10
    },
    {
      "epoch": 0.0029991650051974165,
      "grad_norm": 0.07090578228235245,
      "learning_rate": 9.157348061512727e-05,
      "loss": 11.9343,
      "step": 11
    },
    {
      "epoch": 0.0032718163693062725,
      "grad_norm": 0.08282779157161713,
      "learning_rate": 8.966766701456177e-05,
      "loss": 11.937,
      "step": 12
    },
    {
      "epoch": 0.003544467733415129,
      "grad_norm": 0.06570498645305634,
      "learning_rate": 8.759199037394887e-05,
      "loss": 11.9308,
      "step": 13
    },
    {
      "epoch": 0.003817119097523985,
      "grad_norm": 0.0746464878320694,
      "learning_rate": 8.535533905932738e-05,
      "loss": 11.9384,
      "step": 14
    },
    {
      "epoch": 0.004089770461632841,
      "grad_norm": 0.06106943264603615,
      "learning_rate": 8.296729075500344e-05,
      "loss": 11.945,
      "step": 15
    },
    {
      "epoch": 0.004362421825741697,
      "grad_norm": 0.0641741156578064,
      "learning_rate": 8.043807145043604e-05,
      "loss": 11.9439,
      "step": 16
    },
    {
      "epoch": 0.004635073189850553,
      "grad_norm": 0.0463690385222435,
      "learning_rate": 7.777851165098012e-05,
      "loss": 11.9327,
      "step": 17
    },
    {
      "epoch": 0.004907724553959409,
      "grad_norm": 0.0639299675822258,
      "learning_rate": 7.500000000000001e-05,
      "loss": 11.9392,
      "step": 18
    },
    {
      "epoch": 0.005180375918068265,
      "grad_norm": 0.061709675937891006,
      "learning_rate": 7.211443451095007e-05,
      "loss": 11.9369,
      "step": 19
    },
    {
      "epoch": 0.005453027282177121,
      "grad_norm": 0.05760058015584946,
      "learning_rate": 6.91341716182545e-05,
      "loss": 11.9361,
      "step": 20
    },
    {
      "epoch": 0.0057256786462859775,
      "grad_norm": 0.060040686279535294,
      "learning_rate": 6.607197326515808e-05,
      "loss": 11.9424,
      "step": 21
    },
    {
      "epoch": 0.005998330010394833,
      "grad_norm": 0.08672542870044708,
      "learning_rate": 6.294095225512603e-05,
      "loss": 11.9367,
      "step": 22
    },
    {
      "epoch": 0.0062709813745036895,
      "grad_norm": 0.08303763717412949,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 11.9304,
      "step": 23
    },
    {
      "epoch": 0.006543632738612545,
      "grad_norm": 0.08946041762828827,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 11.9338,
      "step": 24
    },
    {
      "epoch": 0.006816284102721401,
      "grad_norm": 0.08901721984148026,
      "learning_rate": 5.327015646150716e-05,
      "loss": 11.9395,
      "step": 25
    },
    {
      "epoch": 0.006816284102721401,
      "eval_loss": 11.936413764953613,
      "eval_runtime": 105.4372,
      "eval_samples_per_second": 234.348,
      "eval_steps_per_second": 29.297,
      "step": 25
    },
    {
      "epoch": 0.007088935466830258,
      "grad_norm": 0.07601311802864075,
      "learning_rate": 5e-05,
      "loss": 11.9352,
      "step": 26
    },
    {
      "epoch": 0.007361586830939113,
      "grad_norm": 0.05736768618226051,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 11.9408,
      "step": 27
    },
    {
      "epoch": 0.00763423819504797,
      "grad_norm": 0.0675104558467865,
      "learning_rate": 4.347369038899744e-05,
      "loss": 11.9415,
      "step": 28
    },
    {
      "epoch": 0.007906889559156825,
      "grad_norm": 0.06402797251939774,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 11.9409,
      "step": 29
    },
    {
      "epoch": 0.008179540923265682,
      "grad_norm": 0.06155916303396225,
      "learning_rate": 3.705904774487396e-05,
      "loss": 11.9298,
      "step": 30
    },
    {
      "epoch": 0.008452192287374538,
      "grad_norm": 0.08942660689353943,
      "learning_rate": 3.392802673484193e-05,
      "loss": 11.9303,
      "step": 31
    },
    {
      "epoch": 0.008724843651483394,
      "grad_norm": 0.08944093436002731,
      "learning_rate": 3.086582838174551e-05,
      "loss": 11.9482,
      "step": 32
    },
    {
      "epoch": 0.00899749501559225,
      "grad_norm": 0.06475299596786499,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 11.9365,
      "step": 33
    },
    {
      "epoch": 0.009270146379701106,
      "grad_norm": 0.07812382280826569,
      "learning_rate": 2.500000000000001e-05,
      "loss": 11.9407,
      "step": 34
    },
    {
      "epoch": 0.009542797743809962,
      "grad_norm": 0.08350091427564621,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 11.9416,
      "step": 35
    },
    {
      "epoch": 0.009815449107918818,
      "grad_norm": 0.0940701887011528,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 11.928,
      "step": 36
    },
    {
      "epoch": 0.010088100472027675,
      "grad_norm": 0.08712836354970932,
      "learning_rate": 1.703270924499656e-05,
      "loss": 11.9316,
      "step": 37
    },
    {
      "epoch": 0.01036075183613653,
      "grad_norm": 0.08466362208127975,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 11.9293,
      "step": 38
    },
    {
      "epoch": 0.010633403200245386,
      "grad_norm": 0.06831790506839752,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 11.9335,
      "step": 39
    },
    {
      "epoch": 0.010906054564354242,
      "grad_norm": 0.06954491883516312,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 11.9384,
      "step": 40
    },
    {
      "epoch": 0.011178705928463099,
      "grad_norm": 0.10085248202085495,
      "learning_rate": 8.426519384872733e-06,
      "loss": 11.9408,
      "step": 41
    },
    {
      "epoch": 0.011451357292571955,
      "grad_norm": 0.05636737868189812,
      "learning_rate": 6.698729810778065e-06,
      "loss": 11.9306,
      "step": 42
    },
    {
      "epoch": 0.01172400865668081,
      "grad_norm": 0.07049186527729034,
      "learning_rate": 5.156362923365588e-06,
      "loss": 11.9382,
      "step": 43
    },
    {
      "epoch": 0.011996660020789666,
      "grad_norm": 0.0855134129524231,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 11.9295,
      "step": 44
    },
    {
      "epoch": 0.012269311384898523,
      "grad_norm": 0.10347723960876465,
      "learning_rate": 2.653493525244721e-06,
      "loss": 11.9361,
      "step": 45
    },
    {
      "epoch": 0.012541962749007379,
      "grad_norm": 0.06473658233880997,
      "learning_rate": 1.70370868554659e-06,
      "loss": 11.9392,
      "step": 46
    },
    {
      "epoch": 0.012814614113116235,
      "grad_norm": 0.09248223900794983,
      "learning_rate": 9.607359798384785e-07,
      "loss": 11.9402,
      "step": 47
    },
    {
      "epoch": 0.01308726547722509,
      "grad_norm": 0.08586801588535309,
      "learning_rate": 4.277569313094809e-07,
      "loss": 11.9338,
      "step": 48
    },
    {
      "epoch": 0.013359916841333946,
      "grad_norm": 0.1005331352353096,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 11.9279,
      "step": 49
    },
    {
      "epoch": 0.013632568205442803,
      "grad_norm": 0.08666856586933136,
      "learning_rate": 0.0,
      "loss": 11.9394,
      "step": 50
    },
    {
      "epoch": 0.013632568205442803,
      "eval_loss": 11.935344696044922,
      "eval_runtime": 105.0647,
      "eval_samples_per_second": 235.179,
      "eval_steps_per_second": 29.401,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1042494259200.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}