{
  "best_metric": 1.1957429647445679,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.13065490772497143,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0026130981544994283,
      "grad_norm": 30.899654388427734,
      "learning_rate": 5e-05,
      "loss": 1.1267,
      "step": 1
    },
    {
      "epoch": 0.0026130981544994283,
      "eval_loss": 1.8499661684036255,
      "eval_runtime": 167.2588,
      "eval_samples_per_second": 15.413,
      "eval_steps_per_second": 1.931,
      "step": 1
    },
    {
      "epoch": 0.005226196308998857,
      "grad_norm": 33.87411880493164,
      "learning_rate": 0.0001,
      "loss": 1.2228,
      "step": 2
    },
    {
      "epoch": 0.007839294463498285,
      "grad_norm": 18.999311447143555,
      "learning_rate": 9.989294616193017e-05,
      "loss": 1.1836,
      "step": 3
    },
    {
      "epoch": 0.010452392617997713,
      "grad_norm": 21.47573471069336,
      "learning_rate": 9.957224306869053e-05,
      "loss": 1.1927,
      "step": 4
    },
    {
      "epoch": 0.013065490772497142,
      "grad_norm": 21.85623550415039,
      "learning_rate": 9.903926402016153e-05,
      "loss": 1.2051,
      "step": 5
    },
    {
      "epoch": 0.01567858892699657,
      "grad_norm": 17.990365982055664,
      "learning_rate": 9.829629131445342e-05,
      "loss": 1.2142,
      "step": 6
    },
    {
      "epoch": 0.018291687081496,
      "grad_norm": 14.827211380004883,
      "learning_rate": 9.73465064747553e-05,
      "loss": 1.1788,
      "step": 7
    },
    {
      "epoch": 0.020904785235995427,
      "grad_norm": 22.092012405395508,
      "learning_rate": 9.619397662556435e-05,
      "loss": 1.2685,
      "step": 8
    },
    {
      "epoch": 0.023517883390494855,
      "grad_norm": 32.1710319519043,
      "learning_rate": 9.484363707663442e-05,
      "loss": 1.4338,
      "step": 9
    },
    {
      "epoch": 0.026130981544994283,
      "grad_norm": 45.76557540893555,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.566,
      "step": 10
    },
    {
      "epoch": 0.02874407969949371,
      "grad_norm": 45.74945068359375,
      "learning_rate": 9.157348061512727e-05,
      "loss": 1.6296,
      "step": 11
    },
    {
      "epoch": 0.03135717785399314,
      "grad_norm": 52.03664016723633,
      "learning_rate": 8.966766701456177e-05,
      "loss": 1.812,
      "step": 12
    },
    {
      "epoch": 0.03397027600849257,
      "grad_norm": 54.32783126831055,
      "learning_rate": 8.759199037394887e-05,
      "loss": 1.5289,
      "step": 13
    },
    {
      "epoch": 0.036583374162992,
      "grad_norm": 25.161073684692383,
      "learning_rate": 8.535533905932738e-05,
      "loss": 1.1103,
      "step": 14
    },
    {
      "epoch": 0.039196472317491425,
      "grad_norm": 24.294761657714844,
      "learning_rate": 8.296729075500344e-05,
      "loss": 1.1767,
      "step": 15
    },
    {
      "epoch": 0.04180957047199085,
      "grad_norm": 19.136260986328125,
      "learning_rate": 8.043807145043604e-05,
      "loss": 1.109,
      "step": 16
    },
    {
      "epoch": 0.04442266862649028,
      "grad_norm": 15.456122398376465,
      "learning_rate": 7.777851165098012e-05,
      "loss": 1.1021,
      "step": 17
    },
    {
      "epoch": 0.04703576678098971,
      "grad_norm": 12.054083824157715,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.1288,
      "step": 18
    },
    {
      "epoch": 0.04964886493548914,
      "grad_norm": 11.738526344299316,
      "learning_rate": 7.211443451095007e-05,
      "loss": 1.1676,
      "step": 19
    },
    {
      "epoch": 0.05226196308998857,
      "grad_norm": 14.183579444885254,
      "learning_rate": 6.91341716182545e-05,
      "loss": 1.1864,
      "step": 20
    },
    {
      "epoch": 0.054875061244487995,
      "grad_norm": 18.19736099243164,
      "learning_rate": 6.607197326515808e-05,
      "loss": 1.2671,
      "step": 21
    },
    {
      "epoch": 0.05748815939898742,
      "grad_norm": 25.0335693359375,
      "learning_rate": 6.294095225512603e-05,
      "loss": 1.3673,
      "step": 22
    },
    {
      "epoch": 0.06010125755348685,
      "grad_norm": 36.76613235473633,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 1.5454,
      "step": 23
    },
    {
      "epoch": 0.06271435570798628,
      "grad_norm": 45.20155715942383,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 1.5598,
      "step": 24
    },
    {
      "epoch": 0.06532745386248572,
      "grad_norm": 76.99732971191406,
      "learning_rate": 5.327015646150716e-05,
      "loss": 1.9044,
      "step": 25
    },
    {
      "epoch": 0.06532745386248572,
      "eval_loss": 1.2333778142929077,
      "eval_runtime": 167.251,
      "eval_samples_per_second": 15.414,
      "eval_steps_per_second": 1.931,
      "step": 25
    },
    {
      "epoch": 0.06794055201698514,
      "grad_norm": 7.437511920928955,
      "learning_rate": 5e-05,
      "loss": 0.9982,
      "step": 26
    },
    {
      "epoch": 0.07055365017148457,
      "grad_norm": 6.7867207527160645,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 1.0384,
      "step": 27
    },
    {
      "epoch": 0.073166748325984,
      "grad_norm": 6.633565902709961,
      "learning_rate": 4.347369038899744e-05,
      "loss": 1.0307,
      "step": 28
    },
    {
      "epoch": 0.07577984648048343,
      "grad_norm": 7.300943851470947,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 1.0679,
      "step": 29
    },
    {
      "epoch": 0.07839294463498285,
      "grad_norm": 8.290701866149902,
      "learning_rate": 3.705904774487396e-05,
      "loss": 1.0623,
      "step": 30
    },
    {
      "epoch": 0.08100604278948229,
      "grad_norm": 10.008981704711914,
      "learning_rate": 3.392802673484193e-05,
      "loss": 1.1515,
      "step": 31
    },
    {
      "epoch": 0.0836191409439817,
      "grad_norm": 12.658711433410645,
      "learning_rate": 3.086582838174551e-05,
      "loss": 1.2007,
      "step": 32
    },
    {
      "epoch": 0.08623223909848114,
      "grad_norm": 17.597726821899414,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 1.24,
      "step": 33
    },
    {
      "epoch": 0.08884533725298056,
      "grad_norm": 21.86583709716797,
      "learning_rate": 2.500000000000001e-05,
      "loss": 1.3628,
      "step": 34
    },
    {
      "epoch": 0.09145843540748,
      "grad_norm": 26.087900161743164,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 1.3616,
      "step": 35
    },
    {
      "epoch": 0.09407153356197942,
      "grad_norm": 33.14945983886719,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 1.4062,
      "step": 36
    },
    {
      "epoch": 0.09668463171647886,
      "grad_norm": 39.74778747558594,
      "learning_rate": 1.703270924499656e-05,
      "loss": 1.6848,
      "step": 37
    },
    {
      "epoch": 0.09929772987097828,
      "grad_norm": 36.329551696777344,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 1.3833,
      "step": 38
    },
    {
      "epoch": 0.10191082802547771,
      "grad_norm": 6.016417503356934,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 1.0036,
      "step": 39
    },
    {
      "epoch": 0.10452392617997713,
      "grad_norm": 6.548324108123779,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 1.0207,
      "step": 40
    },
    {
      "epoch": 0.10713702433447657,
      "grad_norm": 6.872424602508545,
      "learning_rate": 8.426519384872733e-06,
      "loss": 1.0523,
      "step": 41
    },
    {
      "epoch": 0.10975012248897599,
      "grad_norm": 7.7177734375,
      "learning_rate": 6.698729810778065e-06,
      "loss": 1.0816,
      "step": 42
    },
    {
      "epoch": 0.11236322064347543,
      "grad_norm": 8.686575889587402,
      "learning_rate": 5.156362923365588e-06,
      "loss": 1.1517,
      "step": 43
    },
    {
      "epoch": 0.11497631879797485,
      "grad_norm": 9.739227294921875,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 1.115,
      "step": 44
    },
    {
      "epoch": 0.11758941695247428,
      "grad_norm": 12.140241622924805,
      "learning_rate": 2.653493525244721e-06,
      "loss": 1.1213,
      "step": 45
    },
    {
      "epoch": 0.1202025151069737,
      "grad_norm": 16.49872398376465,
      "learning_rate": 1.70370868554659e-06,
      "loss": 1.2322,
      "step": 46
    },
    {
      "epoch": 0.12281561326147314,
      "grad_norm": 21.253034591674805,
      "learning_rate": 9.607359798384785e-07,
      "loss": 1.3385,
      "step": 47
    },
    {
      "epoch": 0.12542871141597256,
      "grad_norm": 28.797561645507812,
      "learning_rate": 4.277569313094809e-07,
      "loss": 1.363,
      "step": 48
    },
    {
      "epoch": 0.12804180957047198,
      "grad_norm": 35.98500442504883,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 1.3961,
      "step": 49
    },
    {
      "epoch": 0.13065490772497143,
      "grad_norm": 53.93410873413086,
      "learning_rate": 0.0,
      "loss": 1.7542,
      "step": 50
    },
    {
      "epoch": 0.13065490772497143,
      "eval_loss": 1.1957429647445679,
      "eval_runtime": 167.2329,
      "eval_samples_per_second": 15.416,
      "eval_steps_per_second": 1.931,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.658021338284032e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}