|
{
  "best_metric": 0.7740952372550964,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.3104384943733023,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006208769887466045,
      "grad_norm": 19.560606002807617,
      "learning_rate": 5e-05,
      "loss": 10.9171,
      "step": 1
    },
    {
      "epoch": 0.006208769887466045,
      "eval_loss": 1.11298406124115,
      "eval_runtime": 28.9605,
      "eval_samples_per_second": 37.465,
      "eval_steps_per_second": 4.696,
      "step": 1
    },
    {
      "epoch": 0.01241753977493209,
      "grad_norm": 23.52311897277832,
      "learning_rate": 0.0001,
      "loss": 13.1041,
      "step": 2
    },
    {
      "epoch": 0.018626309662398137,
      "grad_norm": 12.965242385864258,
      "learning_rate": 9.989294616193017e-05,
      "loss": 13.7213,
      "step": 3
    },
    {
      "epoch": 0.02483507954986418,
      "grad_norm": 18.21459197998047,
      "learning_rate": 9.957224306869053e-05,
      "loss": 14.4377,
      "step": 4
    },
    {
      "epoch": 0.03104384943733023,
      "grad_norm": 15.356135368347168,
      "learning_rate": 9.903926402016153e-05,
      "loss": 14.4143,
      "step": 5
    },
    {
      "epoch": 0.037252619324796274,
      "grad_norm": 11.831315994262695,
      "learning_rate": 9.829629131445342e-05,
      "loss": 13.9598,
      "step": 6
    },
    {
      "epoch": 0.04346138921226232,
      "grad_norm": 11.837441444396973,
      "learning_rate": 9.73465064747553e-05,
      "loss": 14.6875,
      "step": 7
    },
    {
      "epoch": 0.04967015909972836,
      "grad_norm": 13.21572494506836,
      "learning_rate": 9.619397662556435e-05,
      "loss": 14.6386,
      "step": 8
    },
    {
      "epoch": 0.05587892898719441,
      "grad_norm": 14.249505043029785,
      "learning_rate": 9.484363707663442e-05,
      "loss": 14.6072,
      "step": 9
    },
    {
      "epoch": 0.06208769887466046,
      "grad_norm": 13.186634063720703,
      "learning_rate": 9.330127018922194e-05,
      "loss": 14.1525,
      "step": 10
    },
    {
      "epoch": 0.0682964687621265,
      "grad_norm": 13.565954208374023,
      "learning_rate": 9.157348061512727e-05,
      "loss": 14.272,
      "step": 11
    },
    {
      "epoch": 0.07450523864959255,
      "grad_norm": 14.474984169006348,
      "learning_rate": 8.966766701456177e-05,
      "loss": 15.3644,
      "step": 12
    },
    {
      "epoch": 0.0807140085370586,
      "grad_norm": 17.69236946105957,
      "learning_rate": 8.759199037394887e-05,
      "loss": 13.4338,
      "step": 13
    },
    {
      "epoch": 0.08692277842452464,
      "grad_norm": 7.923816680908203,
      "learning_rate": 8.535533905932738e-05,
      "loss": 11.0057,
      "step": 14
    },
    {
      "epoch": 0.09313154831199069,
      "grad_norm": 7.850938320159912,
      "learning_rate": 8.296729075500344e-05,
      "loss": 11.5823,
      "step": 15
    },
    {
      "epoch": 0.09934031819945673,
      "grad_norm": 7.282667636871338,
      "learning_rate": 8.043807145043604e-05,
      "loss": 12.5114,
      "step": 16
    },
    {
      "epoch": 0.10554908808692277,
      "grad_norm": 7.891377925872803,
      "learning_rate": 7.777851165098012e-05,
      "loss": 12.7295,
      "step": 17
    },
    {
      "epoch": 0.11175785797438882,
      "grad_norm": 7.253997325897217,
      "learning_rate": 7.500000000000001e-05,
      "loss": 12.8208,
      "step": 18
    },
    {
      "epoch": 0.11796662786185487,
      "grad_norm": 7.745668411254883,
      "learning_rate": 7.211443451095007e-05,
      "loss": 13.2546,
      "step": 19
    },
    {
      "epoch": 0.12417539774932092,
      "grad_norm": 8.683799743652344,
      "learning_rate": 6.91341716182545e-05,
      "loss": 13.5887,
      "step": 20
    },
    {
      "epoch": 0.13038416763678695,
      "grad_norm": 10.244661331176758,
      "learning_rate": 6.607197326515808e-05,
      "loss": 13.8284,
      "step": 21
    },
    {
      "epoch": 0.136592937524253,
      "grad_norm": 10.36053466796875,
      "learning_rate": 6.294095225512603e-05,
      "loss": 13.2502,
      "step": 22
    },
    {
      "epoch": 0.14280170741171905,
      "grad_norm": 11.601000785827637,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 13.6668,
      "step": 23
    },
    {
      "epoch": 0.1490104772991851,
      "grad_norm": 11.934355735778809,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 14.1468,
      "step": 24
    },
    {
      "epoch": 0.15521924718665114,
      "grad_norm": 16.675373077392578,
      "learning_rate": 5.327015646150716e-05,
      "loss": 14.9058,
      "step": 25
    },
    {
      "epoch": 0.15521924718665114,
      "eval_loss": 0.7947022318840027,
      "eval_runtime": 29.5165,
      "eval_samples_per_second": 36.759,
      "eval_steps_per_second": 4.608,
      "step": 25
    },
    {
      "epoch": 0.1614280170741172,
      "grad_norm": 5.082066535949707,
      "learning_rate": 5e-05,
      "loss": 9.8421,
      "step": 26
    },
    {
      "epoch": 0.16763678696158324,
      "grad_norm": 5.646501064300537,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 10.8886,
      "step": 27
    },
    {
      "epoch": 0.1738455568490493,
      "grad_norm": 5.460198402404785,
      "learning_rate": 4.347369038899744e-05,
      "loss": 11.0623,
      "step": 28
    },
    {
      "epoch": 0.18005432673651534,
      "grad_norm": 6.139311790466309,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 12.0344,
      "step": 29
    },
    {
      "epoch": 0.18626309662398138,
      "grad_norm": 6.533102035522461,
      "learning_rate": 3.705904774487396e-05,
      "loss": 12.672,
      "step": 30
    },
    {
      "epoch": 0.19247186651144743,
      "grad_norm": 7.447323322296143,
      "learning_rate": 3.392802673484193e-05,
      "loss": 13.082,
      "step": 31
    },
    {
      "epoch": 0.19868063639891345,
      "grad_norm": 8.437141418457031,
      "learning_rate": 3.086582838174551e-05,
      "loss": 13.3574,
      "step": 32
    },
    {
      "epoch": 0.2048894062863795,
      "grad_norm": 9.51140308380127,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 13.1222,
      "step": 33
    },
    {
      "epoch": 0.21109817617384555,
      "grad_norm": 10.1847562789917,
      "learning_rate": 2.500000000000001e-05,
      "loss": 13.8075,
      "step": 34
    },
    {
      "epoch": 0.2173069460613116,
      "grad_norm": 9.696736335754395,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 12.9002,
      "step": 35
    },
    {
      "epoch": 0.22351571594877764,
      "grad_norm": 10.21894645690918,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 13.2216,
      "step": 36
    },
    {
      "epoch": 0.2297244858362437,
      "grad_norm": 11.558626174926758,
      "learning_rate": 1.703270924499656e-05,
      "loss": 13.9245,
      "step": 37
    },
    {
      "epoch": 0.23593325572370974,
      "grad_norm": 11.94530963897705,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 12.3904,
      "step": 38
    },
    {
      "epoch": 0.2421420256111758,
      "grad_norm": 4.622230052947998,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 10.1714,
      "step": 39
    },
    {
      "epoch": 0.24835079549864184,
      "grad_norm": 5.022961139678955,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 11.678,
      "step": 40
    },
    {
      "epoch": 0.25455956538610786,
      "grad_norm": 5.296011924743652,
      "learning_rate": 8.426519384872733e-06,
      "loss": 11.71,
      "step": 41
    },
    {
      "epoch": 0.2607683352735739,
      "grad_norm": 5.8643412590026855,
      "learning_rate": 6.698729810778065e-06,
      "loss": 12.5232,
      "step": 42
    },
    {
      "epoch": 0.26697710516103995,
      "grad_norm": 6.14801025390625,
      "learning_rate": 5.156362923365588e-06,
      "loss": 13.006,
      "step": 43
    },
    {
      "epoch": 0.273185875048506,
      "grad_norm": 6.819303035736084,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 12.6432,
      "step": 44
    },
    {
      "epoch": 0.27939464493597205,
      "grad_norm": 7.7182722091674805,
      "learning_rate": 2.653493525244721e-06,
      "loss": 12.9021,
      "step": 45
    },
    {
      "epoch": 0.2856034148234381,
      "grad_norm": 8.286754608154297,
      "learning_rate": 1.70370868554659e-06,
      "loss": 13.2977,
      "step": 46
    },
    {
      "epoch": 0.29181218471090414,
      "grad_norm": 8.834489822387695,
      "learning_rate": 9.607359798384785e-07,
      "loss": 12.9507,
      "step": 47
    },
    {
      "epoch": 0.2980209545983702,
      "grad_norm": 10.305696487426758,
      "learning_rate": 4.277569313094809e-07,
      "loss": 12.9905,
      "step": 48
    },
    {
      "epoch": 0.30422972448583624,
      "grad_norm": 11.25792121887207,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 14.2701,
      "step": 49
    },
    {
      "epoch": 0.3104384943733023,
      "grad_norm": 16.177717208862305,
      "learning_rate": 0.0,
      "loss": 15.1178,
      "step": 50
    },
    {
      "epoch": 0.3104384943733023,
      "eval_loss": 0.7740952372550964,
      "eval_runtime": 29.1943,
      "eval_samples_per_second": 37.165,
      "eval_steps_per_second": 4.658,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.6880795595309056e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|