{
  "best_metric": 10.373420715332031,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.12610340479192939,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0025220680958385876,
      "grad_norm": 0.027054335922002792,
      "learning_rate": 5e-05,
      "loss": 10.3787,
      "step": 1
    },
    {
      "epoch": 0.0025220680958385876,
      "eval_loss": 10.378507614135742,
      "eval_runtime": 2.8338,
      "eval_samples_per_second": 942.898,
      "eval_steps_per_second": 117.862,
      "step": 1
    },
    {
      "epoch": 0.005044136191677175,
      "grad_norm": 0.023505287244915962,
      "learning_rate": 0.0001,
      "loss": 10.3786,
      "step": 2
    },
    {
      "epoch": 0.007566204287515763,
      "grad_norm": 0.026017118245363235,
      "learning_rate": 9.989294616193017e-05,
      "loss": 10.3786,
      "step": 3
    },
    {
      "epoch": 0.01008827238335435,
      "grad_norm": 0.0248271394520998,
      "learning_rate": 9.957224306869053e-05,
      "loss": 10.3781,
      "step": 4
    },
    {
      "epoch": 0.012610340479192938,
      "grad_norm": 0.0254810843616724,
      "learning_rate": 9.903926402016153e-05,
      "loss": 10.3779,
      "step": 5
    },
    {
      "epoch": 0.015132408575031526,
      "grad_norm": 0.02538875676691532,
      "learning_rate": 9.829629131445342e-05,
      "loss": 10.3781,
      "step": 6
    },
    {
      "epoch": 0.017654476670870115,
      "grad_norm": 0.025415387004613876,
      "learning_rate": 9.73465064747553e-05,
      "loss": 10.3776,
      "step": 7
    },
    {
      "epoch": 0.0201765447667087,
      "grad_norm": 0.029065122827887535,
      "learning_rate": 9.619397662556435e-05,
      "loss": 10.3779,
      "step": 8
    },
    {
      "epoch": 0.02269861286254729,
      "grad_norm": 0.02831706777215004,
      "learning_rate": 9.484363707663442e-05,
      "loss": 10.3786,
      "step": 9
    },
    {
      "epoch": 0.025220680958385876,
      "grad_norm": 0.0291791670024395,
      "learning_rate": 9.330127018922194e-05,
      "loss": 10.377,
      "step": 10
    },
    {
      "epoch": 0.027742749054224466,
      "grad_norm": 0.030690718442201614,
      "learning_rate": 9.157348061512727e-05,
      "loss": 10.3765,
      "step": 11
    },
    {
      "epoch": 0.03026481715006305,
      "grad_norm": 0.032121799886226654,
      "learning_rate": 8.966766701456177e-05,
      "loss": 10.377,
      "step": 12
    },
    {
      "epoch": 0.03278688524590164,
      "grad_norm": 0.028205260634422302,
      "learning_rate": 8.759199037394887e-05,
      "loss": 10.3775,
      "step": 13
    },
    {
      "epoch": 0.03530895334174023,
      "grad_norm": 0.030245285481214523,
      "learning_rate": 8.535533905932738e-05,
      "loss": 10.3778,
      "step": 14
    },
    {
      "epoch": 0.03783102143757881,
      "grad_norm": 0.030657567083835602,
      "learning_rate": 8.296729075500344e-05,
      "loss": 10.3769,
      "step": 15
    },
    {
      "epoch": 0.0403530895334174,
      "grad_norm": 0.033357568085193634,
      "learning_rate": 8.043807145043604e-05,
      "loss": 10.3752,
      "step": 16
    },
    {
      "epoch": 0.04287515762925599,
      "grad_norm": 0.03190469369292259,
      "learning_rate": 7.777851165098012e-05,
      "loss": 10.3769,
      "step": 17
    },
    {
      "epoch": 0.04539722572509458,
      "grad_norm": 0.03442990407347679,
      "learning_rate": 7.500000000000001e-05,
      "loss": 10.3761,
      "step": 18
    },
    {
      "epoch": 0.04791929382093316,
      "grad_norm": 0.03860880061984062,
      "learning_rate": 7.211443451095007e-05,
      "loss": 10.3755,
      "step": 19
    },
    {
      "epoch": 0.05044136191677175,
      "grad_norm": 0.03901612013578415,
      "learning_rate": 6.91341716182545e-05,
      "loss": 10.3758,
      "step": 20
    },
    {
      "epoch": 0.05296343001261034,
      "grad_norm": 0.039788056164979935,
      "learning_rate": 6.607197326515808e-05,
      "loss": 10.3746,
      "step": 21
    },
    {
      "epoch": 0.05548549810844893,
      "grad_norm": 0.04064761474728584,
      "learning_rate": 6.294095225512603e-05,
      "loss": 10.3759,
      "step": 22
    },
    {
      "epoch": 0.058007566204287514,
      "grad_norm": 0.04556358978152275,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 10.376,
      "step": 23
    },
    {
      "epoch": 0.0605296343001261,
      "grad_norm": 0.050544317811727524,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 10.3747,
      "step": 24
    },
    {
      "epoch": 0.06305170239596469,
      "grad_norm": 0.05241914466023445,
      "learning_rate": 5.327015646150716e-05,
      "loss": 10.3745,
      "step": 25
    },
    {
      "epoch": 0.06305170239596469,
      "eval_loss": 10.375017166137695,
      "eval_runtime": 2.8388,
      "eval_samples_per_second": 941.231,
      "eval_steps_per_second": 117.654,
      "step": 25
    },
    {
      "epoch": 0.06557377049180328,
      "grad_norm": 0.04510338976979256,
      "learning_rate": 5e-05,
      "loss": 10.3746,
      "step": 26
    },
    {
      "epoch": 0.06809583858764187,
      "grad_norm": 0.045711468905210495,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 10.3747,
      "step": 27
    },
    {
      "epoch": 0.07061790668348046,
      "grad_norm": 0.04386696591973305,
      "learning_rate": 4.347369038899744e-05,
      "loss": 10.3759,
      "step": 28
    },
    {
      "epoch": 0.07313997477931904,
      "grad_norm": 0.045891132205724716,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 10.3753,
      "step": 29
    },
    {
      "epoch": 0.07566204287515763,
      "grad_norm": 0.04793156683444977,
      "learning_rate": 3.705904774487396e-05,
      "loss": 10.374,
      "step": 30
    },
    {
      "epoch": 0.07818411097099622,
      "grad_norm": 0.05221310257911682,
      "learning_rate": 3.392802673484193e-05,
      "loss": 10.3743,
      "step": 31
    },
    {
      "epoch": 0.0807061790668348,
      "grad_norm": 0.051459524780511856,
      "learning_rate": 3.086582838174551e-05,
      "loss": 10.3736,
      "step": 32
    },
    {
      "epoch": 0.0832282471626734,
      "grad_norm": 0.05057171732187271,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 10.3746,
      "step": 33
    },
    {
      "epoch": 0.08575031525851198,
      "grad_norm": 0.05461611598730087,
      "learning_rate": 2.500000000000001e-05,
      "loss": 10.3743,
      "step": 34
    },
    {
      "epoch": 0.08827238335435057,
      "grad_norm": 0.05995216593146324,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 10.3727,
      "step": 35
    },
    {
      "epoch": 0.09079445145018916,
      "grad_norm": 0.061333104968070984,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 10.3725,
      "step": 36
    },
    {
      "epoch": 0.09331651954602774,
      "grad_norm": 0.07217469811439514,
      "learning_rate": 1.703270924499656e-05,
      "loss": 10.3731,
      "step": 37
    },
    {
      "epoch": 0.09583858764186633,
      "grad_norm": 0.05127323046326637,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 10.3735,
      "step": 38
    },
    {
      "epoch": 0.09836065573770492,
      "grad_norm": 0.04944981634616852,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 10.3738,
      "step": 39
    },
    {
      "epoch": 0.1008827238335435,
      "grad_norm": 0.05343186855316162,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 10.3733,
      "step": 40
    },
    {
      "epoch": 0.1034047919293821,
      "grad_norm": 0.05616803839802742,
      "learning_rate": 8.426519384872733e-06,
      "loss": 10.3744,
      "step": 41
    },
    {
      "epoch": 0.10592686002522068,
      "grad_norm": 0.0577423982322216,
      "learning_rate": 6.698729810778065e-06,
      "loss": 10.3737,
      "step": 42
    },
    {
      "epoch": 0.10844892812105927,
      "grad_norm": 0.06030035391449928,
      "learning_rate": 5.156362923365588e-06,
      "loss": 10.3739,
      "step": 43
    },
    {
      "epoch": 0.11097099621689786,
      "grad_norm": 0.05783156305551529,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 10.3731,
      "step": 44
    },
    {
      "epoch": 0.11349306431273644,
      "grad_norm": 0.06098296493291855,
      "learning_rate": 2.653493525244721e-06,
      "loss": 10.3737,
      "step": 45
    },
    {
      "epoch": 0.11601513240857503,
      "grad_norm": 0.06156492978334427,
      "learning_rate": 1.70370868554659e-06,
      "loss": 10.372,
      "step": 46
    },
    {
      "epoch": 0.11853720050441362,
      "grad_norm": 0.06359221786260605,
      "learning_rate": 9.607359798384785e-07,
      "loss": 10.3731,
      "step": 47
    },
    {
      "epoch": 0.1210592686002522,
      "grad_norm": 0.06486078351736069,
      "learning_rate": 4.277569313094809e-07,
      "loss": 10.3731,
      "step": 48
    },
    {
      "epoch": 0.1235813366960908,
      "grad_norm": 0.0683504045009613,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 10.3725,
      "step": 49
    },
    {
      "epoch": 0.12610340479192939,
      "grad_norm": 0.07451239973306656,
      "learning_rate": 0.0,
      "loss": 10.3729,
      "step": 50
    },
    {
      "epoch": 0.12610340479192939,
      "eval_loss": 10.373420715332031,
      "eval_runtime": 2.8358,
      "eval_samples_per_second": 942.245,
      "eval_steps_per_second": 117.781,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 42888344764416.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}