{
  "best_metric": 1.3974195718765259,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.0665889795238888,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.001331779590477776,
      "grad_norm": 1.716598391532898,
      "learning_rate": 5e-05,
      "loss": 2.2224,
      "step": 1
    },
    {
      "epoch": 0.001331779590477776,
      "eval_loss": 2.8852221965789795,
      "eval_runtime": 133.4508,
      "eval_samples_per_second": 37.909,
      "eval_steps_per_second": 4.743,
      "step": 1
    },
    {
      "epoch": 0.002663559180955552,
      "grad_norm": 1.9544339179992676,
      "learning_rate": 0.0001,
      "loss": 2.3534,
      "step": 2
    },
    {
      "epoch": 0.003995338771433328,
      "grad_norm": 1.9382542371749878,
      "learning_rate": 9.989294616193017e-05,
      "loss": 2.315,
      "step": 3
    },
    {
      "epoch": 0.005327118361911104,
      "grad_norm": 1.8056589365005493,
      "learning_rate": 9.957224306869053e-05,
      "loss": 2.223,
      "step": 4
    },
    {
      "epoch": 0.00665889795238888,
      "grad_norm": 1.8618559837341309,
      "learning_rate": 9.903926402016153e-05,
      "loss": 2.094,
      "step": 5
    },
    {
      "epoch": 0.007990677542866656,
      "grad_norm": 1.720023512840271,
      "learning_rate": 9.829629131445342e-05,
      "loss": 1.9048,
      "step": 6
    },
    {
      "epoch": 0.009322457133344432,
      "grad_norm": 1.8842785358428955,
      "learning_rate": 9.73465064747553e-05,
      "loss": 1.7269,
      "step": 7
    },
    {
      "epoch": 0.010654236723822208,
      "grad_norm": 1.7412643432617188,
      "learning_rate": 9.619397662556435e-05,
      "loss": 1.7371,
      "step": 8
    },
    {
      "epoch": 0.011986016314299984,
      "grad_norm": 2.0039165019989014,
      "learning_rate": 9.484363707663442e-05,
      "loss": 1.6742,
      "step": 9
    },
    {
      "epoch": 0.01331779590477776,
      "grad_norm": 1.7889673709869385,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.5504,
      "step": 10
    },
    {
      "epoch": 0.014649575495255536,
      "grad_norm": 1.569449782371521,
      "learning_rate": 9.157348061512727e-05,
      "loss": 1.4855,
      "step": 11
    },
    {
      "epoch": 0.015981355085733312,
      "grad_norm": 1.9054864645004272,
      "learning_rate": 8.966766701456177e-05,
      "loss": 1.5665,
      "step": 12
    },
    {
      "epoch": 0.017313134676211088,
      "grad_norm": 1.3979929685592651,
      "learning_rate": 8.759199037394887e-05,
      "loss": 1.8186,
      "step": 13
    },
    {
      "epoch": 0.018644914266688864,
      "grad_norm": 0.9748731255531311,
      "learning_rate": 8.535533905932738e-05,
      "loss": 1.6544,
      "step": 14
    },
    {
      "epoch": 0.01997669385716664,
      "grad_norm": 0.8066844344139099,
      "learning_rate": 8.296729075500344e-05,
      "loss": 1.5756,
      "step": 15
    },
    {
      "epoch": 0.021308473447644416,
      "grad_norm": 0.7804756164550781,
      "learning_rate": 8.043807145043604e-05,
      "loss": 1.6058,
      "step": 16
    },
    {
      "epoch": 0.022640253038122192,
      "grad_norm": 0.7928308248519897,
      "learning_rate": 7.777851165098012e-05,
      "loss": 1.3039,
      "step": 17
    },
    {
      "epoch": 0.023972032628599968,
      "grad_norm": 0.8666258454322815,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.3077,
      "step": 18
    },
    {
      "epoch": 0.025303812219077744,
      "grad_norm": 0.960999608039856,
      "learning_rate": 7.211443451095007e-05,
      "loss": 1.4093,
      "step": 19
    },
    {
      "epoch": 0.02663559180955552,
      "grad_norm": 1.0699888467788696,
      "learning_rate": 6.91341716182545e-05,
      "loss": 1.357,
      "step": 20
    },
    {
      "epoch": 0.027967371400033296,
      "grad_norm": 1.1570630073547363,
      "learning_rate": 6.607197326515808e-05,
      "loss": 1.4088,
      "step": 21
    },
    {
      "epoch": 0.029299150990511072,
      "grad_norm": 1.1443278789520264,
      "learning_rate": 6.294095225512603e-05,
      "loss": 1.3868,
      "step": 22
    },
    {
      "epoch": 0.030630930580988848,
      "grad_norm": 1.2050145864486694,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 1.3297,
      "step": 23
    },
    {
      "epoch": 0.031962710171466624,
      "grad_norm": 1.2424426078796387,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 1.1877,
      "step": 24
    },
    {
      "epoch": 0.0332944897619444,
      "grad_norm": 1.9407079219818115,
      "learning_rate": 5.327015646150716e-05,
      "loss": 1.5371,
      "step": 25
    },
    {
      "epoch": 0.0332944897619444,
      "eval_loss": 1.4364030361175537,
      "eval_runtime": 132.66,
      "eval_samples_per_second": 38.135,
      "eval_steps_per_second": 4.772,
      "step": 25
    },
    {
      "epoch": 0.034626269352422176,
      "grad_norm": 0.7670523524284363,
      "learning_rate": 5e-05,
      "loss": 1.6652,
      "step": 26
    },
    {
      "epoch": 0.03595804894289995,
      "grad_norm": 0.7151476740837097,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 1.5839,
      "step": 27
    },
    {
      "epoch": 0.03728982853337773,
      "grad_norm": 0.7840404510498047,
      "learning_rate": 4.347369038899744e-05,
      "loss": 1.4314,
      "step": 28
    },
    {
      "epoch": 0.038621608123855504,
      "grad_norm": 0.7918953895568848,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 1.4365,
      "step": 29
    },
    {
      "epoch": 0.03995338771433328,
      "grad_norm": 0.8279181718826294,
      "learning_rate": 3.705904774487396e-05,
      "loss": 1.3891,
      "step": 30
    },
    {
      "epoch": 0.041285167304811056,
      "grad_norm": 0.8578282594680786,
      "learning_rate": 3.392802673484193e-05,
      "loss": 1.2858,
      "step": 31
    },
    {
      "epoch": 0.04261694689528883,
      "grad_norm": 0.8482633829116821,
      "learning_rate": 3.086582838174551e-05,
      "loss": 1.1511,
      "step": 32
    },
    {
      "epoch": 0.04394872648576661,
      "grad_norm": 0.8851420283317566,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 1.4296,
      "step": 33
    },
    {
      "epoch": 0.045280506076244384,
      "grad_norm": 1.0721423625946045,
      "learning_rate": 2.500000000000001e-05,
      "loss": 1.4503,
      "step": 34
    },
    {
      "epoch": 0.04661228566672216,
      "grad_norm": 1.0197111368179321,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 1.4107,
      "step": 35
    },
    {
      "epoch": 0.047944065257199936,
      "grad_norm": 1.1104793548583984,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 1.236,
      "step": 36
    },
    {
      "epoch": 0.04927584484767771,
      "grad_norm": 1.620796799659729,
      "learning_rate": 1.703270924499656e-05,
      "loss": 1.4854,
      "step": 37
    },
    {
      "epoch": 0.05060762443815549,
      "grad_norm": 0.7224025726318359,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 1.7413,
      "step": 38
    },
    {
      "epoch": 0.051939404028633264,
      "grad_norm": 0.6184026598930359,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 1.5911,
      "step": 39
    },
    {
      "epoch": 0.05327118361911104,
      "grad_norm": 0.6563757658004761,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 1.5257,
      "step": 40
    },
    {
      "epoch": 0.054602963209588816,
      "grad_norm": 0.7842867970466614,
      "learning_rate": 8.426519384872733e-06,
      "loss": 1.4908,
      "step": 41
    },
    {
      "epoch": 0.05593474280006659,
      "grad_norm": 0.6858050227165222,
      "learning_rate": 6.698729810778065e-06,
      "loss": 1.3321,
      "step": 42
    },
    {
      "epoch": 0.05726652239054437,
      "grad_norm": 0.7674694061279297,
      "learning_rate": 5.156362923365588e-06,
      "loss": 1.3322,
      "step": 43
    },
    {
      "epoch": 0.058598301981022144,
      "grad_norm": 0.7808869481086731,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 1.2364,
      "step": 44
    },
    {
      "epoch": 0.05993008157149992,
      "grad_norm": 0.9280687570571899,
      "learning_rate": 2.653493525244721e-06,
      "loss": 1.3535,
      "step": 45
    },
    {
      "epoch": 0.061261861161977696,
      "grad_norm": 0.8980047702789307,
      "learning_rate": 1.70370868554659e-06,
      "loss": 1.2546,
      "step": 46
    },
    {
      "epoch": 0.06259364075245546,
      "grad_norm": 0.9971718192100525,
      "learning_rate": 9.607359798384785e-07,
      "loss": 1.3707,
      "step": 47
    },
    {
      "epoch": 0.06392542034293325,
      "grad_norm": 1.1494028568267822,
      "learning_rate": 4.277569313094809e-07,
      "loss": 1.1686,
      "step": 48
    },
    {
      "epoch": 0.06525719993341102,
      "grad_norm": 1.1565730571746826,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 1.2963,
      "step": 49
    },
    {
      "epoch": 0.0665889795238888,
      "grad_norm": 1.65233314037323,
      "learning_rate": 0.0,
      "loss": 1.5058,
      "step": 50
    },
    {
      "epoch": 0.0665889795238888,
      "eval_loss": 1.3974195718765259,
      "eval_runtime": 132.6366,
      "eval_samples_per_second": 38.142,
      "eval_steps_per_second": 4.772,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.968083617316864e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}