{
  "best_metric": 3.5291860103607178,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 3.0256045519203414,
  "eval_steps": 25,
  "global_step": 66,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04551920341394026,
      "grad_norm": 1.5629485845565796,
      "learning_rate": 5e-05,
      "loss": 3.7004,
      "step": 1
    },
    {
      "epoch": 0.04551920341394026,
      "eval_loss": 4.0469136238098145,
      "eval_runtime": 4.6058,
      "eval_samples_per_second": 10.856,
      "eval_steps_per_second": 2.823,
      "step": 1
    },
    {
      "epoch": 0.09103840682788052,
      "grad_norm": 1.6075812578201294,
      "learning_rate": 0.0001,
      "loss": 3.7173,
      "step": 2
    },
    {
      "epoch": 0.13655761024182078,
      "grad_norm": 1.4795007705688477,
      "learning_rate": 9.994579552923277e-05,
      "loss": 3.7871,
      "step": 3
    },
    {
      "epoch": 0.18207681365576103,
      "grad_norm": 0.668359100818634,
      "learning_rate": 9.978331270024886e-05,
      "loss": 3.8203,
      "step": 4
    },
    {
      "epoch": 0.22759601706970128,
      "grad_norm": 1.8153729438781738,
      "learning_rate": 9.951294294841516e-05,
      "loss": 3.8883,
      "step": 5
    },
    {
      "epoch": 0.27311522048364156,
      "grad_norm": 2.145763635635376,
      "learning_rate": 9.913533761814537e-05,
      "loss": 3.6344,
      "step": 6
    },
    {
      "epoch": 0.3186344238975818,
      "grad_norm": 1.1244003772735596,
      "learning_rate": 9.865140639375449e-05,
      "loss": 3.4501,
      "step": 7
    },
    {
      "epoch": 0.36415362731152207,
      "grad_norm": 0.4040810465812683,
      "learning_rate": 9.80623151079494e-05,
      "loss": 3.4181,
      "step": 8
    },
    {
      "epoch": 0.4096728307254623,
      "grad_norm": 0.498119592666626,
      "learning_rate": 9.736948293323593e-05,
      "loss": 3.5928,
      "step": 9
    },
    {
      "epoch": 0.45519203413940257,
      "grad_norm": 0.8037330508232117,
      "learning_rate": 9.657457896300791e-05,
      "loss": 3.6543,
      "step": 10
    },
    {
      "epoch": 0.5007112375533428,
      "grad_norm": 0.6568593978881836,
      "learning_rate": 9.567951819055496e-05,
      "loss": 3.4914,
      "step": 11
    },
    {
      "epoch": 0.5462304409672831,
      "grad_norm": 0.5638713836669922,
      "learning_rate": 9.468645689567598e-05,
      "loss": 3.3873,
      "step": 12
    },
    {
      "epoch": 0.5917496443812233,
      "grad_norm": 0.4466124475002289,
      "learning_rate": 9.359778745001225e-05,
      "loss": 3.4433,
      "step": 13
    },
    {
      "epoch": 0.6372688477951636,
      "grad_norm": 0.42094528675079346,
      "learning_rate": 9.241613255361455e-05,
      "loss": 3.4876,
      "step": 14
    },
    {
      "epoch": 0.6827880512091038,
      "grad_norm": 0.5585120916366577,
      "learning_rate": 9.114433891662902e-05,
      "loss": 3.5733,
      "step": 15
    },
    {
      "epoch": 0.7283072546230441,
      "grad_norm": 0.6824477910995483,
      "learning_rate": 8.978547040132317e-05,
      "loss": 3.3876,
      "step": 16
    },
    {
      "epoch": 0.7738264580369844,
      "grad_norm": 0.7325260639190674,
      "learning_rate": 8.834280064097317e-05,
      "loss": 3.4021,
      "step": 17
    },
    {
      "epoch": 0.8193456614509246,
      "grad_norm": 0.6445585489273071,
      "learning_rate": 8.681980515339464e-05,
      "loss": 3.4157,
      "step": 18
    },
    {
      "epoch": 0.8648648648648649,
      "grad_norm": 0.4666988253593445,
      "learning_rate": 8.522015296811584e-05,
      "loss": 3.4133,
      "step": 19
    },
    {
      "epoch": 0.9103840682788051,
      "grad_norm": 0.4899771213531494,
      "learning_rate": 8.354769778736406e-05,
      "loss": 3.5271,
      "step": 20
    },
    {
      "epoch": 0.9559032716927454,
      "grad_norm": 0.661791980266571,
      "learning_rate": 8.180646870215952e-05,
      "loss": 3.6405,
      "step": 21
    },
    {
      "epoch": 1.0085348506401137,
      "grad_norm": 0.4138284921646118,
      "learning_rate": 8.000066048588211e-05,
      "loss": 4.0918,
      "step": 22
    },
    {
      "epoch": 1.054054054054054,
      "grad_norm": 0.3383225202560425,
      "learning_rate": 7.813462348869497e-05,
      "loss": 3.4534,
      "step": 23
    },
    {
      "epoch": 1.0995732574679944,
      "grad_norm": 0.3657989501953125,
      "learning_rate": 7.62128531571699e-05,
      "loss": 3.2994,
      "step": 24
    },
    {
      "epoch": 1.1450924608819346,
      "grad_norm": 0.36880072951316833,
      "learning_rate": 7.42399792043627e-05,
      "loss": 3.2221,
      "step": 25
    },
    {
      "epoch": 1.1450924608819346,
      "eval_loss": 3.59340238571167,
      "eval_runtime": 4.8896,
      "eval_samples_per_second": 10.226,
      "eval_steps_per_second": 2.659,
      "step": 25
    },
    {
      "epoch": 1.1906116642958748,
      "grad_norm": 0.48038947582244873,
      "learning_rate": 7.222075445642904e-05,
      "loss": 3.6039,
      "step": 26
    },
    {
      "epoch": 1.236130867709815,
      "grad_norm": 0.41462457180023193,
      "learning_rate": 7.01600434026499e-05,
      "loss": 2.6349,
      "step": 27
    },
    {
      "epoch": 1.2816500711237553,
      "grad_norm": 0.5014923214912415,
      "learning_rate": 6.80628104764508e-05,
      "loss": 3.9871,
      "step": 28
    },
    {
      "epoch": 1.3271692745376955,
      "grad_norm": 0.42576462030410767,
      "learning_rate": 6.593410809564689e-05,
      "loss": 3.366,
      "step": 29
    },
    {
      "epoch": 1.3726884779516357,
      "grad_norm": 0.3873802721500397,
      "learning_rate": 6.377906449072578e-05,
      "loss": 3.2166,
      "step": 30
    },
    {
      "epoch": 1.4182076813655762,
      "grad_norm": 0.47148817777633667,
      "learning_rate": 6.160287135049127e-05,
      "loss": 3.3829,
      "step": 31
    },
    {
      "epoch": 1.4637268847795164,
      "grad_norm": 0.4588596820831299,
      "learning_rate": 5.941077131483025e-05,
      "loss": 2.8047,
      "step": 32
    },
    {
      "epoch": 1.5092460881934566,
      "grad_norm": 0.3620421290397644,
      "learning_rate": 5.720804534473382e-05,
      "loss": 3.88,
      "step": 33
    },
    {
      "epoch": 1.5547652916073968,
      "grad_norm": 0.29916054010391235,
      "learning_rate": 5.500000000000001e-05,
      "loss": 3.2053,
      "step": 34
    },
    {
      "epoch": 1.600284495021337,
      "grad_norm": 0.38498491048812866,
      "learning_rate": 5.27919546552662e-05,
      "loss": 3.4077,
      "step": 35
    },
    {
      "epoch": 1.6458036984352775,
      "grad_norm": 0.3801584541797638,
      "learning_rate": 5.058922868516978e-05,
      "loss": 3.4857,
      "step": 36
    },
    {
      "epoch": 1.6913229018492175,
      "grad_norm": 0.3769657015800476,
      "learning_rate": 4.839712864950873e-05,
      "loss": 2.7502,
      "step": 37
    },
    {
      "epoch": 1.736842105263158,
      "grad_norm": 0.4362291097640991,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 3.6884,
      "step": 38
    },
    {
      "epoch": 1.7823613086770982,
      "grad_norm": 0.2888215184211731,
      "learning_rate": 4.406589190435313e-05,
      "loss": 3.2926,
      "step": 39
    },
    {
      "epoch": 1.8278805120910384,
      "grad_norm": 0.34629708528518677,
      "learning_rate": 4.19371895235492e-05,
      "loss": 3.2089,
      "step": 40
    },
    {
      "epoch": 1.8733997155049786,
      "grad_norm": 0.382393479347229,
      "learning_rate": 3.98399565973501e-05,
      "loss": 3.2629,
      "step": 41
    },
    {
      "epoch": 1.9189189189189189,
      "grad_norm": 0.5005021691322327,
      "learning_rate": 3.777924554357096e-05,
      "loss": 3.756,
      "step": 42
    },
    {
      "epoch": 1.9644381223328593,
      "grad_norm": 0.6277827620506287,
      "learning_rate": 3.576002079563732e-05,
      "loss": 3.6943,
      "step": 43
    },
    {
      "epoch": 2.0170697012802274,
      "grad_norm": 0.3988167643547058,
      "learning_rate": 3.378714684283011e-05,
      "loss": 3.3128,
      "step": 44
    },
    {
      "epoch": 2.062588904694168,
      "grad_norm": 0.38560742139816284,
      "learning_rate": 3.186537651130503e-05,
      "loss": 3.331,
      "step": 45
    },
    {
      "epoch": 2.108108108108108,
      "grad_norm": 0.32665321230888367,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 3.0789,
      "step": 46
    },
    {
      "epoch": 2.1536273115220483,
      "grad_norm": 0.4147993326187134,
      "learning_rate": 2.8193531297840503e-05,
      "loss": 3.1504,
      "step": 47
    },
    {
      "epoch": 2.199146514935989,
      "grad_norm": 0.515488862991333,
      "learning_rate": 2.645230221263596e-05,
      "loss": 3.508,
      "step": 48
    },
    {
      "epoch": 2.244665718349929,
      "grad_norm": 0.41275349259376526,
      "learning_rate": 2.4779847031884175e-05,
      "loss": 2.3739,
      "step": 49
    },
    {
      "epoch": 2.2901849217638692,
      "grad_norm": 0.35043126344680786,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 3.9068,
      "step": 50
    },
    {
      "epoch": 2.2901849217638692,
      "eval_loss": 3.5291860103607178,
      "eval_runtime": 4.8939,
      "eval_samples_per_second": 10.217,
      "eval_steps_per_second": 2.656,
      "step": 50
    },
    {
      "epoch": 2.3357041251778092,
      "grad_norm": 0.3074302077293396,
      "learning_rate": 2.165719935902685e-05,
      "loss": 3.0223,
      "step": 51
    },
    {
      "epoch": 2.3812233285917497,
      "grad_norm": 0.43086740374565125,
      "learning_rate": 2.0214529598676836e-05,
      "loss": 3.2073,
      "step": 52
    },
    {
      "epoch": 2.42674253200569,
      "grad_norm": 0.5334138870239258,
      "learning_rate": 1.8855661083370986e-05,
      "loss": 3.475,
      "step": 53
    },
    {
      "epoch": 2.47226173541963,
      "grad_norm": 0.35528457164764404,
      "learning_rate": 1.758386744638546e-05,
      "loss": 2.0396,
      "step": 54
    },
    {
      "epoch": 2.5177809388335706,
      "grad_norm": 0.3946029543876648,
      "learning_rate": 1.6402212549987762e-05,
      "loss": 4.2628,
      "step": 55
    },
    {
      "epoch": 2.5633001422475106,
      "grad_norm": 0.3490312397480011,
      "learning_rate": 1.531354310432403e-05,
      "loss": 3.2317,
      "step": 56
    },
    {
      "epoch": 2.608819345661451,
      "grad_norm": 0.3952423334121704,
      "learning_rate": 1.4320481809445051e-05,
      "loss": 3.2827,
      "step": 57
    },
    {
      "epoch": 2.654338549075391,
      "grad_norm": 0.4798361361026764,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 3.2247,
      "step": 58
    },
    {
      "epoch": 2.6998577524893315,
      "grad_norm": 0.4341035783290863,
      "learning_rate": 1.2630517066764069e-05,
      "loss": 2.4414,
      "step": 59
    },
    {
      "epoch": 2.7453769559032715,
      "grad_norm": 0.44139501452445984,
      "learning_rate": 1.1937684892050604e-05,
      "loss": 4.1782,
      "step": 60
    },
    {
      "epoch": 2.790896159317212,
      "grad_norm": 0.3148343861103058,
      "learning_rate": 1.1348593606245522e-05,
      "loss": 3.0881,
      "step": 61
    },
    {
      "epoch": 2.8364153627311524,
      "grad_norm": 0.403652161359787,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 3.3378,
      "step": 62
    },
    {
      "epoch": 2.8819345661450924,
      "grad_norm": 0.44468310475349426,
      "learning_rate": 1.0487057051584856e-05,
      "loss": 3.375,
      "step": 63
    },
    {
      "epoch": 2.927453769559033,
      "grad_norm": 0.5985783934593201,
      "learning_rate": 1.0216687299751144e-05,
      "loss": 3.8349,
      "step": 64
    },
    {
      "epoch": 2.972972972972973,
      "grad_norm": 0.5112795829772949,
      "learning_rate": 1.0054204470767243e-05,
      "loss": 3.4318,
      "step": 65
    },
    {
      "epoch": 3.0256045519203414,
      "grad_norm": 0.2921772003173828,
      "learning_rate": 1e-05,
      "loss": 3.1404,
      "step": 66
    }
  ],
  "logging_steps": 1,
  "max_steps": 66,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9.108787643746877e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}