{
  "best_metric": 1.1002987623214722,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 0.0009760144450137862,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.9520288900275725e-05,
      "grad_norm": 10.334268569946289,
      "learning_rate": 0.0001,
      "loss": 4.7093,
      "step": 1
    },
    {
      "epoch": 1.9520288900275725e-05,
      "eval_loss": 1.3136714696884155,
      "eval_runtime": 6002.6747,
      "eval_samples_per_second": 3.593,
      "eval_steps_per_second": 1.797,
      "step": 1
    },
    {
      "epoch": 3.904057780055145e-05,
      "grad_norm": 12.176424980163574,
      "learning_rate": 0.0002,
      "loss": 5.0525,
      "step": 2
    },
    {
      "epoch": 5.856086670082717e-05,
      "grad_norm": 8.576818466186523,
      "learning_rate": 0.00019978589232386035,
      "loss": 4.7167,
      "step": 3
    },
    {
      "epoch": 7.80811556011029e-05,
      "grad_norm": 18.30161476135254,
      "learning_rate": 0.00019914448613738106,
      "loss": 5.2112,
      "step": 4
    },
    {
      "epoch": 9.760144450137862e-05,
      "grad_norm": 8.57841682434082,
      "learning_rate": 0.00019807852804032305,
      "loss": 4.479,
      "step": 5
    },
    {
      "epoch": 0.00011712173340165434,
      "grad_norm": 5.883150100708008,
      "learning_rate": 0.00019659258262890683,
      "loss": 4.0482,
      "step": 6
    },
    {
      "epoch": 0.00013664202230193008,
      "grad_norm": 6.208558082580566,
      "learning_rate": 0.0001946930129495106,
      "loss": 4.6876,
      "step": 7
    },
    {
      "epoch": 0.0001561623112022058,
      "grad_norm": 7.253965377807617,
      "learning_rate": 0.0001923879532511287,
      "loss": 4.4962,
      "step": 8
    },
    {
      "epoch": 0.00017568260010248152,
      "grad_norm": 7.173295497894287,
      "learning_rate": 0.00018968727415326884,
      "loss": 4.579,
      "step": 9
    },
    {
      "epoch": 0.00019520288900275724,
      "grad_norm": 6.47155237197876,
      "learning_rate": 0.00018660254037844388,
      "loss": 4.7364,
      "step": 10
    },
    {
      "epoch": 0.00021472317790303296,
      "grad_norm": 6.548929691314697,
      "learning_rate": 0.00018314696123025454,
      "loss": 4.2588,
      "step": 11
    },
    {
      "epoch": 0.00023424346680330868,
      "grad_norm": 6.918939113616943,
      "learning_rate": 0.00017933533402912354,
      "loss": 4.2177,
      "step": 12
    },
    {
      "epoch": 0.0002537637557035844,
      "grad_norm": 5.7343363761901855,
      "learning_rate": 0.00017518398074789775,
      "loss": 4.3725,
      "step": 13
    },
    {
      "epoch": 0.00027328404460386015,
      "grad_norm": 5.065625190734863,
      "learning_rate": 0.00017071067811865476,
      "loss": 4.0603,
      "step": 14
    },
    {
      "epoch": 0.00029280433350413584,
      "grad_norm": 6.888638496398926,
      "learning_rate": 0.00016593458151000688,
      "loss": 4.4999,
      "step": 15
    },
    {
      "epoch": 0.0003123246224044116,
      "grad_norm": 6.872077465057373,
      "learning_rate": 0.00016087614290087208,
      "loss": 4.2474,
      "step": 16
    },
    {
      "epoch": 0.0003318449113046873,
      "grad_norm": 5.9835309982299805,
      "learning_rate": 0.00015555702330196023,
      "loss": 4.4801,
      "step": 17
    },
    {
      "epoch": 0.00035136520020496304,
      "grad_norm": 6.098293304443359,
      "learning_rate": 0.00015000000000000001,
      "loss": 4.1836,
      "step": 18
    },
    {
      "epoch": 0.0003708854891052388,
      "grad_norm": 5.965084075927734,
      "learning_rate": 0.00014422886902190014,
      "loss": 4.7496,
      "step": 19
    },
    {
      "epoch": 0.0003904057780055145,
      "grad_norm": 6.869338512420654,
      "learning_rate": 0.000138268343236509,
      "loss": 4.4003,
      "step": 20
    },
    {
      "epoch": 0.0004099260669057902,
      "grad_norm": 5.926541805267334,
      "learning_rate": 0.00013214394653031616,
      "loss": 4.0398,
      "step": 21
    },
    {
      "epoch": 0.0004294463558060659,
      "grad_norm": 6.244675636291504,
      "learning_rate": 0.00012588190451025207,
      "loss": 4.2699,
      "step": 22
    },
    {
      "epoch": 0.00044896664470634167,
      "grad_norm": 6.214200496673584,
      "learning_rate": 0.00011950903220161285,
      "loss": 4.465,
      "step": 23
    },
    {
      "epoch": 0.00046848693360661736,
      "grad_norm": 5.795085430145264,
      "learning_rate": 0.00011305261922200519,
      "loss": 4.281,
      "step": 24
    },
    {
      "epoch": 0.0004880072225068931,
      "grad_norm": 6.204129219055176,
      "learning_rate": 0.00010654031292301432,
      "loss": 4.0462,
      "step": 25
    },
    {
      "epoch": 0.0004880072225068931,
      "eval_loss": 1.1002987623214722,
      "eval_runtime": 6028.5222,
      "eval_samples_per_second": 3.578,
      "eval_steps_per_second": 1.789,
      "step": 25
    },
    {
      "epoch": 0.0005075275114071688,
      "grad_norm": 5.981378078460693,
      "learning_rate": 0.0001,
      "loss": 4.6663,
      "step": 26
    },
    {
      "epoch": 0.0005270478003074446,
      "grad_norm": 6.813650131225586,
      "learning_rate": 9.345968707698569e-05,
      "loss": 4.4858,
      "step": 27
    },
    {
      "epoch": 0.0005465680892077203,
      "grad_norm": 5.694484710693359,
      "learning_rate": 8.694738077799488e-05,
      "loss": 3.8496,
      "step": 28
    },
    {
      "epoch": 0.000566088378107996,
      "grad_norm": 5.858161449432373,
      "learning_rate": 8.049096779838719e-05,
      "loss": 4.4739,
      "step": 29
    },
    {
      "epoch": 0.0005856086670082717,
      "grad_norm": 6.250691890716553,
      "learning_rate": 7.411809548974792e-05,
      "loss": 4.0139,
      "step": 30
    },
    {
      "epoch": 0.0006051289559085474,
      "grad_norm": 5.8988752365112305,
      "learning_rate": 6.785605346968386e-05,
      "loss": 4.0186,
      "step": 31
    },
    {
      "epoch": 0.0006246492448088232,
      "grad_norm": 6.843201637268066,
      "learning_rate": 6.173165676349103e-05,
      "loss": 4.3718,
      "step": 32
    },
    {
      "epoch": 0.0006441695337090989,
      "grad_norm": 7.384850025177002,
      "learning_rate": 5.577113097809989e-05,
      "loss": 4.5045,
      "step": 33
    },
    {
      "epoch": 0.0006636898226093746,
      "grad_norm": 7.434286594390869,
      "learning_rate": 5.000000000000002e-05,
      "loss": 4.2881,
      "step": 34
    },
    {
      "epoch": 0.0006832101115096503,
      "grad_norm": 7.059499740600586,
      "learning_rate": 4.444297669803981e-05,
      "loss": 4.4589,
      "step": 35
    },
    {
      "epoch": 0.0007027304004099261,
      "grad_norm": 7.253593444824219,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 4.148,
      "step": 36
    },
    {
      "epoch": 0.0007222506893102018,
      "grad_norm": 7.907142639160156,
      "learning_rate": 3.406541848999312e-05,
      "loss": 4.2299,
      "step": 37
    },
    {
      "epoch": 0.0007417709782104776,
      "grad_norm": 7.825556755065918,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 4.2046,
      "step": 38
    },
    {
      "epoch": 0.0007612912671107532,
      "grad_norm": 7.069599628448486,
      "learning_rate": 2.4816019252102273e-05,
      "loss": 3.8814,
      "step": 39
    },
    {
      "epoch": 0.000780811556011029,
      "grad_norm": 7.556497097015381,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 4.1689,
      "step": 40
    },
    {
      "epoch": 0.0008003318449113047,
      "grad_norm": 8.259849548339844,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 3.7389,
      "step": 41
    },
    {
      "epoch": 0.0008198521338115805,
      "grad_norm": 8.705476760864258,
      "learning_rate": 1.339745962155613e-05,
      "loss": 4.6672,
      "step": 42
    },
    {
      "epoch": 0.0008393724227118561,
      "grad_norm": 7.974400043487549,
      "learning_rate": 1.0312725846731175e-05,
      "loss": 3.6511,
      "step": 43
    },
    {
      "epoch": 0.0008588927116121318,
      "grad_norm": 7.9710540771484375,
      "learning_rate": 7.612046748871327e-06,
      "loss": 3.8957,
      "step": 44
    },
    {
      "epoch": 0.0008784130005124076,
      "grad_norm": 9.321637153625488,
      "learning_rate": 5.306987050489442e-06,
      "loss": 3.827,
      "step": 45
    },
    {
      "epoch": 0.0008979332894126833,
      "grad_norm": 9.066277503967285,
      "learning_rate": 3.40741737109318e-06,
      "loss": 4.1383,
      "step": 46
    },
    {
      "epoch": 0.0009174535783129591,
      "grad_norm": 10.388931274414062,
      "learning_rate": 1.921471959676957e-06,
      "loss": 4.7694,
      "step": 47
    },
    {
      "epoch": 0.0009369738672132347,
      "grad_norm": 11.57152271270752,
      "learning_rate": 8.555138626189618e-07,
      "loss": 4.068,
      "step": 48
    },
    {
      "epoch": 0.0009564941561135105,
      "grad_norm": 10.462427139282227,
      "learning_rate": 2.141076761396521e-07,
      "loss": 4.6273,
      "step": 49
    },
    {
      "epoch": 0.0009760144450137862,
      "grad_norm": 12.40966510772705,
      "learning_rate": 0.0,
      "loss": 5.1242,
      "step": 50
    },
    {
      "epoch": 0.0009760144450137862,
      "eval_loss": 1.1065218448638916,
      "eval_runtime": 6027.0265,
      "eval_samples_per_second": 3.579,
      "eval_steps_per_second": 1.789,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 1
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.066459555076506e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}