{
  "best_metric": 10.368429183959961,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.0005295319996187369,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.059063999237474e-05,
      "grad_norm": 0.15046997368335724,
      "learning_rate": 0.0001,
      "loss": 10.3924,
      "step": 1
    },
    {
      "epoch": 1.059063999237474e-05,
      "eval_loss": 10.403708457946777,
      "eval_runtime": 256.1878,
      "eval_samples_per_second": 155.191,
      "eval_steps_per_second": 77.595,
      "step": 1
    },
    {
      "epoch": 2.118127998474948e-05,
      "grad_norm": 0.1689300686120987,
      "learning_rate": 0.0002,
      "loss": 10.4028,
      "step": 2
    },
    {
      "epoch": 3.1771919977124216e-05,
      "grad_norm": 0.13942524790763855,
      "learning_rate": 0.00019978589232386035,
      "loss": 10.4085,
      "step": 3
    },
    {
      "epoch": 4.236255996949896e-05,
      "grad_norm": 0.19832777976989746,
      "learning_rate": 0.00019914448613738106,
      "loss": 10.3694,
      "step": 4
    },
    {
      "epoch": 5.29531999618737e-05,
      "grad_norm": 0.154804065823555,
      "learning_rate": 0.00019807852804032305,
      "loss": 10.3817,
      "step": 5
    },
    {
      "epoch": 6.354383995424843e-05,
      "grad_norm": 0.1635114997625351,
      "learning_rate": 0.00019659258262890683,
      "loss": 10.3978,
      "step": 6
    },
    {
      "epoch": 7.413447994662318e-05,
      "grad_norm": 0.1821622997522354,
      "learning_rate": 0.0001946930129495106,
      "loss": 10.3664,
      "step": 7
    },
    {
      "epoch": 8.472511993899791e-05,
      "grad_norm": 0.17012329399585724,
      "learning_rate": 0.0001923879532511287,
      "loss": 10.4036,
      "step": 8
    },
    {
      "epoch": 9.531575993137265e-05,
      "grad_norm": 0.20254936814308167,
      "learning_rate": 0.00018968727415326884,
      "loss": 10.3974,
      "step": 9
    },
    {
      "epoch": 0.0001059063999237474,
      "grad_norm": 0.17694656550884247,
      "learning_rate": 0.00018660254037844388,
      "loss": 10.3638,
      "step": 10
    },
    {
      "epoch": 0.00011649703991612213,
      "grad_norm": 0.16968543827533722,
      "learning_rate": 0.00018314696123025454,
      "loss": 10.3723,
      "step": 11
    },
    {
      "epoch": 0.00012708767990849687,
      "grad_norm": 0.1666555404663086,
      "learning_rate": 0.00017933533402912354,
      "loss": 10.3831,
      "step": 12
    },
    {
      "epoch": 0.0001376783199008716,
      "grad_norm": 0.191905677318573,
      "learning_rate": 0.00017518398074789775,
      "loss": 10.3708,
      "step": 13
    },
    {
      "epoch": 0.00014826895989324636,
      "grad_norm": 0.19630686938762665,
      "learning_rate": 0.00017071067811865476,
      "loss": 10.37,
      "step": 14
    },
    {
      "epoch": 0.0001588595998856211,
      "grad_norm": 0.2109733670949936,
      "learning_rate": 0.00016593458151000688,
      "loss": 10.3807,
      "step": 15
    },
    {
      "epoch": 0.00016945023987799583,
      "grad_norm": 0.21452225744724274,
      "learning_rate": 0.00016087614290087208,
      "loss": 10.3773,
      "step": 16
    },
    {
      "epoch": 0.00018004087987037056,
      "grad_norm": 0.22486615180969238,
      "learning_rate": 0.00015555702330196023,
      "loss": 10.3543,
      "step": 17
    },
    {
      "epoch": 0.0001906315198627453,
      "grad_norm": 0.21467246115207672,
      "learning_rate": 0.00015000000000000001,
      "loss": 10.3444,
      "step": 18
    },
    {
      "epoch": 0.00020122215985512003,
      "grad_norm": 0.18565407395362854,
      "learning_rate": 0.00014422886902190014,
      "loss": 10.418,
      "step": 19
    },
    {
      "epoch": 0.0002118127998474948,
      "grad_norm": 0.21927490830421448,
      "learning_rate": 0.000138268343236509,
      "loss": 10.406,
      "step": 20
    },
    {
      "epoch": 0.00022240343983986953,
      "grad_norm": 0.23267850279808044,
      "learning_rate": 0.00013214394653031616,
      "loss": 10.3427,
      "step": 21
    },
    {
      "epoch": 0.00023299407983224426,
      "grad_norm": 0.23452691733837128,
      "learning_rate": 0.00012588190451025207,
      "loss": 10.371,
      "step": 22
    },
    {
      "epoch": 0.000243584719824619,
      "grad_norm": 0.22493915259838104,
      "learning_rate": 0.00011950903220161285,
      "loss": 10.368,
      "step": 23
    },
    {
      "epoch": 0.00025417535981699373,
      "grad_norm": 0.2101021260023117,
      "learning_rate": 0.00011305261922200519,
      "loss": 10.359,
      "step": 24
    },
    {
      "epoch": 0.00026476599980936846,
      "grad_norm": 0.2529934048652649,
      "learning_rate": 0.00010654031292301432,
      "loss": 10.3683,
      "step": 25
    },
    {
      "epoch": 0.00026476599980936846,
      "eval_loss": 10.378252029418945,
      "eval_runtime": 256.2454,
      "eval_samples_per_second": 155.156,
      "eval_steps_per_second": 77.578,
      "step": 25
    },
    {
      "epoch": 0.0002753566398017432,
      "grad_norm": 0.24394074082374573,
      "learning_rate": 0.0001,
      "loss": 10.3524,
      "step": 26
    },
    {
      "epoch": 0.00028594727979411793,
      "grad_norm": 0.21432097256183624,
      "learning_rate": 9.345968707698569e-05,
      "loss": 10.3553,
      "step": 27
    },
    {
      "epoch": 0.0002965379197864927,
      "grad_norm": 0.22124646604061127,
      "learning_rate": 8.694738077799488e-05,
      "loss": 10.3506,
      "step": 28
    },
    {
      "epoch": 0.00030712855977886746,
      "grad_norm": 0.21801720559597015,
      "learning_rate": 8.049096779838719e-05,
      "loss": 10.3924,
      "step": 29
    },
    {
      "epoch": 0.0003177191997712422,
      "grad_norm": 0.22025315463542938,
      "learning_rate": 7.411809548974792e-05,
      "loss": 10.3803,
      "step": 30
    },
    {
      "epoch": 0.0003283098397636169,
      "grad_norm": 0.2573018968105316,
      "learning_rate": 6.785605346968386e-05,
      "loss": 10.3521,
      "step": 31
    },
    {
      "epoch": 0.00033890047975599166,
      "grad_norm": 0.23922796547412872,
      "learning_rate": 6.173165676349103e-05,
      "loss": 10.3644,
      "step": 32
    },
    {
      "epoch": 0.0003494911197483664,
      "grad_norm": 0.23983879387378693,
      "learning_rate": 5.577113097809989e-05,
      "loss": 10.3523,
      "step": 33
    },
    {
      "epoch": 0.00036008175974074113,
      "grad_norm": 0.2627752125263214,
      "learning_rate": 5.000000000000002e-05,
      "loss": 10.3997,
      "step": 34
    },
    {
      "epoch": 0.00037067239973311586,
      "grad_norm": 0.26665353775024414,
      "learning_rate": 4.444297669803981e-05,
      "loss": 10.3918,
      "step": 35
    },
    {
      "epoch": 0.0003812630397254906,
      "grad_norm": 0.2439327985048294,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 10.3513,
      "step": 36
    },
    {
      "epoch": 0.00039185367971786533,
      "grad_norm": 0.2464238703250885,
      "learning_rate": 3.406541848999312e-05,
      "loss": 10.366,
      "step": 37
    },
    {
      "epoch": 0.00040244431971024006,
      "grad_norm": 0.2734537422657013,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 10.3739,
      "step": 38
    },
    {
      "epoch": 0.00041303495970261485,
      "grad_norm": 0.27960556745529175,
      "learning_rate": 2.4816019252102273e-05,
      "loss": 10.398,
      "step": 39
    },
    {
      "epoch": 0.0004236255996949896,
      "grad_norm": 0.25336456298828125,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 10.3466,
      "step": 40
    },
    {
      "epoch": 0.0004342162396873643,
      "grad_norm": 0.2658770978450775,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 10.3344,
      "step": 41
    },
    {
      "epoch": 0.00044480687967973906,
      "grad_norm": 0.28324833512306213,
      "learning_rate": 1.339745962155613e-05,
      "loss": 10.3558,
      "step": 42
    },
    {
      "epoch": 0.0004553975196721138,
      "grad_norm": 0.2617306411266327,
      "learning_rate": 1.0312725846731175e-05,
      "loss": 10.3627,
      "step": 43
    },
    {
      "epoch": 0.0004659881596644885,
      "grad_norm": 0.28455761075019836,
      "learning_rate": 7.612046748871327e-06,
      "loss": 10.3325,
      "step": 44
    },
    {
      "epoch": 0.00047657879965686326,
      "grad_norm": 0.2544795572757721,
      "learning_rate": 5.306987050489442e-06,
      "loss": 10.3822,
      "step": 45
    },
    {
      "epoch": 0.000487169439649238,
      "grad_norm": 0.2631848454475403,
      "learning_rate": 3.40741737109318e-06,
      "loss": 10.375,
      "step": 46
    },
    {
      "epoch": 0.0004977600796416127,
      "grad_norm": 0.2916587293148041,
      "learning_rate": 1.921471959676957e-06,
      "loss": 10.3545,
      "step": 47
    },
    {
      "epoch": 0.0005083507196339875,
      "grad_norm": 0.3416881263256073,
      "learning_rate": 8.555138626189618e-07,
      "loss": 10.3671,
      "step": 48
    },
    {
      "epoch": 0.0005189413596263622,
      "grad_norm": 0.2896738052368164,
      "learning_rate": 2.141076761396521e-07,
      "loss": 10.3615,
      "step": 49
    },
    {
      "epoch": 0.0005295319996187369,
      "grad_norm": 0.3720775246620178,
      "learning_rate": 0.0,
      "loss": 10.3921,
      "step": 50
    },
    {
      "epoch": 0.0005295319996187369,
      "eval_loss": 10.368429183959961,
      "eval_runtime": 256.1367,
      "eval_samples_per_second": 155.222,
      "eval_steps_per_second": 77.611,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5230244659200.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}