{
  "best_metric": 2.272261142730713,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.003957104981995172,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 7.914209963990345e-05,
      "grad_norm": 0.47334396839141846,
      "learning_rate": 0.0001,
      "loss": 2.6318,
      "step": 1
    },
    {
      "epoch": 7.914209963990345e-05,
      "eval_loss": 2.629798650741577,
      "eval_runtime": 1442.4242,
      "eval_samples_per_second": 3.689,
      "eval_steps_per_second": 1.845,
      "step": 1
    },
    {
      "epoch": 0.0001582841992798069,
      "grad_norm": 1.077744483947754,
      "learning_rate": 0.0002,
      "loss": 2.456,
      "step": 2
    },
    {
      "epoch": 0.00023742629891971033,
      "grad_norm": 0.476300984621048,
      "learning_rate": 0.00019978589232386035,
      "loss": 2.3186,
      "step": 3
    },
    {
      "epoch": 0.0003165683985596138,
      "grad_norm": 0.6186921000480652,
      "learning_rate": 0.00019914448613738106,
      "loss": 2.2567,
      "step": 4
    },
    {
      "epoch": 0.00039571049819951725,
      "grad_norm": 0.5011530518531799,
      "learning_rate": 0.00019807852804032305,
      "loss": 2.4795,
      "step": 5
    },
    {
      "epoch": 0.00047485259783942065,
      "grad_norm": 0.3476628065109253,
      "learning_rate": 0.00019659258262890683,
      "loss": 2.3858,
      "step": 6
    },
    {
      "epoch": 0.0005539946974793241,
      "grad_norm": 0.4523206353187561,
      "learning_rate": 0.0001946930129495106,
      "loss": 2.3486,
      "step": 7
    },
    {
      "epoch": 0.0006331367971192276,
      "grad_norm": 0.2827935516834259,
      "learning_rate": 0.0001923879532511287,
      "loss": 2.3498,
      "step": 8
    },
    {
      "epoch": 0.000712278896759131,
      "grad_norm": 0.33790653944015503,
      "learning_rate": 0.00018968727415326884,
      "loss": 2.2515,
      "step": 9
    },
    {
      "epoch": 0.0007914209963990345,
      "grad_norm": 0.19474512338638306,
      "learning_rate": 0.00018660254037844388,
      "loss": 2.4869,
      "step": 10
    },
    {
      "epoch": 0.000870563096038938,
      "grad_norm": 0.3997777998447418,
      "learning_rate": 0.00018314696123025454,
      "loss": 2.2487,
      "step": 11
    },
    {
      "epoch": 0.0009497051956788413,
      "grad_norm": 0.21606910228729248,
      "learning_rate": 0.00017933533402912354,
      "loss": 2.2909,
      "step": 12
    },
    {
      "epoch": 0.0010288472953187448,
      "grad_norm": 0.22740228474140167,
      "learning_rate": 0.00017518398074789775,
      "loss": 2.3088,
      "step": 13
    },
    {
      "epoch": 0.0011079893949586482,
      "grad_norm": 0.23181860148906708,
      "learning_rate": 0.00017071067811865476,
      "loss": 2.3148,
      "step": 14
    },
    {
      "epoch": 0.0011871314945985517,
      "grad_norm": 0.26544469594955444,
      "learning_rate": 0.00016593458151000688,
      "loss": 2.1442,
      "step": 15
    },
    {
      "epoch": 0.0012662735942384552,
      "grad_norm": 0.22167958319187164,
      "learning_rate": 0.00016087614290087208,
      "loss": 2.238,
      "step": 16
    },
    {
      "epoch": 0.0013454156938783586,
      "grad_norm": 0.33192533254623413,
      "learning_rate": 0.00015555702330196023,
      "loss": 2.4143,
      "step": 17
    },
    {
      "epoch": 0.001424557793518262,
      "grad_norm": 0.2848586440086365,
      "learning_rate": 0.00015000000000000001,
      "loss": 2.0197,
      "step": 18
    },
    {
      "epoch": 0.0015036998931581655,
      "grad_norm": 0.40165361762046814,
      "learning_rate": 0.00014422886902190014,
      "loss": 2.2847,
      "step": 19
    },
    {
      "epoch": 0.001582841992798069,
      "grad_norm": 0.23846222460269928,
      "learning_rate": 0.000138268343236509,
      "loss": 2.1219,
      "step": 20
    },
    {
      "epoch": 0.0016619840924379725,
      "grad_norm": 0.22336062788963318,
      "learning_rate": 0.00013214394653031616,
      "loss": 2.1434,
      "step": 21
    },
    {
      "epoch": 0.001741126192077876,
      "grad_norm": 0.3259464502334595,
      "learning_rate": 0.00012588190451025207,
      "loss": 2.2742,
      "step": 22
    },
    {
      "epoch": 0.0018202682917177794,
      "grad_norm": 0.25928550958633423,
      "learning_rate": 0.00011950903220161285,
      "loss": 2.2165,
      "step": 23
    },
    {
      "epoch": 0.0018994103913576826,
      "grad_norm": 0.31144559383392334,
      "learning_rate": 0.00011305261922200519,
      "loss": 2.3496,
      "step": 24
    },
    {
      "epoch": 0.001978552490997586,
      "grad_norm": 0.33175715804100037,
      "learning_rate": 0.00010654031292301432,
      "loss": 2.3415,
      "step": 25
    },
    {
      "epoch": 0.001978552490997586,
      "eval_loss": 2.2873783111572266,
      "eval_runtime": 1449.5978,
      "eval_samples_per_second": 3.671,
      "eval_steps_per_second": 1.836,
      "step": 25
    },
    {
      "epoch": 0.0020576945906374895,
      "grad_norm": 0.42730939388275146,
      "learning_rate": 0.0001,
      "loss": 2.0884,
      "step": 26
    },
    {
      "epoch": 0.002136836690277393,
      "grad_norm": 0.9263526797294617,
      "learning_rate": 9.345968707698569e-05,
      "loss": 2.3938,
      "step": 27
    },
    {
      "epoch": 0.0022159787899172965,
      "grad_norm": 0.3488403856754303,
      "learning_rate": 8.694738077799488e-05,
      "loss": 2.3595,
      "step": 28
    },
    {
      "epoch": 0.0022951208895572,
      "grad_norm": 0.37862449884414673,
      "learning_rate": 8.049096779838719e-05,
      "loss": 2.1439,
      "step": 29
    },
    {
      "epoch": 0.0023742629891971034,
      "grad_norm": 0.3307685852050781,
      "learning_rate": 7.411809548974792e-05,
      "loss": 2.2033,
      "step": 30
    },
    {
      "epoch": 0.002453405088837007,
      "grad_norm": 0.3769107162952423,
      "learning_rate": 6.785605346968386e-05,
      "loss": 2.4333,
      "step": 31
    },
    {
      "epoch": 0.0025325471884769103,
      "grad_norm": 0.38113144040107727,
      "learning_rate": 6.173165676349103e-05,
      "loss": 2.1927,
      "step": 32
    },
    {
      "epoch": 0.0026116892881168138,
      "grad_norm": 0.33478325605392456,
      "learning_rate": 5.577113097809989e-05,
      "loss": 2.3015,
      "step": 33
    },
    {
      "epoch": 0.0026908313877567172,
      "grad_norm": 0.29439330101013184,
      "learning_rate": 5.000000000000002e-05,
      "loss": 2.2517,
      "step": 34
    },
    {
      "epoch": 0.0027699734873966207,
      "grad_norm": 0.3198701739311218,
      "learning_rate": 4.444297669803981e-05,
      "loss": 2.3491,
      "step": 35
    },
    {
      "epoch": 0.002849115587036524,
      "grad_norm": 0.4098028838634491,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 2.4146,
      "step": 36
    },
    {
      "epoch": 0.0029282576866764276,
      "grad_norm": 0.39363518357276917,
      "learning_rate": 3.406541848999312e-05,
      "loss": 2.4595,
      "step": 37
    },
    {
      "epoch": 0.003007399786316331,
      "grad_norm": 0.37463560700416565,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 2.4214,
      "step": 38
    },
    {
      "epoch": 0.0030865418859562345,
      "grad_norm": 0.35873377323150635,
      "learning_rate": 2.4816019252102273e-05,
      "loss": 2.2987,
      "step": 39
    },
    {
      "epoch": 0.003165683985596138,
      "grad_norm": 0.35473981499671936,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 2.3872,
      "step": 40
    },
    {
      "epoch": 0.0032448260852360414,
      "grad_norm": 0.38532593846321106,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 2.2592,
      "step": 41
    },
    {
      "epoch": 0.003323968184875945,
      "grad_norm": 0.4655337333679199,
      "learning_rate": 1.339745962155613e-05,
      "loss": 2.4574,
      "step": 42
    },
    {
      "epoch": 0.0034031102845158484,
      "grad_norm": 0.4277578294277191,
      "learning_rate": 1.0312725846731175e-05,
      "loss": 2.3818,
      "step": 43
    },
    {
      "epoch": 0.003482252384155752,
      "grad_norm": 0.5126660466194153,
      "learning_rate": 7.612046748871327e-06,
      "loss": 2.4937,
      "step": 44
    },
    {
      "epoch": 0.0035613944837956553,
      "grad_norm": 0.47781333327293396,
      "learning_rate": 5.306987050489442e-06,
      "loss": 2.5162,
      "step": 45
    },
    {
      "epoch": 0.0036405365834355588,
      "grad_norm": 0.525256335735321,
      "learning_rate": 3.40741737109318e-06,
      "loss": 2.275,
      "step": 46
    },
    {
      "epoch": 0.0037196786830754618,
      "grad_norm": 0.6399112939834595,
      "learning_rate": 1.921471959676957e-06,
      "loss": 2.5581,
      "step": 47
    },
    {
      "epoch": 0.0037988207827153652,
      "grad_norm": 0.5539636015892029,
      "learning_rate": 8.555138626189618e-07,
      "loss": 2.5673,
      "step": 48
    },
    {
      "epoch": 0.0038779628823552687,
      "grad_norm": 0.6108574271202087,
      "learning_rate": 2.141076761396521e-07,
      "loss": 2.4309,
      "step": 49
    },
    {
      "epoch": 0.003957104981995172,
      "grad_norm": 0.9037721753120422,
      "learning_rate": 0.0,
      "loss": 2.5523,
      "step": 50
    },
    {
      "epoch": 0.003957104981995172,
      "eval_loss": 2.272261142730713,
      "eval_runtime": 1449.1424,
      "eval_samples_per_second": 3.672,
      "eval_steps_per_second": 1.836,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.59230637948928e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}