{
  "best_metric": 2.0446817874908447,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 2.6229508196721314,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05245901639344262,
      "grad_norm": 59.75578308105469,
      "learning_rate": 5e-05,
      "loss": 69.1845,
      "step": 1
    },
    {
      "epoch": 0.05245901639344262,
      "eval_loss": 2.3756308555603027,
      "eval_runtime": 4.6962,
      "eval_samples_per_second": 10.647,
      "eval_steps_per_second": 2.768,
      "step": 1
    },
    {
      "epoch": 0.10491803278688525,
      "grad_norm": 70.11760711669922,
      "learning_rate": 0.0001,
      "loss": 72.285,
      "step": 2
    },
    {
      "epoch": 0.15737704918032788,
      "grad_norm": 40.28403091430664,
      "learning_rate": 9.992920667580176e-05,
      "loss": 70.4757,
      "step": 3
    },
    {
      "epoch": 0.2098360655737705,
      "grad_norm": 50.93065643310547,
      "learning_rate": 9.971704944519594e-05,
      "loss": 72.0046,
      "step": 4
    },
    {
      "epoch": 0.26229508196721313,
      "grad_norm": 67.84244537353516,
      "learning_rate": 9.936419583332062e-05,
      "loss": 72.9162,
      "step": 5
    },
    {
      "epoch": 0.31475409836065577,
      "grad_norm": 39.244895935058594,
      "learning_rate": 9.887175604818206e-05,
      "loss": 67.498,
      "step": 6
    },
    {
      "epoch": 0.36721311475409835,
      "grad_norm": 24.89311408996582,
      "learning_rate": 9.82412794875295e-05,
      "loss": 67.2244,
      "step": 7
    },
    {
      "epoch": 0.419672131147541,
      "grad_norm": 27.440900802612305,
      "learning_rate": 9.747474986387654e-05,
      "loss": 69.4707,
      "step": 8
    },
    {
      "epoch": 0.4721311475409836,
      "grad_norm": 32.1877326965332,
      "learning_rate": 9.657457896300791e-05,
      "loss": 70.6535,
      "step": 9
    },
    {
      "epoch": 0.5245901639344263,
      "grad_norm": 54.610347747802734,
      "learning_rate": 9.554359905560886e-05,
      "loss": 69.8398,
      "step": 10
    },
    {
      "epoch": 0.5770491803278689,
      "grad_norm": 19.549211502075195,
      "learning_rate": 9.438505398589392e-05,
      "loss": 66.0735,
      "step": 11
    },
    {
      "epoch": 0.6295081967213115,
      "grad_norm": 21.11090850830078,
      "learning_rate": 9.310258896527278e-05,
      "loss": 68.2764,
      "step": 12
    },
    {
      "epoch": 0.6819672131147541,
      "grad_norm": 19.177579879760742,
      "learning_rate": 9.17002391031667e-05,
      "loss": 68.0621,
      "step": 13
    },
    {
      "epoch": 0.7344262295081967,
      "grad_norm": 21.684904098510742,
      "learning_rate": 9.018241671106134e-05,
      "loss": 69.1158,
      "step": 14
    },
    {
      "epoch": 0.7868852459016393,
      "grad_norm": 25.838293075561523,
      "learning_rate": 8.855389741974244e-05,
      "loss": 64.8987,
      "step": 15
    },
    {
      "epoch": 0.839344262295082,
      "grad_norm": 16.683151245117188,
      "learning_rate": 8.681980515339464e-05,
      "loss": 66.7402,
      "step": 16
    },
    {
      "epoch": 0.8918032786885246,
      "grad_norm": 16.149255752563477,
      "learning_rate": 8.498559600784018e-05,
      "loss": 67.3468,
      "step": 17
    },
    {
      "epoch": 0.9442622950819672,
      "grad_norm": 17.375782012939453,
      "learning_rate": 8.305704108364301e-05,
      "loss": 66.6194,
      "step": 18
    },
    {
      "epoch": 0.9967213114754099,
      "grad_norm": 26.16300392150879,
      "learning_rate": 8.104020832809127e-05,
      "loss": 69.7571,
      "step": 19
    },
    {
      "epoch": 1.0491803278688525,
      "grad_norm": 15.187979698181152,
      "learning_rate": 7.894144344319014e-05,
      "loss": 63.0399,
      "step": 20
    },
    {
      "epoch": 1.1016393442622952,
      "grad_norm": 15.76618480682373,
      "learning_rate": 7.67673499197358e-05,
      "loss": 63.5516,
      "step": 21
    },
    {
      "epoch": 1.1540983606557378,
      "grad_norm": 15.865626335144043,
      "learning_rate": 7.452476826029011e-05,
      "loss": 63.0109,
      "step": 22
    },
    {
      "epoch": 1.2065573770491804,
      "grad_norm": 19.105836868286133,
      "learning_rate": 7.222075445642904e-05,
      "loss": 64.563,
      "step": 23
    },
    {
      "epoch": 1.2590163934426228,
      "grad_norm": 27.65399932861328,
      "learning_rate": 6.986255778798253e-05,
      "loss": 63.8715,
      "step": 24
    },
    {
      "epoch": 1.3114754098360657,
      "grad_norm": 17.719682693481445,
      "learning_rate": 6.745759801411822e-05,
      "loss": 62.3019,
      "step": 25
    },
    {
      "epoch": 1.3114754098360657,
      "eval_loss": 2.0446817874908447,
      "eval_runtime": 4.7446,
      "eval_samples_per_second": 10.538,
      "eval_steps_per_second": 2.74,
      "step": 25
    },
    {
      "epoch": 1.3639344262295081,
      "grad_norm": 15.636397361755371,
      "learning_rate": 6.501344202803414e-05,
      "loss": 61.7672,
      "step": 26
    },
    {
      "epoch": 1.4163934426229507,
      "grad_norm": 15.895488739013672,
      "learning_rate": 6.253778004871315e-05,
      "loss": 62.7228,
      "step": 27
    },
    {
      "epoch": 1.4688524590163934,
      "grad_norm": 19.06943702697754,
      "learning_rate": 6.003840142464886e-05,
      "loss": 64.0609,
      "step": 28
    },
    {
      "epoch": 1.521311475409836,
      "grad_norm": 24.51659393310547,
      "learning_rate": 5.752317012567363e-05,
      "loss": 61.7292,
      "step": 29
    },
    {
      "epoch": 1.5737704918032787,
      "grad_norm": 16.205303192138672,
      "learning_rate": 5.500000000000001e-05,
      "loss": 60.9363,
      "step": 30
    },
    {
      "epoch": 1.6262295081967213,
      "grad_norm": 17.461999893188477,
      "learning_rate": 5.247682987432637e-05,
      "loss": 62.4749,
      "step": 31
    },
    {
      "epoch": 1.678688524590164,
      "grad_norm": 17.851083755493164,
      "learning_rate": 4.9961598575351155e-05,
      "loss": 61.79,
      "step": 32
    },
    {
      "epoch": 1.7311475409836066,
      "grad_norm": 20.22283172607422,
      "learning_rate": 4.7462219951286867e-05,
      "loss": 64.521,
      "step": 33
    },
    {
      "epoch": 1.7836065573770492,
      "grad_norm": 28.108795166015625,
      "learning_rate": 4.498655797196586e-05,
      "loss": 61.2715,
      "step": 34
    },
    {
      "epoch": 1.8360655737704918,
      "grad_norm": 18.37066078186035,
      "learning_rate": 4.2542401985881784e-05,
      "loss": 61.7881,
      "step": 35
    },
    {
      "epoch": 1.8885245901639345,
      "grad_norm": 18.368694305419922,
      "learning_rate": 4.01374422120175e-05,
      "loss": 62.2249,
      "step": 36
    },
    {
      "epoch": 1.940983606557377,
      "grad_norm": 18.627840042114258,
      "learning_rate": 3.777924554357096e-05,
      "loss": 62.6981,
      "step": 37
    },
    {
      "epoch": 1.9934426229508198,
      "grad_norm": 22.907976150512695,
      "learning_rate": 3.547523173970989e-05,
      "loss": 64.4813,
      "step": 38
    },
    {
      "epoch": 2.045901639344262,
      "grad_norm": 21.854839324951172,
      "learning_rate": 3.323265008026421e-05,
      "loss": 57.5238,
      "step": 39
    },
    {
      "epoch": 2.098360655737705,
      "grad_norm": 17.733745574951172,
      "learning_rate": 3.105855655680986e-05,
      "loss": 59.0577,
      "step": 40
    },
    {
      "epoch": 2.1508196721311474,
      "grad_norm": 18.677440643310547,
      "learning_rate": 2.8959791671908742e-05,
      "loss": 59.2771,
      "step": 41
    },
    {
      "epoch": 2.2032786885245903,
      "grad_norm": 20.046802520751953,
      "learning_rate": 2.6942958916356998e-05,
      "loss": 58.3555,
      "step": 42
    },
    {
      "epoch": 2.2557377049180327,
      "grad_norm": 26.242361068725586,
      "learning_rate": 2.501440399215983e-05,
      "loss": 59.7959,
      "step": 43
    },
    {
      "epoch": 2.3081967213114756,
      "grad_norm": 17.727699279785156,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 58.5926,
      "step": 44
    },
    {
      "epoch": 2.360655737704918,
      "grad_norm": 18.205520629882812,
      "learning_rate": 2.144610258025755e-05,
      "loss": 58.4511,
      "step": 45
    },
    {
      "epoch": 2.413114754098361,
      "grad_norm": 20.044387817382812,
      "learning_rate": 1.981758328893866e-05,
      "loss": 57.0531,
      "step": 46
    },
    {
      "epoch": 2.4655737704918033,
      "grad_norm": 22.331958770751953,
      "learning_rate": 1.8299760896833295e-05,
      "loss": 60.0991,
      "step": 47
    },
    {
      "epoch": 2.5180327868852457,
      "grad_norm": 29.541261672973633,
      "learning_rate": 1.6897411034727218e-05,
      "loss": 59.2154,
      "step": 48
    },
    {
      "epoch": 2.5704918032786885,
      "grad_norm": 20.129823684692383,
      "learning_rate": 1.5614946014106085e-05,
      "loss": 58.7101,
      "step": 49
    },
    {
      "epoch": 2.6229508196721314,
      "grad_norm": 20.351736068725586,
      "learning_rate": 1.4456400944391146e-05,
      "loss": 59.4648,
      "step": 50
    },
    {
      "epoch": 2.6229508196721314,
      "eval_loss": 2.0465781688690186,
      "eval_runtime": 4.7768,
      "eval_samples_per_second": 10.467,
      "eval_steps_per_second": 2.721,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 58,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 1
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.658021338284032e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}