{
  "best_metric": 6.910342693328857,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.022583559168925023,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00045167118337850043,
      "grad_norm": 0.09511399269104004,
      "learning_rate": 0.0001,
      "loss": 6.9411,
      "step": 1
    },
    {
      "epoch": 0.00045167118337850043,
      "eval_loss": 6.941762447357178,
      "eval_runtime": 4.1373,
      "eval_samples_per_second": 901.316,
      "eval_steps_per_second": 112.876,
      "step": 1
    },
    {
      "epoch": 0.0009033423667570009,
      "grad_norm": 0.12681150436401367,
      "learning_rate": 0.0002,
      "loss": 6.9449,
      "step": 2
    },
    {
      "epoch": 0.0013550135501355014,
      "grad_norm": 0.14625313878059387,
      "learning_rate": 0.00019978589232386035,
      "loss": 6.9428,
      "step": 3
    },
    {
      "epoch": 0.0018066847335140017,
      "grad_norm": 0.16340851783752441,
      "learning_rate": 0.00019914448613738106,
      "loss": 6.9417,
      "step": 4
    },
    {
      "epoch": 0.002258355916892502,
      "grad_norm": 0.18675823509693146,
      "learning_rate": 0.00019807852804032305,
      "loss": 6.9435,
      "step": 5
    },
    {
      "epoch": 0.0027100271002710027,
      "grad_norm": 0.16288436949253082,
      "learning_rate": 0.00019659258262890683,
      "loss": 6.9424,
      "step": 6
    },
    {
      "epoch": 0.0031616982836495033,
      "grad_norm": 0.19281183183193207,
      "learning_rate": 0.0001946930129495106,
      "loss": 6.9353,
      "step": 7
    },
    {
      "epoch": 0.0036133694670280035,
      "grad_norm": 0.18037211894989014,
      "learning_rate": 0.0001923879532511287,
      "loss": 6.9329,
      "step": 8
    },
    {
      "epoch": 0.0040650406504065045,
      "grad_norm": 0.18146973848342896,
      "learning_rate": 0.00018968727415326884,
      "loss": 6.9382,
      "step": 9
    },
    {
      "epoch": 0.004516711833785004,
      "grad_norm": 0.2021256536245346,
      "learning_rate": 0.00018660254037844388,
      "loss": 6.9314,
      "step": 10
    },
    {
      "epoch": 0.004968383017163505,
      "grad_norm": 0.20851069688796997,
      "learning_rate": 0.00018314696123025454,
      "loss": 6.9346,
      "step": 11
    },
    {
      "epoch": 0.005420054200542005,
      "grad_norm": 0.20332486927509308,
      "learning_rate": 0.00017933533402912354,
      "loss": 6.9377,
      "step": 12
    },
    {
      "epoch": 0.005871725383920506,
      "grad_norm": 0.11624668538570404,
      "learning_rate": 0.00017518398074789775,
      "loss": 6.9365,
      "step": 13
    },
    {
      "epoch": 0.006323396567299007,
      "grad_norm": 0.13349321484565735,
      "learning_rate": 0.00017071067811865476,
      "loss": 6.9301,
      "step": 14
    },
    {
      "epoch": 0.006775067750677507,
      "grad_norm": 0.1457970142364502,
      "learning_rate": 0.00016593458151000688,
      "loss": 6.9293,
      "step": 15
    },
    {
      "epoch": 0.007226738934056007,
      "grad_norm": 0.1603357046842575,
      "learning_rate": 0.00016087614290087208,
      "loss": 6.9314,
      "step": 16
    },
    {
      "epoch": 0.0076784101174345075,
      "grad_norm": 0.16457444429397583,
      "learning_rate": 0.00015555702330196023,
      "loss": 6.9303,
      "step": 17
    },
    {
      "epoch": 0.008130081300813009,
      "grad_norm": 0.20100098848342896,
      "learning_rate": 0.00015000000000000001,
      "loss": 6.9251,
      "step": 18
    },
    {
      "epoch": 0.008581752484191508,
      "grad_norm": 0.18849341571331024,
      "learning_rate": 0.00014422886902190014,
      "loss": 6.9223,
      "step": 19
    },
    {
      "epoch": 0.009033423667570008,
      "grad_norm": 0.18996189534664154,
      "learning_rate": 0.000138268343236509,
      "loss": 6.9237,
      "step": 20
    },
    {
      "epoch": 0.009485094850948509,
      "grad_norm": 0.1919945478439331,
      "learning_rate": 0.00013214394653031616,
      "loss": 6.9267,
      "step": 21
    },
    {
      "epoch": 0.00993676603432701,
      "grad_norm": 0.2134779840707779,
      "learning_rate": 0.00012588190451025207,
      "loss": 6.9228,
      "step": 22
    },
    {
      "epoch": 0.01038843721770551,
      "grad_norm": 0.22595034539699554,
      "learning_rate": 0.00011950903220161285,
      "loss": 6.9165,
      "step": 23
    },
    {
      "epoch": 0.01084010840108401,
      "grad_norm": 0.22315850853919983,
      "learning_rate": 0.00011305261922200519,
      "loss": 6.9146,
      "step": 24
    },
    {
      "epoch": 0.011291779584462511,
      "grad_norm": 0.2357320338487625,
      "learning_rate": 0.00010654031292301432,
      "loss": 6.9185,
      "step": 25
    },
    {
      "epoch": 0.011291779584462511,
      "eval_loss": 6.917991638183594,
      "eval_runtime": 4.0902,
      "eval_samples_per_second": 911.681,
      "eval_steps_per_second": 114.174,
      "step": 25
    },
    {
      "epoch": 0.011743450767841012,
      "grad_norm": 0.12024138867855072,
      "learning_rate": 0.0001,
      "loss": 6.9329,
      "step": 26
    },
    {
      "epoch": 0.012195121951219513,
      "grad_norm": 0.12785819172859192,
      "learning_rate": 9.345968707698569e-05,
      "loss": 6.9298,
      "step": 27
    },
    {
      "epoch": 0.012646793134598013,
      "grad_norm": 0.14583264291286469,
      "learning_rate": 8.694738077799488e-05,
      "loss": 6.9229,
      "step": 28
    },
    {
      "epoch": 0.013098464317976514,
      "grad_norm": 0.17286351323127747,
      "learning_rate": 8.049096779838719e-05,
      "loss": 6.9189,
      "step": 29
    },
    {
      "epoch": 0.013550135501355014,
      "grad_norm": 0.18918299674987793,
      "learning_rate": 7.411809548974792e-05,
      "loss": 6.9196,
      "step": 30
    },
    {
      "epoch": 0.014001806684733513,
      "grad_norm": 0.19743739068508148,
      "learning_rate": 6.785605346968386e-05,
      "loss": 6.9115,
      "step": 31
    },
    {
      "epoch": 0.014453477868112014,
      "grad_norm": 0.20110255479812622,
      "learning_rate": 6.173165676349103e-05,
      "loss": 6.9122,
      "step": 32
    },
    {
      "epoch": 0.014905149051490514,
      "grad_norm": 0.19309815764427185,
      "learning_rate": 5.577113097809989e-05,
      "loss": 6.9141,
      "step": 33
    },
    {
      "epoch": 0.015356820234869015,
      "grad_norm": 0.2259165495634079,
      "learning_rate": 5.000000000000002e-05,
      "loss": 6.9052,
      "step": 34
    },
    {
      "epoch": 0.015808491418247517,
      "grad_norm": 0.2182929962873459,
      "learning_rate": 4.444297669803981e-05,
      "loss": 6.9042,
      "step": 35
    },
    {
      "epoch": 0.016260162601626018,
      "grad_norm": 0.23973923921585083,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 6.9081,
      "step": 36
    },
    {
      "epoch": 0.016711833785004515,
      "grad_norm": 0.21056023240089417,
      "learning_rate": 3.406541848999312e-05,
      "loss": 6.9138,
      "step": 37
    },
    {
      "epoch": 0.017163504968383016,
      "grad_norm": 0.14639106392860413,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 6.9281,
      "step": 38
    },
    {
      "epoch": 0.017615176151761516,
      "grad_norm": 0.12287917733192444,
      "learning_rate": 2.4816019252102273e-05,
      "loss": 6.9262,
      "step": 39
    },
    {
      "epoch": 0.018066847335140017,
      "grad_norm": 0.1542358100414276,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 6.9188,
      "step": 40
    },
    {
      "epoch": 0.018518518518518517,
      "grad_norm": 0.18276679515838623,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 6.915,
      "step": 41
    },
    {
      "epoch": 0.018970189701897018,
      "grad_norm": 0.19573663175106049,
      "learning_rate": 1.339745962155613e-05,
      "loss": 6.9107,
      "step": 42
    },
    {
      "epoch": 0.01942186088527552,
      "grad_norm": 0.1964210867881775,
      "learning_rate": 1.0312725846731175e-05,
      "loss": 6.9139,
      "step": 43
    },
    {
      "epoch": 0.01987353206865402,
      "grad_norm": 0.21382571756839752,
      "learning_rate": 7.612046748871327e-06,
      "loss": 6.9072,
      "step": 44
    },
    {
      "epoch": 0.02032520325203252,
      "grad_norm": 0.21687394380569458,
      "learning_rate": 5.306987050489442e-06,
      "loss": 6.9014,
      "step": 45
    },
    {
      "epoch": 0.02077687443541102,
      "grad_norm": 0.2191643863916397,
      "learning_rate": 3.40741737109318e-06,
      "loss": 6.9125,
      "step": 46
    },
    {
      "epoch": 0.02122854561878952,
      "grad_norm": 0.21285398304462433,
      "learning_rate": 1.921471959676957e-06,
      "loss": 6.9083,
      "step": 47
    },
    {
      "epoch": 0.02168021680216802,
      "grad_norm": 0.20628215372562408,
      "learning_rate": 8.555138626189618e-07,
      "loss": 6.9121,
      "step": 48
    },
    {
      "epoch": 0.022131887985546522,
      "grad_norm": 0.21727484464645386,
      "learning_rate": 2.141076761396521e-07,
      "loss": 6.9097,
      "step": 49
    },
    {
      "epoch": 0.022583559168925023,
      "grad_norm": 0.2488403469324112,
      "learning_rate": 0.0,
      "loss": 6.9046,
      "step": 50
    },
    {
      "epoch": 0.022583559168925023,
      "eval_loss": 6.910342693328857,
      "eval_runtime": 4.1189,
      "eval_samples_per_second": 905.333,
      "eval_steps_per_second": 113.379,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2360158470144.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}