{
  "best_metric": 3.585186243057251,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.007033430775655537,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00014066861551311076,
      "grad_norm": 0.9218872785568237,
      "learning_rate": 5e-05,
      "loss": 2.2525,
      "step": 1
    },
    {
      "epoch": 0.00014066861551311076,
      "eval_loss": 5.381374359130859,
      "eval_runtime": 4.3276,
      "eval_samples_per_second": 11.554,
      "eval_steps_per_second": 3.004,
      "step": 1
    },
    {
      "epoch": 0.00028133723102622153,
      "grad_norm": 1.4398459196090698,
      "learning_rate": 0.0001,
      "loss": 3.0518,
      "step": 2
    },
    {
      "epoch": 0.00042200584653933227,
      "grad_norm": 1.8477466106414795,
      "learning_rate": 9.990365154573717e-05,
      "loss": 3.6471,
      "step": 3
    },
    {
      "epoch": 0.0005626744620524431,
      "grad_norm": 2.2851502895355225,
      "learning_rate": 9.961501876182148e-05,
      "loss": 4.4055,
      "step": 4
    },
    {
      "epoch": 0.0007033430775655537,
      "grad_norm": 3.3054018020629883,
      "learning_rate": 9.913533761814537e-05,
      "loss": 4.6705,
      "step": 5
    },
    {
      "epoch": 0.0008440116930786645,
      "grad_norm": 3.301938056945801,
      "learning_rate": 9.846666218300807e-05,
      "loss": 4.4529,
      "step": 6
    },
    {
      "epoch": 0.0009846803085917753,
      "grad_norm": 2.362037420272827,
      "learning_rate": 9.761185582727977e-05,
      "loss": 4.2649,
      "step": 7
    },
    {
      "epoch": 0.0011253489241048861,
      "grad_norm": 2.303856372833252,
      "learning_rate": 9.657457896300791e-05,
      "loss": 4.3427,
      "step": 8
    },
    {
      "epoch": 0.0012660175396179967,
      "grad_norm": 2.2928717136383057,
      "learning_rate": 9.535927336897098e-05,
      "loss": 4.2532,
      "step": 9
    },
    {
      "epoch": 0.0014066861551311075,
      "grad_norm": 2.7700459957122803,
      "learning_rate": 9.397114317029975e-05,
      "loss": 4.1642,
      "step": 10
    },
    {
      "epoch": 0.0015473547706442183,
      "grad_norm": 3.446438789367676,
      "learning_rate": 9.241613255361455e-05,
      "loss": 4.4021,
      "step": 11
    },
    {
      "epoch": 0.001688023386157329,
      "grad_norm": 4.7801079750061035,
      "learning_rate": 9.070090031310558e-05,
      "loss": 4.6508,
      "step": 12
    },
    {
      "epoch": 0.0018286920016704399,
      "grad_norm": 3.1333518028259277,
      "learning_rate": 8.883279133655399e-05,
      "loss": 2.4654,
      "step": 13
    },
    {
      "epoch": 0.0019693606171835507,
      "grad_norm": 3.198070526123047,
      "learning_rate": 8.681980515339464e-05,
      "loss": 2.8728,
      "step": 14
    },
    {
      "epoch": 0.0021100292326966612,
      "grad_norm": 2.388134002685547,
      "learning_rate": 8.467056167950311e-05,
      "loss": 2.9741,
      "step": 15
    },
    {
      "epoch": 0.0022506978482097722,
      "grad_norm": 2.387596368789673,
      "learning_rate": 8.239426430539243e-05,
      "loss": 3.3491,
      "step": 16
    },
    {
      "epoch": 0.002391366463722883,
      "grad_norm": 3.9135308265686035,
      "learning_rate": 8.000066048588211e-05,
      "loss": 3.8081,
      "step": 17
    },
    {
      "epoch": 0.0025320350792359934,
      "grad_norm": 2.6444458961486816,
      "learning_rate": 7.75e-05,
      "loss": 3.7437,
      "step": 18
    },
    {
      "epoch": 0.0026727036947491044,
      "grad_norm": 1.9664329290390015,
      "learning_rate": 7.490299105985507e-05,
      "loss": 3.9872,
      "step": 19
    },
    {
      "epoch": 0.002813372310262215,
      "grad_norm": 1.8564341068267822,
      "learning_rate": 7.222075445642904e-05,
      "loss": 3.9519,
      "step": 20
    },
    {
      "epoch": 0.002954040925775326,
      "grad_norm": 2.011428117752075,
      "learning_rate": 6.946477593864228e-05,
      "loss": 3.7962,
      "step": 21
    },
    {
      "epoch": 0.0030947095412884366,
      "grad_norm": 2.2582523822784424,
      "learning_rate": 6.664685702961344e-05,
      "loss": 4.0475,
      "step": 22
    },
    {
      "epoch": 0.0032353781568015476,
      "grad_norm": 2.4788050651550293,
      "learning_rate": 6.377906449072578e-05,
      "loss": 3.9397,
      "step": 23
    },
    {
      "epoch": 0.003376046772314658,
      "grad_norm": 2.514291763305664,
      "learning_rate": 6.087367864990233e-05,
      "loss": 4.2982,
      "step": 24
    },
    {
      "epoch": 0.0035167153878277687,
      "grad_norm": 3.6239752769470215,
      "learning_rate": 5.794314081535644e-05,
      "loss": 4.8512,
      "step": 25
    },
    {
      "epoch": 0.0035167153878277687,
      "eval_loss": 3.6986312866210938,
      "eval_runtime": 4.3224,
      "eval_samples_per_second": 11.568,
      "eval_steps_per_second": 3.008,
      "step": 25
    },
    {
      "epoch": 0.0036573840033408797,
      "grad_norm": 0.6881301403045654,
      "learning_rate": 5.500000000000001e-05,
      "loss": 1.8389,
      "step": 26
    },
    {
      "epoch": 0.0037980526188539903,
      "grad_norm": 0.792911171913147,
      "learning_rate": 5.205685918464356e-05,
      "loss": 2.5026,
      "step": 27
    },
    {
      "epoch": 0.003938721234367101,
      "grad_norm": 0.8642730116844177,
      "learning_rate": 4.912632135009769e-05,
      "loss": 2.9211,
      "step": 28
    },
    {
      "epoch": 0.0040793898498802115,
      "grad_norm": 1.2017244100570679,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 3.4445,
      "step": 29
    },
    {
      "epoch": 0.0042200584653933225,
      "grad_norm": 1.1826562881469727,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 3.6573,
      "step": 30
    },
    {
      "epoch": 0.0043607270809064335,
      "grad_norm": 1.3540639877319336,
      "learning_rate": 4.053522406135775e-05,
      "loss": 3.7198,
      "step": 31
    },
    {
      "epoch": 0.0045013956964195445,
      "grad_norm": 1.3807733058929443,
      "learning_rate": 3.777924554357096e-05,
      "loss": 3.6719,
      "step": 32
    },
    {
      "epoch": 0.004642064311932655,
      "grad_norm": 1.3128817081451416,
      "learning_rate": 3.509700894014496e-05,
      "loss": 3.7725,
      "step": 33
    },
    {
      "epoch": 0.004782732927445766,
      "grad_norm": 1.4953864812850952,
      "learning_rate": 3.250000000000001e-05,
      "loss": 3.7893,
      "step": 34
    },
    {
      "epoch": 0.004923401542958877,
      "grad_norm": 1.6556501388549805,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 3.9066,
      "step": 35
    },
    {
      "epoch": 0.005064070158471987,
      "grad_norm": 1.7631824016571045,
      "learning_rate": 2.760573569460757e-05,
      "loss": 4.0591,
      "step": 36
    },
    {
      "epoch": 0.005204738773985098,
      "grad_norm": 2.5257699489593506,
      "learning_rate": 2.53294383204969e-05,
      "loss": 4.4642,
      "step": 37
    },
    {
      "epoch": 0.005345407389498209,
      "grad_norm": 0.6542789936065674,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 1.8916,
      "step": 38
    },
    {
      "epoch": 0.00548607600501132,
      "grad_norm": 0.5933400392532349,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 2.3769,
      "step": 39
    },
    {
      "epoch": 0.00562674462052443,
      "grad_norm": 0.7565528154373169,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 2.7812,
      "step": 40
    },
    {
      "epoch": 0.005767413236037541,
      "grad_norm": 0.8536903858184814,
      "learning_rate": 1.758386744638546e-05,
      "loss": 3.0485,
      "step": 41
    },
    {
      "epoch": 0.005908081851550652,
      "grad_norm": 0.993644654750824,
      "learning_rate": 1.602885682970026e-05,
      "loss": 3.5119,
      "step": 42
    },
    {
      "epoch": 0.006048750467063762,
      "grad_norm": 1.0827058553695679,
      "learning_rate": 1.464072663102903e-05,
      "loss": 3.5816,
      "step": 43
    },
    {
      "epoch": 0.006189419082576873,
      "grad_norm": 1.3338035345077515,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 3.7165,
      "step": 44
    },
    {
      "epoch": 0.006330087698089984,
      "grad_norm": 1.4324630498886108,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 3.7711,
      "step": 45
    },
    {
      "epoch": 0.006470756313603095,
      "grad_norm": 1.5651832818984985,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 3.6286,
      "step": 46
    },
    {
      "epoch": 0.006611424929116205,
      "grad_norm": 1.6895540952682495,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 3.5996,
      "step": 47
    },
    {
      "epoch": 0.006752093544629316,
      "grad_norm": 2.1800448894500732,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 3.9122,
      "step": 48
    },
    {
      "epoch": 0.006892762160142427,
      "grad_norm": 2.110852003097534,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 4.0356,
      "step": 49
    },
    {
      "epoch": 0.007033430775655537,
      "grad_norm": 3.81133770942688,
      "learning_rate": 1e-05,
      "loss": 4.6192,
      "step": 50
    },
    {
      "epoch": 0.007033430775655537,
      "eval_loss": 3.585186243057251,
      "eval_runtime": 4.3231,
      "eval_samples_per_second": 11.566,
      "eval_steps_per_second": 3.007,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.968083617316864e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}