{ "best_metric": 3.582388401031494, "best_model_checkpoint": "miner_id_24/checkpoint-50", "epoch": 0.007033430775655537, "eval_steps": 25, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.00014066861551311076, "grad_norm": 0.9427841305732727, "learning_rate": 5e-05, "loss": 2.2525, "step": 1 }, { "epoch": 0.00014066861551311076, "eval_loss": 5.381374359130859, "eval_runtime": 4.305, "eval_samples_per_second": 11.614, "eval_steps_per_second": 3.02, "step": 1 }, { "epoch": 0.00028133723102622153, "grad_norm": 1.4493043422698975, "learning_rate": 0.0001, "loss": 3.0518, "step": 2 }, { "epoch": 0.00042200584653933227, "grad_norm": 1.8600473403930664, "learning_rate": 9.990365154573717e-05, "loss": 3.6448, "step": 3 }, { "epoch": 0.0005626744620524431, "grad_norm": 2.302962303161621, "learning_rate": 9.961501876182148e-05, "loss": 4.4023, "step": 4 }, { "epoch": 0.0007033430775655537, "grad_norm": 3.376417875289917, "learning_rate": 9.913533761814537e-05, "loss": 4.6614, "step": 5 }, { "epoch": 0.0008440116930786645, "grad_norm": 3.2473976612091064, "learning_rate": 9.846666218300807e-05, "loss": 4.4394, "step": 6 }, { "epoch": 0.0009846803085917753, "grad_norm": 2.340790033340454, "learning_rate": 9.761185582727977e-05, "loss": 4.2634, "step": 7 }, { "epoch": 0.0011253489241048861, "grad_norm": 2.2660000324249268, "learning_rate": 9.657457896300791e-05, "loss": 4.3473, "step": 8 }, { "epoch": 0.0012660175396179967, "grad_norm": 2.196218729019165, "learning_rate": 9.535927336897098e-05, "loss": 4.26, "step": 9 }, { "epoch": 0.0014066861551311075, "grad_norm": 2.6228277683258057, "learning_rate": 9.397114317029975e-05, "loss": 4.171, "step": 10 }, { "epoch": 0.0015473547706442183, "grad_norm": 3.1706738471984863, "learning_rate": 9.241613255361455e-05, "loss": 4.4074, "step": 11 }, { "epoch": 0.001688023386157329, "grad_norm": 4.587759494781494, "learning_rate": 9.070090031310558e-05, "loss": 4.6637, "step": 12 }, { "epoch": 0.0018286920016704399, "grad_norm": 2.9576611518859863, "learning_rate": 8.883279133655399e-05, "loss": 2.4479, "step": 13 }, { "epoch": 0.0019693606171835507, "grad_norm": 3.024545907974243, "learning_rate": 8.681980515339464e-05, "loss": 2.8594, "step": 14 }, { "epoch": 0.0021100292326966612, "grad_norm": 2.2302956581115723, "learning_rate": 8.467056167950311e-05, "loss": 2.9605, "step": 15 }, { "epoch": 0.0022506978482097722, "grad_norm": 1.8585838079452515, "learning_rate": 8.239426430539243e-05, "loss": 3.3364, "step": 16 }, { "epoch": 0.002391366463722883, "grad_norm": 2.91164493560791, "learning_rate": 8.000066048588211e-05, "loss": 3.79, "step": 17 }, { "epoch": 0.0025320350792359934, "grad_norm": 2.227503538131714, "learning_rate": 7.75e-05, "loss": 3.743, "step": 18 }, { "epoch": 0.0026727036947491044, "grad_norm": 1.9422775506973267, "learning_rate": 7.490299105985507e-05, "loss": 3.9933, "step": 19 }, { "epoch": 0.002813372310262215, "grad_norm": 1.9657392501831055, "learning_rate": 7.222075445642904e-05, "loss": 3.963, "step": 20 }, { "epoch": 0.002954040925775326, "grad_norm": 2.104684591293335, "learning_rate": 6.946477593864228e-05, "loss": 3.8063, "step": 21 }, { "epoch": 0.0030947095412884366, "grad_norm": 2.2109615802764893, "learning_rate": 6.664685702961344e-05, "loss": 4.0589, "step": 22 }, { "epoch": 0.0032353781568015476, "grad_norm": 2.330768585205078, "learning_rate": 6.377906449072578e-05, "loss": 3.9557, "step": 23 }, { "epoch": 0.003376046772314658, 
"grad_norm": 2.38495135307312, "learning_rate": 6.087367864990233e-05, "loss": 4.3077, "step": 24 }, { "epoch": 0.0035167153878277687, "grad_norm": 3.6864495277404785, "learning_rate": 5.794314081535644e-05, "loss": 4.858, "step": 25 }, { "epoch": 0.0035167153878277687, "eval_loss": 3.688027858734131, "eval_runtime": 4.3449, "eval_samples_per_second": 11.508, "eval_steps_per_second": 2.992, "step": 25 }, { "epoch": 0.0036573840033408797, "grad_norm": 0.7148618698120117, "learning_rate": 5.500000000000001e-05, "loss": 1.8397, "step": 26 }, { "epoch": 0.0037980526188539903, "grad_norm": 0.7965158224105835, "learning_rate": 5.205685918464356e-05, "loss": 2.5025, "step": 27 }, { "epoch": 0.003938721234367101, "grad_norm": 0.8529465198516846, "learning_rate": 4.912632135009769e-05, "loss": 2.9173, "step": 28 }, { "epoch": 0.0040793898498802115, "grad_norm": 1.0659992694854736, "learning_rate": 4.6220935509274235e-05, "loss": 3.4396, "step": 29 }, { "epoch": 0.0042200584653933225, "grad_norm": 1.1692177057266235, "learning_rate": 4.3353142970386564e-05, "loss": 3.6577, "step": 30 }, { "epoch": 0.0043607270809064335, "grad_norm": 1.3070368766784668, "learning_rate": 4.053522406135775e-05, "loss": 3.7179, "step": 31 }, { "epoch": 0.0045013956964195445, "grad_norm": 1.3615529537200928, "learning_rate": 3.777924554357096e-05, "loss": 3.6686, "step": 32 }, { "epoch": 0.004642064311932655, "grad_norm": 1.3193974494934082, "learning_rate": 3.509700894014496e-05, "loss": 3.7734, "step": 33 }, { "epoch": 0.004782732927445766, "grad_norm": 1.5574662685394287, "learning_rate": 3.250000000000001e-05, "loss": 3.7865, "step": 34 }, { "epoch": 0.004923401542958877, "grad_norm": 1.6892421245574951, "learning_rate": 2.9999339514117912e-05, "loss": 3.9089, "step": 35 }, { "epoch": 0.005064070158471987, "grad_norm": 1.7540158033370972, "learning_rate": 2.760573569460757e-05, "loss": 4.0583, "step": 36 }, { "epoch": 0.005204738773985098, "grad_norm": 2.6319198608398438, "learning_rate": 2.53294383204969e-05, "loss": 4.4668, "step": 37 }, { "epoch": 0.005345407389498209, "grad_norm": 0.6882954835891724, "learning_rate": 2.3180194846605367e-05, "loss": 1.892, "step": 38 }, { "epoch": 0.00548607600501132, "grad_norm": 0.6292988657951355, "learning_rate": 2.1167208663446025e-05, "loss": 2.3763, "step": 39 }, { "epoch": 0.00562674462052443, "grad_norm": 0.8061068654060364, "learning_rate": 1.9299099686894423e-05, "loss": 2.7793, "step": 40 }, { "epoch": 0.005767413236037541, "grad_norm": 0.9110883474349976, "learning_rate": 1.758386744638546e-05, "loss": 3.047, "step": 41 }, { "epoch": 0.005908081851550652, "grad_norm": 1.0115941762924194, "learning_rate": 1.602885682970026e-05, "loss": 3.51, "step": 42 }, { "epoch": 0.006048750467063762, "grad_norm": 1.0895260572433472, "learning_rate": 1.464072663102903e-05, "loss": 3.5791, "step": 43 }, { "epoch": 0.006189419082576873, "grad_norm": 1.222615122795105, "learning_rate": 1.3425421036992098e-05, "loss": 3.7129, "step": 44 }, { "epoch": 0.006330087698089984, "grad_norm": 1.33371901512146, "learning_rate": 1.2388144172720251e-05, "loss": 3.7673, "step": 45 }, { "epoch": 0.006470756313603095, "grad_norm": 1.4590684175491333, "learning_rate": 1.1533337816991932e-05, "loss": 3.6255, "step": 46 }, { "epoch": 0.006611424929116205, "grad_norm": 1.6010714769363403, "learning_rate": 1.0864662381854632e-05, "loss": 3.5992, "step": 47 }, { "epoch": 0.006752093544629316, "grad_norm": 2.1239309310913086, "learning_rate": 1.0384981238178534e-05, "loss": 3.9121, "step": 48 }, { "epoch": 
0.006892762160142427, "grad_norm": 2.1113851070404053, "learning_rate": 1.0096348454262845e-05, "loss": 4.0347, "step": 49 }, { "epoch": 0.007033430775655537, "grad_norm": 3.646865129470825, "learning_rate": 1e-05, "loss": 4.6202, "step": 50 }, { "epoch": 0.007033430775655537, "eval_loss": 3.582388401031494, "eval_runtime": 4.4168, "eval_samples_per_second": 11.32, "eval_steps_per_second": 2.943, "step": 50 } ], "logging_steps": 1, "max_steps": 50, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 1, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 5.968083617316864e+17, "train_batch_size": 1, "trial_name": null, "trial_params": null }