{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.11747430249632893, "eval_steps": 5, "global_step": 20, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.005873715124816446, "grad_norm": 0.0762479305267334, "learning_rate": 1e-05, "loss": 10.3821, "step": 1 }, { "epoch": 0.005873715124816446, "eval_loss": NaN, "eval_runtime": 13.5498, "eval_samples_per_second": 84.651, "eval_steps_per_second": 10.627, "step": 1 }, { "epoch": 0.011747430249632892, "grad_norm": 0.07673076540231705, "learning_rate": 2e-05, "loss": 10.3819, "step": 2 }, { "epoch": 0.01762114537444934, "grad_norm": 0.07368427515029907, "learning_rate": 3e-05, "loss": 10.3821, "step": 3 }, { "epoch": 0.023494860499265784, "grad_norm": 0.07770584523677826, "learning_rate": 4e-05, "loss": 10.3819, "step": 4 }, { "epoch": 0.02936857562408223, "grad_norm": 0.0834246352314949, "learning_rate": 5e-05, "loss": 10.3819, "step": 5 }, { "epoch": 0.02936857562408223, "eval_loss": NaN, "eval_runtime": 1.4517, "eval_samples_per_second": 790.082, "eval_steps_per_second": 99.191, "step": 5 }, { "epoch": 0.03524229074889868, "grad_norm": 0.07079087197780609, "learning_rate": 6e-05, "loss": 10.3819, "step": 6 }, { "epoch": 0.041116005873715125, "grad_norm": 0.07339663058519363, "learning_rate": 7e-05, "loss": 10.3814, "step": 7 }, { "epoch": 0.04698972099853157, "grad_norm": 0.07698862254619598, "learning_rate": 8e-05, "loss": 10.3817, "step": 8 }, { "epoch": 0.05286343612334802, "grad_norm": 0.07521183043718338, "learning_rate": 9e-05, "loss": 10.3813, "step": 9 }, { "epoch": 0.05873715124816446, "grad_norm": 0.08161938935518265, "learning_rate": 0.0001, "loss": 10.3804, "step": 10 }, { "epoch": 0.05873715124816446, "eval_loss": NaN, "eval_runtime": 1.4366, "eval_samples_per_second": 798.388, "eval_steps_per_second": 100.234, "step": 10 }, { "epoch": 0.06461086637298091, "grad_norm": 0.08185140043497086, "learning_rate": 9.755282581475769e-05, "loss": 10.3807, "step": 11 }, { "epoch": 0.07048458149779736, "grad_norm": 0.07860841602087021, "learning_rate": 9.045084971874738e-05, "loss": 10.3804, "step": 12 }, { "epoch": 0.0763582966226138, "grad_norm": 0.08291763812303543, "learning_rate": 7.938926261462366e-05, "loss": 10.3801, "step": 13 }, { "epoch": 0.08223201174743025, "grad_norm": 0.07697901129722595, "learning_rate": 6.545084971874738e-05, "loss": 10.3793, "step": 14 }, { "epoch": 0.0881057268722467, "grad_norm": 0.08883082866668701, "learning_rate": 5e-05, "loss": 10.3791, "step": 15 }, { "epoch": 0.0881057268722467, "eval_loss": NaN, "eval_runtime": 1.4409, "eval_samples_per_second": 796.022, "eval_steps_per_second": 99.936, "step": 15 }, { "epoch": 0.09397944199706314, "grad_norm": 0.07468053698539734, "learning_rate": 3.4549150281252636e-05, "loss": 10.3794, "step": 16 }, { "epoch": 0.09985315712187959, "grad_norm": 0.07500291615724564, "learning_rate": 2.061073738537635e-05, "loss": 10.3783, "step": 17 }, { "epoch": 0.10572687224669604, "grad_norm": 0.07731679826974869, "learning_rate": 9.549150281252633e-06, "loss": 10.3793, "step": 18 }, { "epoch": 0.11160058737151249, "grad_norm": 0.07920215278863907, "learning_rate": 2.4471741852423237e-06, "loss": 10.3792, "step": 19 }, { "epoch": 0.11747430249632893, "grad_norm": 0.07347231358289719, "learning_rate": 0.0, "loss": 10.3791, "step": 20 }, { "epoch": 0.11747430249632893, "eval_loss": NaN, "eval_runtime": 1.4584, "eval_samples_per_second": 786.475, "eval_steps_per_second": 98.738, "step": 20 } 
], "logging_steps": 1, "max_steps": 20, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 5, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 33473565818880.0, "train_batch_size": 1, "trial_name": null, "trial_params": null }