{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.013427042903598028, "eval_steps": 5, "global_step": 20, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0006713521451799014, "grad_norm": 0.3204704225063324, "learning_rate": 1e-05, "loss": 0.2534, "step": 1 }, { "epoch": 0.0006713521451799014, "eval_loss": 0.34151291847229004, "eval_runtime": 410.3846, "eval_samples_per_second": 24.453, "eval_steps_per_second": 6.114, "step": 1 }, { "epoch": 0.0013427042903598027, "grad_norm": 0.36491429805755615, "learning_rate": 2e-05, "loss": 0.2745, "step": 2 }, { "epoch": 0.0020140564355397043, "grad_norm": 0.3138849139213562, "learning_rate": 3e-05, "loss": 0.2594, "step": 3 }, { "epoch": 0.0026854085807196055, "grad_norm": 0.3455110788345337, "learning_rate": 4e-05, "loss": 0.2766, "step": 4 }, { "epoch": 0.003356760725899507, "grad_norm": 0.3628172278404236, "learning_rate": 5e-05, "loss": 0.2775, "step": 5 }, { "epoch": 0.003356760725899507, "eval_loss": 0.31641557812690735, "eval_runtime": 408.4107, "eval_samples_per_second": 24.571, "eval_steps_per_second": 6.143, "step": 5 }, { "epoch": 0.004028112871079409, "grad_norm": 0.3577265739440918, "learning_rate": 6e-05, "loss": 0.2759, "step": 6 }, { "epoch": 0.00469946501625931, "grad_norm": 0.3720860183238983, "learning_rate": 7e-05, "loss": 0.2451, "step": 7 }, { "epoch": 0.005370817161439211, "grad_norm": 0.38583680987358093, "learning_rate": 8e-05, "loss": 0.202, "step": 8 }, { "epoch": 0.006042169306619113, "grad_norm": 0.43849220871925354, "learning_rate": 9e-05, "loss": 0.1772, "step": 9 }, { "epoch": 0.006713521451799014, "grad_norm": 0.4130704402923584, "learning_rate": 0.0001, "loss": 0.1453, "step": 10 }, { "epoch": 0.006713521451799014, "eval_loss": 0.1879773885011673, "eval_runtime": 408.4313, "eval_samples_per_second": 24.57, "eval_steps_per_second": 6.143, "step": 10 }, { "epoch": 0.007384873596978915, "grad_norm": 0.4552391469478607, "learning_rate": 9.755282581475769e-05, "loss": 0.1409, "step": 11 }, { "epoch": 0.008056225742158817, "grad_norm": 0.50043123960495, "learning_rate": 9.045084971874738e-05, "loss": 0.1259, "step": 12 }, { "epoch": 0.008727577887338718, "grad_norm": 0.44272345304489136, "learning_rate": 7.938926261462366e-05, "loss": 0.0926, "step": 13 }, { "epoch": 0.00939893003251862, "grad_norm": 0.44065895676612854, "learning_rate": 6.545084971874738e-05, "loss": 0.0806, "step": 14 }, { "epoch": 0.01007028217769852, "grad_norm": 0.3156546950340271, "learning_rate": 5e-05, "loss": 0.0583, "step": 15 }, { "epoch": 0.01007028217769852, "eval_loss": 0.06674635410308838, "eval_runtime": 408.516, "eval_samples_per_second": 24.565, "eval_steps_per_second": 6.142, "step": 15 }, { "epoch": 0.010741634322878422, "grad_norm": 0.23266065120697021, "learning_rate": 3.4549150281252636e-05, "loss": 0.0488, "step": 16 }, { "epoch": 0.011412986468058323, "grad_norm": 0.2226492166519165, "learning_rate": 2.061073738537635e-05, "loss": 0.0493, "step": 17 }, { "epoch": 0.012084338613238226, "grad_norm": 0.23076535761356354, "learning_rate": 9.549150281252633e-06, "loss": 0.0403, "step": 18 }, { "epoch": 0.012755690758418127, "grad_norm": 0.21546129882335663, "learning_rate": 2.4471741852423237e-06, "loss": 0.0362, "step": 19 }, { "epoch": 0.013427042903598028, "grad_norm": 0.23565559089183807, "learning_rate": 0.0, "loss": 0.0457, "step": 20 }, { "epoch": 0.013427042903598028, "eval_loss": 0.05111876502633095, "eval_runtime": 408.1451, 
"eval_samples_per_second": 24.587, "eval_steps_per_second": 6.147, "step": 20 } ], "logging_steps": 1, "max_steps": 20, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 5, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 8.360120555667456e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }