{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.0028410705153701916, "eval_steps": 4, "global_step": 25, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.00011364282061480765, "grad_norm": NaN, "learning_rate": 1.0000000000000002e-06, "loss": 0.0, "step": 1 }, { "epoch": 0.00011364282061480765, "eval_loss": NaN, "eval_runtime": 431.3888, "eval_samples_per_second": 8.589, "eval_steps_per_second": 2.149, "step": 1 }, { "epoch": 0.0002272856412296153, "grad_norm": NaN, "learning_rate": 2.0000000000000003e-06, "loss": 0.0, "step": 2 }, { "epoch": 0.000340928461844423, "grad_norm": NaN, "learning_rate": 3e-06, "loss": 0.0, "step": 3 }, { "epoch": 0.0004545712824592306, "grad_norm": NaN, "learning_rate": 4.000000000000001e-06, "loss": 0.0, "step": 4 }, { "epoch": 0.0004545712824592306, "eval_loss": NaN, "eval_runtime": 431.0085, "eval_samples_per_second": 8.596, "eval_steps_per_second": 2.151, "step": 4 }, { "epoch": 0.0005682141030740383, "grad_norm": NaN, "learning_rate": 5e-06, "loss": 0.0, "step": 5 }, { "epoch": 0.000681856923688846, "grad_norm": NaN, "learning_rate": 6e-06, "loss": 0.0, "step": 6 }, { "epoch": 0.0007954997443036536, "grad_norm": NaN, "learning_rate": 7e-06, "loss": 0.0, "step": 7 }, { "epoch": 0.0009091425649184612, "grad_norm": NaN, "learning_rate": 8.000000000000001e-06, "loss": 0.0, "step": 8 }, { "epoch": 0.0009091425649184612, "eval_loss": NaN, "eval_runtime": 431.1027, "eval_samples_per_second": 8.594, "eval_steps_per_second": 2.15, "step": 8 }, { "epoch": 0.001022785385533269, "grad_norm": NaN, "learning_rate": 9e-06, "loss": 0.0, "step": 9 }, { "epoch": 0.0011364282061480766, "grad_norm": NaN, "learning_rate": 1e-05, "loss": 0.0, "step": 10 }, { "epoch": 0.0012500710267628842, "grad_norm": NaN, "learning_rate": 9.890738003669029e-06, "loss": 0.0, "step": 11 }, { "epoch": 0.001363713847377692, "grad_norm": NaN, "learning_rate": 9.567727288213005e-06, "loss": 0.0, "step": 12 }, { "epoch": 0.001363713847377692, "eval_loss": NaN, "eval_runtime": 431.6008, "eval_samples_per_second": 8.584, "eval_steps_per_second": 2.148, "step": 12 }, { "epoch": 0.0014773566679924996, "grad_norm": NaN, "learning_rate": 9.045084971874738e-06, "loss": 0.0, "step": 13 }, { "epoch": 0.0015909994886073072, "grad_norm": NaN, "learning_rate": 8.345653031794292e-06, "loss": 0.0, "step": 14 }, { "epoch": 0.0017046423092221148, "grad_norm": NaN, "learning_rate": 7.500000000000001e-06, "loss": 0.0, "step": 15 }, { "epoch": 0.0018182851298369225, "grad_norm": NaN, "learning_rate": 6.545084971874738e-06, "loss": 0.0, "step": 16 }, { "epoch": 0.0018182851298369225, "eval_loss": NaN, "eval_runtime": 430.7717, "eval_samples_per_second": 8.601, "eval_steps_per_second": 2.152, "step": 16 }, { "epoch": 0.0019319279504517303, "grad_norm": NaN, "learning_rate": 5.522642316338268e-06, "loss": 0.0, "step": 17 }, { "epoch": 0.002045570771066538, "grad_norm": NaN, "learning_rate": 4.477357683661734e-06, "loss": 0.0, "step": 18 }, { "epoch": 0.0021592135916813455, "grad_norm": NaN, "learning_rate": 3.4549150281252635e-06, "loss": 0.0, "step": 19 }, { "epoch": 0.002272856412296153, "grad_norm": NaN, "learning_rate": 2.5000000000000015e-06, "loss": 0.0, "step": 20 }, { "epoch": 0.002272856412296153, "eval_loss": NaN, "eval_runtime": 430.7811, "eval_samples_per_second": 8.601, "eval_steps_per_second": 2.152, "step": 20 }, { "epoch": 0.0023864992329109607, "grad_norm": NaN, "learning_rate": 1.6543469682057105e-06, "loss": 
0.0, "step": 21 }, { "epoch": 0.0025001420535257684, "grad_norm": NaN, "learning_rate": 9.549150281252633e-07, "loss": 0.0, "step": 22 }, { "epoch": 0.002613784874140576, "grad_norm": NaN, "learning_rate": 4.322727117869951e-07, "loss": 0.0, "step": 23 }, { "epoch": 0.002727427694755384, "grad_norm": NaN, "learning_rate": 1.0926199633097156e-07, "loss": 0.0, "step": 24 }, { "epoch": 0.002727427694755384, "eval_loss": NaN, "eval_runtime": 431.2707, "eval_samples_per_second": 8.591, "eval_steps_per_second": 2.149, "step": 24 }, { "epoch": 0.0028410705153701916, "grad_norm": NaN, "learning_rate": 0.0, "loss": 0.0, "step": 25 } ], "logging_steps": 1, "max_steps": 25, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 9273591044505600.0, "train_batch_size": 4, "trial_name": null, "trial_params": null }