{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.004377006127808579,
  "eval_steps": 2,
  "global_step": 15,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0002918004085205719,
      "grad_norm": 0.9763444662094116,
      "learning_rate": 2e-05,
      "loss": 1.2075,
      "step": 1
    },
    {
      "epoch": 0.0002918004085205719,
      "eval_loss": 1.429457187652588,
      "eval_runtime": 163.3614,
      "eval_samples_per_second": 8.833,
      "eval_steps_per_second": 4.42,
      "step": 1
    },
    {
      "epoch": 0.0005836008170411438,
      "grad_norm": 1.0415689945220947,
      "learning_rate": 4e-05,
      "loss": 1.4158,
      "step": 2
    },
    {
      "epoch": 0.0005836008170411438,
      "eval_loss": 1.4282734394073486,
      "eval_runtime": 162.055,
      "eval_samples_per_second": 8.904,
      "eval_steps_per_second": 4.455,
      "step": 2
    },
    {
      "epoch": 0.0008754012255617158,
      "grad_norm": 1.0212147235870361,
      "learning_rate": 6e-05,
      "loss": 1.2149,
      "step": 3
    },
    {
      "epoch": 0.0011672016340822876,
      "grad_norm": 0.8775227069854736,
      "learning_rate": 8e-05,
      "loss": 1.0194,
      "step": 4
    },
    {
      "epoch": 0.0011672016340822876,
      "eval_loss": 1.4084514379501343,
      "eval_runtime": 161.6261,
      "eval_samples_per_second": 8.928,
      "eval_steps_per_second": 4.467,
      "step": 4
    },
    {
      "epoch": 0.0014590020426028597,
      "grad_norm": 1.1688672304153442,
      "learning_rate": 0.0001,
      "loss": 1.3914,
      "step": 5
    },
    {
      "epoch": 0.0017508024511234317,
      "grad_norm": 1.005053997039795,
      "learning_rate": 9.755282581475769e-05,
      "loss": 1.2212,
      "step": 6
    },
    {
      "epoch": 0.0017508024511234317,
      "eval_loss": 1.3211736679077148,
      "eval_runtime": 161.7504,
      "eval_samples_per_second": 8.921,
      "eval_steps_per_second": 4.464,
      "step": 6
    },
    {
      "epoch": 0.0020426028596440037,
      "grad_norm": 1.3174978494644165,
      "learning_rate": 9.045084971874738e-05,
      "loss": 1.2575,
      "step": 7
    },
    {
      "epoch": 0.0023344032681645753,
      "grad_norm": 0.9882346391677856,
      "learning_rate": 7.938926261462366e-05,
      "loss": 1.1226,
      "step": 8
    },
    {
      "epoch": 0.0023344032681645753,
      "eval_loss": 1.2546484470367432,
      "eval_runtime": 161.6417,
      "eval_samples_per_second": 8.927,
      "eval_steps_per_second": 4.467,
      "step": 8
    },
    {
      "epoch": 0.0026262036766851473,
      "grad_norm": 1.0995094776153564,
      "learning_rate": 6.545084971874738e-05,
      "loss": 1.2479,
      "step": 9
    },
    {
      "epoch": 0.0029180040852057193,
      "grad_norm": 0.9760543704032898,
      "learning_rate": 5e-05,
      "loss": 1.2593,
      "step": 10
    },
    {
      "epoch": 0.0029180040852057193,
      "eval_loss": 1.2201802730560303,
      "eval_runtime": 162.0614,
      "eval_samples_per_second": 8.904,
      "eval_steps_per_second": 4.455,
      "step": 10
    },
    {
      "epoch": 0.0032098044937262913,
      "grad_norm": 1.1419090032577515,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 1.4679,
      "step": 11
    },
    {
      "epoch": 0.0035016049022468633,
      "grad_norm": 1.2634228467941284,
      "learning_rate": 2.061073738537635e-05,
      "loss": 1.243,
      "step": 12
    },
    {
      "epoch": 0.0035016049022468633,
      "eval_loss": 1.194502353668213,
      "eval_runtime": 161.9542,
      "eval_samples_per_second": 8.91,
      "eval_steps_per_second": 4.458,
      "step": 12
    },
    {
      "epoch": 0.003793405310767435,
      "grad_norm": 0.995550274848938,
      "learning_rate": 9.549150281252633e-06,
      "loss": 0.9366,
      "step": 13
    },
    {
      "epoch": 0.004085205719288007,
      "grad_norm": 1.1408365964889526,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 1.3171,
      "step": 14
    },
    {
      "epoch": 0.004085205719288007,
      "eval_loss": 1.18353271484375,
      "eval_runtime": 161.9389,
      "eval_samples_per_second": 8.911,
      "eval_steps_per_second": 4.458,
      "step": 14
    },
    {
      "epoch": 0.004377006127808579,
      "grad_norm": 0.9548996090888977,
      "learning_rate": 0.0,
      "loss": 0.974,
      "step": 15
    }
  ],
  "logging_steps": 1,
  "max_steps": 15,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0912429064060928e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}