{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.7272727272727275,
"eval_steps": 13,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01818181818181818,
"eval_loss": 0.7239479422569275,
"eval_runtime": 9.6694,
"eval_samples_per_second": 9.618,
"eval_steps_per_second": 1.241,
"step": 1
},
{
"epoch": 0.05454545454545454,
"grad_norm": 1.006616234779358,
"learning_rate": 3e-05,
"loss": 0.7497,
"step": 3
},
{
"epoch": 0.10909090909090909,
"grad_norm": 0.7354311347007751,
"learning_rate": 6e-05,
"loss": 0.6608,
"step": 6
},
{
"epoch": 0.16363636363636364,
"grad_norm": 0.5442305207252502,
"learning_rate": 9e-05,
"loss": 0.5711,
"step": 9
},
{
"epoch": 0.21818181818181817,
"grad_norm": 0.4888646900653839,
"learning_rate": 9.994965332706573e-05,
"loss": 0.6065,
"step": 12
},
{
"epoch": 0.23636363636363636,
"eval_loss": 0.5811941027641296,
"eval_runtime": 9.3174,
"eval_samples_per_second": 9.981,
"eval_steps_per_second": 1.288,
"step": 13
},
{
"epoch": 0.2727272727272727,
"grad_norm": 0.43622222542762756,
"learning_rate": 9.968561049466214e-05,
"loss": 0.545,
"step": 15
},
{
"epoch": 0.32727272727272727,
"grad_norm": 0.4785720407962799,
"learning_rate": 9.919647942993148e-05,
"loss": 0.552,
"step": 18
},
{
"epoch": 0.38181818181818183,
"grad_norm": 0.5042471289634705,
"learning_rate": 9.848447601883435e-05,
"loss": 0.5068,
"step": 21
},
{
"epoch": 0.43636363636363634,
"grad_norm": 0.5013201832771301,
"learning_rate": 9.755282581475769e-05,
"loss": 0.5388,
"step": 24
},
{
"epoch": 0.4727272727272727,
"eval_loss": 0.4993453621864319,
"eval_runtime": 9.3279,
"eval_samples_per_second": 9.97,
"eval_steps_per_second": 1.286,
"step": 26
},
{
"epoch": 0.4909090909090909,
"grad_norm": 0.45883774757385254,
"learning_rate": 9.640574942595196e-05,
"loss": 0.4867,
"step": 27
},
{
"epoch": 0.5454545454545454,
"grad_norm": 0.546485185623169,
"learning_rate": 9.504844339512095e-05,
"loss": 0.4773,
"step": 30
},
{
"epoch": 0.6,
"grad_norm": 0.5516665577888489,
"learning_rate": 9.348705665778478e-05,
"loss": 0.4846,
"step": 33
},
{
"epoch": 0.6545454545454545,
"grad_norm": 0.5257235765457153,
"learning_rate": 9.172866268606513e-05,
"loss": 0.4445,
"step": 36
},
{
"epoch": 0.7090909090909091,
"grad_norm": 0.5515545010566711,
"learning_rate": 8.978122744408906e-05,
"loss": 0.4303,
"step": 39
},
{
"epoch": 0.7090909090909091,
"eval_loss": 0.41172003746032715,
"eval_runtime": 9.3482,
"eval_samples_per_second": 9.948,
"eval_steps_per_second": 1.284,
"step": 39
},
{
"epoch": 0.7636363636363637,
"grad_norm": 0.6002129912376404,
"learning_rate": 8.765357330018056e-05,
"loss": 0.4047,
"step": 42
},
{
"epoch": 0.8181818181818182,
"grad_norm": 0.6026033163070679,
"learning_rate": 8.535533905932738e-05,
"loss": 0.3728,
"step": 45
},
{
"epoch": 0.8727272727272727,
"grad_norm": 0.7240566611289978,
"learning_rate": 8.289693629698564e-05,
"loss": 0.3553,
"step": 48
},
{
"epoch": 0.9272727272727272,
"grad_norm": 0.6206062436103821,
"learning_rate": 8.0289502192041e-05,
"loss": 0.3555,
"step": 51
},
{
"epoch": 0.9454545454545454,
"eval_loss": 0.33931291103363037,
"eval_runtime": 9.3313,
"eval_samples_per_second": 9.966,
"eval_steps_per_second": 1.286,
"step": 52
},
{
"epoch": 0.9818181818181818,
"grad_norm": 0.6793412566184998,
"learning_rate": 7.754484907260513e-05,
"loss": 0.3225,
"step": 54
},
{
"epoch": 1.0363636363636364,
"grad_norm": 0.653759777545929,
"learning_rate": 7.467541090321735e-05,
"loss": 0.2436,
"step": 57
},
{
"epoch": 1.0909090909090908,
"grad_norm": 0.8996871113777161,
"learning_rate": 7.169418695587791e-05,
"loss": 0.2055,
"step": 60
},
{
"epoch": 1.1454545454545455,
"grad_norm": 0.5913653373718262,
"learning_rate": 6.861468292009727e-05,
"loss": 0.1856,
"step": 63
},
{
"epoch": 1.1818181818181819,
"eval_loss": 0.2821066677570343,
"eval_runtime": 9.338,
"eval_samples_per_second": 9.959,
"eval_steps_per_second": 1.285,
"step": 65
},
{
"epoch": 1.2,
"grad_norm": 0.6527738571166992,
"learning_rate": 6.545084971874738e-05,
"loss": 0.179,
"step": 66
},
{
"epoch": 1.2545454545454544,
"grad_norm": 0.7428808808326721,
"learning_rate": 6.22170203068947e-05,
"loss": 0.174,
"step": 69
},
{
"epoch": 1.309090909090909,
"grad_norm": 0.6538995504379272,
"learning_rate": 5.8927844739931834e-05,
"loss": 0.16,
"step": 72
},
{
"epoch": 1.3636363636363638,
"grad_norm": 0.6126785278320312,
"learning_rate": 5.559822380516539e-05,
"loss": 0.1336,
"step": 75
},
{
"epoch": 1.4181818181818182,
"grad_norm": 0.718612551689148,
"learning_rate": 5.2243241517525754e-05,
"loss": 0.132,
"step": 78
},
{
"epoch": 1.4181818181818182,
"eval_loss": 0.23752006888389587,
"eval_runtime": 9.3452,
"eval_samples_per_second": 9.952,
"eval_steps_per_second": 1.284,
"step": 78
},
{
"epoch": 1.4727272727272727,
"grad_norm": 0.659690260887146,
"learning_rate": 4.887809678520976e-05,
"loss": 0.1273,
"step": 81
},
{
"epoch": 1.5272727272727273,
"grad_norm": 0.5911349654197693,
"learning_rate": 4.551803455482833e-05,
"loss": 0.1286,
"step": 84
},
{
"epoch": 1.5818181818181818,
"grad_norm": 0.6244898438453674,
"learning_rate": 4.2178276747988446e-05,
"loss": 0.1135,
"step": 87
},
{
"epoch": 1.6363636363636362,
"grad_norm": 0.6658645272254944,
"learning_rate": 3.887395330218429e-05,
"loss": 0.1021,
"step": 90
},
{
"epoch": 1.6545454545454545,
"eval_loss": 0.19473232328891754,
"eval_runtime": 9.334,
"eval_samples_per_second": 9.964,
"eval_steps_per_second": 1.286,
"step": 91
},
{
"epoch": 1.690909090909091,
"grad_norm": 0.5781804323196411,
"learning_rate": 3.562003362839914e-05,
"loss": 0.1118,
"step": 93
},
{
"epoch": 1.7454545454545456,
"grad_norm": 0.557227373123169,
"learning_rate": 3.243125879593286e-05,
"loss": 0.0887,
"step": 96
},
{
"epoch": 1.8,
"grad_norm": 0.6238749623298645,
"learning_rate": 2.932207475167398e-05,
"loss": 0.0828,
"step": 99
},
{
"epoch": 1.8545454545454545,
"grad_norm": 0.6486019492149353,
"learning_rate": 2.630656687635007e-05,
"loss": 0.1015,
"step": 102
},
{
"epoch": 1.8909090909090909,
"eval_loss": 0.15355004370212555,
"eval_runtime": 9.3338,
"eval_samples_per_second": 9.964,
"eval_steps_per_second": 1.286,
"step": 104
},
{
"epoch": 1.9090909090909092,
"grad_norm": 0.5612895488739014,
"learning_rate": 2.3398396174233178e-05,
"loss": 0.0825,
"step": 105
},
{
"epoch": 1.9636363636363636,
"grad_norm": 0.5168143510818481,
"learning_rate": 2.061073738537635e-05,
"loss": 0.0798,
"step": 108
},
{
"epoch": 2.018181818181818,
"grad_norm": 0.3419465124607086,
"learning_rate": 1.7956219300748793e-05,
"loss": 0.0638,
"step": 111
},
{
"epoch": 2.0727272727272728,
"grad_norm": 0.3355216085910797,
"learning_rate": 1.544686755065677e-05,
"loss": 0.0374,
"step": 114
},
{
"epoch": 2.1272727272727274,
"grad_norm": 0.513612687587738,
"learning_rate": 1.3094050125632972e-05,
"loss": 0.0367,
"step": 117
},
{
"epoch": 2.1272727272727274,
"eval_loss": 0.1390739530324936,
"eval_runtime": 9.3397,
"eval_samples_per_second": 9.958,
"eval_steps_per_second": 1.285,
"step": 117
},
{
"epoch": 2.1818181818181817,
"grad_norm": 0.3988870680332184,
"learning_rate": 1.090842587659851e-05,
"loss": 0.0385,
"step": 120
},
{
"epoch": 2.2363636363636363,
"grad_norm": 0.3185819983482361,
"learning_rate": 8.899896227604509e-06,
"loss": 0.0351,
"step": 123
},
{
"epoch": 2.290909090909091,
"grad_norm": 0.3625490665435791,
"learning_rate": 7.077560319906695e-06,
"loss": 0.0318,
"step": 126
},
{
"epoch": 2.3454545454545457,
"grad_norm": 0.31989988684654236,
"learning_rate": 5.449673790581611e-06,
"loss": 0.0339,
"step": 129
},
{
"epoch": 2.3636363636363638,
"eval_loss": 0.13768097758293152,
"eval_runtime": 9.3375,
"eval_samples_per_second": 9.96,
"eval_steps_per_second": 1.285,
"step": 130
},
{
"epoch": 2.4,
"grad_norm": 0.3828240931034088,
"learning_rate": 4.023611372427471e-06,
"loss": 0.0376,
"step": 132
},
{
"epoch": 2.4545454545454546,
"grad_norm": 0.315852552652359,
"learning_rate": 2.8058334845816213e-06,
"loss": 0.0392,
"step": 135
},
{
"epoch": 2.509090909090909,
"grad_norm": 0.43011197447776794,
"learning_rate": 1.8018569652073381e-06,
"loss": 0.0369,
"step": 138
},
{
"epoch": 2.5636363636363635,
"grad_norm": 0.5211729407310486,
"learning_rate": 1.016230078838226e-06,
"loss": 0.0382,
"step": 141
},
{
"epoch": 2.6,
"eval_loss": 0.13515090942382812,
"eval_runtime": 9.3396,
"eval_samples_per_second": 9.958,
"eval_steps_per_second": 1.285,
"step": 143
},
{
"epoch": 2.618181818181818,
"grad_norm": 0.3637115955352783,
"learning_rate": 4.52511911603265e-07,
"loss": 0.036,
"step": 144
},
{
"epoch": 2.672727272727273,
"grad_norm": 0.41367557644844055,
"learning_rate": 1.132562476771959e-07,
"loss": 0.0382,
"step": 147
},
{
"epoch": 2.7272727272727275,
"grad_norm": 0.5478535294532776,
"learning_rate": 0.0,
"loss": 0.0333,
"step": 150
}
],
"logging_steps": 3,
"max_steps": 150,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 13,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.389441447244595e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}