tiagoblima/t5_large-qg-ap-nopeft / trainer_state.json
Commit f9b12bb (End of training)
{
"best_metric": 1.1993927955627441,
"best_model_checkpoint": "/temp/t5_large-qg-ap-nopeft/checkpoint-2424",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 4040,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.12,
"learning_rate": 9.752475247524753e-05,
"loss": 1.9922,
"step": 100
},
{
"epoch": 0.25,
"learning_rate": 9.504950495049505e-05,
"loss": 1.3295,
"step": 200
},
{
"epoch": 0.37,
"learning_rate": 9.257425742574259e-05,
"loss": 1.2696,
"step": 300
},
{
"epoch": 0.5,
"learning_rate": 9.009900990099011e-05,
"loss": 1.2353,
"step": 400
},
{
"epoch": 0.62,
"learning_rate": 8.762376237623763e-05,
"loss": 1.2092,
"step": 500
},
{
"epoch": 0.74,
"learning_rate": 8.514851485148515e-05,
"loss": 1.1913,
"step": 600
},
{
"epoch": 0.87,
"learning_rate": 8.267326732673268e-05,
"loss": 1.1831,
"step": 700
},
{
"epoch": 0.99,
"learning_rate": 8.019801980198021e-05,
"loss": 1.1714,
"step": 800
},
{
"epoch": 1.0,
"eval_loss": 1.2384811639785767,
"eval_runtime": 37.8804,
"eval_samples_per_second": 94.64,
"eval_steps_per_second": 5.94,
"step": 808
},
{
"epoch": 1.11,
"learning_rate": 7.772277227722773e-05,
"loss": 1.0963,
"step": 900
},
{
"epoch": 1.24,
"learning_rate": 7.524752475247526e-05,
"loss": 1.0913,
"step": 1000
},
{
"epoch": 1.36,
"learning_rate": 7.277227722772278e-05,
"loss": 1.0815,
"step": 1100
},
{
"epoch": 1.49,
"learning_rate": 7.02970297029703e-05,
"loss": 1.0873,
"step": 1200
},
{
"epoch": 1.61,
"learning_rate": 6.782178217821783e-05,
"loss": 1.0841,
"step": 1300
},
{
"epoch": 1.73,
"learning_rate": 6.534653465346535e-05,
"loss": 1.0671,
"step": 1400
},
{
"epoch": 1.86,
"learning_rate": 6.287128712871287e-05,
"loss": 1.0734,
"step": 1500
},
{
"epoch": 1.98,
"learning_rate": 6.03960396039604e-05,
"loss": 1.0782,
"step": 1600
},
{
"epoch": 2.0,
"eval_loss": 1.2065564393997192,
"eval_runtime": 37.776,
"eval_samples_per_second": 94.901,
"eval_steps_per_second": 5.956,
"step": 1616
},
{
"epoch": 2.1,
"learning_rate": 5.792079207920792e-05,
"loss": 1.0134,
"step": 1700
},
{
"epoch": 2.23,
"learning_rate": 5.544554455445545e-05,
"loss": 1.0127,
"step": 1800
},
{
"epoch": 2.35,
"learning_rate": 5.2970297029702974e-05,
"loss": 0.9995,
"step": 1900
},
{
"epoch": 2.48,
"learning_rate": 5.0495049504950497e-05,
"loss": 1.004,
"step": 2000
},
{
"epoch": 2.6,
"learning_rate": 4.801980198019802e-05,
"loss": 1.0019,
"step": 2100
},
{
"epoch": 2.72,
"learning_rate": 4.554455445544555e-05,
"loss": 0.9972,
"step": 2200
},
{
"epoch": 2.85,
"learning_rate": 4.306930693069307e-05,
"loss": 1.0135,
"step": 2300
},
{
"epoch": 2.97,
"learning_rate": 4.05940594059406e-05,
"loss": 1.0064,
"step": 2400
},
{
"epoch": 3.0,
"eval_loss": 1.1993927955627441,
"eval_runtime": 37.796,
"eval_samples_per_second": 94.851,
"eval_steps_per_second": 5.953,
"step": 2424
},
{
"epoch": 3.09,
"learning_rate": 3.811881188118812e-05,
"loss": 0.9637,
"step": 2500
},
{
"epoch": 3.22,
"learning_rate": 3.5643564356435645e-05,
"loss": 0.9496,
"step": 2600
},
{
"epoch": 3.34,
"learning_rate": 3.3168316831683175e-05,
"loss": 0.9621,
"step": 2700
},
{
"epoch": 3.47,
"learning_rate": 3.06930693069307e-05,
"loss": 0.9592,
"step": 2800
},
{
"epoch": 3.59,
"learning_rate": 2.8217821782178216e-05,
"loss": 0.9571,
"step": 2900
},
{
"epoch": 3.71,
"learning_rate": 2.5742574257425746e-05,
"loss": 0.94,
"step": 3000
},
{
"epoch": 3.84,
"learning_rate": 2.326732673267327e-05,
"loss": 0.9553,
"step": 3100
},
{
"epoch": 3.96,
"learning_rate": 2.079207920792079e-05,
"loss": 0.9555,
"step": 3200
},
{
"epoch": 4.0,
"eval_loss": 1.2030982971191406,
"eval_runtime": 37.7841,
"eval_samples_per_second": 94.881,
"eval_steps_per_second": 5.955,
"step": 3232
},
{
"epoch": 4.08,
"learning_rate": 1.8316831683168317e-05,
"loss": 0.939,
"step": 3300
},
{
"epoch": 4.21,
"learning_rate": 1.5841584158415843e-05,
"loss": 0.9216,
"step": 3400
},
{
"epoch": 4.33,
"learning_rate": 1.3366336633663367e-05,
"loss": 0.9303,
"step": 3500
},
{
"epoch": 4.46,
"learning_rate": 1.0891089108910891e-05,
"loss": 0.9122,
"step": 3600
},
{
"epoch": 4.58,
"learning_rate": 8.415841584158417e-06,
"loss": 0.9262,
"step": 3700
},
{
"epoch": 4.7,
"learning_rate": 5.940594059405941e-06,
"loss": 0.9135,
"step": 3800
},
{
"epoch": 4.83,
"learning_rate": 3.4653465346534657e-06,
"loss": 0.929,
"step": 3900
},
{
"epoch": 4.95,
"learning_rate": 9.900990099009902e-07,
"loss": 0.9261,
"step": 4000
},
{
"epoch": 5.0,
"eval_loss": 1.2047841548919678,
"eval_runtime": 37.8264,
"eval_samples_per_second": 94.775,
"eval_steps_per_second": 5.948,
"step": 4040
},
{
"epoch": 5.0,
"step": 4040,
"total_flos": 5.5970802696192e+17,
"train_loss": 1.0571302187324751,
"train_runtime": 9858.1834,
"train_samples_per_second": 26.224,
"train_steps_per_second": 0.41
}
],
"logging_steps": 100,
"max_steps": 4040,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 5.5970802696192e+17,
"trial_name": null,
"trial_params": null
}
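
The file above is the trainer_state.json that the Hugging Face Transformers Trainer writes during a run: best_metric and best_model_checkpoint record the lowest eval_loss (1.1994 at checkpoint-2424, epoch 3), and log_history interleaves training-loss entries every 100 steps with one evaluation entry per epoch. The Python sketch below shows one way to read those fields and plot the curves; the local file path and the use of matplotlib are assumptions for illustration, not part of the original run.

import json
import matplotlib.pyplot as plt

# Load the trainer state (path is an assumption; point it at your local copy).
with open("trainer_state.json") as f:
    state = json.load(f)

print("best metric (eval_loss):", state["best_metric"])     # 1.1993927955627441
print("best checkpoint:", state["best_model_checkpoint"])   # .../checkpoint-2424

# log_history mixes per-step training logs and per-epoch evaluation logs;
# split them by which loss key each entry carries.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

plt.plot([e["step"] for e in train_logs], [e["loss"] for e in train_logs], label="train loss")
plt.plot([e["step"] for e in eval_logs], [e["eval_loss"] for e in eval_logs], marker="o", label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.show()

Splitting on the presence of "loss" versus "eval_loss" works here because training entries carry only "loss" and evaluation entries only "eval_loss"; the final summary entry (with "train_loss" and "total_flos") matches neither key and is ignored.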