t5_large-qg-aap / trainer_state.json
{
"best_metric": 5.590092658996582,
"best_model_checkpoint": "/temp/t5_large-qg-aap/checkpoint-4040",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 4040,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.12,
"learning_rate": 0.004876237623762376,
"loss": 8.2771,
"step": 100
},
{
"epoch": 0.25,
"learning_rate": 0.004752475247524752,
"loss": 7.8175,
"step": 200
},
{
"epoch": 0.37,
"learning_rate": 0.004628712871287129,
"loss": 7.4476,
"step": 300
},
{
"epoch": 0.5,
"learning_rate": 0.0045049504950495055,
"loss": 7.0782,
"step": 400
},
{
"epoch": 0.62,
"learning_rate": 0.004381188118811882,
"loss": 6.755,
"step": 500
},
{
"epoch": 0.74,
"learning_rate": 0.004257425742574258,
"loss": 6.5292,
"step": 600
},
{
"epoch": 0.87,
"learning_rate": 0.004133663366336634,
"loss": 6.3201,
"step": 700
},
{
"epoch": 0.99,
"learning_rate": 0.00400990099009901,
"loss": 6.15,
"step": 800
},
{
"epoch": 1.0,
"eval_loss": 7.336104869842529,
"eval_runtime": 526.3079,
"eval_samples_per_second": 16.851,
"eval_steps_per_second": 4.214,
"step": 808
},
{
"epoch": 1.11,
"learning_rate": 0.0038861386138613866,
"loss": 6.0156,
"step": 900
},
{
"epoch": 1.24,
"learning_rate": 0.0037623762376237627,
"loss": 5.8846,
"step": 1000
},
{
"epoch": 1.36,
"learning_rate": 0.0036386138613861384,
"loss": 5.7566,
"step": 1100
},
{
"epoch": 1.49,
"learning_rate": 0.0035148514851485145,
"loss": 5.6981,
"step": 1200
},
{
"epoch": 1.61,
"learning_rate": 0.0033910891089108915,
"loss": 5.6151,
"step": 1300
},
{
"epoch": 1.73,
"learning_rate": 0.0032673267326732676,
"loss": 5.4776,
"step": 1400
},
{
"epoch": 1.86,
"learning_rate": 0.0031435643564356438,
"loss": 5.3873,
"step": 1500
},
{
"epoch": 1.98,
"learning_rate": 0.00301980198019802,
"loss": 5.3335,
"step": 1600
},
{
"epoch": 2.0,
"eval_loss": 6.409176349639893,
"eval_runtime": 525.7688,
"eval_samples_per_second": 16.869,
"eval_steps_per_second": 4.219,
"step": 1616
},
{
"epoch": 2.1,
"learning_rate": 0.002896039603960396,
"loss": 5.2361,
"step": 1700
},
{
"epoch": 2.23,
"learning_rate": 0.0027722772277227726,
"loss": 5.1746,
"step": 1800
},
{
"epoch": 2.35,
"learning_rate": 0.0026485148514851487,
"loss": 5.1069,
"step": 1900
},
{
"epoch": 2.48,
"learning_rate": 0.002524752475247525,
"loss": 5.07,
"step": 2000
},
{
"epoch": 2.6,
"learning_rate": 0.002400990099009901,
"loss": 5.0201,
"step": 2100
},
{
"epoch": 2.72,
"learning_rate": 0.0022772277227722775,
"loss": 4.9693,
"step": 2200
},
{
"epoch": 2.85,
"learning_rate": 0.0021534653465346536,
"loss": 4.9579,
"step": 2300
},
{
"epoch": 2.97,
"learning_rate": 0.0020297029702970298,
"loss": 4.8807,
"step": 2400
},
{
"epoch": 3.0,
"eval_loss": 5.913175582885742,
"eval_runtime": 526.104,
"eval_samples_per_second": 16.858,
"eval_steps_per_second": 4.216,
"step": 2424
},
{
"epoch": 3.09,
"learning_rate": 0.0019059405940594061,
"loss": 4.8449,
"step": 2500
},
{
"epoch": 3.22,
"learning_rate": 0.001782178217821782,
"loss": 4.7936,
"step": 2600
},
{
"epoch": 3.34,
"learning_rate": 0.0016584158415841586,
"loss": 4.7996,
"step": 2700
},
{
"epoch": 3.47,
"learning_rate": 0.0015346534653465347,
"loss": 4.7631,
"step": 2800
},
{
"epoch": 3.59,
"learning_rate": 0.0014108910891089108,
"loss": 4.7178,
"step": 2900
},
{
"epoch": 3.71,
"learning_rate": 0.0012871287128712872,
"loss": 4.6978,
"step": 3000
},
{
"epoch": 3.84,
"learning_rate": 0.0011633663366336635,
"loss": 4.6717,
"step": 3100
},
{
"epoch": 3.96,
"learning_rate": 0.0010396039603960397,
"loss": 4.6492,
"step": 3200
},
{
"epoch": 4.0,
"eval_loss": 5.6656341552734375,
"eval_runtime": 526.1669,
"eval_samples_per_second": 16.856,
"eval_steps_per_second": 4.215,
"step": 3232
},
{
"epoch": 4.08,
"learning_rate": 0.0009158415841584158,
"loss": 4.6611,
"step": 3300
},
{
"epoch": 4.21,
"learning_rate": 0.0007920792079207921,
"loss": 4.6159,
"step": 3400
},
{
"epoch": 4.33,
"learning_rate": 0.0006683168316831684,
"loss": 4.6386,
"step": 3500
},
{
"epoch": 4.46,
"learning_rate": 0.0005445544554455446,
"loss": 4.5997,
"step": 3600
},
{
"epoch": 4.58,
"learning_rate": 0.00042079207920792084,
"loss": 4.5819,
"step": 3700
},
{
"epoch": 4.7,
"learning_rate": 0.000297029702970297,
"loss": 4.5785,
"step": 3800
},
{
"epoch": 4.83,
"learning_rate": 0.00017326732673267329,
"loss": 4.588,
"step": 3900
},
{
"epoch": 4.95,
"learning_rate": 4.950495049504951e-05,
"loss": 4.591,
"step": 4000
},
{
"epoch": 5.0,
"eval_loss": 5.590092658996582,
"eval_runtime": 526.0558,
"eval_samples_per_second": 16.859,
"eval_steps_per_second": 4.216,
"step": 4040
},
{
"epoch": 5.0,
"step": 4040,
"total_flos": 4.4078196477394944e+17,
"train_loss": 5.410919136576133,
"train_runtime": 16658.5174,
"train_samples_per_second": 15.519,
"train_steps_per_second": 0.243
}
],
"logging_steps": 100,
"max_steps": 4040,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 4.4078196477394944e+17,
"trial_name": null,
"trial_params": null
}
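
For reference, a minimal sketch (assuming the file above is saved locally as trainer_state.json; the path and variable names are illustrative, not part of the original) of how the log_history entries could be split into training-loss and eval-loss series with plain Python:

import json

# Load the trainer state written by the Hugging Face Trainer.
# The file path here is an assumption; point it at your own copy.
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Entries carrying a "loss" key are the per-100-step training logs;
# entries carrying an "eval_loss" key are the per-epoch evaluation logs.
train_logs = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_logs = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"best eval_loss: {state['best_metric']} at {state['best_model_checkpoint']}")
for step, loss in eval_logs:
    print(f"step {step:>5}: eval_loss = {loss:.4f}")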