t5_base-qg-ap-nopeft / trainer_state.json
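The JSON below is the Hugging Face `Trainer` state recorded during fine-tuning: the best checkpoint, the per-step training log, and one `eval_loss` entry per epoch. A minimal sketch of how one might inspect it with only the standard library, assuming the file has been downloaded locally as `trainer_state.json` (the path is an assumption, not part of this repo):

```python
import json

# Load the trainer state dumped by Hugging Face Trainer (local path assumed).
with open("trainer_state.json") as f:
    state = json.load(f)

# Best checkpoint and its eval metric, as recorded by the Trainer.
print("best checkpoint:", state["best_model_checkpoint"])
print("best eval_loss :", state["best_metric"])

# Evaluation records are the log_history entries that carry "eval_loss".
for record in state["log_history"]:
    if "eval_loss" in record:
        print(f'epoch {record["epoch"]:.0f}: '
              f'eval_loss={record["eval_loss"]:.4f} (step {record["step"]})')
```

Run against this file, the loop would print one line per epoch (steps 3231, 6463, 9694, 12926, 16155), with the lowest eval loss at epoch 3, matching `best_model_checkpoint` above.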
{
"best_metric": 1.1979700326919556,
"best_model_checkpoint": "/temp/t5_base-qg-ap-nopeft/checkpoint-9694",
"epoch": 4.9992263654649545,
"eval_steps": 500,
"global_step": 16155,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 9.938099659548128e-05,
"loss": 2.1468,
"step": 100
},
{
"epoch": 0.06,
"learning_rate": 9.876199319096255e-05,
"loss": 1.4194,
"step": 200
},
{
"epoch": 0.09,
"learning_rate": 9.814298978644383e-05,
"loss": 1.3613,
"step": 300
},
{
"epoch": 0.12,
"learning_rate": 9.752398638192511e-05,
"loss": 1.3139,
"step": 400
},
{
"epoch": 0.15,
"learning_rate": 9.690498297740637e-05,
"loss": 1.3216,
"step": 500
},
{
"epoch": 0.19,
"learning_rate": 9.628597957288765e-05,
"loss": 1.286,
"step": 600
},
{
"epoch": 0.22,
"learning_rate": 9.566697616836893e-05,
"loss": 1.2877,
"step": 700
},
{
"epoch": 0.25,
"learning_rate": 9.50479727638502e-05,
"loss": 1.246,
"step": 800
},
{
"epoch": 0.28,
"learning_rate": 9.442896935933148e-05,
"loss": 1.2309,
"step": 900
},
{
"epoch": 0.31,
"learning_rate": 9.380996595481276e-05,
"loss": 1.2611,
"step": 1000
},
{
"epoch": 0.34,
"learning_rate": 9.319096255029403e-05,
"loss": 1.247,
"step": 1100
},
{
"epoch": 0.37,
"learning_rate": 9.257195914577531e-05,
"loss": 1.2398,
"step": 1200
},
{
"epoch": 0.4,
"learning_rate": 9.195295574125658e-05,
"loss": 1.2403,
"step": 1300
},
{
"epoch": 0.43,
"learning_rate": 9.133395233673786e-05,
"loss": 1.2324,
"step": 1400
},
{
"epoch": 0.46,
"learning_rate": 9.071494893221914e-05,
"loss": 1.2069,
"step": 1500
},
{
"epoch": 0.5,
"learning_rate": 9.00959455277004e-05,
"loss": 1.2273,
"step": 1600
},
{
"epoch": 0.53,
"learning_rate": 8.947694212318168e-05,
"loss": 1.2199,
"step": 1700
},
{
"epoch": 0.56,
"learning_rate": 8.885793871866296e-05,
"loss": 1.1963,
"step": 1800
},
{
"epoch": 0.59,
"learning_rate": 8.823893531414423e-05,
"loss": 1.2081,
"step": 1900
},
{
"epoch": 0.62,
"learning_rate": 8.76199319096255e-05,
"loss": 1.1895,
"step": 2000
},
{
"epoch": 0.65,
"learning_rate": 8.700092850510679e-05,
"loss": 1.203,
"step": 2100
},
{
"epoch": 0.68,
"learning_rate": 8.638192510058805e-05,
"loss": 1.1738,
"step": 2200
},
{
"epoch": 0.71,
"learning_rate": 8.576292169606933e-05,
"loss": 1.187,
"step": 2300
},
{
"epoch": 0.74,
"learning_rate": 8.514391829155061e-05,
"loss": 1.1857,
"step": 2400
},
{
"epoch": 0.77,
"learning_rate": 8.452491488703188e-05,
"loss": 1.169,
"step": 2500
},
{
"epoch": 0.8,
"learning_rate": 8.390591148251315e-05,
"loss": 1.2174,
"step": 2600
},
{
"epoch": 0.84,
"learning_rate": 8.328690807799443e-05,
"loss": 1.1737,
"step": 2700
},
{
"epoch": 0.87,
"learning_rate": 8.26679046734757e-05,
"loss": 1.174,
"step": 2800
},
{
"epoch": 0.9,
"learning_rate": 8.204890126895698e-05,
"loss": 1.1709,
"step": 2900
},
{
"epoch": 0.93,
"learning_rate": 8.142989786443826e-05,
"loss": 1.1882,
"step": 3000
},
{
"epoch": 0.96,
"learning_rate": 8.081089445991952e-05,
"loss": 1.1793,
"step": 3100
},
{
"epoch": 0.99,
"learning_rate": 8.019189105540082e-05,
"loss": 1.1634,
"step": 3200
},
{
"epoch": 1.0,
"eval_loss": 1.224627137184143,
"eval_runtime": 100.7601,
"eval_samples_per_second": 35.58,
"eval_steps_per_second": 8.902,
"step": 3231
},
{
"epoch": 1.02,
"learning_rate": 7.95728876508821e-05,
"loss": 1.101,
"step": 3300
},
{
"epoch": 1.05,
"learning_rate": 7.895388424636336e-05,
"loss": 1.0723,
"step": 3400
},
{
"epoch": 1.08,
"learning_rate": 7.833488084184464e-05,
"loss": 1.0846,
"step": 3500
},
{
"epoch": 1.11,
"learning_rate": 7.771587743732592e-05,
"loss": 1.0802,
"step": 3600
},
{
"epoch": 1.14,
"learning_rate": 7.709687403280718e-05,
"loss": 1.0806,
"step": 3700
},
{
"epoch": 1.18,
"learning_rate": 7.647787062828846e-05,
"loss": 1.0844,
"step": 3800
},
{
"epoch": 1.21,
"learning_rate": 7.585886722376974e-05,
"loss": 1.0838,
"step": 3900
},
{
"epoch": 1.24,
"learning_rate": 7.523986381925101e-05,
"loss": 1.0977,
"step": 4000
},
{
"epoch": 1.27,
"learning_rate": 7.462086041473229e-05,
"loss": 1.0832,
"step": 4100
},
{
"epoch": 1.3,
"learning_rate": 7.400185701021357e-05,
"loss": 1.074,
"step": 4200
},
{
"epoch": 1.33,
"learning_rate": 7.338285360569483e-05,
"loss": 1.0835,
"step": 4300
},
{
"epoch": 1.36,
"learning_rate": 7.276385020117611e-05,
"loss": 1.0725,
"step": 4400
},
{
"epoch": 1.39,
"learning_rate": 7.214484679665738e-05,
"loss": 1.0854,
"step": 4500
},
{
"epoch": 1.42,
"learning_rate": 7.152584339213866e-05,
"loss": 1.0879,
"step": 4600
},
{
"epoch": 1.45,
"learning_rate": 7.090683998761994e-05,
"loss": 1.0916,
"step": 4700
},
{
"epoch": 1.49,
"learning_rate": 7.02878365831012e-05,
"loss": 1.0758,
"step": 4800
},
{
"epoch": 1.52,
"learning_rate": 6.966883317858248e-05,
"loss": 1.0953,
"step": 4900
},
{
"epoch": 1.55,
"learning_rate": 6.904982977406376e-05,
"loss": 1.0733,
"step": 5000
},
{
"epoch": 1.58,
"learning_rate": 6.843082636954502e-05,
"loss": 1.0782,
"step": 5100
},
{
"epoch": 1.61,
"learning_rate": 6.78118229650263e-05,
"loss": 1.0785,
"step": 5200
},
{
"epoch": 1.64,
"learning_rate": 6.719281956050758e-05,
"loss": 1.0708,
"step": 5300
},
{
"epoch": 1.67,
"learning_rate": 6.657381615598886e-05,
"loss": 1.0541,
"step": 5400
},
{
"epoch": 1.7,
"learning_rate": 6.595481275147014e-05,
"loss": 1.0629,
"step": 5500
},
{
"epoch": 1.73,
"learning_rate": 6.533580934695142e-05,
"loss": 1.0707,
"step": 5600
},
{
"epoch": 1.76,
"learning_rate": 6.471680594243269e-05,
"loss": 1.077,
"step": 5700
},
{
"epoch": 1.79,
"learning_rate": 6.409780253791397e-05,
"loss": 1.0926,
"step": 5800
},
{
"epoch": 1.83,
"learning_rate": 6.347879913339524e-05,
"loss": 1.062,
"step": 5900
},
{
"epoch": 1.86,
"learning_rate": 6.285979572887651e-05,
"loss": 1.079,
"step": 6000
},
{
"epoch": 1.89,
"learning_rate": 6.224079232435779e-05,
"loss": 1.065,
"step": 6100
},
{
"epoch": 1.92,
"learning_rate": 6.162178891983907e-05,
"loss": 1.0766,
"step": 6200
},
{
"epoch": 1.95,
"learning_rate": 6.100278551532034e-05,
"loss": 1.0814,
"step": 6300
},
{
"epoch": 1.98,
"learning_rate": 6.038378211080161e-05,
"loss": 1.0645,
"step": 6400
},
{
"epoch": 2.0,
"eval_loss": 1.2034717798233032,
"eval_runtime": 100.7994,
"eval_samples_per_second": 35.566,
"eval_steps_per_second": 8.899,
"step": 6463
},
{
"epoch": 2.01,
"learning_rate": 5.9764778706282886e-05,
"loss": 1.0357,
"step": 6500
},
{
"epoch": 2.04,
"learning_rate": 5.914577530176416e-05,
"loss": 0.9951,
"step": 6600
},
{
"epoch": 2.07,
"learning_rate": 5.852677189724544e-05,
"loss": 0.9866,
"step": 6700
},
{
"epoch": 2.1,
"learning_rate": 5.790776849272671e-05,
"loss": 0.9925,
"step": 6800
},
{
"epoch": 2.14,
"learning_rate": 5.728876508820798e-05,
"loss": 1.006,
"step": 6900
},
{
"epoch": 2.17,
"learning_rate": 5.666976168368926e-05,
"loss": 1.0164,
"step": 7000
},
{
"epoch": 2.2,
"learning_rate": 5.6050758279170534e-05,
"loss": 1.0098,
"step": 7100
},
{
"epoch": 2.23,
"learning_rate": 5.5431754874651806e-05,
"loss": 0.9917,
"step": 7200
},
{
"epoch": 2.26,
"learning_rate": 5.4812751470133085e-05,
"loss": 0.9891,
"step": 7300
},
{
"epoch": 2.29,
"learning_rate": 5.419374806561436e-05,
"loss": 0.9932,
"step": 7400
},
{
"epoch": 2.32,
"learning_rate": 5.357474466109564e-05,
"loss": 0.9881,
"step": 7500
},
{
"epoch": 2.35,
"learning_rate": 5.2955741256576916e-05,
"loss": 1.0001,
"step": 7600
},
{
"epoch": 2.38,
"learning_rate": 5.2336737852058195e-05,
"loss": 0.9836,
"step": 7700
},
{
"epoch": 2.41,
"learning_rate": 5.171773444753947e-05,
"loss": 0.9965,
"step": 7800
},
{
"epoch": 2.44,
"learning_rate": 5.109873104302074e-05,
"loss": 1.0102,
"step": 7900
},
{
"epoch": 2.48,
"learning_rate": 5.047972763850202e-05,
"loss": 1.0023,
"step": 8000
},
{
"epoch": 2.51,
"learning_rate": 4.986072423398329e-05,
"loss": 1.0118,
"step": 8100
},
{
"epoch": 2.54,
"learning_rate": 4.9241720829464564e-05,
"loss": 0.9785,
"step": 8200
},
{
"epoch": 2.57,
"learning_rate": 4.862271742494584e-05,
"loss": 1.0105,
"step": 8300
},
{
"epoch": 2.6,
"learning_rate": 4.8003714020427115e-05,
"loss": 0.998,
"step": 8400
},
{
"epoch": 2.63,
"learning_rate": 4.738471061590839e-05,
"loss": 0.9986,
"step": 8500
},
{
"epoch": 2.66,
"learning_rate": 4.676570721138967e-05,
"loss": 0.9938,
"step": 8600
},
{
"epoch": 2.69,
"learning_rate": 4.614670380687094e-05,
"loss": 0.9939,
"step": 8700
},
{
"epoch": 2.72,
"learning_rate": 4.552770040235221e-05,
"loss": 0.9846,
"step": 8800
},
{
"epoch": 2.75,
"learning_rate": 4.490869699783349e-05,
"loss": 1.0124,
"step": 8900
},
{
"epoch": 2.79,
"learning_rate": 4.428969359331476e-05,
"loss": 0.9964,
"step": 9000
},
{
"epoch": 2.82,
"learning_rate": 4.367069018879604e-05,
"loss": 1.0029,
"step": 9100
},
{
"epoch": 2.85,
"learning_rate": 4.3051686784277315e-05,
"loss": 1.0148,
"step": 9200
},
{
"epoch": 2.88,
"learning_rate": 4.2432683379758594e-05,
"loss": 1.0039,
"step": 9300
},
{
"epoch": 2.91,
"learning_rate": 4.1813679975239866e-05,
"loss": 0.9917,
"step": 9400
},
{
"epoch": 2.94,
"learning_rate": 4.119467657072114e-05,
"loss": 1.0066,
"step": 9500
},
{
"epoch": 2.97,
"learning_rate": 4.057567316620242e-05,
"loss": 0.991,
"step": 9600
},
{
"epoch": 3.0,
"eval_loss": 1.1979700326919556,
"eval_runtime": 100.8034,
"eval_samples_per_second": 35.564,
"eval_steps_per_second": 8.899,
"step": 9694
},
{
"epoch": 3.0,
"learning_rate": 3.995666976168369e-05,
"loss": 0.9994,
"step": 9700
},
{
"epoch": 3.03,
"learning_rate": 3.933766635716496e-05,
"loss": 0.9555,
"step": 9800
},
{
"epoch": 3.06,
"learning_rate": 3.871866295264624e-05,
"loss": 0.9364,
"step": 9900
},
{
"epoch": 3.09,
"learning_rate": 3.8099659548127514e-05,
"loss": 0.9392,
"step": 10000
},
{
"epoch": 3.13,
"learning_rate": 3.748065614360879e-05,
"loss": 0.9439,
"step": 10100
},
{
"epoch": 3.16,
"learning_rate": 3.686165273909007e-05,
"loss": 0.9474,
"step": 10200
},
{
"epoch": 3.19,
"learning_rate": 3.6242649334571345e-05,
"loss": 0.9298,
"step": 10300
},
{
"epoch": 3.22,
"learning_rate": 3.562364593005262e-05,
"loss": 0.9245,
"step": 10400
},
{
"epoch": 3.25,
"learning_rate": 3.5004642525533896e-05,
"loss": 0.9724,
"step": 10500
},
{
"epoch": 3.28,
"learning_rate": 3.438563912101517e-05,
"loss": 0.9484,
"step": 10600
},
{
"epoch": 3.31,
"learning_rate": 3.376663571649644e-05,
"loss": 0.9376,
"step": 10700
},
{
"epoch": 3.34,
"learning_rate": 3.314763231197771e-05,
"loss": 0.9549,
"step": 10800
},
{
"epoch": 3.37,
"learning_rate": 3.252862890745899e-05,
"loss": 0.957,
"step": 10900
},
{
"epoch": 3.4,
"learning_rate": 3.1909625502940265e-05,
"loss": 0.9509,
"step": 11000
},
{
"epoch": 3.43,
"learning_rate": 3.129062209842154e-05,
"loss": 0.9427,
"step": 11100
},
{
"epoch": 3.47,
"learning_rate": 3.067161869390282e-05,
"loss": 0.9598,
"step": 11200
},
{
"epoch": 3.5,
"learning_rate": 3.0052615289384095e-05,
"loss": 0.9584,
"step": 11300
},
{
"epoch": 3.53,
"learning_rate": 2.943361188486537e-05,
"loss": 0.9544,
"step": 11400
},
{
"epoch": 3.56,
"learning_rate": 2.8814608480346644e-05,
"loss": 0.9561,
"step": 11500
},
{
"epoch": 3.59,
"learning_rate": 2.819560507582792e-05,
"loss": 0.9374,
"step": 11600
},
{
"epoch": 3.62,
"learning_rate": 2.7576601671309192e-05,
"loss": 0.9236,
"step": 11700
},
{
"epoch": 3.65,
"learning_rate": 2.6957598266790468e-05,
"loss": 0.9434,
"step": 11800
},
{
"epoch": 3.68,
"learning_rate": 2.6338594862271743e-05,
"loss": 0.9363,
"step": 11900
},
{
"epoch": 3.71,
"learning_rate": 2.5719591457753016e-05,
"loss": 0.9381,
"step": 12000
},
{
"epoch": 3.74,
"learning_rate": 2.510058805323429e-05,
"loss": 0.9535,
"step": 12100
},
{
"epoch": 3.78,
"learning_rate": 2.448158464871557e-05,
"loss": 0.9577,
"step": 12200
},
{
"epoch": 3.81,
"learning_rate": 2.3862581244196843e-05,
"loss": 0.9373,
"step": 12300
},
{
"epoch": 3.84,
"learning_rate": 2.324357783967812e-05,
"loss": 0.9499,
"step": 12400
},
{
"epoch": 3.87,
"learning_rate": 2.2624574435159395e-05,
"loss": 0.9442,
"step": 12500
},
{
"epoch": 3.9,
"learning_rate": 2.200557103064067e-05,
"loss": 0.9419,
"step": 12600
},
{
"epoch": 3.93,
"learning_rate": 2.1386567626121946e-05,
"loss": 0.9509,
"step": 12700
},
{
"epoch": 3.96,
"learning_rate": 2.076756422160322e-05,
"loss": 0.9562,
"step": 12800
},
{
"epoch": 3.99,
"learning_rate": 2.0148560817084494e-05,
"loss": 0.9459,
"step": 12900
},
{
"epoch": 4.0,
"eval_loss": 1.2027287483215332,
"eval_runtime": 100.7364,
"eval_samples_per_second": 35.588,
"eval_steps_per_second": 8.904,
"step": 12926
},
{
"epoch": 4.02,
"learning_rate": 1.952955741256577e-05,
"loss": 0.9155,
"step": 13000
},
{
"epoch": 4.05,
"learning_rate": 1.8910554008047046e-05,
"loss": 0.9265,
"step": 13100
},
{
"epoch": 4.08,
"learning_rate": 1.829155060352832e-05,
"loss": 0.9157,
"step": 13200
},
{
"epoch": 4.12,
"learning_rate": 1.7672547199009594e-05,
"loss": 0.9087,
"step": 13300
},
{
"epoch": 4.15,
"learning_rate": 1.705354379449087e-05,
"loss": 0.9044,
"step": 13400
},
{
"epoch": 4.18,
"learning_rate": 1.6434540389972145e-05,
"loss": 0.9176,
"step": 13500
},
{
"epoch": 4.21,
"learning_rate": 1.581553698545342e-05,
"loss": 0.904,
"step": 13600
},
{
"epoch": 4.24,
"learning_rate": 1.5196533580934697e-05,
"loss": 0.9292,
"step": 13700
},
{
"epoch": 4.27,
"learning_rate": 1.4577530176415971e-05,
"loss": 0.9154,
"step": 13800
},
{
"epoch": 4.3,
"learning_rate": 1.3958526771897245e-05,
"loss": 0.9146,
"step": 13900
},
{
"epoch": 4.33,
"learning_rate": 1.3339523367378521e-05,
"loss": 0.9293,
"step": 14000
},
{
"epoch": 4.36,
"learning_rate": 1.2720519962859798e-05,
"loss": 0.9019,
"step": 14100
},
{
"epoch": 4.39,
"learning_rate": 1.210151655834107e-05,
"loss": 0.902,
"step": 14200
},
{
"epoch": 4.43,
"learning_rate": 1.1482513153822347e-05,
"loss": 0.9085,
"step": 14300
},
{
"epoch": 4.46,
"learning_rate": 1.086350974930362e-05,
"loss": 0.8968,
"step": 14400
},
{
"epoch": 4.49,
"learning_rate": 1.0244506344784898e-05,
"loss": 0.9086,
"step": 14500
},
{
"epoch": 4.52,
"learning_rate": 9.625502940266172e-06,
"loss": 0.9153,
"step": 14600
},
{
"epoch": 4.55,
"learning_rate": 9.006499535747446e-06,
"loss": 0.9088,
"step": 14700
},
{
"epoch": 4.58,
"learning_rate": 8.387496131228722e-06,
"loss": 0.9106,
"step": 14800
},
{
"epoch": 4.61,
"learning_rate": 7.768492726709998e-06,
"loss": 0.9198,
"step": 14900
},
{
"epoch": 4.64,
"learning_rate": 7.149489322191272e-06,
"loss": 0.8956,
"step": 15000
},
{
"epoch": 4.67,
"learning_rate": 6.530485917672548e-06,
"loss": 0.8981,
"step": 15100
},
{
"epoch": 4.7,
"learning_rate": 5.9114825131538225e-06,
"loss": 0.9142,
"step": 15200
},
{
"epoch": 4.73,
"learning_rate": 5.2924791086350974e-06,
"loss": 0.9095,
"step": 15300
},
{
"epoch": 4.77,
"learning_rate": 4.673475704116373e-06,
"loss": 0.922,
"step": 15400
},
{
"epoch": 4.8,
"learning_rate": 4.054472299597648e-06,
"loss": 0.9119,
"step": 15500
},
{
"epoch": 4.83,
"learning_rate": 3.4354688950789226e-06,
"loss": 0.9313,
"step": 15600
},
{
"epoch": 4.86,
"learning_rate": 2.816465490560198e-06,
"loss": 0.9035,
"step": 15700
},
{
"epoch": 4.89,
"learning_rate": 2.1974620860414733e-06,
"loss": 0.9041,
"step": 15800
},
{
"epoch": 4.92,
"learning_rate": 1.5784586815227482e-06,
"loss": 0.9316,
"step": 15900
},
{
"epoch": 4.95,
"learning_rate": 9.594552770040236e-07,
"loss": 0.9313,
"step": 16000
},
{
"epoch": 4.98,
"learning_rate": 3.404518724852987e-07,
"loss": 0.9191,
"step": 16100
},
{
"epoch": 5.0,
"eval_loss": 1.2074109315872192,
"eval_runtime": 100.8821,
"eval_samples_per_second": 35.537,
"eval_steps_per_second": 8.892,
"step": 16155
},
{
"epoch": 5.0,
"step": 16155,
"total_flos": 1.574034355519488e+17,
"train_loss": 1.0387100059157441,
"train_runtime": 30224.2981,
"train_samples_per_second": 8.553,
"train_steps_per_second": 0.535
}
],
"logging_steps": 100,
"max_steps": 16155,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 1.574034355519488e+17,
"trial_name": null,
"trial_params": null
}