finetuneMt5English / checkpoint-2400 / trainer_state.json
{
"best_metric": 3.213425874710083,
"best_model_checkpoint": "/tmp/model/checkpoint-2400",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 2400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 4.166666666666667e-06,
"loss": 5.741,
"step": 20
},
{
"epoch": 0.05,
"learning_rate": 8.333333333333334e-06,
"loss": 5.0717,
"step": 40
},
{
"epoch": 0.07,
"learning_rate": 1.25e-05,
"loss": 4.6499,
"step": 60
},
{
"epoch": 0.1,
"learning_rate": 1.6666666666666667e-05,
"loss": 4.5674,
"step": 80
},
{
"epoch": 0.12,
"learning_rate": 2.0833333333333336e-05,
"loss": 4.41,
"step": 100
},
{
"epoch": 0.15,
"learning_rate": 2.5e-05,
"loss": 4.3577,
"step": 120
},
{
"epoch": 0.17,
"learning_rate": 2.916666666666667e-05,
"loss": 4.4615,
"step": 140
},
{
"epoch": 0.2,
"learning_rate": 3.3333333333333335e-05,
"loss": 4.3785,
"step": 160
},
{
"epoch": 0.23,
"learning_rate": 3.7500000000000003e-05,
"loss": 4.3647,
"step": 180
},
{
"epoch": 0.25,
"learning_rate": 4.166666666666667e-05,
"loss": 4.2084,
"step": 200
},
{
"epoch": 0.28,
"learning_rate": 4.5833333333333334e-05,
"loss": 4.0819,
"step": 220
},
{
"epoch": 0.3,
"learning_rate": 5e-05,
"loss": 4.1261,
"step": 240
},
{
"epoch": 0.33,
"learning_rate": 4.9537037037037035e-05,
"loss": 4.4005,
"step": 260
},
{
"epoch": 0.35,
"learning_rate": 4.9074074074074075e-05,
"loss": 4.539,
"step": 280
},
{
"epoch": 0.38,
"learning_rate": 4.8611111111111115e-05,
"loss": 4.2227,
"step": 300
},
{
"epoch": 0.4,
"learning_rate": 4.814814814814815e-05,
"loss": 4.2553,
"step": 320
},
{
"epoch": 0.42,
"learning_rate": 4.768518518518519e-05,
"loss": 4.1659,
"step": 340
},
{
"epoch": 0.45,
"learning_rate": 4.722222222222222e-05,
"loss": 4.0688,
"step": 360
},
{
"epoch": 0.47,
"learning_rate": 4.675925925925926e-05,
"loss": 4.0632,
"step": 380
},
{
"epoch": 0.5,
"learning_rate": 4.62962962962963e-05,
"loss": 4.0898,
"step": 400
},
{
"epoch": 0.53,
"learning_rate": 4.5833333333333334e-05,
"loss": 3.6001,
"step": 420
},
{
"epoch": 0.55,
"learning_rate": 4.5370370370370374e-05,
"loss": 4.3352,
"step": 440
},
{
"epoch": 0.57,
"learning_rate": 4.490740740740741e-05,
"loss": 4.1648,
"step": 460
},
{
"epoch": 0.6,
"learning_rate": 4.4444444444444447e-05,
"loss": 3.8782,
"step": 480
},
{
"epoch": 0.62,
"learning_rate": 4.3981481481481486e-05,
"loss": 3.7935,
"step": 500
},
{
"epoch": 0.65,
"learning_rate": 4.351851851851852e-05,
"loss": 4.1225,
"step": 520
},
{
"epoch": 0.68,
"learning_rate": 4.305555555555556e-05,
"loss": 3.9524,
"step": 540
},
{
"epoch": 0.7,
"learning_rate": 4.259259259259259e-05,
"loss": 4.0101,
"step": 560
},
{
"epoch": 0.72,
"learning_rate": 4.212962962962963e-05,
"loss": 3.9894,
"step": 580
},
{
"epoch": 0.75,
"learning_rate": 4.166666666666667e-05,
"loss": 3.6272,
"step": 600
},
{
"epoch": 0.78,
"learning_rate": 4.1203703703703705e-05,
"loss": 4.1463,
"step": 620
},
{
"epoch": 0.8,
"learning_rate": 4.074074074074074e-05,
"loss": 3.78,
"step": 640
},
{
"epoch": 0.82,
"learning_rate": 4.027777777777778e-05,
"loss": 4.1798,
"step": 660
},
{
"epoch": 0.85,
"learning_rate": 3.981481481481482e-05,
"loss": 3.9722,
"step": 680
},
{
"epoch": 0.88,
"learning_rate": 3.935185185185186e-05,
"loss": 3.7356,
"step": 700
},
{
"epoch": 0.9,
"learning_rate": 3.888888888888889e-05,
"loss": 4.01,
"step": 720
},
{
"epoch": 0.93,
"learning_rate": 3.8425925925925924e-05,
"loss": 3.78,
"step": 740
},
{
"epoch": 0.95,
"learning_rate": 3.7962962962962964e-05,
"loss": 3.6708,
"step": 760
},
{
"epoch": 0.97,
"learning_rate": 3.7500000000000003e-05,
"loss": 3.817,
"step": 780
},
{
"epoch": 1.0,
"learning_rate": 3.7037037037037037e-05,
"loss": 3.69,
"step": 800
},
{
"epoch": 1.0,
"eval_gen_len": 82.86,
"eval_loss": 3.467059373855591,
"eval_rouge1": 26.488,
"eval_rouge2": 7.7701,
"eval_rougeL": 15.7699,
"eval_rougeLsum": 23.5892,
"eval_runtime": 353.8993,
"eval_samples_per_second": 0.565,
"eval_steps_per_second": 0.141,
"step": 800
},
{
"epoch": 1.02,
"learning_rate": 3.6574074074074076e-05,
"loss": 3.5478,
"step": 820
},
{
"epoch": 1.05,
"learning_rate": 3.611111111111111e-05,
"loss": 3.7204,
"step": 840
},
{
"epoch": 1.07,
"learning_rate": 3.564814814814815e-05,
"loss": 3.6043,
"step": 860
},
{
"epoch": 1.1,
"learning_rate": 3.518518518518519e-05,
"loss": 3.6187,
"step": 880
},
{
"epoch": 1.12,
"learning_rate": 3.472222222222222e-05,
"loss": 3.556,
"step": 900
},
{
"epoch": 1.15,
"learning_rate": 3.425925925925926e-05,
"loss": 3.6192,
"step": 920
},
{
"epoch": 1.18,
"learning_rate": 3.3796296296296295e-05,
"loss": 3.693,
"step": 940
},
{
"epoch": 1.2,
"learning_rate": 3.3333333333333335e-05,
"loss": 3.7153,
"step": 960
},
{
"epoch": 1.23,
"learning_rate": 3.2870370370370375e-05,
"loss": 3.8134,
"step": 980
},
{
"epoch": 1.25,
"learning_rate": 3.240740740740741e-05,
"loss": 3.5188,
"step": 1000
},
{
"epoch": 1.27,
"learning_rate": 3.194444444444444e-05,
"loss": 3.4658,
"step": 1020
},
{
"epoch": 1.3,
"learning_rate": 3.148148148148148e-05,
"loss": 3.834,
"step": 1040
},
{
"epoch": 1.32,
"learning_rate": 3.101851851851852e-05,
"loss": 3.9915,
"step": 1060
},
{
"epoch": 1.35,
"learning_rate": 3.055555555555556e-05,
"loss": 3.3837,
"step": 1080
},
{
"epoch": 1.38,
"learning_rate": 3.0092592592592593e-05,
"loss": 3.4688,
"step": 1100
},
{
"epoch": 1.4,
"learning_rate": 2.962962962962963e-05,
"loss": 3.8257,
"step": 1120
},
{
"epoch": 1.43,
"learning_rate": 2.916666666666667e-05,
"loss": 3.7379,
"step": 1140
},
{
"epoch": 1.45,
"learning_rate": 2.8703703703703706e-05,
"loss": 3.9015,
"step": 1160
},
{
"epoch": 1.48,
"learning_rate": 2.824074074074074e-05,
"loss": 3.7064,
"step": 1180
},
{
"epoch": 1.5,
"learning_rate": 2.777777777777778e-05,
"loss": 3.4937,
"step": 1200
},
{
"epoch": 1.52,
"learning_rate": 2.7314814814814816e-05,
"loss": 3.6392,
"step": 1220
},
{
"epoch": 1.55,
"learning_rate": 2.6851851851851855e-05,
"loss": 3.6589,
"step": 1240
},
{
"epoch": 1.57,
"learning_rate": 2.6388888888888892e-05,
"loss": 3.6652,
"step": 1260
},
{
"epoch": 1.6,
"learning_rate": 2.5925925925925925e-05,
"loss": 3.5664,
"step": 1280
},
{
"epoch": 1.62,
"learning_rate": 2.5462962962962965e-05,
"loss": 3.7538,
"step": 1300
},
{
"epoch": 1.65,
"learning_rate": 2.5e-05,
"loss": 3.5765,
"step": 1320
},
{
"epoch": 1.68,
"learning_rate": 2.4537037037037038e-05,
"loss": 3.5302,
"step": 1340
},
{
"epoch": 1.7,
"learning_rate": 2.4074074074074074e-05,
"loss": 3.3333,
"step": 1360
},
{
"epoch": 1.73,
"learning_rate": 2.361111111111111e-05,
"loss": 3.506,
"step": 1380
},
{
"epoch": 1.75,
"learning_rate": 2.314814814814815e-05,
"loss": 3.5008,
"step": 1400
},
{
"epoch": 1.77,
"learning_rate": 2.2685185185185187e-05,
"loss": 3.8955,
"step": 1420
},
{
"epoch": 1.8,
"learning_rate": 2.2222222222222223e-05,
"loss": 3.3469,
"step": 1440
},
{
"epoch": 1.82,
"learning_rate": 2.175925925925926e-05,
"loss": 3.5623,
"step": 1460
},
{
"epoch": 1.85,
"learning_rate": 2.1296296296296296e-05,
"loss": 3.8341,
"step": 1480
},
{
"epoch": 1.88,
"learning_rate": 2.0833333333333336e-05,
"loss": 3.5073,
"step": 1500
},
{
"epoch": 1.9,
"learning_rate": 2.037037037037037e-05,
"loss": 3.6144,
"step": 1520
},
{
"epoch": 1.93,
"learning_rate": 1.990740740740741e-05,
"loss": 3.7585,
"step": 1540
},
{
"epoch": 1.95,
"learning_rate": 1.9444444444444445e-05,
"loss": 3.541,
"step": 1560
},
{
"epoch": 1.98,
"learning_rate": 1.8981481481481482e-05,
"loss": 3.7732,
"step": 1580
},
{
"epoch": 2.0,
"learning_rate": 1.8518518518518518e-05,
"loss": 3.4184,
"step": 1600
},
{
"epoch": 2.0,
"eval_gen_len": 82.965,
"eval_loss": 3.26387882232666,
"eval_rouge1": 27.1928,
"eval_rouge2": 8.9006,
"eval_rougeL": 16.9355,
"eval_rougeLsum": 24.6536,
"eval_runtime": 353.2482,
"eval_samples_per_second": 0.566,
"eval_steps_per_second": 0.142,
"step": 1600
},
{
"epoch": 2.02,
"learning_rate": 1.8055555555555555e-05,
"loss": 3.4078,
"step": 1620
},
{
"epoch": 2.05,
"learning_rate": 1.7592592592592595e-05,
"loss": 3.5058,
"step": 1640
},
{
"epoch": 2.08,
"learning_rate": 1.712962962962963e-05,
"loss": 3.4788,
"step": 1660
},
{
"epoch": 2.1,
"learning_rate": 1.6666666666666667e-05,
"loss": 3.3683,
"step": 1680
},
{
"epoch": 2.12,
"learning_rate": 1.6203703703703704e-05,
"loss": 3.6269,
"step": 1700
},
{
"epoch": 2.15,
"learning_rate": 1.574074074074074e-05,
"loss": 3.4723,
"step": 1720
},
{
"epoch": 2.17,
"learning_rate": 1.527777777777778e-05,
"loss": 3.5449,
"step": 1740
},
{
"epoch": 2.2,
"learning_rate": 1.4814814814814815e-05,
"loss": 3.5735,
"step": 1760
},
{
"epoch": 2.23,
"learning_rate": 1.4351851851851853e-05,
"loss": 3.6537,
"step": 1780
},
{
"epoch": 2.25,
"learning_rate": 1.388888888888889e-05,
"loss": 3.6652,
"step": 1800
},
{
"epoch": 2.27,
"learning_rate": 1.3425925925925928e-05,
"loss": 3.6673,
"step": 1820
},
{
"epoch": 2.3,
"learning_rate": 1.2962962962962962e-05,
"loss": 3.4442,
"step": 1840
},
{
"epoch": 2.33,
"learning_rate": 1.25e-05,
"loss": 3.2242,
"step": 1860
},
{
"epoch": 2.35,
"learning_rate": 1.2037037037037037e-05,
"loss": 3.4312,
"step": 1880
},
{
"epoch": 2.38,
"learning_rate": 1.1574074074074075e-05,
"loss": 3.2635,
"step": 1900
},
{
"epoch": 2.4,
"learning_rate": 1.1111111111111112e-05,
"loss": 3.2952,
"step": 1920
},
{
"epoch": 2.42,
"learning_rate": 1.0648148148148148e-05,
"loss": 3.3192,
"step": 1940
},
{
"epoch": 2.45,
"learning_rate": 1.0185185185185185e-05,
"loss": 3.6236,
"step": 1960
},
{
"epoch": 2.48,
"learning_rate": 9.722222222222223e-06,
"loss": 3.4545,
"step": 1980
},
{
"epoch": 2.5,
"learning_rate": 9.259259259259259e-06,
"loss": 3.2789,
"step": 2000
},
{
"epoch": 2.52,
"learning_rate": 8.796296296296297e-06,
"loss": 3.647,
"step": 2020
},
{
"epoch": 2.55,
"learning_rate": 8.333333333333334e-06,
"loss": 3.4436,
"step": 2040
},
{
"epoch": 2.58,
"learning_rate": 7.87037037037037e-06,
"loss": 3.4333,
"step": 2060
},
{
"epoch": 2.6,
"learning_rate": 7.4074074074074075e-06,
"loss": 3.5945,
"step": 2080
},
{
"epoch": 2.62,
"learning_rate": 6.944444444444445e-06,
"loss": 3.4041,
"step": 2100
},
{
"epoch": 2.65,
"learning_rate": 6.481481481481481e-06,
"loss": 3.2998,
"step": 2120
},
{
"epoch": 2.67,
"learning_rate": 6.0185185185185185e-06,
"loss": 3.2847,
"step": 2140
},
{
"epoch": 2.7,
"learning_rate": 5.555555555555556e-06,
"loss": 3.3913,
"step": 2160
},
{
"epoch": 2.73,
"learning_rate": 5.092592592592592e-06,
"loss": 3.2782,
"step": 2180
},
{
"epoch": 2.75,
"learning_rate": 4.6296296296296296e-06,
"loss": 3.5704,
"step": 2200
},
{
"epoch": 2.77,
"learning_rate": 4.166666666666667e-06,
"loss": 3.2631,
"step": 2220
},
{
"epoch": 2.8,
"learning_rate": 3.7037037037037037e-06,
"loss": 3.4875,
"step": 2240
},
{
"epoch": 2.83,
"learning_rate": 3.2407407407407406e-06,
"loss": 3.6455,
"step": 2260
},
{
"epoch": 2.85,
"learning_rate": 2.777777777777778e-06,
"loss": 3.2978,
"step": 2280
},
{
"epoch": 2.88,
"learning_rate": 2.3148148148148148e-06,
"loss": 3.7656,
"step": 2300
},
{
"epoch": 2.9,
"learning_rate": 1.8518518518518519e-06,
"loss": 3.1675,
"step": 2320
},
{
"epoch": 2.92,
"learning_rate": 1.388888888888889e-06,
"loss": 3.4532,
"step": 2340
},
{
"epoch": 2.95,
"learning_rate": 9.259259259259259e-07,
"loss": 3.4008,
"step": 2360
},
{
"epoch": 2.98,
"learning_rate": 4.6296296296296297e-07,
"loss": 3.6636,
"step": 2380
},
{
"epoch": 3.0,
"learning_rate": 0.0,
"loss": 3.2196,
"step": 2400
},
{
"epoch": 3.0,
"eval_gen_len": 82.945,
"eval_loss": 3.213425874710083,
"eval_rouge1": 27.3285,
"eval_rouge2": 8.8456,
"eval_rougeL": 16.8011,
"eval_rougeLsum": 24.6941,
"eval_runtime": 353.2997,
"eval_samples_per_second": 0.566,
"eval_steps_per_second": 0.142,
"step": 2400
}
],
"logging_steps": 20,
"max_steps": 2400,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 1.15108428644352e+16,
"trial_name": null,
"trial_params": null
}
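
A minimal sketch (not part of the original checkpoint) of how this file can be read back and summarized. The relative path "checkpoint-2400/trainer_state.json" is an assumption; point it at wherever the checkpoint folder was downloaded. The schedule parameters used in the check below (peak learning rate 5e-05, 240 warmup steps, 2400 total steps) are read off the log above and appear consistent with the Trainer's default linear warmup/decay schedule.

import json

# Load the trainer state dumped alongside the checkpoint.
# Path is hypothetical; adjust to your local copy of the checkpoint folder.
with open("checkpoint-2400/trainer_state.json") as f:
    state = json.load(f)

# Entries with a "loss" key are per-logging-step training records (every 20 steps);
# entries with "eval_loss" are the end-of-epoch evaluation records.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print("best eval_loss:", state["best_metric"], "from", state["best_model_checkpoint"])
print("final training loss:", train_logs[-1]["loss"])
print("per-epoch ROUGE-1:", [e["eval_rouge1"] for e in eval_logs])

# Sanity check: the logged learning_rate values match a linear schedule that
# warms up to 5e-05 over the first 240 steps and decays linearly to 0 at step 2400.
def linear_lr(step, peak=5e-05, warmup=240, total=2400):
    if step < warmup:
        return peak * step / warmup
    return peak * (total - step) / (total - warmup)

for entry in train_logs:
    assert abs(entry["learning_rate"] - linear_lr(entry["step"])) < 1e-9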