{
"best_metric": 1.095082402229309,
"best_model_checkpoint": "/temp/mt5_base-qg-ap-oficial/checkpoint-6930",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 6930,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07,
"learning_rate": 9.855699855699856e-05,
"loss": 9.102,
"step": 100
},
{
"epoch": 0.14,
"learning_rate": 9.711399711399713e-05,
"loss": 2.6848,
"step": 200
},
{
"epoch": 0.22,
"learning_rate": 9.567099567099568e-05,
"loss": 2.1899,
"step": 300
},
{
"epoch": 0.29,
"learning_rate": 9.422799422799424e-05,
"loss": 2.0546,
"step": 400
},
{
"epoch": 0.36,
"learning_rate": 9.278499278499279e-05,
"loss": 1.9973,
"step": 500
},
{
"epoch": 0.43,
"learning_rate": 9.134199134199136e-05,
"loss": 1.953,
"step": 600
},
{
"epoch": 0.51,
"learning_rate": 8.98989898989899e-05,
"loss": 1.9029,
"step": 700
},
{
"epoch": 0.58,
"learning_rate": 8.845598845598845e-05,
"loss": 1.8914,
"step": 800
},
{
"epoch": 0.65,
"learning_rate": 8.701298701298701e-05,
"loss": 1.8197,
"step": 900
},
{
"epoch": 0.72,
"learning_rate": 8.556998556998557e-05,
"loss": 1.7947,
"step": 1000
},
{
"epoch": 0.79,
"learning_rate": 8.412698412698413e-05,
"loss": 1.8097,
"step": 1100
},
{
"epoch": 0.87,
"learning_rate": 8.268398268398268e-05,
"loss": 1.7513,
"step": 1200
},
{
"epoch": 0.94,
"learning_rate": 8.124098124098124e-05,
"loss": 1.7276,
"step": 1300
},
{
"epoch": 1.0,
"eval_loss": 1.348934531211853,
"eval_runtime": 231.7834,
"eval_samples_per_second": 194.608,
"eval_steps_per_second": 3.042,
"step": 1386
},
{
"epoch": 1.01,
"learning_rate": 7.97979797979798e-05,
"loss": 1.7093,
"step": 1400
},
{
"epoch": 1.08,
"learning_rate": 7.835497835497836e-05,
"loss": 1.6345,
"step": 1500
},
{
"epoch": 1.15,
"learning_rate": 7.691197691197691e-05,
"loss": 1.6314,
"step": 1600
},
{
"epoch": 1.23,
"learning_rate": 7.546897546897548e-05,
"loss": 1.6207,
"step": 1700
},
{
"epoch": 1.3,
"learning_rate": 7.402597402597404e-05,
"loss": 1.589,
"step": 1800
},
{
"epoch": 1.37,
"learning_rate": 7.258297258297259e-05,
"loss": 1.612,
"step": 1900
},
{
"epoch": 1.44,
"learning_rate": 7.113997113997114e-05,
"loss": 1.5812,
"step": 2000
},
{
"epoch": 1.52,
"learning_rate": 6.96969696969697e-05,
"loss": 1.6024,
"step": 2100
},
{
"epoch": 1.59,
"learning_rate": 6.825396825396825e-05,
"loss": 1.5719,
"step": 2200
},
{
"epoch": 1.66,
"learning_rate": 6.681096681096681e-05,
"loss": 1.5706,
"step": 2300
},
{
"epoch": 1.73,
"learning_rate": 6.536796536796536e-05,
"loss": 1.5704,
"step": 2400
},
{
"epoch": 1.8,
"learning_rate": 6.392496392496393e-05,
"loss": 1.5733,
"step": 2500
},
{
"epoch": 1.88,
"learning_rate": 6.248196248196248e-05,
"loss": 1.5852,
"step": 2600
},
{
"epoch": 1.95,
"learning_rate": 6.103896103896104e-05,
"loss": 1.5698,
"step": 2700
},
{
"epoch": 2.0,
"eval_loss": 1.2226001024246216,
"eval_runtime": 231.7119,
"eval_samples_per_second": 194.668,
"eval_steps_per_second": 3.043,
"step": 2772
},
{
"epoch": 2.02,
"learning_rate": 5.959595959595959e-05,
"loss": 1.5239,
"step": 2800
},
{
"epoch": 2.09,
"learning_rate": 5.815295815295816e-05,
"loss": 1.4899,
"step": 2900
},
{
"epoch": 2.16,
"learning_rate": 5.6709956709956715e-05,
"loss": 1.461,
"step": 3000
},
{
"epoch": 2.24,
"learning_rate": 5.526695526695527e-05,
"loss": 1.4747,
"step": 3100
},
{
"epoch": 2.31,
"learning_rate": 5.382395382395382e-05,
"loss": 1.481,
"step": 3200
},
{
"epoch": 2.38,
"learning_rate": 5.2380952380952384e-05,
"loss": 1.4496,
"step": 3300
},
{
"epoch": 2.45,
"learning_rate": 5.093795093795094e-05,
"loss": 1.4665,
"step": 3400
},
{
"epoch": 2.53,
"learning_rate": 4.94949494949495e-05,
"loss": 1.4397,
"step": 3500
},
{
"epoch": 2.6,
"learning_rate": 4.8051948051948054e-05,
"loss": 1.4711,
"step": 3600
},
{
"epoch": 2.67,
"learning_rate": 4.6608946608946615e-05,
"loss": 1.4528,
"step": 3700
},
{
"epoch": 2.74,
"learning_rate": 4.516594516594517e-05,
"loss": 1.4661,
"step": 3800
},
{
"epoch": 2.81,
"learning_rate": 4.3722943722943724e-05,
"loss": 1.4583,
"step": 3900
},
{
"epoch": 2.89,
"learning_rate": 4.227994227994228e-05,
"loss": 1.4645,
"step": 4000
},
{
"epoch": 2.96,
"learning_rate": 4.083694083694084e-05,
"loss": 1.4547,
"step": 4100
},
{
"epoch": 3.0,
"eval_loss": 1.1470191478729248,
"eval_runtime": 231.6851,
"eval_samples_per_second": 194.691,
"eval_steps_per_second": 3.043,
"step": 4158
},
{
"epoch": 3.03,
"learning_rate": 3.939393939393939e-05,
"loss": 1.4251,
"step": 4200
},
{
"epoch": 3.1,
"learning_rate": 3.7950937950937954e-05,
"loss": 1.3732,
"step": 4300
},
{
"epoch": 3.17,
"learning_rate": 3.650793650793651e-05,
"loss": 1.3843,
"step": 4400
},
{
"epoch": 3.25,
"learning_rate": 3.506493506493507e-05,
"loss": 1.3884,
"step": 4500
},
{
"epoch": 3.32,
"learning_rate": 3.3621933621933624e-05,
"loss": 1.3902,
"step": 4600
},
{
"epoch": 3.39,
"learning_rate": 3.217893217893218e-05,
"loss": 1.4071,
"step": 4700
},
{
"epoch": 3.46,
"learning_rate": 3.073593073593073e-05,
"loss": 1.3857,
"step": 4800
},
{
"epoch": 3.54,
"learning_rate": 2.9292929292929294e-05,
"loss": 1.3623,
"step": 4900
},
{
"epoch": 3.61,
"learning_rate": 2.7849927849927855e-05,
"loss": 1.3954,
"step": 5000
},
{
"epoch": 3.68,
"learning_rate": 2.640692640692641e-05,
"loss": 1.39,
"step": 5100
},
{
"epoch": 3.75,
"learning_rate": 2.4963924963924963e-05,
"loss": 1.3866,
"step": 5200
},
{
"epoch": 3.82,
"learning_rate": 2.352092352092352e-05,
"loss": 1.382,
"step": 5300
},
{
"epoch": 3.9,
"learning_rate": 2.207792207792208e-05,
"loss": 1.3959,
"step": 5400
},
{
"epoch": 3.97,
"learning_rate": 2.0634920634920636e-05,
"loss": 1.3969,
"step": 5500
},
{
"epoch": 4.0,
"eval_loss": 1.1056883335113525,
"eval_runtime": 231.7985,
"eval_samples_per_second": 194.596,
"eval_steps_per_second": 3.041,
"step": 5544
},
{
"epoch": 4.04,
"learning_rate": 1.919191919191919e-05,
"loss": 1.3549,
"step": 5600
},
{
"epoch": 4.11,
"learning_rate": 1.7748917748917752e-05,
"loss": 1.3454,
"step": 5700
},
{
"epoch": 4.18,
"learning_rate": 1.630591630591631e-05,
"loss": 1.3436,
"step": 5800
},
{
"epoch": 4.26,
"learning_rate": 1.4862914862914865e-05,
"loss": 1.3465,
"step": 5900
},
{
"epoch": 4.33,
"learning_rate": 1.3419913419913421e-05,
"loss": 1.3498,
"step": 6000
},
{
"epoch": 4.4,
"learning_rate": 1.1976911976911977e-05,
"loss": 1.3535,
"step": 6100
},
{
"epoch": 4.47,
"learning_rate": 1.0533910533910535e-05,
"loss": 1.3471,
"step": 6200
},
{
"epoch": 4.55,
"learning_rate": 9.090909090909091e-06,
"loss": 1.3427,
"step": 6300
},
{
"epoch": 4.62,
"learning_rate": 7.647907647907649e-06,
"loss": 1.3496,
"step": 6400
},
{
"epoch": 4.69,
"learning_rate": 6.204906204906205e-06,
"loss": 1.3428,
"step": 6500
},
{
"epoch": 4.76,
"learning_rate": 4.7619047619047615e-06,
"loss": 1.3362,
"step": 6600
},
{
"epoch": 4.83,
"learning_rate": 3.318903318903319e-06,
"loss": 1.3424,
"step": 6700
},
{
"epoch": 4.91,
"learning_rate": 1.875901875901876e-06,
"loss": 1.3475,
"step": 6800
},
{
"epoch": 4.98,
"learning_rate": 4.329004329004329e-07,
"loss": 1.3748,
"step": 6900
},
{
"epoch": 5.0,
"eval_loss": 1.095082402229309,
"eval_runtime": 232.1115,
"eval_samples_per_second": 194.333,
"eval_steps_per_second": 3.037,
"step": 6930
},
{
"epoch": 5.0,
"step": 6930,
"total_flos": 2.6579255601659904e+17,
"train_loss": 1.650609011189074,
"train_runtime": 5663.0471,
"train_samples_per_second": 39.143,
"train_steps_per_second": 1.224
}
],
"logging_steps": 100,
"max_steps": 6930,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 2.6579255601659904e+17,
"trial_name": null,
"trial_params": null
}