{
"best_metric": 1.5155326128005981,
"best_model_checkpoint": "/temp/mt5_base-qg-af-oficial/checkpoint-5544",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 6930,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07,
"learning_rate": 9.855699855699856e-05,
"loss": 12.1403,
"step": 100
},
{
"epoch": 0.14,
"learning_rate": 9.711399711399713e-05,
"loss": 3.3838,
"step": 200
},
{
"epoch": 0.22,
"learning_rate": 9.567099567099568e-05,
"loss": 2.2366,
"step": 300
},
{
"epoch": 0.29,
"learning_rate": 9.422799422799424e-05,
"loss": 2.1106,
"step": 400
},
{
"epoch": 0.36,
"learning_rate": 9.278499278499279e-05,
"loss": 2.0552,
"step": 500
},
{
"epoch": 0.43,
"learning_rate": 9.134199134199136e-05,
"loss": 2.0231,
"step": 600
},
{
"epoch": 0.51,
"learning_rate": 8.98989898989899e-05,
"loss": 1.974,
"step": 700
},
{
"epoch": 0.58,
"learning_rate": 8.845598845598845e-05,
"loss": 1.9583,
"step": 800
},
{
"epoch": 0.65,
"learning_rate": 8.701298701298701e-05,
"loss": 1.8944,
"step": 900
},
{
"epoch": 0.72,
"learning_rate": 8.556998556998557e-05,
"loss": 1.8719,
"step": 1000
},
{
"epoch": 0.79,
"learning_rate": 8.412698412698413e-05,
"loss": 1.8895,
"step": 1100
},
{
"epoch": 0.87,
"learning_rate": 8.268398268398268e-05,
"loss": 1.8459,
"step": 1200
},
{
"epoch": 0.94,
"learning_rate": 8.124098124098124e-05,
"loss": 1.8203,
"step": 1300
},
{
"epoch": 1.0,
"eval_loss": 1.6114802360534668,
"eval_runtime": 33.1219,
"eval_samples_per_second": 196.577,
"eval_steps_per_second": 3.08,
"step": 1386
},
{
"epoch": 1.01,
"learning_rate": 7.97979797979798e-05,
"loss": 1.8007,
"step": 1400
},
{
"epoch": 1.08,
"learning_rate": 7.835497835497836e-05,
"loss": 1.7247,
"step": 1500
},
{
"epoch": 1.15,
"learning_rate": 7.691197691197691e-05,
"loss": 1.7288,
"step": 1600
},
{
"epoch": 1.23,
"learning_rate": 7.546897546897548e-05,
"loss": 1.7174,
"step": 1700
},
{
"epoch": 1.3,
"learning_rate": 7.402597402597404e-05,
"loss": 1.6836,
"step": 1800
},
{
"epoch": 1.37,
"learning_rate": 7.258297258297259e-05,
"loss": 1.7096,
"step": 1900
},
{
"epoch": 1.44,
"learning_rate": 7.113997113997114e-05,
"loss": 1.681,
"step": 2000
},
{
"epoch": 1.52,
"learning_rate": 6.96969696969697e-05,
"loss": 1.6965,
"step": 2100
},
{
"epoch": 1.59,
"learning_rate": 6.825396825396825e-05,
"loss": 1.6696,
"step": 2200
},
{
"epoch": 1.66,
"learning_rate": 6.681096681096681e-05,
"loss": 1.6779,
"step": 2300
},
{
"epoch": 1.73,
"learning_rate": 6.536796536796536e-05,
"loss": 1.6736,
"step": 2400
},
{
"epoch": 1.8,
"learning_rate": 6.392496392496393e-05,
"loss": 1.6729,
"step": 2500
},
{
"epoch": 1.88,
"learning_rate": 6.248196248196248e-05,
"loss": 1.6795,
"step": 2600
},
{
"epoch": 1.95,
"learning_rate": 6.103896103896104e-05,
"loss": 1.6624,
"step": 2700
},
{
"epoch": 2.0,
"eval_loss": 1.5542315244674683,
"eval_runtime": 33.2024,
"eval_samples_per_second": 196.101,
"eval_steps_per_second": 3.072,
"step": 2772
},
{
"epoch": 2.02,
"learning_rate": 5.959595959595959e-05,
"loss": 1.6222,
"step": 2800
},
{
"epoch": 2.09,
"learning_rate": 5.815295815295816e-05,
"loss": 1.5908,
"step": 2900
},
{
"epoch": 2.16,
"learning_rate": 5.6709956709956715e-05,
"loss": 1.5692,
"step": 3000
},
{
"epoch": 2.24,
"learning_rate": 5.526695526695527e-05,
"loss": 1.5753,
"step": 3100
},
{
"epoch": 2.31,
"learning_rate": 5.382395382395382e-05,
"loss": 1.5802,
"step": 3200
},
{
"epoch": 2.38,
"learning_rate": 5.2380952380952384e-05,
"loss": 1.5546,
"step": 3300
},
{
"epoch": 2.45,
"learning_rate": 5.093795093795094e-05,
"loss": 1.5665,
"step": 3400
},
{
"epoch": 2.53,
"learning_rate": 4.94949494949495e-05,
"loss": 1.5381,
"step": 3500
},
{
"epoch": 2.6,
"learning_rate": 4.8051948051948054e-05,
"loss": 1.5634,
"step": 3600
},
{
"epoch": 2.67,
"learning_rate": 4.6608946608946615e-05,
"loss": 1.5554,
"step": 3700
},
{
"epoch": 2.74,
"learning_rate": 4.516594516594517e-05,
"loss": 1.5645,
"step": 3800
},
{
"epoch": 2.81,
"learning_rate": 4.3722943722943724e-05,
"loss": 1.5652,
"step": 3900
},
{
"epoch": 2.89,
"learning_rate": 4.227994227994228e-05,
"loss": 1.5615,
"step": 4000
},
{
"epoch": 2.96,
"learning_rate": 4.083694083694084e-05,
"loss": 1.5489,
"step": 4100
},
{
"epoch": 3.0,
"eval_loss": 1.5325491428375244,
"eval_runtime": 33.1544,
"eval_samples_per_second": 196.384,
"eval_steps_per_second": 3.077,
"step": 4158
},
{
"epoch": 3.03,
"learning_rate": 3.939393939393939e-05,
"loss": 1.5307,
"step": 4200
},
{
"epoch": 3.1,
"learning_rate": 3.7950937950937954e-05,
"loss": 1.4781,
"step": 4300
},
{
"epoch": 3.17,
"learning_rate": 3.650793650793651e-05,
"loss": 1.4875,
"step": 4400
},
{
"epoch": 3.25,
"learning_rate": 3.506493506493507e-05,
"loss": 1.4927,
"step": 4500
},
{
"epoch": 3.32,
"learning_rate": 3.3621933621933624e-05,
"loss": 1.4904,
"step": 4600
},
{
"epoch": 3.39,
"learning_rate": 3.217893217893218e-05,
"loss": 1.5048,
"step": 4700
},
{
"epoch": 3.46,
"learning_rate": 3.073593073593073e-05,
"loss": 1.4868,
"step": 4800
},
{
"epoch": 3.54,
"learning_rate": 2.9292929292929294e-05,
"loss": 1.4631,
"step": 4900
},
{
"epoch": 3.61,
"learning_rate": 2.7849927849927855e-05,
"loss": 1.4957,
"step": 5000
},
{
"epoch": 3.68,
"learning_rate": 2.640692640692641e-05,
"loss": 1.488,
"step": 5100
},
{
"epoch": 3.75,
"learning_rate": 2.4963924963924963e-05,
"loss": 1.4898,
"step": 5200
},
{
"epoch": 3.82,
"learning_rate": 2.352092352092352e-05,
"loss": 1.4818,
"step": 5300
},
{
"epoch": 3.9,
"learning_rate": 2.207792207792208e-05,
"loss": 1.4967,
"step": 5400
},
{
"epoch": 3.97,
"learning_rate": 2.0634920634920636e-05,
"loss": 1.4926,
"step": 5500
},
{
"epoch": 4.0,
"eval_loss": 1.5155326128005981,
"eval_runtime": 33.2094,
"eval_samples_per_second": 196.059,
"eval_steps_per_second": 3.071,
"step": 5544
},
{
"epoch": 4.04,
"learning_rate": 1.919191919191919e-05,
"loss": 1.4528,
"step": 5600
},
{
"epoch": 4.11,
"learning_rate": 1.7748917748917752e-05,
"loss": 1.4396,
"step": 5700
},
{
"epoch": 4.18,
"learning_rate": 1.630591630591631e-05,
"loss": 1.4457,
"step": 5800
},
{
"epoch": 4.26,
"learning_rate": 1.4862914862914865e-05,
"loss": 1.4471,
"step": 5900
},
{
"epoch": 4.33,
"learning_rate": 1.3419913419913421e-05,
"loss": 1.4543,
"step": 6000
},
{
"epoch": 4.4,
"learning_rate": 1.1976911976911977e-05,
"loss": 1.4612,
"step": 6100
},
{
"epoch": 4.47,
"learning_rate": 1.0533910533910535e-05,
"loss": 1.4536,
"step": 6200
},
{
"epoch": 4.55,
"learning_rate": 9.090909090909091e-06,
"loss": 1.4451,
"step": 6300
},
{
"epoch": 4.62,
"learning_rate": 7.647907647907649e-06,
"loss": 1.4491,
"step": 6400
},
{
"epoch": 4.69,
"learning_rate": 6.204906204906205e-06,
"loss": 1.4394,
"step": 6500
},
{
"epoch": 4.76,
"learning_rate": 4.7619047619047615e-06,
"loss": 1.4367,
"step": 6600
},
{
"epoch": 4.83,
"learning_rate": 3.318903318903319e-06,
"loss": 1.445,
"step": 6700
},
{
"epoch": 4.91,
"learning_rate": 1.875901875901876e-06,
"loss": 1.4478,
"step": 6800
},
{
"epoch": 4.98,
"learning_rate": 4.329004329004329e-07,
"loss": 1.4687,
"step": 6900
},
{
"epoch": 5.0,
"eval_loss": 1.5195423364639282,
"eval_runtime": 33.3056,
"eval_samples_per_second": 195.493,
"eval_steps_per_second": 3.063,
"step": 6930
},
{
"epoch": 5.0,
"step": 6930,
"total_flos": 2.6579255601659904e+17,
"train_loss": 1.7969476851205977,
"train_runtime": 4653.9626,
"train_samples_per_second": 47.63,
"train_steps_per_second": 1.489
}
],
"logging_steps": 100,
"max_steps": 6930,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 2.6579255601659904e+17,
"trial_name": null,
"trial_params": null
}