t5_small-qg-ctx-a / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9999616380550493,
"eval_steps": 500,
"global_step": 52134,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 9.904093298039668e-05,
"loss": 2.2133,
"step": 500
},
{
"epoch": 0.04,
"learning_rate": 9.808186596079335e-05,
"loss": 1.7686,
"step": 1000
},
{
"epoch": 0.06,
"learning_rate": 9.712279894119002e-05,
"loss": 1.6543,
"step": 1500
},
{
"epoch": 0.08,
"learning_rate": 9.616373192158669e-05,
"loss": 1.5913,
"step": 2000
},
{
"epoch": 0.1,
"learning_rate": 9.520466490198336e-05,
"loss": 1.5555,
"step": 2500
},
{
"epoch": 0.12,
"learning_rate": 9.424559788238001e-05,
"loss": 1.5263,
"step": 3000
},
{
"epoch": 0.13,
"learning_rate": 9.32865308627767e-05,
"loss": 1.541,
"step": 3500
},
{
"epoch": 0.15,
"learning_rate": 9.232746384317337e-05,
"loss": 1.502,
"step": 4000
},
{
"epoch": 0.17,
"learning_rate": 9.136839682357004e-05,
"loss": 1.5094,
"step": 4500
},
{
"epoch": 0.19,
"learning_rate": 9.04093298039667e-05,
"loss": 1.4669,
"step": 5000
},
{
"epoch": 0.21,
"learning_rate": 8.945026278436338e-05,
"loss": 1.4625,
"step": 5500
},
{
"epoch": 0.23,
"learning_rate": 8.849119576476005e-05,
"loss": 1.439,
"step": 6000
},
{
"epoch": 0.25,
"learning_rate": 8.753212874515672e-05,
"loss": 1.4443,
"step": 6500
},
{
"epoch": 0.27,
"learning_rate": 8.657306172555338e-05,
"loss": 1.4184,
"step": 7000
},
{
"epoch": 0.29,
"learning_rate": 8.561399470595005e-05,
"loss": 1.4656,
"step": 7500
},
{
"epoch": 0.31,
"learning_rate": 8.465492768634673e-05,
"loss": 1.4078,
"step": 8000
},
{
"epoch": 0.33,
"learning_rate": 8.36958606667434e-05,
"loss": 1.4266,
"step": 8500
},
{
"epoch": 0.35,
"learning_rate": 8.273679364714006e-05,
"loss": 1.4053,
"step": 9000
},
{
"epoch": 0.36,
"learning_rate": 8.177772662753673e-05,
"loss": 1.4144,
"step": 9500
},
{
"epoch": 0.38,
"learning_rate": 8.081865960793342e-05,
"loss": 1.3931,
"step": 10000
},
{
"epoch": 0.4,
"learning_rate": 7.985959258833007e-05,
"loss": 1.3731,
"step": 10500
},
{
"epoch": 0.42,
"learning_rate": 7.890052556872674e-05,
"loss": 1.3473,
"step": 11000
},
{
"epoch": 0.44,
"learning_rate": 7.794145854912341e-05,
"loss": 1.3953,
"step": 11500
},
{
"epoch": 0.46,
"learning_rate": 7.698239152952008e-05,
"loss": 1.4022,
"step": 12000
},
{
"epoch": 0.48,
"learning_rate": 7.602332450991675e-05,
"loss": 1.3876,
"step": 12500
},
{
"epoch": 0.5,
"learning_rate": 7.506425749031343e-05,
"loss": 1.355,
"step": 13000
},
{
"epoch": 0.52,
"learning_rate": 7.41051904707101e-05,
"loss": 1.3965,
"step": 13500
},
{
"epoch": 0.54,
"learning_rate": 7.314612345110677e-05,
"loss": 1.3699,
"step": 14000
},
{
"epoch": 0.56,
"learning_rate": 7.218705643150344e-05,
"loss": 1.3564,
"step": 14500
},
{
"epoch": 0.58,
"learning_rate": 7.122798941190011e-05,
"loss": 1.3441,
"step": 15000
},
{
"epoch": 0.59,
"learning_rate": 7.026892239229678e-05,
"loss": 1.3664,
"step": 15500
},
{
"epoch": 0.61,
"learning_rate": 6.930985537269345e-05,
"loss": 1.3171,
"step": 16000
},
{
"epoch": 0.63,
"learning_rate": 6.835078835309012e-05,
"loss": 1.3574,
"step": 16500
},
{
"epoch": 0.65,
"learning_rate": 6.739172133348679e-05,
"loss": 1.3537,
"step": 17000
},
{
"epoch": 0.67,
"learning_rate": 6.643265431388346e-05,
"loss": 1.3226,
"step": 17500
},
{
"epoch": 0.69,
"learning_rate": 6.547358729428013e-05,
"loss": 1.3274,
"step": 18000
},
{
"epoch": 0.71,
"learning_rate": 6.451452027467679e-05,
"loss": 1.3106,
"step": 18500
},
{
"epoch": 0.73,
"learning_rate": 6.355545325507347e-05,
"loss": 1.3084,
"step": 19000
},
{
"epoch": 0.75,
"learning_rate": 6.259638623547014e-05,
"loss": 1.3071,
"step": 19500
},
{
"epoch": 0.77,
"learning_rate": 6.163731921586681e-05,
"loss": 1.3135,
"step": 20000
},
{
"epoch": 0.79,
"learning_rate": 6.067825219626348e-05,
"loss": 1.2827,
"step": 20500
},
{
"epoch": 0.81,
"learning_rate": 5.9719185176660154e-05,
"loss": 1.305,
"step": 21000
},
{
"epoch": 0.82,
"learning_rate": 5.876011815705682e-05,
"loss": 1.3273,
"step": 21500
},
{
"epoch": 0.84,
"learning_rate": 5.780105113745349e-05,
"loss": 1.2855,
"step": 22000
},
{
"epoch": 0.86,
"learning_rate": 5.684198411785015e-05,
"loss": 1.313,
"step": 22500
},
{
"epoch": 0.88,
"learning_rate": 5.588291709824682e-05,
"loss": 1.3025,
"step": 23000
},
{
"epoch": 0.9,
"learning_rate": 5.49238500786435e-05,
"loss": 1.3051,
"step": 23500
},
{
"epoch": 0.92,
"learning_rate": 5.396478305904017e-05,
"loss": 1.354,
"step": 24000
},
{
"epoch": 0.94,
"learning_rate": 5.3005716039436834e-05,
"loss": 1.294,
"step": 24500
},
{
"epoch": 0.96,
"learning_rate": 5.2046649019833505e-05,
"loss": 1.2994,
"step": 25000
},
{
"epoch": 0.98,
"learning_rate": 5.108758200023018e-05,
"loss": 1.262,
"step": 25500
},
{
"epoch": 1.0,
"learning_rate": 5.012851498062685e-05,
"loss": 1.2515,
"step": 26000
},
{
"epoch": 1.02,
"learning_rate": 4.9169447961023517e-05,
"loss": 1.2339,
"step": 26500
},
{
"epoch": 1.04,
"learning_rate": 4.8210380941420194e-05,
"loss": 1.2182,
"step": 27000
},
{
"epoch": 1.05,
"learning_rate": 4.725131392181686e-05,
"loss": 1.2314,
"step": 27500
},
{
"epoch": 1.07,
"learning_rate": 4.629224690221353e-05,
"loss": 1.245,
"step": 28000
},
{
"epoch": 1.09,
"learning_rate": 4.53331798826102e-05,
"loss": 1.2328,
"step": 28500
},
{
"epoch": 1.11,
"learning_rate": 4.437411286300687e-05,
"loss": 1.2263,
"step": 29000
},
{
"epoch": 1.13,
"learning_rate": 4.341504584340354e-05,
"loss": 1.2093,
"step": 29500
},
{
"epoch": 1.15,
"learning_rate": 4.245597882380021e-05,
"loss": 1.2192,
"step": 30000
},
{
"epoch": 1.17,
"learning_rate": 4.149691180419688e-05,
"loss": 1.236,
"step": 30500
},
{
"epoch": 1.19,
"learning_rate": 4.053784478459355e-05,
"loss": 1.2455,
"step": 31000
},
{
"epoch": 1.21,
"learning_rate": 3.957877776499022e-05,
"loss": 1.2021,
"step": 31500
},
{
"epoch": 1.23,
"learning_rate": 3.8619710745386886e-05,
"loss": 1.2401,
"step": 32000
},
{
"epoch": 1.25,
"learning_rate": 3.766064372578356e-05,
"loss": 1.2256,
"step": 32500
},
{
"epoch": 1.27,
"learning_rate": 3.670157670618023e-05,
"loss": 1.2342,
"step": 33000
},
{
"epoch": 1.29,
"learning_rate": 3.57425096865769e-05,
"loss": 1.2115,
"step": 33500
},
{
"epoch": 1.3,
"learning_rate": 3.478344266697357e-05,
"loss": 1.2227,
"step": 34000
},
{
"epoch": 1.32,
"learning_rate": 3.382437564737024e-05,
"loss": 1.2467,
"step": 34500
},
{
"epoch": 1.34,
"learning_rate": 3.286530862776691e-05,
"loss": 1.206,
"step": 35000
},
{
"epoch": 1.36,
"learning_rate": 3.190624160816358e-05,
"loss": 1.1981,
"step": 35500
},
{
"epoch": 1.38,
"learning_rate": 3.094717458856025e-05,
"loss": 1.224,
"step": 36000
},
{
"epoch": 1.4,
"learning_rate": 2.998810756895692e-05,
"loss": 1.2236,
"step": 36500
},
{
"epoch": 1.42,
"learning_rate": 2.902904054935359e-05,
"loss": 1.2348,
"step": 37000
},
{
"epoch": 1.44,
"learning_rate": 2.806997352975026e-05,
"loss": 1.2012,
"step": 37500
},
{
"epoch": 1.46,
"learning_rate": 2.7110906510146932e-05,
"loss": 1.2015,
"step": 38000
},
{
"epoch": 1.48,
"learning_rate": 2.61518394905436e-05,
"loss": 1.2002,
"step": 38500
},
{
"epoch": 1.5,
"learning_rate": 2.5192772470940267e-05,
"loss": 1.2298,
"step": 39000
},
{
"epoch": 1.52,
"learning_rate": 2.423370545133694e-05,
"loss": 1.2269,
"step": 39500
},
{
"epoch": 1.53,
"learning_rate": 2.327463843173361e-05,
"loss": 1.2181,
"step": 40000
},
{
"epoch": 1.55,
"learning_rate": 2.231557141213028e-05,
"loss": 1.2213,
"step": 40500
},
{
"epoch": 1.57,
"learning_rate": 2.1356504392526952e-05,
"loss": 1.2233,
"step": 41000
},
{
"epoch": 1.59,
"learning_rate": 2.039743737292362e-05,
"loss": 1.2186,
"step": 41500
},
{
"epoch": 1.61,
"learning_rate": 1.943837035332029e-05,
"loss": 1.1961,
"step": 42000
},
{
"epoch": 1.63,
"learning_rate": 1.847930333371696e-05,
"loss": 1.2368,
"step": 42500
},
{
"epoch": 1.65,
"learning_rate": 1.752023631411363e-05,
"loss": 1.1773,
"step": 43000
},
{
"epoch": 1.67,
"learning_rate": 1.65611692945103e-05,
"loss": 1.2225,
"step": 43500
},
{
"epoch": 1.69,
"learning_rate": 1.5602102274906972e-05,
"loss": 1.2317,
"step": 44000
},
{
"epoch": 1.71,
"learning_rate": 1.4643035255303641e-05,
"loss": 1.1884,
"step": 44500
},
{
"epoch": 1.73,
"learning_rate": 1.3683968235700311e-05,
"loss": 1.219,
"step": 45000
},
{
"epoch": 1.75,
"learning_rate": 1.2724901216096982e-05,
"loss": 1.1947,
"step": 45500
},
{
"epoch": 1.76,
"learning_rate": 1.176583419649365e-05,
"loss": 1.1989,
"step": 46000
},
{
"epoch": 1.78,
"learning_rate": 1.0806767176890321e-05,
"loss": 1.2412,
"step": 46500
},
{
"epoch": 1.8,
"learning_rate": 9.847700157286992e-06,
"loss": 1.2042,
"step": 47000
},
{
"epoch": 1.82,
"learning_rate": 8.888633137683662e-06,
"loss": 1.21,
"step": 47500
},
{
"epoch": 1.84,
"learning_rate": 7.929566118080333e-06,
"loss": 1.2041,
"step": 48000
},
{
"epoch": 1.86,
"learning_rate": 6.970499098477001e-06,
"loss": 1.1775,
"step": 48500
},
{
"epoch": 1.88,
"learning_rate": 6.0114320788736716e-06,
"loss": 1.1626,
"step": 49000
},
{
"epoch": 1.9,
"learning_rate": 5.052365059270342e-06,
"loss": 1.1835,
"step": 49500
},
{
"epoch": 1.92,
"learning_rate": 4.093298039667013e-06,
"loss": 1.1925,
"step": 50000
},
{
"epoch": 1.94,
"learning_rate": 3.134231020063682e-06,
"loss": 1.1509,
"step": 50500
},
{
"epoch": 1.96,
"learning_rate": 2.1751640004603525e-06,
"loss": 1.2008,
"step": 51000
},
{
"epoch": 1.98,
"learning_rate": 1.2160969808570224e-06,
"loss": 1.1643,
"step": 51500
},
{
"epoch": 1.99,
"learning_rate": 2.5702996125369244e-07,
"loss": 1.2051,
"step": 52000
},
{
"epoch": 2.0,
"step": 52134,
"total_flos": 1.0583864216911872e+16,
"train_loss": 1.3101274637649682,
"train_runtime": 7625.5065,
"train_samples_per_second": 13.674,
"train_steps_per_second": 6.837
}
],
"logging_steps": 500,
"max_steps": 52134,
"num_train_epochs": 2,
"save_steps": 500,
"total_flos": 1.0583864216911872e+16,
"trial_name": null,
"trial_params": null
}
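
The log above is the standard `trainer_state.json` the Hugging Face `Trainer` writes alongside a checkpoint: each `log_history` entry records the global step, the current learning rate, and the running training loss, while the final entry holds the end-of-training summary (total FLOs, average train loss, runtime, throughput). A minimal sketch of how one might inspect it, assuming the file is saved locally as `trainer_state.json` and that `matplotlib` is available (both are assumptions, not part of the original file):

```python
import json

import matplotlib.pyplot as plt

# Load the trainer state dumped at the end of training.
# The path is an assumption; point it at the checkpoint directory you actually have.
with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Keep only the periodic logging entries; the last entry is the summary record
# (it has "train_loss" and "train_runtime" but no per-step "loss" field).
log_entries = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in log_entries]
losses = [e["loss"] for e in log_entries]

# Plot the training-loss curve over the 52,134 optimizer steps (2 epochs).
plt.plot(steps, losses)
plt.xlabel("global step")
plt.ylabel("training loss")
plt.title("t5_small-qg-ctx-a training loss")
plt.savefig("loss_curve.png")
```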