{
"best_metric": 0.019987082897612984,
"best_model_checkpoint": "./checkpoints/easyrec-roberta-base",
"epoch": 1.7954432887568101,
"eval_steps": 1000,
"global_step": 29000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.030955918771669144,
"grad_norm": 37.25,
"learning_rate": 4.9484068020472185e-05,
"loss": 2.3504,
"step": 500
},
{
"epoch": 0.06191183754333829,
"grad_norm": 27.625,
"learning_rate": 4.896813604094436e-05,
"loss": 2.0019,
"step": 1000
},
{
"epoch": 0.09286775631500743,
"grad_norm": 43.75,
"learning_rate": 4.8452204061416544e-05,
"loss": 1.9156,
"step": 1500
},
{
"epoch": 0.12382367508667658,
"grad_norm": 36.75,
"learning_rate": 4.793627208188873e-05,
"loss": 1.8431,
"step": 2000
},
{
"epoch": 0.15477959385834572,
"grad_norm": 31.25,
"learning_rate": 4.74203401023609e-05,
"loss": 1.8579,
"step": 2500
},
{
"epoch": 0.18573551263001487,
"grad_norm": 27.375,
"learning_rate": 4.6904408122833086e-05,
"loss": 1.8486,
"step": 3000
},
{
"epoch": 0.216691431401684,
"grad_norm": 28.5,
"learning_rate": 4.638847614330527e-05,
"loss": 1.771,
"step": 3500
},
{
"epoch": 0.24764735017335315,
"grad_norm": 39.75,
"learning_rate": 4.587254416377745e-05,
"loss": 1.7856,
"step": 4000
},
{
"epoch": 0.2786032689450223,
"grad_norm": 31.0,
"learning_rate": 4.5356612184249634e-05,
"loss": 1.7762,
"step": 4500
},
{
"epoch": 0.30955918771669144,
"grad_norm": 50.75,
"learning_rate": 4.484068020472182e-05,
"loss": 1.7399,
"step": 5000
},
{
"epoch": 0.3405151064883606,
"grad_norm": 32.5,
"learning_rate": 4.432474822519399e-05,
"loss": 1.7679,
"step": 5500
},
{
"epoch": 0.37147102526002973,
"grad_norm": 31.625,
"learning_rate": 4.3808816245666175e-05,
"loss": 1.7481,
"step": 6000
},
{
"epoch": 0.4024269440316989,
"grad_norm": 54.0,
"learning_rate": 4.329288426613836e-05,
"loss": 1.7463,
"step": 6500
},
{
"epoch": 0.433382862803368,
"grad_norm": 40.75,
"learning_rate": 4.2776952286610534e-05,
"loss": 1.7458,
"step": 7000
},
{
"epoch": 0.46433878157503716,
"grad_norm": 46.5,
"learning_rate": 4.226102030708272e-05,
"loss": 1.7173,
"step": 7500
},
{
"epoch": 0.4952947003467063,
"grad_norm": 68.5,
"learning_rate": 4.17450883275549e-05,
"loss": 1.7502,
"step": 8000
},
{
"epoch": 0.5262506191183755,
"grad_norm": 41.25,
"learning_rate": 4.1229156348027076e-05,
"loss": 1.7286,
"step": 8500
},
{
"epoch": 0.5572065378900446,
"grad_norm": 30.25,
"learning_rate": 4.071322436849926e-05,
"loss": 1.7159,
"step": 9000
},
{
"epoch": 0.5881624566617137,
"grad_norm": 35.5,
"learning_rate": 4.019729238897144e-05,
"loss": 1.6927,
"step": 9500
},
{
"epoch": 0.6191183754333829,
"grad_norm": 29.75,
"learning_rate": 3.968136040944362e-05,
"loss": 1.7077,
"step": 10000
},
{
"epoch": 0.650074294205052,
"grad_norm": 40.5,
"learning_rate": 3.91654284299158e-05,
"loss": 1.6932,
"step": 10500
},
{
"epoch": 0.6810302129767212,
"grad_norm": 30.25,
"learning_rate": 3.864949645038798e-05,
"loss": 1.7078,
"step": 11000
},
{
"epoch": 0.7119861317483903,
"grad_norm": 31.25,
"learning_rate": 3.813356447086016e-05,
"loss": 1.7004,
"step": 11500
},
{
"epoch": 0.7429420505200595,
"grad_norm": 28.75,
"learning_rate": 3.761763249133234e-05,
"loss": 1.6726,
"step": 12000
},
{
"epoch": 0.7738979692917286,
"grad_norm": 52.0,
"learning_rate": 3.7101700511804524e-05,
"loss": 1.7026,
"step": 12500
},
{
"epoch": 0.8048538880633977,
"grad_norm": 30.375,
"learning_rate": 3.658576853227671e-05,
"loss": 1.6645,
"step": 13000
},
{
"epoch": 0.8358098068350669,
"grad_norm": 33.75,
"learning_rate": 3.606983655274889e-05,
"loss": 1.6833,
"step": 13500
},
{
"epoch": 0.866765725606736,
"grad_norm": 32.75,
"learning_rate": 3.555390457322107e-05,
"loss": 1.6909,
"step": 14000
},
{
"epoch": 0.8977216443784052,
"grad_norm": 49.0,
"learning_rate": 3.503797259369325e-05,
"loss": 1.684,
"step": 14500
},
{
"epoch": 0.9286775631500743,
"grad_norm": 52.0,
"learning_rate": 3.452204061416543e-05,
"loss": 1.681,
"step": 15000
},
{
"epoch": 0.9596334819217435,
"grad_norm": 38.0,
"learning_rate": 3.4006108634637614e-05,
"loss": 1.672,
"step": 15500
},
{
"epoch": 0.9905894006934126,
"grad_norm": 32.75,
"learning_rate": 3.349017665510979e-05,
"loss": 1.6553,
"step": 16000
},
{
"epoch": 1.0215453194650816,
"grad_norm": 35.75,
"learning_rate": 3.297424467558197e-05,
"loss": 1.6618,
"step": 16500
},
{
"epoch": 1.052501238236751,
"grad_norm": 37.5,
"learning_rate": 3.2458312696054156e-05,
"loss": 1.657,
"step": 17000
},
{
"epoch": 1.08345715700842,
"grad_norm": 30.25,
"learning_rate": 3.194238071652633e-05,
"loss": 1.6701,
"step": 17500
},
{
"epoch": 1.1144130757800892,
"grad_norm": 32.25,
"learning_rate": 3.1426448736998515e-05,
"loss": 1.6779,
"step": 18000
},
{
"epoch": 1.1453689945517582,
"grad_norm": 37.5,
"learning_rate": 3.09105167574707e-05,
"loss": 1.6635,
"step": 18500
},
{
"epoch": 1.1763249133234275,
"grad_norm": 30.75,
"learning_rate": 3.0394584777942874e-05,
"loss": 1.664,
"step": 19000
},
{
"epoch": 1.2072808320950965,
"grad_norm": 55.75,
"learning_rate": 2.9878652798415056e-05,
"loss": 1.6398,
"step": 19500
},
{
"epoch": 1.2382367508667658,
"grad_norm": 51.0,
"learning_rate": 2.936272081888724e-05,
"loss": 1.6424,
"step": 20000
},
{
"epoch": 1.2691926696384348,
"grad_norm": 32.0,
"learning_rate": 2.884678883935942e-05,
"loss": 1.6251,
"step": 20500
},
{
"epoch": 1.300148588410104,
"grad_norm": 31.5,
"learning_rate": 2.83308568598316e-05,
"loss": 1.6295,
"step": 21000
},
{
"epoch": 1.331104507181773,
"grad_norm": 38.75,
"learning_rate": 2.7814924880303784e-05,
"loss": 1.6628,
"step": 21500
},
{
"epoch": 1.3620604259534423,
"grad_norm": 54.5,
"learning_rate": 2.729899290077596e-05,
"loss": 1.6579,
"step": 22000
},
{
"epoch": 1.3930163447251114,
"grad_norm": 64.5,
"learning_rate": 2.6783060921248143e-05,
"loss": 1.6185,
"step": 22500
},
{
"epoch": 1.4239722634967806,
"grad_norm": 30.875,
"learning_rate": 2.6267128941720326e-05,
"loss": 1.6521,
"step": 23000
},
{
"epoch": 1.4549281822684497,
"grad_norm": 34.75,
"learning_rate": 2.57511969621925e-05,
"loss": 1.6372,
"step": 23500
},
{
"epoch": 1.485884101040119,
"grad_norm": 48.5,
"learning_rate": 2.5235264982664684e-05,
"loss": 1.6597,
"step": 24000
},
{
"epoch": 1.516840019811788,
"grad_norm": 31.75,
"learning_rate": 2.4719333003136867e-05,
"loss": 1.6437,
"step": 24500
},
{
"epoch": 1.5477959385834572,
"grad_norm": 18.75,
"learning_rate": 2.420340102360905e-05,
"loss": 1.6191,
"step": 25000
},
{
"epoch": 1.5787518573551265,
"grad_norm": 26.5,
"learning_rate": 2.368746904408123e-05,
"loss": 1.6564,
"step": 25500
},
{
"epoch": 1.6097077761267955,
"grad_norm": 37.5,
"learning_rate": 2.3171537064553412e-05,
"loss": 1.6498,
"step": 26000
},
{
"epoch": 1.6406636948984645,
"grad_norm": 42.75,
"learning_rate": 2.265560508502559e-05,
"loss": 1.6201,
"step": 26500
},
{
"epoch": 1.6716196136701336,
"grad_norm": 35.5,
"learning_rate": 2.213967310549777e-05,
"loss": 1.6459,
"step": 27000
},
{
"epoch": 1.7025755324418028,
"grad_norm": 27.875,
"learning_rate": 2.1623741125969954e-05,
"loss": 1.6636,
"step": 27500
},
{
"epoch": 1.733531451213472,
"grad_norm": 33.75,
"learning_rate": 2.1107809146442133e-05,
"loss": 1.6359,
"step": 28000
},
{
"epoch": 1.764487369985141,
"grad_norm": 34.75,
"learning_rate": 2.0591877166914312e-05,
"loss": 1.6679,
"step": 28500
},
{
"epoch": 1.7954432887568101,
"grad_norm": 27.0,
"learning_rate": 2.0075945187386495e-05,
"loss": 1.6409,
"step": 29000
}
],
"logging_steps": 500,
"max_steps": 48456,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}