vit-base-beans / trainer_state.json
{
"best_metric": 1.0323867797851562,
"best_model_checkpoint": "./beans_outputs/checkpoint-650",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 650,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07692307692307693,
"grad_norm": 2.1331980228424072,
"learning_rate": 1.9692307692307696e-05,
"loss": 1.1239,
"step": 10
},
{
"epoch": 0.15384615384615385,
"grad_norm": 1.7910937070846558,
"learning_rate": 1.9384615384615386e-05,
"loss": 1.1221,
"step": 20
},
{
"epoch": 0.23076923076923078,
"grad_norm": 2.01678729057312,
"learning_rate": 1.907692307692308e-05,
"loss": 1.1164,
"step": 30
},
{
"epoch": 0.3076923076923077,
"grad_norm": 2.4709253311157227,
"learning_rate": 1.876923076923077e-05,
"loss": 1.1046,
"step": 40
},
{
"epoch": 0.38461538461538464,
"grad_norm": 1.6879830360412598,
"learning_rate": 1.8461538461538465e-05,
"loss": 1.1084,
"step": 50
},
{
"epoch": 0.46153846153846156,
"grad_norm": 1.4891974925994873,
"learning_rate": 1.8153846153846155e-05,
"loss": 1.1047,
"step": 60
},
{
"epoch": 0.5384615384615384,
"grad_norm": 2.6175765991210938,
"learning_rate": 1.784615384615385e-05,
"loss": 1.1033,
"step": 70
},
{
"epoch": 0.6153846153846154,
"grad_norm": 2.685450792312622,
"learning_rate": 1.753846153846154e-05,
"loss": 1.0914,
"step": 80
},
{
"epoch": 0.6923076923076923,
"grad_norm": 2.0266225337982178,
"learning_rate": 1.7230769230769234e-05,
"loss": 1.095,
"step": 90
},
{
"epoch": 0.7692307692307693,
"grad_norm": 2.4788904190063477,
"learning_rate": 1.6923076923076924e-05,
"loss": 1.0926,
"step": 100
},
{
"epoch": 0.8461538461538461,
"grad_norm": 1.8994899988174438,
"learning_rate": 1.6615384615384618e-05,
"loss": 1.0953,
"step": 110
},
{
"epoch": 0.9230769230769231,
"grad_norm": 2.286781072616577,
"learning_rate": 1.630769230769231e-05,
"loss": 1.1013,
"step": 120
},
{
"epoch": 1.0,
"grad_norm": 4.015382766723633,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.0884,
"step": 130
},
{
"epoch": 1.0,
"eval_accuracy": 0.40601503759398494,
"eval_loss": 1.090308427810669,
"eval_runtime": 1.5658,
"eval_samples_per_second": 84.941,
"eval_steps_per_second": 10.857,
"step": 130
},
{
"epoch": 1.0769230769230769,
"grad_norm": 2.888793468475342,
"learning_rate": 1.5692307692307693e-05,
"loss": 1.0808,
"step": 140
},
{
"epoch": 1.1538461538461537,
"grad_norm": 2.544851303100586,
"learning_rate": 1.5384615384615387e-05,
"loss": 1.0753,
"step": 150
},
{
"epoch": 1.2307692307692308,
"grad_norm": 1.5049091577529907,
"learning_rate": 1.5076923076923078e-05,
"loss": 1.0799,
"step": 160
},
{
"epoch": 1.3076923076923077,
"grad_norm": 2.3966455459594727,
"learning_rate": 1.4769230769230772e-05,
"loss": 1.0957,
"step": 170
},
{
"epoch": 1.3846153846153846,
"grad_norm": 2.1688547134399414,
"learning_rate": 1.4461538461538462e-05,
"loss": 1.0837,
"step": 180
},
{
"epoch": 1.4615384615384617,
"grad_norm": 2.218165159225464,
"learning_rate": 1.4153846153846156e-05,
"loss": 1.0834,
"step": 190
},
{
"epoch": 1.5384615384615383,
"grad_norm": 1.7778892517089844,
"learning_rate": 1.3846153846153847e-05,
"loss": 1.0815,
"step": 200
},
{
"epoch": 1.6153846153846154,
"grad_norm": 1.6511861085891724,
"learning_rate": 1.353846153846154e-05,
"loss": 1.0721,
"step": 210
},
{
"epoch": 1.6923076923076923,
"grad_norm": 3.5080718994140625,
"learning_rate": 1.3230769230769231e-05,
"loss": 1.075,
"step": 220
},
{
"epoch": 1.7692307692307692,
"grad_norm": 1.9993922710418701,
"learning_rate": 1.2923076923076925e-05,
"loss": 1.077,
"step": 230
},
{
"epoch": 1.8461538461538463,
"grad_norm": 2.002586841583252,
"learning_rate": 1.2615384615384616e-05,
"loss": 1.0924,
"step": 240
},
{
"epoch": 1.9230769230769231,
"grad_norm": 1.9362667798995972,
"learning_rate": 1.230769230769231e-05,
"loss": 1.0756,
"step": 250
},
{
"epoch": 2.0,
"grad_norm": 2.8682315349578857,
"learning_rate": 1.2e-05,
"loss": 1.0721,
"step": 260
},
{
"epoch": 2.0,
"eval_accuracy": 0.518796992481203,
"eval_loss": 1.068122386932373,
"eval_runtime": 1.1118,
"eval_samples_per_second": 119.625,
"eval_steps_per_second": 15.29,
"step": 260
},
{
"epoch": 2.076923076923077,
"grad_norm": 1.8234657049179077,
"learning_rate": 1.1692307692307694e-05,
"loss": 1.0784,
"step": 270
},
{
"epoch": 2.1538461538461537,
"grad_norm": 2.394803047180176,
"learning_rate": 1.1384615384615385e-05,
"loss": 1.0671,
"step": 280
},
{
"epoch": 2.230769230769231,
"grad_norm": 2.4006338119506836,
"learning_rate": 1.1076923076923079e-05,
"loss": 1.0674,
"step": 290
},
{
"epoch": 2.3076923076923075,
"grad_norm": 2.40977144241333,
"learning_rate": 1.076923076923077e-05,
"loss": 1.0545,
"step": 300
},
{
"epoch": 2.3846153846153846,
"grad_norm": 1.989514946937561,
"learning_rate": 1.0461538461538463e-05,
"loss": 1.0702,
"step": 310
},
{
"epoch": 2.4615384615384617,
"grad_norm": 1.9289751052856445,
"learning_rate": 1.0153846153846154e-05,
"loss": 1.0594,
"step": 320
},
{
"epoch": 2.5384615384615383,
"grad_norm": 1.4276608228683472,
"learning_rate": 9.846153846153848e-06,
"loss": 1.0461,
"step": 330
},
{
"epoch": 2.6153846153846154,
"grad_norm": 2.1731224060058594,
"learning_rate": 9.53846153846154e-06,
"loss": 1.0621,
"step": 340
},
{
"epoch": 2.6923076923076925,
"grad_norm": 1.9502283334732056,
"learning_rate": 9.230769230769232e-06,
"loss": 1.0639,
"step": 350
},
{
"epoch": 2.769230769230769,
"grad_norm": 1.8020390272140503,
"learning_rate": 8.923076923076925e-06,
"loss": 1.0589,
"step": 360
},
{
"epoch": 2.8461538461538463,
"grad_norm": 1.9864907264709473,
"learning_rate": 8.615384615384617e-06,
"loss": 1.0536,
"step": 370
},
{
"epoch": 2.9230769230769234,
"grad_norm": 1.8596194982528687,
"learning_rate": 8.307692307692309e-06,
"loss": 1.0449,
"step": 380
},
{
"epoch": 3.0,
"grad_norm": 4.991027355194092,
"learning_rate": 8.000000000000001e-06,
"loss": 1.0623,
"step": 390
},
{
"epoch": 3.0,
"eval_accuracy": 0.6390977443609023,
"eval_loss": 1.0459624528884888,
"eval_runtime": 1.1732,
"eval_samples_per_second": 113.364,
"eval_steps_per_second": 14.49,
"step": 390
},
{
"epoch": 3.076923076923077,
"grad_norm": 2.3061418533325195,
"learning_rate": 7.692307692307694e-06,
"loss": 1.0617,
"step": 400
},
{
"epoch": 3.1538461538461537,
"grad_norm": 1.7080086469650269,
"learning_rate": 7.384615384615386e-06,
"loss": 1.0502,
"step": 410
},
{
"epoch": 3.230769230769231,
"grad_norm": 2.0486979484558105,
"learning_rate": 7.076923076923078e-06,
"loss": 1.062,
"step": 420
},
{
"epoch": 3.3076923076923075,
"grad_norm": 2.1800215244293213,
"learning_rate": 6.76923076923077e-06,
"loss": 1.0468,
"step": 430
},
{
"epoch": 3.3846153846153846,
"grad_norm": 2.1321449279785156,
"learning_rate": 6.461538461538463e-06,
"loss": 1.0492,
"step": 440
},
{
"epoch": 3.4615384615384617,
"grad_norm": 1.7226018905639648,
"learning_rate": 6.153846153846155e-06,
"loss": 1.0571,
"step": 450
},
{
"epoch": 3.5384615384615383,
"grad_norm": 2.405348539352417,
"learning_rate": 5.846153846153847e-06,
"loss": 1.0583,
"step": 460
},
{
"epoch": 3.6153846153846154,
"grad_norm": 1.523526906967163,
"learning_rate": 5.538461538461539e-06,
"loss": 1.036,
"step": 470
},
{
"epoch": 3.6923076923076925,
"grad_norm": 2.7491352558135986,
"learning_rate": 5.230769230769232e-06,
"loss": 1.0416,
"step": 480
},
{
"epoch": 3.769230769230769,
"grad_norm": 1.5548614263534546,
"learning_rate": 4.923076923076924e-06,
"loss": 1.0353,
"step": 490
},
{
"epoch": 3.8461538461538463,
"grad_norm": 1.448201060295105,
"learning_rate": 4.615384615384616e-06,
"loss": 1.0562,
"step": 500
},
{
"epoch": 3.9230769230769234,
"grad_norm": 1.9137156009674072,
"learning_rate": 4.307692307692308e-06,
"loss": 1.0363,
"step": 510
},
{
"epoch": 4.0,
"grad_norm": 5.4221014976501465,
"learning_rate": 4.000000000000001e-06,
"loss": 1.052,
"step": 520
},
{
"epoch": 4.0,
"eval_accuracy": 0.6165413533834586,
"eval_loss": 1.040980339050293,
"eval_runtime": 1.088,
"eval_samples_per_second": 122.247,
"eval_steps_per_second": 15.626,
"step": 520
},
{
"epoch": 4.076923076923077,
"grad_norm": 2.5872342586517334,
"learning_rate": 3.692307692307693e-06,
"loss": 1.052,
"step": 530
},
{
"epoch": 4.153846153846154,
"grad_norm": 1.7396414279937744,
"learning_rate": 3.384615384615385e-06,
"loss": 1.0499,
"step": 540
},
{
"epoch": 4.230769230769231,
"grad_norm": 2.633549928665161,
"learning_rate": 3.0769230769230774e-06,
"loss": 1.0563,
"step": 550
},
{
"epoch": 4.3076923076923075,
"grad_norm": 2.2198593616485596,
"learning_rate": 2.7692307692307697e-06,
"loss": 1.0623,
"step": 560
},
{
"epoch": 4.384615384615385,
"grad_norm": 1.9397387504577637,
"learning_rate": 2.461538461538462e-06,
"loss": 1.0341,
"step": 570
},
{
"epoch": 4.461538461538462,
"grad_norm": 2.3226265907287598,
"learning_rate": 2.153846153846154e-06,
"loss": 1.0402,
"step": 580
},
{
"epoch": 4.538461538461538,
"grad_norm": 2.3199572563171387,
"learning_rate": 1.8461538461538465e-06,
"loss": 1.0364,
"step": 590
},
{
"epoch": 4.615384615384615,
"grad_norm": 1.7650482654571533,
"learning_rate": 1.5384615384615387e-06,
"loss": 1.0353,
"step": 600
},
{
"epoch": 4.6923076923076925,
"grad_norm": 3.022646427154541,
"learning_rate": 1.230769230769231e-06,
"loss": 1.0518,
"step": 610
},
{
"epoch": 4.769230769230769,
"grad_norm": 3.9652676582336426,
"learning_rate": 9.230769230769232e-07,
"loss": 1.0519,
"step": 620
},
{
"epoch": 4.846153846153846,
"grad_norm": 1.8114593029022217,
"learning_rate": 6.153846153846155e-07,
"loss": 1.0386,
"step": 630
},
{
"epoch": 4.923076923076923,
"grad_norm": 2.25443434715271,
"learning_rate": 3.0769230769230774e-07,
"loss": 1.038,
"step": 640
},
{
"epoch": 5.0,
"grad_norm": 4.6620893478393555,
"learning_rate": 0.0,
"loss": 1.0519,
"step": 650
},
{
"epoch": 5.0,
"eval_accuracy": 0.6917293233082706,
"eval_loss": 1.0323867797851562,
"eval_runtime": 1.1019,
"eval_samples_per_second": 120.702,
"eval_steps_per_second": 15.428,
"step": 650
},
{
"epoch": 5.0,
"step": 650,
"total_flos": 5.219455168198656e+16,
"train_loss": 1.068038198030912,
"train_runtime": 89.4433,
"train_samples_per_second": 57.802,
"train_steps_per_second": 7.267
}
],
"logging_steps": 10,
"max_steps": 650,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.219455168198656e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
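
The trainer state above records, among other fields, the best checkpoint ("best_model_checkpoint"), the best metric value ("best_metric", here the lowest eval_loss), and the full "log_history" of training and evaluation entries. A minimal sketch of reading those fields back with Python's standard json module, assuming the file is saved locally as beans_outputs/trainer_state.json (the path is an assumption, not part of this file):

import json

# Load the trainer state written out by transformers.Trainer.
# Path is assumed; point it at wherever trainer_state.json lives.
with open("beans_outputs/trainer_state.json") as f:
    state = json.load(f)

print("best checkpoint:", state["best_model_checkpoint"])
print("best eval_loss:", state["best_metric"])

# log_history mixes training-loss entries and evaluation entries;
# evaluation entries are the ones carrying an "eval_loss" key.
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]
for e in eval_logs:
    print(f'epoch {e["epoch"]:.0f}: '
          f'eval_loss={e["eval_loss"]:.4f}, '
          f'eval_accuracy={e["eval_accuracy"]:.3f}')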