eeeebbb2: Training in progress, step 46, checkpoint (commit b306e45, verified)
{
"best_metric": 0.29814502596855164,
"best_model_checkpoint": "miner_id_24/checkpoint-25",
"epoch": 3.041322314049587,
"eval_steps": 25,
"global_step": 46,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06611570247933884,
"grad_norm": 1.2821037769317627,
"learning_rate": 5e-05,
"loss": 0.5949,
"step": 1
},
{
"epoch": 0.06611570247933884,
"eval_loss": 0.8276475667953491,
"eval_runtime": 4.7766,
"eval_samples_per_second": 10.468,
"eval_steps_per_second": 2.722,
"step": 1
},
{
"epoch": 0.1322314049586777,
"grad_norm": 1.515925407409668,
"learning_rate": 0.0001,
"loss": 0.6771,
"step": 2
},
{
"epoch": 0.19834710743801653,
"grad_norm": 1.3467015027999878,
"learning_rate": 9.98853451574614e-05,
"loss": 0.6128,
"step": 3
},
{
"epoch": 0.2644628099173554,
"grad_norm": 0.8077853918075562,
"learning_rate": 9.954196488464198e-05,
"loss": 0.4131,
"step": 4
},
{
"epoch": 0.3305785123966942,
"grad_norm": 0.7690338492393494,
"learning_rate": 9.897160896870218e-05,
"loss": 0.3669,
"step": 5
},
{
"epoch": 0.39669421487603307,
"grad_norm": 0.9872344136238098,
"learning_rate": 9.81771838126524e-05,
"loss": 0.3699,
"step": 6
},
{
"epoch": 0.4628099173553719,
"grad_norm": 0.543779730796814,
"learning_rate": 9.716273762498929e-05,
"loss": 0.3246,
"step": 7
},
{
"epoch": 0.5289256198347108,
"grad_norm": 0.5106104016304016,
"learning_rate": 9.593343979095333e-05,
"loss": 0.3045,
"step": 8
},
{
"epoch": 0.5950413223140496,
"grad_norm": 0.4468647539615631,
"learning_rate": 9.449555453052651e-05,
"loss": 0.3354,
"step": 9
},
{
"epoch": 0.6611570247933884,
"grad_norm": 0.48299410939216614,
"learning_rate": 9.285640897740315e-05,
"loss": 0.3475,
"step": 10
},
{
"epoch": 0.7272727272727273,
"grad_norm": 0.5404636263847351,
"learning_rate": 9.102435584159622e-05,
"loss": 0.3114,
"step": 11
},
{
"epoch": 0.7933884297520661,
"grad_norm": 0.3328273296356201,
"learning_rate": 8.900873084594164e-05,
"loss": 0.2682,
"step": 12
},
{
"epoch": 0.859504132231405,
"grad_norm": 0.36546558141708374,
"learning_rate": 8.681980515339464e-05,
"loss": 0.2919,
"step": 13
},
{
"epoch": 0.9256198347107438,
"grad_norm": 0.38495934009552,
"learning_rate": 8.446873302753784e-05,
"loss": 0.2714,
"step": 14
},
{
"epoch": 0.9917355371900827,
"grad_norm": 0.4691920876502991,
"learning_rate": 8.19674949930106e-05,
"loss": 0.2495,
"step": 15
},
{
"epoch": 1.0578512396694215,
"grad_norm": 0.6476026177406311,
"learning_rate": 7.93288367855019e-05,
"loss": 0.3272,
"step": 16
},
{
"epoch": 1.1239669421487604,
"grad_norm": 0.3131757080554962,
"learning_rate": 7.656620440240257e-05,
"loss": 0.2197,
"step": 17
},
{
"epoch": 1.1900826446280992,
"grad_norm": 0.3334848880767822,
"learning_rate": 7.369367558508489e-05,
"loss": 0.2098,
"step": 18
},
{
"epoch": 1.256198347107438,
"grad_norm": 0.3057920038700104,
"learning_rate": 7.072588808195944e-05,
"loss": 0.1671,
"step": 19
},
{
"epoch": 1.322314049586777,
"grad_norm": 0.31117555499076843,
"learning_rate": 6.767796505786435e-05,
"loss": 0.2383,
"step": 20
},
{
"epoch": 1.3884297520661157,
"grad_norm": 0.32219311594963074,
"learning_rate": 6.456543802988395e-05,
"loss": 0.1966,
"step": 21
},
{
"epoch": 1.4545454545454546,
"grad_norm": 0.3343944847583771,
"learning_rate": 6.140416772229784e-05,
"loss": 0.1738,
"step": 22
},
{
"epoch": 1.5206611570247934,
"grad_norm": 0.30831432342529297,
"learning_rate": 5.821026324396546e-05,
"loss": 0.1812,
"step": 23
},
{
"epoch": 1.5867768595041323,
"grad_norm": 0.3504087030887604,
"learning_rate": 5.500000000000001e-05,
"loss": 0.217,
"step": 24
},
{
"epoch": 1.6528925619834711,
"grad_norm": 0.3314357101917267,
"learning_rate": 5.178973675603456e-05,
"loss": 0.2003,
"step": 25
},
{
"epoch": 1.6528925619834711,
"eval_loss": 0.29814502596855164,
"eval_runtime": 4.8436,
"eval_samples_per_second": 10.323,
"eval_steps_per_second": 2.684,
"step": 25
},
{
"epoch": 1.71900826446281,
"grad_norm": 0.38563114404678345,
"learning_rate": 4.859583227770218e-05,
"loss": 0.1666,
"step": 26
},
{
"epoch": 1.7851239669421488,
"grad_norm": 0.32108181715011597,
"learning_rate": 4.543456197011605e-05,
"loss": 0.185,
"step": 27
},
{
"epoch": 1.8512396694214877,
"grad_norm": 0.3681396245956421,
"learning_rate": 4.232203494213567e-05,
"loss": 0.2199,
"step": 28
},
{
"epoch": 1.9173553719008265,
"grad_norm": 0.37222185730934143,
"learning_rate": 3.927411191804058e-05,
"loss": 0.2032,
"step": 29
},
{
"epoch": 1.9834710743801653,
"grad_norm": 0.4296535551548004,
"learning_rate": 3.630632441491512e-05,
"loss": 0.1851,
"step": 30
},
{
"epoch": 2.049586776859504,
"grad_norm": 0.8122077584266663,
"learning_rate": 3.343379559759746e-05,
"loss": 0.2605,
"step": 31
},
{
"epoch": 2.115702479338843,
"grad_norm": 0.29376327991485596,
"learning_rate": 3.067116321449813e-05,
"loss": 0.1641,
"step": 32
},
{
"epoch": 2.1818181818181817,
"grad_norm": 0.3262518048286438,
"learning_rate": 2.803250500698939e-05,
"loss": 0.1464,
"step": 33
},
{
"epoch": 2.2479338842975207,
"grad_norm": 0.2500488758087158,
"learning_rate": 2.5531266972462177e-05,
"loss": 0.0992,
"step": 34
},
{
"epoch": 2.3140495867768593,
"grad_norm": 0.3410912752151489,
"learning_rate": 2.3180194846605367e-05,
"loss": 0.1822,
"step": 35
},
{
"epoch": 2.3801652892561984,
"grad_norm": 0.28179067373275757,
"learning_rate": 2.0991269154058385e-05,
"loss": 0.1224,
"step": 36
},
{
"epoch": 2.446280991735537,
"grad_norm": 0.30862489342689514,
"learning_rate": 1.897564415840379e-05,
"loss": 0.1116,
"step": 37
},
{
"epoch": 2.512396694214876,
"grad_norm": 0.2770218253135681,
"learning_rate": 1.7143591022596845e-05,
"loss": 0.1074,
"step": 38
},
{
"epoch": 2.5785123966942147,
"grad_norm": 0.3372029662132263,
"learning_rate": 1.5504445469473496e-05,
"loss": 0.1526,
"step": 39
},
{
"epoch": 2.644628099173554,
"grad_norm": 0.3446227014064789,
"learning_rate": 1.4066560209046673e-05,
"loss": 0.1372,
"step": 40
},
{
"epoch": 2.7107438016528924,
"grad_norm": 0.34640777111053467,
"learning_rate": 1.2837262375010731e-05,
"loss": 0.1075,
"step": 41
},
{
"epoch": 2.7768595041322315,
"grad_norm": 0.30039989948272705,
"learning_rate": 1.1822816187347623e-05,
"loss": 0.1153,
"step": 42
},
{
"epoch": 2.84297520661157,
"grad_norm": 0.34811943769454956,
"learning_rate": 1.1028391031297826e-05,
"loss": 0.1557,
"step": 43
},
{
"epoch": 2.909090909090909,
"grad_norm": 0.34190842509269714,
"learning_rate": 1.0458035115358032e-05,
"loss": 0.1301,
"step": 44
},
{
"epoch": 2.975206611570248,
"grad_norm": 0.3699391782283783,
"learning_rate": 1.0114654842538593e-05,
"loss": 0.0992,
"step": 45
},
{
"epoch": 3.041322314049587,
"grad_norm": 0.8737526535987854,
"learning_rate": 1e-05,
"loss": 0.2512,
"step": 46
}
],
"logging_steps": 1,
"max_steps": 46,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.6805209969105306e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
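
For reference, a minimal sketch of how this checkpoint state can be inspected programmatically, assuming the JSON above is saved locally as trainer_state.json (an illustrative path) and plain Python 3 with only the built-in json module:

import json

# Load the trainer state emitted for this checkpoint.
with open("trainer_state.json") as f:  # illustrative path; adjust to where the file lives
    state = json.load(f)

print("best eval_loss:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# log_history interleaves training entries (keyed by "loss") and
# evaluation entries (keyed by "eval_loss").
train_entries = [e for e in state["log_history"] if "loss" in e]
eval_entries = [e for e in state["log_history"] if "eval_loss" in e]

for e in eval_entries:
    print(f"step {e['step']}: eval_loss={e['eval_loss']:.4f}")

Run against the state above, this would report miner_id_24/checkpoint-25 as the best checkpoint, with an eval_loss of about 0.2981 at step 25.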