{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.013522650439486139,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0001352265043948614,
"grad_norm": 0.13028183579444885,
"learning_rate": 1e-05,
"loss": 10.367,
"step": 1
},
{
"epoch": 0.0001352265043948614,
"eval_loss": 10.374638557434082,
"eval_runtime": 39.838,
"eval_samples_per_second": 156.333,
"eval_steps_per_second": 19.554,
"step": 1
},
{
"epoch": 0.0002704530087897228,
"grad_norm": 0.10174716264009476,
"learning_rate": 2e-05,
"loss": 10.3809,
"step": 2
},
{
"epoch": 0.00040567951318458417,
"grad_norm": 0.0866115540266037,
"learning_rate": 3e-05,
"loss": 10.373,
"step": 3
},
{
"epoch": 0.0005409060175794456,
"grad_norm": 0.06329571455717087,
"learning_rate": 4e-05,
"loss": 10.3767,
"step": 4
},
{
"epoch": 0.000676132521974307,
"grad_norm": 0.10207287222146988,
"learning_rate": 5e-05,
"loss": 10.3819,
"step": 5
},
{
"epoch": 0.0008113590263691683,
"grad_norm": 0.14628176391124725,
"learning_rate": 6e-05,
"loss": 10.3764,
"step": 6
},
{
"epoch": 0.0009465855307640297,
"grad_norm": 0.06459755450487137,
"learning_rate": 7e-05,
"loss": 10.3845,
"step": 7
},
{
"epoch": 0.0010818120351588911,
"grad_norm": 0.06547542661428452,
"learning_rate": 8e-05,
"loss": 10.3759,
"step": 8
},
{
"epoch": 0.0012170385395537525,
"grad_norm": 0.10897202789783478,
"learning_rate": 9e-05,
"loss": 10.378,
"step": 9
},
{
"epoch": 0.0012170385395537525,
"eval_loss": 10.37391185760498,
"eval_runtime": 41.1364,
"eval_samples_per_second": 151.399,
"eval_steps_per_second": 18.937,
"step": 9
},
{
"epoch": 0.001352265043948614,
"grad_norm": 0.1761985719203949,
"learning_rate": 0.0001,
"loss": 10.3579,
"step": 10
},
{
"epoch": 0.0014874915483434753,
"grad_norm": 0.0966603010892868,
"learning_rate": 9.99695413509548e-05,
"loss": 10.3665,
"step": 11
},
{
"epoch": 0.0016227180527383367,
"grad_norm": 0.11158464848995209,
"learning_rate": 9.987820251299122e-05,
"loss": 10.3696,
"step": 12
},
{
"epoch": 0.001757944557133198,
"grad_norm": 0.14970263838768005,
"learning_rate": 9.972609476841367e-05,
"loss": 10.37,
"step": 13
},
{
"epoch": 0.0018931710615280595,
"grad_norm": 0.10012516379356384,
"learning_rate": 9.951340343707852e-05,
"loss": 10.3647,
"step": 14
},
{
"epoch": 0.002028397565922921,
"grad_norm": 0.1887078732252121,
"learning_rate": 9.924038765061042e-05,
"loss": 10.3722,
"step": 15
},
{
"epoch": 0.0021636240703177823,
"grad_norm": 0.1151905506849289,
"learning_rate": 9.890738003669029e-05,
"loss": 10.3691,
"step": 16
},
{
"epoch": 0.0022988505747126436,
"grad_norm": 0.08883685618638992,
"learning_rate": 9.851478631379982e-05,
"loss": 10.3667,
"step": 17
},
{
"epoch": 0.002434077079107505,
"grad_norm": 0.09848842024803162,
"learning_rate": 9.806308479691595e-05,
"loss": 10.3694,
"step": 18
},
{
"epoch": 0.002434077079107505,
"eval_loss": 10.371732711791992,
"eval_runtime": 40.9155,
"eval_samples_per_second": 152.216,
"eval_steps_per_second": 19.039,
"step": 18
},
{
"epoch": 0.0025693035835023664,
"grad_norm": 0.19662371277809143,
"learning_rate": 9.755282581475769e-05,
"loss": 10.3869,
"step": 19
},
{
"epoch": 0.002704530087897228,
"grad_norm": 0.12802760303020477,
"learning_rate": 9.698463103929542e-05,
"loss": 10.3803,
"step": 20
},
{
"epoch": 0.002839756592292089,
"grad_norm": 0.15228529274463654,
"learning_rate": 9.635919272833938e-05,
"loss": 10.3695,
"step": 21
},
{
"epoch": 0.0029749830966869506,
"grad_norm": 0.09475445747375488,
"learning_rate": 9.567727288213005e-05,
"loss": 10.3693,
"step": 22
},
{
"epoch": 0.003110209601081812,
"grad_norm": 0.10489476472139359,
"learning_rate": 9.493970231495835e-05,
"loss": 10.364,
"step": 23
},
{
"epoch": 0.0032454361054766734,
"grad_norm": 0.03964424505829811,
"learning_rate": 9.414737964294636e-05,
"loss": 10.3695,
"step": 24
},
{
"epoch": 0.0033806626098715348,
"grad_norm": 0.09453272819519043,
"learning_rate": 9.330127018922194e-05,
"loss": 10.3699,
"step": 25
},
{
"epoch": 0.003515889114266396,
"grad_norm": 0.07760817557573318,
"learning_rate": 9.24024048078213e-05,
"loss": 10.3778,
"step": 26
},
{
"epoch": 0.0036511156186612576,
"grad_norm": 0.12330657988786697,
"learning_rate": 9.145187862775209e-05,
"loss": 10.3614,
"step": 27
},
{
"epoch": 0.0036511156186612576,
"eval_loss": 10.369364738464355,
"eval_runtime": 40.853,
"eval_samples_per_second": 152.449,
"eval_steps_per_second": 19.068,
"step": 27
},
{
"epoch": 0.003786342123056119,
"grad_norm": 0.2079092264175415,
"learning_rate": 9.045084971874738e-05,
"loss": 10.3696,
"step": 28
},
{
"epoch": 0.00392156862745098,
"grad_norm": 0.19546237587928772,
"learning_rate": 8.940053768033609e-05,
"loss": 10.3679,
"step": 29
},
{
"epoch": 0.004056795131845842,
"grad_norm": 0.1694408506155014,
"learning_rate": 8.83022221559489e-05,
"loss": 10.3609,
"step": 30
},
{
"epoch": 0.004192021636240703,
"grad_norm": 0.07508226484060287,
"learning_rate": 8.715724127386972e-05,
"loss": 10.3676,
"step": 31
},
{
"epoch": 0.0043272481406355645,
"grad_norm": 0.12029743939638138,
"learning_rate": 8.596699001693255e-05,
"loss": 10.3714,
"step": 32
},
{
"epoch": 0.004462474645030426,
"grad_norm": 0.16662153601646423,
"learning_rate": 8.473291852294987e-05,
"loss": 10.3706,
"step": 33
},
{
"epoch": 0.004597701149425287,
"grad_norm": 0.06308508664369583,
"learning_rate": 8.345653031794292e-05,
"loss": 10.3701,
"step": 34
},
{
"epoch": 0.004732927653820149,
"grad_norm": 0.11743340641260147,
"learning_rate": 8.213938048432697e-05,
"loss": 10.3745,
"step": 35
},
{
"epoch": 0.00486815415821501,
"grad_norm": 0.09249456971883774,
"learning_rate": 8.07830737662829e-05,
"loss": 10.3673,
"step": 36
},
{
"epoch": 0.00486815415821501,
"eval_loss": 10.366896629333496,
"eval_runtime": 41.0089,
"eval_samples_per_second": 151.869,
"eval_steps_per_second": 18.996,
"step": 36
},
{
"epoch": 0.0050033806626098715,
"grad_norm": 0.24108552932739258,
"learning_rate": 7.938926261462366e-05,
"loss": 10.3796,
"step": 37
},
{
"epoch": 0.005138607167004733,
"grad_norm": 0.11644657701253891,
"learning_rate": 7.795964517353735e-05,
"loss": 10.377,
"step": 38
},
{
"epoch": 0.005273833671399594,
"grad_norm": 0.0865534096956253,
"learning_rate": 7.649596321166024e-05,
"loss": 10.3657,
"step": 39
},
{
"epoch": 0.005409060175794456,
"grad_norm": 0.21966713666915894,
"learning_rate": 7.500000000000001e-05,
"loss": 10.3647,
"step": 40
},
{
"epoch": 0.005544286680189317,
"grad_norm": 0.0750628337264061,
"learning_rate": 7.347357813929454e-05,
"loss": 10.3614,
"step": 41
},
{
"epoch": 0.005679513184584178,
"grad_norm": 0.14090754091739655,
"learning_rate": 7.191855733945387e-05,
"loss": 10.3662,
"step": 42
},
{
"epoch": 0.00581473968897904,
"grad_norm": 0.10155241936445236,
"learning_rate": 7.033683215379002e-05,
"loss": 10.3634,
"step": 43
},
{
"epoch": 0.005949966193373901,
"grad_norm": 0.0886780321598053,
"learning_rate": 6.873032967079561e-05,
"loss": 10.3604,
"step": 44
},
{
"epoch": 0.006085192697768763,
"grad_norm": 0.2659003436565399,
"learning_rate": 6.710100716628344e-05,
"loss": 10.3539,
"step": 45
},
{
"epoch": 0.006085192697768763,
"eval_loss": 10.364371299743652,
"eval_runtime": 41.0033,
"eval_samples_per_second": 151.89,
"eval_steps_per_second": 18.998,
"step": 45
},
{
"epoch": 0.006220419202163624,
"grad_norm": 0.18210439383983612,
"learning_rate": 6.545084971874738e-05,
"loss": 10.3612,
"step": 46
},
{
"epoch": 0.006355645706558485,
"grad_norm": 0.0754951685667038,
"learning_rate": 6.378186779084995e-05,
"loss": 10.3693,
"step": 47
},
{
"epoch": 0.006490872210953347,
"grad_norm": 0.08908693492412567,
"learning_rate": 6.209609477998338e-05,
"loss": 10.3614,
"step": 48
},
{
"epoch": 0.006626098715348208,
"grad_norm": 0.10534809529781342,
"learning_rate": 6.0395584540887963e-05,
"loss": 10.372,
"step": 49
},
{
"epoch": 0.0067613252197430695,
"grad_norm": 0.11808033287525177,
"learning_rate": 5.868240888334653e-05,
"loss": 10.366,
"step": 50
},
{
"epoch": 0.006896551724137931,
"grad_norm": 0.2752968370914459,
"learning_rate": 5.695865504800327e-05,
"loss": 10.3447,
"step": 51
},
{
"epoch": 0.007031778228532792,
"grad_norm": 0.14523477852344513,
"learning_rate": 5.522642316338268e-05,
"loss": 10.3655,
"step": 52
},
{
"epoch": 0.007167004732927654,
"grad_norm": 0.1733541488647461,
"learning_rate": 5.348782368720626e-05,
"loss": 10.3647,
"step": 53
},
{
"epoch": 0.007302231237322515,
"grad_norm": 0.1748340129852295,
"learning_rate": 5.174497483512506e-05,
"loss": 10.3705,
"step": 54
},
{
"epoch": 0.007302231237322515,
"eval_loss": 10.36203384399414,
"eval_runtime": 40.7225,
"eval_samples_per_second": 152.938,
"eval_steps_per_second": 19.129,
"step": 54
},
{
"epoch": 0.0074374577417173765,
"grad_norm": 0.12748098373413086,
"learning_rate": 5e-05,
"loss": 10.3647,
"step": 55
},
{
"epoch": 0.007572684246112238,
"grad_norm": 0.23120729625225067,
"learning_rate": 4.825502516487497e-05,
"loss": 10.3612,
"step": 56
},
{
"epoch": 0.007707910750507099,
"grad_norm": 0.08134011179208755,
"learning_rate": 4.6512176312793736e-05,
"loss": 10.3731,
"step": 57
},
{
"epoch": 0.00784313725490196,
"grad_norm": 0.2719850540161133,
"learning_rate": 4.477357683661734e-05,
"loss": 10.3667,
"step": 58
},
{
"epoch": 0.007978363759296822,
"grad_norm": 0.1496485471725464,
"learning_rate": 4.3041344951996746e-05,
"loss": 10.3562,
"step": 59
},
{
"epoch": 0.008113590263691683,
"grad_norm": 0.16650433838367462,
"learning_rate": 4.131759111665349e-05,
"loss": 10.3546,
"step": 60
},
{
"epoch": 0.008248816768086545,
"grad_norm": 0.15551519393920898,
"learning_rate": 3.960441545911204e-05,
"loss": 10.3711,
"step": 61
},
{
"epoch": 0.008384043272481406,
"grad_norm": 0.1664876788854599,
"learning_rate": 3.790390522001662e-05,
"loss": 10.3558,
"step": 62
},
{
"epoch": 0.008519269776876268,
"grad_norm": 0.22050045430660248,
"learning_rate": 3.6218132209150045e-05,
"loss": 10.3618,
"step": 63
},
{
"epoch": 0.008519269776876268,
"eval_loss": 10.359942436218262,
"eval_runtime": 40.9328,
"eval_samples_per_second": 152.152,
"eval_steps_per_second": 19.031,
"step": 63
},
{
"epoch": 0.008654496281271129,
"grad_norm": 0.19365984201431274,
"learning_rate": 3.4549150281252636e-05,
"loss": 10.3598,
"step": 64
},
{
"epoch": 0.00878972278566599,
"grad_norm": 0.13088227808475494,
"learning_rate": 3.289899283371657e-05,
"loss": 10.3575,
"step": 65
},
{
"epoch": 0.008924949290060852,
"grad_norm": 0.3167434334754944,
"learning_rate": 3.12696703292044e-05,
"loss": 10.3557,
"step": 66
},
{
"epoch": 0.009060175794455713,
"grad_norm": 0.12147484719753265,
"learning_rate": 2.9663167846209998e-05,
"loss": 10.3661,
"step": 67
},
{
"epoch": 0.009195402298850575,
"grad_norm": 0.36501771211624146,
"learning_rate": 2.8081442660546125e-05,
"loss": 10.3538,
"step": 68
},
{
"epoch": 0.009330628803245436,
"grad_norm": 0.24930152297019958,
"learning_rate": 2.6526421860705473e-05,
"loss": 10.3536,
"step": 69
},
{
"epoch": 0.009465855307640297,
"grad_norm": 0.1812044084072113,
"learning_rate": 2.500000000000001e-05,
"loss": 10.3599,
"step": 70
},
{
"epoch": 0.009601081812035159,
"grad_norm": 0.2339591383934021,
"learning_rate": 2.350403678833976e-05,
"loss": 10.3605,
"step": 71
},
{
"epoch": 0.00973630831643002,
"grad_norm": 0.23646140098571777,
"learning_rate": 2.2040354826462668e-05,
"loss": 10.3559,
"step": 72
},
{
"epoch": 0.00973630831643002,
"eval_loss": 10.358288764953613,
"eval_runtime": 40.9322,
"eval_samples_per_second": 152.154,
"eval_steps_per_second": 19.031,
"step": 72
},
{
"epoch": 0.009871534820824882,
"grad_norm": 0.15088936686515808,
"learning_rate": 2.061073738537635e-05,
"loss": 10.3794,
"step": 73
},
{
"epoch": 0.010006761325219743,
"grad_norm": 0.15647698938846588,
"learning_rate": 1.9216926233717085e-05,
"loss": 10.3718,
"step": 74
},
{
"epoch": 0.010141987829614604,
"grad_norm": 0.3020474910736084,
"learning_rate": 1.7860619515673033e-05,
"loss": 10.3613,
"step": 75
},
{
"epoch": 0.010277214334009466,
"grad_norm": 0.342067152261734,
"learning_rate": 1.6543469682057106e-05,
"loss": 10.3582,
"step": 76
},
{
"epoch": 0.010412440838404327,
"grad_norm": 0.3511400818824768,
"learning_rate": 1.526708147705013e-05,
"loss": 10.3546,
"step": 77
},
{
"epoch": 0.010547667342799188,
"grad_norm": 0.2721881568431854,
"learning_rate": 1.4033009983067452e-05,
"loss": 10.3608,
"step": 78
},
{
"epoch": 0.01068289384719405,
"grad_norm": 0.3455629348754883,
"learning_rate": 1.2842758726130283e-05,
"loss": 10.3484,
"step": 79
},
{
"epoch": 0.010818120351588911,
"grad_norm": 0.327305406332016,
"learning_rate": 1.1697777844051105e-05,
"loss": 10.3572,
"step": 80
},
{
"epoch": 0.010953346855983773,
"grad_norm": 0.18577629327774048,
"learning_rate": 1.0599462319663905e-05,
"loss": 10.3567,
"step": 81
},
{
"epoch": 0.010953346855983773,
"eval_loss": 10.357199668884277,
"eval_runtime": 40.778,
"eval_samples_per_second": 152.729,
"eval_steps_per_second": 19.103,
"step": 81
},
{
"epoch": 0.011088573360378634,
"grad_norm": 0.11704239249229431,
"learning_rate": 9.549150281252633e-06,
"loss": 10.3635,
"step": 82
},
{
"epoch": 0.011223799864773495,
"grad_norm": 0.19233828783035278,
"learning_rate": 8.548121372247918e-06,
"loss": 10.3677,
"step": 83
},
{
"epoch": 0.011359026369168357,
"grad_norm": 0.07823574542999268,
"learning_rate": 7.597595192178702e-06,
"loss": 10.3695,
"step": 84
},
{
"epoch": 0.011494252873563218,
"grad_norm": 0.13713940978050232,
"learning_rate": 6.698729810778065e-06,
"loss": 10.3596,
"step": 85
},
{
"epoch": 0.01162947937795808,
"grad_norm": 0.18941013514995575,
"learning_rate": 5.852620357053651e-06,
"loss": 10.3583,
"step": 86
},
{
"epoch": 0.011764705882352941,
"grad_norm": 0.30075106024742126,
"learning_rate": 5.060297685041659e-06,
"loss": 10.3597,
"step": 87
},
{
"epoch": 0.011899932386747802,
"grad_norm": 0.10994397848844528,
"learning_rate": 4.322727117869951e-06,
"loss": 10.3648,
"step": 88
},
{
"epoch": 0.012035158891142664,
"grad_norm": 0.13853776454925537,
"learning_rate": 3.6408072716606346e-06,
"loss": 10.3724,
"step": 89
},
{
"epoch": 0.012170385395537525,
"grad_norm": 0.191994771361351,
"learning_rate": 3.0153689607045845e-06,
"loss": 10.3556,
"step": 90
},
{
"epoch": 0.012170385395537525,
"eval_loss": 10.356754302978516,
"eval_runtime": 40.9895,
"eval_samples_per_second": 151.941,
"eval_steps_per_second": 19.005,
"step": 90
},
{
"epoch": 0.012305611899932387,
"grad_norm": 0.10114133358001709,
"learning_rate": 2.4471741852423237e-06,
"loss": 10.3638,
"step": 91
},
{
"epoch": 0.012440838404327248,
"grad_norm": 0.22643674910068512,
"learning_rate": 1.9369152030840556e-06,
"loss": 10.3453,
"step": 92
},
{
"epoch": 0.01257606490872211,
"grad_norm": 0.18599919974803925,
"learning_rate": 1.4852136862001764e-06,
"loss": 10.3606,
"step": 93
},
{
"epoch": 0.01271129141311697,
"grad_norm": 0.4384092092514038,
"learning_rate": 1.0926199633097157e-06,
"loss": 10.3458,
"step": 94
},
{
"epoch": 0.012846517917511832,
"grad_norm": 0.1412304788827896,
"learning_rate": 7.596123493895991e-07,
"loss": 10.3518,
"step": 95
},
{
"epoch": 0.012981744421906694,
"grad_norm": 0.20670315623283386,
"learning_rate": 4.865965629214819e-07,
"loss": 10.3595,
"step": 96
},
{
"epoch": 0.013116970926301555,
"grad_norm": 0.15490083396434784,
"learning_rate": 2.7390523158633554e-07,
"loss": 10.3575,
"step": 97
},
{
"epoch": 0.013252197430696416,
"grad_norm": 0.19258053600788116,
"learning_rate": 1.2179748700879012e-07,
"loss": 10.349,
"step": 98
},
{
"epoch": 0.013387423935091278,
"grad_norm": 0.15515723824501038,
"learning_rate": 3.04586490452119e-08,
"loss": 10.3637,
"step": 99
},
{
"epoch": 0.013387423935091278,
"eval_loss": 10.356693267822266,
"eval_runtime": 40.9088,
"eval_samples_per_second": 152.241,
"eval_steps_per_second": 19.042,
"step": 99
},
{
"epoch": 0.013522650439486139,
"grad_norm": 0.2686675488948822,
"learning_rate": 0.0,
"loss": 10.3569,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5230244659200.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}