{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.11474469305794607,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0011474469305794606,
"grad_norm": 0.38680994510650635,
"learning_rate": 1e-05,
"loss": 0.1883,
"step": 1
},
{
"epoch": 0.0011474469305794606,
"eval_loss": 0.1828075647354126,
"eval_runtime": 41.2292,
"eval_samples_per_second": 17.803,
"eval_steps_per_second": 2.231,
"step": 1
},
{
"epoch": 0.002294893861158921,
"grad_norm": 0.23735938966274261,
"learning_rate": 2e-05,
"loss": 0.1251,
"step": 2
},
{
"epoch": 0.0034423407917383822,
"grad_norm": 0.34126752614974976,
"learning_rate": 3e-05,
"loss": 0.1768,
"step": 3
},
{
"epoch": 0.004589787722317842,
"grad_norm": 0.28753432631492615,
"learning_rate": 4e-05,
"loss": 0.163,
"step": 4
},
{
"epoch": 0.005737234652897304,
"grad_norm": 0.3324357271194458,
"learning_rate": 5e-05,
"loss": 0.154,
"step": 5
},
{
"epoch": 0.0068846815834767644,
"grad_norm": 0.39487388730049133,
"learning_rate": 6e-05,
"loss": 0.1812,
"step": 6
},
{
"epoch": 0.008032128514056224,
"grad_norm": 0.30962827801704407,
"learning_rate": 7e-05,
"loss": 0.16,
"step": 7
},
{
"epoch": 0.009179575444635685,
"grad_norm": 0.2848067283630371,
"learning_rate": 8e-05,
"loss": 0.1206,
"step": 8
},
{
"epoch": 0.010327022375215147,
"grad_norm": 0.3864418864250183,
"learning_rate": 9e-05,
"loss": 0.1463,
"step": 9
},
{
"epoch": 0.010327022375215147,
"eval_loss": 0.11602957546710968,
"eval_runtime": 41.6147,
"eval_samples_per_second": 17.638,
"eval_steps_per_second": 2.211,
"step": 9
},
{
"epoch": 0.011474469305794608,
"grad_norm": 0.3642881214618683,
"learning_rate": 0.0001,
"loss": 0.1295,
"step": 10
},
{
"epoch": 0.012621916236374068,
"grad_norm": 0.2669277489185333,
"learning_rate": 9.99695413509548e-05,
"loss": 0.0931,
"step": 11
},
{
"epoch": 0.013769363166953529,
"grad_norm": 0.26793304085731506,
"learning_rate": 9.987820251299122e-05,
"loss": 0.0807,
"step": 12
},
{
"epoch": 0.01491681009753299,
"grad_norm": 0.206680566072464,
"learning_rate": 9.972609476841367e-05,
"loss": 0.0522,
"step": 13
},
{
"epoch": 0.01606425702811245,
"grad_norm": 0.285326212644577,
"learning_rate": 9.951340343707852e-05,
"loss": 0.0604,
"step": 14
},
{
"epoch": 0.01721170395869191,
"grad_norm": 0.20402389764785767,
"learning_rate": 9.924038765061042e-05,
"loss": 0.0536,
"step": 15
},
{
"epoch": 0.01835915088927137,
"grad_norm": 0.20810100436210632,
"learning_rate": 9.890738003669029e-05,
"loss": 0.0516,
"step": 16
},
{
"epoch": 0.019506597819850834,
"grad_norm": 0.23139941692352295,
"learning_rate": 9.851478631379982e-05,
"loss": 0.055,
"step": 17
},
{
"epoch": 0.020654044750430294,
"grad_norm": 0.256534218788147,
"learning_rate": 9.806308479691595e-05,
"loss": 0.0443,
"step": 18
},
{
"epoch": 0.020654044750430294,
"eval_loss": 0.04736759513616562,
"eval_runtime": 41.8479,
"eval_samples_per_second": 17.54,
"eval_steps_per_second": 2.198,
"step": 18
},
{
"epoch": 0.021801491681009755,
"grad_norm": 0.25051283836364746,
"learning_rate": 9.755282581475769e-05,
"loss": 0.0444,
"step": 19
},
{
"epoch": 0.022948938611589215,
"grad_norm": 0.17016364634037018,
"learning_rate": 9.698463103929542e-05,
"loss": 0.0412,
"step": 20
},
{
"epoch": 0.024096385542168676,
"grad_norm": 0.1935800015926361,
"learning_rate": 9.635919272833938e-05,
"loss": 0.0393,
"step": 21
},
{
"epoch": 0.025243832472748137,
"grad_norm": 0.232066810131073,
"learning_rate": 9.567727288213005e-05,
"loss": 0.0594,
"step": 22
},
{
"epoch": 0.026391279403327597,
"grad_norm": 0.19686104357242584,
"learning_rate": 9.493970231495835e-05,
"loss": 0.0262,
"step": 23
},
{
"epoch": 0.027538726333907058,
"grad_norm": 0.21382786333560944,
"learning_rate": 9.414737964294636e-05,
"loss": 0.0294,
"step": 24
},
{
"epoch": 0.02868617326448652,
"grad_norm": 0.17204894125461578,
"learning_rate": 9.330127018922194e-05,
"loss": 0.0404,
"step": 25
},
{
"epoch": 0.02983362019506598,
"grad_norm": 0.21312940120697021,
"learning_rate": 9.24024048078213e-05,
"loss": 0.0374,
"step": 26
},
{
"epoch": 0.03098106712564544,
"grad_norm": 0.22618572413921356,
"learning_rate": 9.145187862775209e-05,
"loss": 0.0365,
"step": 27
},
{
"epoch": 0.03098106712564544,
"eval_loss": 0.035865794867277145,
"eval_runtime": 41.9598,
"eval_samples_per_second": 17.493,
"eval_steps_per_second": 2.193,
"step": 27
},
{
"epoch": 0.0321285140562249,
"grad_norm": 0.3117932081222534,
"learning_rate": 9.045084971874738e-05,
"loss": 0.032,
"step": 28
},
{
"epoch": 0.03327596098680436,
"grad_norm": 0.46760082244873047,
"learning_rate": 8.940053768033609e-05,
"loss": 0.05,
"step": 29
},
{
"epoch": 0.03442340791738382,
"grad_norm": 0.23990069329738617,
"learning_rate": 8.83022221559489e-05,
"loss": 0.0301,
"step": 30
},
{
"epoch": 0.03557085484796328,
"grad_norm": 0.2261684387922287,
"learning_rate": 8.715724127386972e-05,
"loss": 0.0329,
"step": 31
},
{
"epoch": 0.03671830177854274,
"grad_norm": 0.24604563415050507,
"learning_rate": 8.596699001693255e-05,
"loss": 0.0472,
"step": 32
},
{
"epoch": 0.0378657487091222,
"grad_norm": 0.24846573173999786,
"learning_rate": 8.473291852294987e-05,
"loss": 0.0262,
"step": 33
},
{
"epoch": 0.03901319563970167,
"grad_norm": 0.17995569109916687,
"learning_rate": 8.345653031794292e-05,
"loss": 0.0224,
"step": 34
},
{
"epoch": 0.040160642570281124,
"grad_norm": 0.20148605108261108,
"learning_rate": 8.213938048432697e-05,
"loss": 0.0374,
"step": 35
},
{
"epoch": 0.04130808950086059,
"grad_norm": 0.30979934334754944,
"learning_rate": 8.07830737662829e-05,
"loss": 0.0442,
"step": 36
},
{
"epoch": 0.04130808950086059,
"eval_loss": 0.02968555875122547,
"eval_runtime": 41.7385,
"eval_samples_per_second": 17.586,
"eval_steps_per_second": 2.204,
"step": 36
},
{
"epoch": 0.042455536431440045,
"grad_norm": 0.17189563810825348,
"learning_rate": 7.938926261462366e-05,
"loss": 0.0296,
"step": 37
},
{
"epoch": 0.04360298336201951,
"grad_norm": 0.21490830183029175,
"learning_rate": 7.795964517353735e-05,
"loss": 0.0275,
"step": 38
},
{
"epoch": 0.04475043029259897,
"grad_norm": 0.16300365328788757,
"learning_rate": 7.649596321166024e-05,
"loss": 0.0229,
"step": 39
},
{
"epoch": 0.04589787722317843,
"grad_norm": 0.19255292415618896,
"learning_rate": 7.500000000000001e-05,
"loss": 0.02,
"step": 40
},
{
"epoch": 0.04704532415375789,
"grad_norm": 0.2177131623029709,
"learning_rate": 7.347357813929454e-05,
"loss": 0.0259,
"step": 41
},
{
"epoch": 0.04819277108433735,
"grad_norm": 0.1485728919506073,
"learning_rate": 7.191855733945387e-05,
"loss": 0.0197,
"step": 42
},
{
"epoch": 0.04934021801491681,
"grad_norm": 0.1863524615764618,
"learning_rate": 7.033683215379002e-05,
"loss": 0.0288,
"step": 43
},
{
"epoch": 0.05048766494549627,
"grad_norm": 0.2662622034549713,
"learning_rate": 6.873032967079561e-05,
"loss": 0.0342,
"step": 44
},
{
"epoch": 0.05163511187607573,
"grad_norm": 0.1945541799068451,
"learning_rate": 6.710100716628344e-05,
"loss": 0.0282,
"step": 45
},
{
"epoch": 0.05163511187607573,
"eval_loss": 0.026803946122527122,
"eval_runtime": 41.84,
"eval_samples_per_second": 17.543,
"eval_steps_per_second": 2.199,
"step": 45
},
{
"epoch": 0.052782558806655194,
"grad_norm": 0.18604616820812225,
"learning_rate": 6.545084971874738e-05,
"loss": 0.0327,
"step": 46
},
{
"epoch": 0.05393000573723465,
"grad_norm": 0.18346279859542847,
"learning_rate": 6.378186779084995e-05,
"loss": 0.0252,
"step": 47
},
{
"epoch": 0.055077452667814115,
"grad_norm": 0.11086254566907883,
"learning_rate": 6.209609477998338e-05,
"loss": 0.02,
"step": 48
},
{
"epoch": 0.05622489959839357,
"grad_norm": 0.1600019931793213,
"learning_rate": 6.0395584540887963e-05,
"loss": 0.0261,
"step": 49
},
{
"epoch": 0.05737234652897304,
"grad_norm": 0.19246770441532135,
"learning_rate": 5.868240888334653e-05,
"loss": 0.0247,
"step": 50
},
{
"epoch": 0.058519793459552494,
"grad_norm": 0.18079611659049988,
"learning_rate": 5.695865504800327e-05,
"loss": 0.0215,
"step": 51
},
{
"epoch": 0.05966724039013196,
"grad_norm": 0.12951500713825226,
"learning_rate": 5.522642316338268e-05,
"loss": 0.0297,
"step": 52
},
{
"epoch": 0.060814687320711415,
"grad_norm": 0.12222294509410858,
"learning_rate": 5.348782368720626e-05,
"loss": 0.0202,
"step": 53
},
{
"epoch": 0.06196213425129088,
"grad_norm": 0.13354460895061493,
"learning_rate": 5.174497483512506e-05,
"loss": 0.0278,
"step": 54
},
{
"epoch": 0.06196213425129088,
"eval_loss": 0.025066262111067772,
"eval_runtime": 41.5419,
"eval_samples_per_second": 17.669,
"eval_steps_per_second": 2.215,
"step": 54
},
{
"epoch": 0.06310958118187034,
"grad_norm": 0.18413366377353668,
"learning_rate": 5e-05,
"loss": 0.0195,
"step": 55
},
{
"epoch": 0.0642570281124498,
"grad_norm": 0.25024259090423584,
"learning_rate": 4.825502516487497e-05,
"loss": 0.0287,
"step": 56
},
{
"epoch": 0.06540447504302926,
"grad_norm": 0.12252403795719147,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.0268,
"step": 57
},
{
"epoch": 0.06655192197360872,
"grad_norm": 0.16308040916919708,
"learning_rate": 4.477357683661734e-05,
"loss": 0.0279,
"step": 58
},
{
"epoch": 0.06769936890418818,
"grad_norm": 0.13704562187194824,
"learning_rate": 4.3041344951996746e-05,
"loss": 0.0157,
"step": 59
},
{
"epoch": 0.06884681583476764,
"grad_norm": 0.12924720346927643,
"learning_rate": 4.131759111665349e-05,
"loss": 0.0219,
"step": 60
},
{
"epoch": 0.0699942627653471,
"grad_norm": 0.12658867239952087,
"learning_rate": 3.960441545911204e-05,
"loss": 0.02,
"step": 61
},
{
"epoch": 0.07114170969592656,
"grad_norm": 0.22576895356178284,
"learning_rate": 3.790390522001662e-05,
"loss": 0.0258,
"step": 62
},
{
"epoch": 0.07228915662650602,
"grad_norm": 0.15099763870239258,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.0212,
"step": 63
},
{
"epoch": 0.07228915662650602,
"eval_loss": 0.024402011185884476,
"eval_runtime": 41.4179,
"eval_samples_per_second": 17.722,
"eval_steps_per_second": 2.221,
"step": 63
},
{
"epoch": 0.07343660355708548,
"grad_norm": 0.1566763073205948,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.0128,
"step": 64
},
{
"epoch": 0.07458405048766495,
"grad_norm": 0.1659335196018219,
"learning_rate": 3.289899283371657e-05,
"loss": 0.0175,
"step": 65
},
{
"epoch": 0.0757314974182444,
"grad_norm": 0.210612952709198,
"learning_rate": 3.12696703292044e-05,
"loss": 0.0272,
"step": 66
},
{
"epoch": 0.07687894434882386,
"grad_norm": 0.13868023455142975,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.0214,
"step": 67
},
{
"epoch": 0.07802639127940333,
"grad_norm": 0.1765725314617157,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.0265,
"step": 68
},
{
"epoch": 0.07917383820998279,
"grad_norm": 0.24902477860450745,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.0297,
"step": 69
},
{
"epoch": 0.08032128514056225,
"grad_norm": 0.29836687445640564,
"learning_rate": 2.500000000000001e-05,
"loss": 0.0404,
"step": 70
},
{
"epoch": 0.0814687320711417,
"grad_norm": 0.19871705770492554,
"learning_rate": 2.350403678833976e-05,
"loss": 0.026,
"step": 71
},
{
"epoch": 0.08261617900172118,
"grad_norm": 0.19763556122779846,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.0242,
"step": 72
},
{
"epoch": 0.08261617900172118,
"eval_loss": 0.023439526557922363,
"eval_runtime": 41.6199,
"eval_samples_per_second": 17.636,
"eval_steps_per_second": 2.21,
"step": 72
},
{
"epoch": 0.08376362593230063,
"grad_norm": 0.19204814732074738,
"learning_rate": 2.061073738537635e-05,
"loss": 0.0244,
"step": 73
},
{
"epoch": 0.08491107286288009,
"grad_norm": 0.09625480324029922,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.0158,
"step": 74
},
{
"epoch": 0.08605851979345955,
"grad_norm": 0.2161574512720108,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.0282,
"step": 75
},
{
"epoch": 0.08720596672403902,
"grad_norm": 0.15878649055957794,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.0163,
"step": 76
},
{
"epoch": 0.08835341365461848,
"grad_norm": 0.1350821554660797,
"learning_rate": 1.526708147705013e-05,
"loss": 0.0142,
"step": 77
},
{
"epoch": 0.08950086058519793,
"grad_norm": 0.23871316015720367,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.0273,
"step": 78
},
{
"epoch": 0.09064830751577739,
"grad_norm": 0.1733366698026657,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.0254,
"step": 79
},
{
"epoch": 0.09179575444635686,
"grad_norm": 0.2403794825077057,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.0262,
"step": 80
},
{
"epoch": 0.09294320137693632,
"grad_norm": 0.2295757383108139,
"learning_rate": 1.0599462319663905e-05,
"loss": 0.0312,
"step": 81
},
{
"epoch": 0.09294320137693632,
"eval_loss": 0.022903697565197945,
"eval_runtime": 41.322,
"eval_samples_per_second": 17.763,
"eval_steps_per_second": 2.226,
"step": 81
},
{
"epoch": 0.09409064830751578,
"grad_norm": 0.17668524384498596,
"learning_rate": 9.549150281252633e-06,
"loss": 0.019,
"step": 82
},
{
"epoch": 0.09523809523809523,
"grad_norm": 0.21054485440254211,
"learning_rate": 8.548121372247918e-06,
"loss": 0.0203,
"step": 83
},
{
"epoch": 0.0963855421686747,
"grad_norm": 0.11086580157279968,
"learning_rate": 7.597595192178702e-06,
"loss": 0.0166,
"step": 84
},
{
"epoch": 0.09753298909925416,
"grad_norm": 0.16344071924686432,
"learning_rate": 6.698729810778065e-06,
"loss": 0.0274,
"step": 85
},
{
"epoch": 0.09868043602983362,
"grad_norm": 0.2489679902791977,
"learning_rate": 5.852620357053651e-06,
"loss": 0.0383,
"step": 86
},
{
"epoch": 0.09982788296041308,
"grad_norm": 0.18206056952476501,
"learning_rate": 5.060297685041659e-06,
"loss": 0.0257,
"step": 87
},
{
"epoch": 0.10097532989099255,
"grad_norm": 0.407734215259552,
"learning_rate": 4.322727117869951e-06,
"loss": 0.0322,
"step": 88
},
{
"epoch": 0.102122776821572,
"grad_norm": 0.30174148082733154,
"learning_rate": 3.6408072716606346e-06,
"loss": 0.022,
"step": 89
},
{
"epoch": 0.10327022375215146,
"grad_norm": 0.17461110651493073,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.0224,
"step": 90
},
{
"epoch": 0.10327022375215146,
"eval_loss": 0.022746024653315544,
"eval_runtime": 42.4871,
"eval_samples_per_second": 17.276,
"eval_steps_per_second": 2.165,
"step": 90
},
{
"epoch": 0.10441767068273092,
"grad_norm": 0.18326325714588165,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.0138,
"step": 91
},
{
"epoch": 0.10556511761331039,
"grad_norm": 0.12514610588550568,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.014,
"step": 92
},
{
"epoch": 0.10671256454388985,
"grad_norm": 0.25040769577026367,
"learning_rate": 1.4852136862001764e-06,
"loss": 0.0246,
"step": 93
},
{
"epoch": 0.1078600114744693,
"grad_norm": 0.19364497065544128,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.0211,
"step": 94
},
{
"epoch": 0.10900745840504876,
"grad_norm": 0.15431857109069824,
"learning_rate": 7.596123493895991e-07,
"loss": 0.0268,
"step": 95
},
{
"epoch": 0.11015490533562823,
"grad_norm": 0.28512442111968994,
"learning_rate": 4.865965629214819e-07,
"loss": 0.0514,
"step": 96
},
{
"epoch": 0.11130235226620769,
"grad_norm": 0.15576301515102386,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.0255,
"step": 97
},
{
"epoch": 0.11244979919678715,
"grad_norm": 0.24590234458446503,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.0286,
"step": 98
},
{
"epoch": 0.11359724612736662,
"grad_norm": 0.18634286522865295,
"learning_rate": 3.04586490452119e-08,
"loss": 0.0212,
"step": 99
},
{
"epoch": 0.11359724612736662,
"eval_loss": 0.022740166634321213,
"eval_runtime": 41.5125,
"eval_samples_per_second": 17.681,
"eval_steps_per_second": 2.216,
"step": 99
},
{
"epoch": 0.11474469305794607,
"grad_norm": 0.19473059475421906,
"learning_rate": 0.0,
"loss": 0.025,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.6402897554112512e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}