{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.4319654427645788,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004319654427645789,
"grad_norm": 0.9524998068809509,
"learning_rate": 1e-05,
"loss": 0.2163,
"step": 1
},
{
"epoch": 0.004319654427645789,
"eval_loss": 0.2543673515319824,
"eval_runtime": 44.0589,
"eval_samples_per_second": 4.426,
"eval_steps_per_second": 0.567,
"step": 1
},
{
"epoch": 0.008639308855291577,
"grad_norm": 1.4569298028945923,
"learning_rate": 2e-05,
"loss": 0.2263,
"step": 2
},
{
"epoch": 0.012958963282937365,
"grad_norm": 1.389264702796936,
"learning_rate": 3e-05,
"loss": 0.2592,
"step": 3
},
{
"epoch": 0.017278617710583154,
"grad_norm": 0.33641600608825684,
"learning_rate": 4e-05,
"loss": 0.1356,
"step": 4
},
{
"epoch": 0.02159827213822894,
"grad_norm": 0.5771126747131348,
"learning_rate": 5e-05,
"loss": 0.1613,
"step": 5
},
{
"epoch": 0.02591792656587473,
"grad_norm": 1.5870953798294067,
"learning_rate": 6e-05,
"loss": 0.3035,
"step": 6
},
{
"epoch": 0.03023758099352052,
"grad_norm": 0.6865023374557495,
"learning_rate": 7e-05,
"loss": 0.122,
"step": 7
},
{
"epoch": 0.03455723542116631,
"grad_norm": 0.5479922890663147,
"learning_rate": 8e-05,
"loss": 0.0986,
"step": 8
},
{
"epoch": 0.038876889848812095,
"grad_norm": 0.6865485906600952,
"learning_rate": 9e-05,
"loss": 0.1016,
"step": 9
},
{
"epoch": 0.038876889848812095,
"eval_loss": 0.07106531411409378,
"eval_runtime": 42.6177,
"eval_samples_per_second": 4.576,
"eval_steps_per_second": 0.587,
"step": 9
},
{
"epoch": 0.04319654427645788,
"grad_norm": 0.37363576889038086,
"learning_rate": 0.0001,
"loss": 0.0888,
"step": 10
},
{
"epoch": 0.047516198704103674,
"grad_norm": 0.28562089800834656,
"learning_rate": 9.99695413509548e-05,
"loss": 0.0604,
"step": 11
},
{
"epoch": 0.05183585313174946,
"grad_norm": 0.23990653455257416,
"learning_rate": 9.987820251299122e-05,
"loss": 0.0482,
"step": 12
},
{
"epoch": 0.056155507559395246,
"grad_norm": 0.39102837443351746,
"learning_rate": 9.972609476841367e-05,
"loss": 0.0524,
"step": 13
},
{
"epoch": 0.06047516198704104,
"grad_norm": 1.527226209640503,
"learning_rate": 9.951340343707852e-05,
"loss": 0.0633,
"step": 14
},
{
"epoch": 0.06479481641468683,
"grad_norm": 0.4168100953102112,
"learning_rate": 9.924038765061042e-05,
"loss": 0.0409,
"step": 15
},
{
"epoch": 0.06911447084233262,
"grad_norm": 0.263187050819397,
"learning_rate": 9.890738003669029e-05,
"loss": 0.0261,
"step": 16
},
{
"epoch": 0.0734341252699784,
"grad_norm": 0.28248730301856995,
"learning_rate": 9.851478631379982e-05,
"loss": 0.0258,
"step": 17
},
{
"epoch": 0.07775377969762419,
"grad_norm": 0.20124828815460205,
"learning_rate": 9.806308479691595e-05,
"loss": 0.0309,
"step": 18
},
{
"epoch": 0.07775377969762419,
"eval_loss": 0.02298855595290661,
"eval_runtime": 42.6058,
"eval_samples_per_second": 4.577,
"eval_steps_per_second": 0.587,
"step": 18
},
{
"epoch": 0.08207343412526998,
"grad_norm": 0.17783138155937195,
"learning_rate": 9.755282581475769e-05,
"loss": 0.0225,
"step": 19
},
{
"epoch": 0.08639308855291576,
"grad_norm": 0.20173536241054535,
"learning_rate": 9.698463103929542e-05,
"loss": 0.022,
"step": 20
},
{
"epoch": 0.09071274298056156,
"grad_norm": 0.1374434232711792,
"learning_rate": 9.635919272833938e-05,
"loss": 0.0227,
"step": 21
},
{
"epoch": 0.09503239740820735,
"grad_norm": 0.1510484218597412,
"learning_rate": 9.567727288213005e-05,
"loss": 0.0207,
"step": 22
},
{
"epoch": 0.09935205183585313,
"grad_norm": 0.2596878409385681,
"learning_rate": 9.493970231495835e-05,
"loss": 0.0244,
"step": 23
},
{
"epoch": 0.10367170626349892,
"grad_norm": 0.26210731267929077,
"learning_rate": 9.414737964294636e-05,
"loss": 0.0163,
"step": 24
},
{
"epoch": 0.1079913606911447,
"grad_norm": 0.20492668449878693,
"learning_rate": 9.330127018922194e-05,
"loss": 0.0285,
"step": 25
},
{
"epoch": 0.11231101511879049,
"grad_norm": 0.14693544805049896,
"learning_rate": 9.24024048078213e-05,
"loss": 0.015,
"step": 26
},
{
"epoch": 0.11663066954643629,
"grad_norm": 0.07715924829244614,
"learning_rate": 9.145187862775209e-05,
"loss": 0.0086,
"step": 27
},
{
"epoch": 0.11663066954643629,
"eval_loss": 0.011002009734511375,
"eval_runtime": 42.615,
"eval_samples_per_second": 4.576,
"eval_steps_per_second": 0.587,
"step": 27
},
{
"epoch": 0.12095032397408208,
"grad_norm": 0.14009982347488403,
"learning_rate": 9.045084971874738e-05,
"loss": 0.0083,
"step": 28
},
{
"epoch": 0.12526997840172785,
"grad_norm": 0.09885896742343903,
"learning_rate": 8.940053768033609e-05,
"loss": 0.0094,
"step": 29
},
{
"epoch": 0.12958963282937366,
"grad_norm": 0.5052633881568909,
"learning_rate": 8.83022221559489e-05,
"loss": 0.0134,
"step": 30
},
{
"epoch": 0.13390928725701945,
"grad_norm": 0.11304973810911179,
"learning_rate": 8.715724127386972e-05,
"loss": 0.0151,
"step": 31
},
{
"epoch": 0.13822894168466524,
"grad_norm": 0.09011288732290268,
"learning_rate": 8.596699001693255e-05,
"loss": 0.0083,
"step": 32
},
{
"epoch": 0.14254859611231102,
"grad_norm": 0.11702302098274231,
"learning_rate": 8.473291852294987e-05,
"loss": 0.0093,
"step": 33
},
{
"epoch": 0.1468682505399568,
"grad_norm": 0.09262354671955109,
"learning_rate": 8.345653031794292e-05,
"loss": 0.0117,
"step": 34
},
{
"epoch": 0.1511879049676026,
"grad_norm": 0.11039795726537704,
"learning_rate": 8.213938048432697e-05,
"loss": 0.0173,
"step": 35
},
{
"epoch": 0.15550755939524838,
"grad_norm": 0.18931828439235687,
"learning_rate": 8.07830737662829e-05,
"loss": 0.0053,
"step": 36
},
{
"epoch": 0.15550755939524838,
"eval_loss": 0.00827693473547697,
"eval_runtime": 42.6233,
"eval_samples_per_second": 4.575,
"eval_steps_per_second": 0.587,
"step": 36
},
{
"epoch": 0.15982721382289417,
"grad_norm": 0.1090330183506012,
"learning_rate": 7.938926261462366e-05,
"loss": 0.0073,
"step": 37
},
{
"epoch": 0.16414686825053995,
"grad_norm": 0.08737006038427353,
"learning_rate": 7.795964517353735e-05,
"loss": 0.0071,
"step": 38
},
{
"epoch": 0.16846652267818574,
"grad_norm": 0.13259300589561462,
"learning_rate": 7.649596321166024e-05,
"loss": 0.0076,
"step": 39
},
{
"epoch": 0.17278617710583152,
"grad_norm": 0.22442105412483215,
"learning_rate": 7.500000000000001e-05,
"loss": 0.0122,
"step": 40
},
{
"epoch": 0.1771058315334773,
"grad_norm": 0.08325769752264023,
"learning_rate": 7.347357813929454e-05,
"loss": 0.0048,
"step": 41
},
{
"epoch": 0.18142548596112312,
"grad_norm": 0.12202583253383636,
"learning_rate": 7.191855733945387e-05,
"loss": 0.0055,
"step": 42
},
{
"epoch": 0.1857451403887689,
"grad_norm": 0.0481962226331234,
"learning_rate": 7.033683215379002e-05,
"loss": 0.0046,
"step": 43
},
{
"epoch": 0.1900647948164147,
"grad_norm": 0.06810636073350906,
"learning_rate": 6.873032967079561e-05,
"loss": 0.0056,
"step": 44
},
{
"epoch": 0.19438444924406048,
"grad_norm": 0.07894303649663925,
"learning_rate": 6.710100716628344e-05,
"loss": 0.0055,
"step": 45
},
{
"epoch": 0.19438444924406048,
"eval_loss": 0.005382665432989597,
"eval_runtime": 42.6181,
"eval_samples_per_second": 4.576,
"eval_steps_per_second": 0.587,
"step": 45
},
{
"epoch": 0.19870410367170627,
"grad_norm": 0.0615725964307785,
"learning_rate": 6.545084971874738e-05,
"loss": 0.0054,
"step": 46
},
{
"epoch": 0.20302375809935205,
"grad_norm": 0.24329596757888794,
"learning_rate": 6.378186779084995e-05,
"loss": 0.0132,
"step": 47
},
{
"epoch": 0.20734341252699784,
"grad_norm": 0.0349898636341095,
"learning_rate": 6.209609477998338e-05,
"loss": 0.0034,
"step": 48
},
{
"epoch": 0.21166306695464362,
"grad_norm": 0.05601835995912552,
"learning_rate": 6.0395584540887963e-05,
"loss": 0.0062,
"step": 49
},
{
"epoch": 0.2159827213822894,
"grad_norm": 0.06486399471759796,
"learning_rate": 5.868240888334653e-05,
"loss": 0.0043,
"step": 50
},
{
"epoch": 0.2203023758099352,
"grad_norm": 0.1450735628604889,
"learning_rate": 5.695865504800327e-05,
"loss": 0.0079,
"step": 51
},
{
"epoch": 0.22462203023758098,
"grad_norm": 0.09253419190645218,
"learning_rate": 5.522642316338268e-05,
"loss": 0.0064,
"step": 52
},
{
"epoch": 0.22894168466522677,
"grad_norm": 0.1478482335805893,
"learning_rate": 5.348782368720626e-05,
"loss": 0.0056,
"step": 53
},
{
"epoch": 0.23326133909287258,
"grad_norm": 0.0666055679321289,
"learning_rate": 5.174497483512506e-05,
"loss": 0.0034,
"step": 54
},
{
"epoch": 0.23326133909287258,
"eval_loss": 0.0044831158593297005,
"eval_runtime": 42.6508,
"eval_samples_per_second": 4.572,
"eval_steps_per_second": 0.586,
"step": 54
},
{
"epoch": 0.23758099352051837,
"grad_norm": 0.05718037858605385,
"learning_rate": 5e-05,
"loss": 0.0036,
"step": 55
},
{
"epoch": 0.24190064794816415,
"grad_norm": 0.047772157937288284,
"learning_rate": 4.825502516487497e-05,
"loss": 0.0033,
"step": 56
},
{
"epoch": 0.24622030237580994,
"grad_norm": 0.1433199644088745,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.0066,
"step": 57
},
{
"epoch": 0.2505399568034557,
"grad_norm": 0.14454364776611328,
"learning_rate": 4.477357683661734e-05,
"loss": 0.003,
"step": 58
},
{
"epoch": 0.2548596112311015,
"grad_norm": 0.0677032396197319,
"learning_rate": 4.3041344951996746e-05,
"loss": 0.0022,
"step": 59
},
{
"epoch": 0.2591792656587473,
"grad_norm": 0.056752149015665054,
"learning_rate": 4.131759111665349e-05,
"loss": 0.0053,
"step": 60
},
{
"epoch": 0.2634989200863931,
"grad_norm": 0.05664772540330887,
"learning_rate": 3.960441545911204e-05,
"loss": 0.0027,
"step": 61
},
{
"epoch": 0.2678185745140389,
"grad_norm": 0.0625712126493454,
"learning_rate": 3.790390522001662e-05,
"loss": 0.0045,
"step": 62
},
{
"epoch": 0.27213822894168466,
"grad_norm": 0.08698461949825287,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.0039,
"step": 63
},
{
"epoch": 0.27213822894168466,
"eval_loss": 0.0033045976888388395,
"eval_runtime": 42.6105,
"eval_samples_per_second": 4.576,
"eval_steps_per_second": 0.587,
"step": 63
},
{
"epoch": 0.27645788336933047,
"grad_norm": 0.31703194975852966,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.0054,
"step": 64
},
{
"epoch": 0.28077753779697623,
"grad_norm": 0.057455211877822876,
"learning_rate": 3.289899283371657e-05,
"loss": 0.0045,
"step": 65
},
{
"epoch": 0.28509719222462204,
"grad_norm": 0.043769922107458115,
"learning_rate": 3.12696703292044e-05,
"loss": 0.0022,
"step": 66
},
{
"epoch": 0.2894168466522678,
"grad_norm": 0.06640162318944931,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.0022,
"step": 67
},
{
"epoch": 0.2937365010799136,
"grad_norm": 0.18320414423942566,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.002,
"step": 68
},
{
"epoch": 0.2980561555075594,
"grad_norm": 0.05843820422887802,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.0038,
"step": 69
},
{
"epoch": 0.3023758099352052,
"grad_norm": 0.041529346257448196,
"learning_rate": 2.500000000000001e-05,
"loss": 0.0043,
"step": 70
},
{
"epoch": 0.30669546436285094,
"grad_norm": 0.0774838998913765,
"learning_rate": 2.350403678833976e-05,
"loss": 0.0042,
"step": 71
},
{
"epoch": 0.31101511879049676,
"grad_norm": 0.02668030932545662,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.0016,
"step": 72
},
{
"epoch": 0.31101511879049676,
"eval_loss": 0.0033292018342763186,
"eval_runtime": 42.6419,
"eval_samples_per_second": 4.573,
"eval_steps_per_second": 0.586,
"step": 72
},
{
"epoch": 0.31533477321814257,
"grad_norm": 0.062281470745801926,
"learning_rate": 2.061073738537635e-05,
"loss": 0.0051,
"step": 73
},
{
"epoch": 0.31965442764578833,
"grad_norm": 0.03590645641088486,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.0024,
"step": 74
},
{
"epoch": 0.32397408207343414,
"grad_norm": 0.08888695389032364,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.0049,
"step": 75
},
{
"epoch": 0.3282937365010799,
"grad_norm": 0.0329754464328289,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.0014,
"step": 76
},
{
"epoch": 0.3326133909287257,
"grad_norm": 0.06644655019044876,
"learning_rate": 1.526708147705013e-05,
"loss": 0.005,
"step": 77
},
{
"epoch": 0.3369330453563715,
"grad_norm": 0.071534663438797,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.0032,
"step": 78
},
{
"epoch": 0.3412526997840173,
"grad_norm": 0.048516895622015,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.0039,
"step": 79
},
{
"epoch": 0.34557235421166305,
"grad_norm": 0.05907434597611427,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.0029,
"step": 80
},
{
"epoch": 0.34989200863930886,
"grad_norm": 0.04661243036389351,
"learning_rate": 1.0599462319663905e-05,
"loss": 0.0039,
"step": 81
},
{
"epoch": 0.34989200863930886,
"eval_loss": 0.0029163085855543613,
"eval_runtime": 42.6384,
"eval_samples_per_second": 4.573,
"eval_steps_per_second": 0.586,
"step": 81
},
{
"epoch": 0.3542116630669546,
"grad_norm": 0.04970655217766762,
"learning_rate": 9.549150281252633e-06,
"loss": 0.0035,
"step": 82
},
{
"epoch": 0.35853131749460043,
"grad_norm": 0.05986812338232994,
"learning_rate": 8.548121372247918e-06,
"loss": 0.0069,
"step": 83
},
{
"epoch": 0.36285097192224625,
"grad_norm": 0.04365411773324013,
"learning_rate": 7.597595192178702e-06,
"loss": 0.0029,
"step": 84
},
{
"epoch": 0.367170626349892,
"grad_norm": 0.13277940452098846,
"learning_rate": 6.698729810778065e-06,
"loss": 0.0056,
"step": 85
},
{
"epoch": 0.3714902807775378,
"grad_norm": 0.05331737548112869,
"learning_rate": 5.852620357053651e-06,
"loss": 0.0032,
"step": 86
},
{
"epoch": 0.3758099352051836,
"grad_norm": 0.034497883170843124,
"learning_rate": 5.060297685041659e-06,
"loss": 0.0034,
"step": 87
},
{
"epoch": 0.3801295896328294,
"grad_norm": 0.024258598685264587,
"learning_rate": 4.322727117869951e-06,
"loss": 0.0021,
"step": 88
},
{
"epoch": 0.38444924406047515,
"grad_norm": 0.06567387282848358,
"learning_rate": 3.6408072716606346e-06,
"loss": 0.0071,
"step": 89
},
{
"epoch": 0.38876889848812096,
"grad_norm": 0.05820886418223381,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.004,
"step": 90
},
{
"epoch": 0.38876889848812096,
"eval_loss": 0.002866251626983285,
"eval_runtime": 42.5996,
"eval_samples_per_second": 4.578,
"eval_steps_per_second": 0.587,
"step": 90
},
{
"epoch": 0.3930885529157667,
"grad_norm": 0.07091183215379715,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.0051,
"step": 91
},
{
"epoch": 0.39740820734341253,
"grad_norm": 0.07214067131280899,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.0031,
"step": 92
},
{
"epoch": 0.4017278617710583,
"grad_norm": 0.047936685383319855,
"learning_rate": 1.4852136862001764e-06,
"loss": 0.0033,
"step": 93
},
{
"epoch": 0.4060475161987041,
"grad_norm": 0.04225027561187744,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.0041,
"step": 94
},
{
"epoch": 0.4103671706263499,
"grad_norm": 0.13431866466999054,
"learning_rate": 7.596123493895991e-07,
"loss": 0.0038,
"step": 95
},
{
"epoch": 0.4146868250539957,
"grad_norm": 0.07035449892282486,
"learning_rate": 4.865965629214819e-07,
"loss": 0.0038,
"step": 96
},
{
"epoch": 0.4190064794816415,
"grad_norm": 0.05573286861181259,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.0036,
"step": 97
},
{
"epoch": 0.42332613390928725,
"grad_norm": 0.09693485498428345,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.0048,
"step": 98
},
{
"epoch": 0.42764578833693306,
"grad_norm": 0.02684597484767437,
"learning_rate": 3.04586490452119e-08,
"loss": 0.0014,
"step": 99
},
{
"epoch": 0.42764578833693306,
"eval_loss": 0.002881319960579276,
"eval_runtime": 42.5998,
"eval_samples_per_second": 4.577,
"eval_steps_per_second": 0.587,
"step": 99
},
{
"epoch": 0.4319654427645788,
"grad_norm": 0.02570461854338646,
"learning_rate": 0.0,
"loss": 0.0019,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.4652273850318848e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}