{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.6006006006006006,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.006006006006006006,
"grad_norm": 0.4190921187400818,
"learning_rate": 1e-05,
"loss": 2.8248,
"step": 1
},
{
"epoch": 0.006006006006006006,
"eval_loss": 1.4474337100982666,
"eval_runtime": 20.7102,
"eval_samples_per_second": 6.76,
"eval_steps_per_second": 0.869,
"step": 1
},
{
"epoch": 0.012012012012012012,
"grad_norm": 0.4117864668369293,
"learning_rate": 2e-05,
"loss": 2.8999,
"step": 2
},
{
"epoch": 0.018018018018018018,
"grad_norm": 0.47476091980934143,
"learning_rate": 3e-05,
"loss": 2.9642,
"step": 3
},
{
"epoch": 0.024024024024024024,
"grad_norm": 0.5182833671569824,
"learning_rate": 4e-05,
"loss": 3.0016,
"step": 4
},
{
"epoch": 0.03003003003003003,
"grad_norm": 0.5595167279243469,
"learning_rate": 5e-05,
"loss": 2.9978,
"step": 5
},
{
"epoch": 0.036036036036036036,
"grad_norm": 0.4953429400920868,
"learning_rate": 6e-05,
"loss": 2.9403,
"step": 6
},
{
"epoch": 0.042042042042042045,
"grad_norm": 0.5607187747955322,
"learning_rate": 7e-05,
"loss": 2.8352,
"step": 7
},
{
"epoch": 0.04804804804804805,
"grad_norm": 0.5155525803565979,
"learning_rate": 8e-05,
"loss": 2.8618,
"step": 8
},
{
"epoch": 0.05405405405405406,
"grad_norm": 0.5875859260559082,
"learning_rate": 9e-05,
"loss": 2.7985,
"step": 9
},
{
"epoch": 0.05405405405405406,
"eval_loss": 1.3532243967056274,
"eval_runtime": 20.8925,
"eval_samples_per_second": 6.701,
"eval_steps_per_second": 0.862,
"step": 9
},
{
"epoch": 0.06006006006006006,
"grad_norm": 0.5471853613853455,
"learning_rate": 0.0001,
"loss": 2.7472,
"step": 10
},
{
"epoch": 0.06606606606606606,
"grad_norm": 0.6831285953521729,
"learning_rate": 9.99695413509548e-05,
"loss": 2.7426,
"step": 11
},
{
"epoch": 0.07207207207207207,
"grad_norm": 0.5598995089530945,
"learning_rate": 9.987820251299122e-05,
"loss": 2.4549,
"step": 12
},
{
"epoch": 0.07807807807807808,
"grad_norm": 0.6646794080734253,
"learning_rate": 9.972609476841367e-05,
"loss": 2.4884,
"step": 13
},
{
"epoch": 0.08408408408408409,
"grad_norm": 0.8190558552742004,
"learning_rate": 9.951340343707852e-05,
"loss": 2.5538,
"step": 14
},
{
"epoch": 0.09009009009009009,
"grad_norm": 0.6523075699806213,
"learning_rate": 9.924038765061042e-05,
"loss": 2.6107,
"step": 15
},
{
"epoch": 0.0960960960960961,
"grad_norm": 0.6743414402008057,
"learning_rate": 9.890738003669029e-05,
"loss": 2.4982,
"step": 16
},
{
"epoch": 0.1021021021021021,
"grad_norm": 0.6463999152183533,
"learning_rate": 9.851478631379982e-05,
"loss": 2.3463,
"step": 17
},
{
"epoch": 0.10810810810810811,
"grad_norm": 0.6486225724220276,
"learning_rate": 9.806308479691595e-05,
"loss": 2.4509,
"step": 18
},
{
"epoch": 0.10810810810810811,
"eval_loss": 1.1851264238357544,
"eval_runtime": 20.8737,
"eval_samples_per_second": 6.707,
"eval_steps_per_second": 0.862,
"step": 18
},
{
"epoch": 0.11411411411411411,
"grad_norm": 0.6646873354911804,
"learning_rate": 9.755282581475769e-05,
"loss": 2.5231,
"step": 19
},
{
"epoch": 0.12012012012012012,
"grad_norm": 0.6953087449073792,
"learning_rate": 9.698463103929542e-05,
"loss": 2.3943,
"step": 20
},
{
"epoch": 0.12612612612612611,
"grad_norm": 0.6595270037651062,
"learning_rate": 9.635919272833938e-05,
"loss": 2.3606,
"step": 21
},
{
"epoch": 0.13213213213213212,
"grad_norm": 0.64484703540802,
"learning_rate": 9.567727288213005e-05,
"loss": 2.3778,
"step": 22
},
{
"epoch": 0.13813813813813813,
"grad_norm": 0.8236026763916016,
"learning_rate": 9.493970231495835e-05,
"loss": 2.2202,
"step": 23
},
{
"epoch": 0.14414414414414414,
"grad_norm": 0.6265228986740112,
"learning_rate": 9.414737964294636e-05,
"loss": 2.2827,
"step": 24
},
{
"epoch": 0.15015015015015015,
"grad_norm": 0.6501413583755493,
"learning_rate": 9.330127018922194e-05,
"loss": 2.3664,
"step": 25
},
{
"epoch": 0.15615615615615616,
"grad_norm": 0.6066960096359253,
"learning_rate": 9.24024048078213e-05,
"loss": 2.2977,
"step": 26
},
{
"epoch": 0.16216216216216217,
"grad_norm": 0.682429850101471,
"learning_rate": 9.145187862775209e-05,
"loss": 2.2791,
"step": 27
},
{
"epoch": 0.16216216216216217,
"eval_loss": 1.1289162635803223,
"eval_runtime": 20.9937,
"eval_samples_per_second": 6.669,
"eval_steps_per_second": 0.857,
"step": 27
},
{
"epoch": 0.16816816816816818,
"grad_norm": 0.5853718519210815,
"learning_rate": 9.045084971874738e-05,
"loss": 2.2462,
"step": 28
},
{
"epoch": 0.17417417417417416,
"grad_norm": 0.681626558303833,
"learning_rate": 8.940053768033609e-05,
"loss": 2.2603,
"step": 29
},
{
"epoch": 0.18018018018018017,
"grad_norm": 0.6157286763191223,
"learning_rate": 8.83022221559489e-05,
"loss": 2.2548,
"step": 30
},
{
"epoch": 0.18618618618618618,
"grad_norm": 0.5568789839744568,
"learning_rate": 8.715724127386972e-05,
"loss": 2.1607,
"step": 31
},
{
"epoch": 0.1921921921921922,
"grad_norm": 0.7827078700065613,
"learning_rate": 8.596699001693255e-05,
"loss": 2.268,
"step": 32
},
{
"epoch": 0.1981981981981982,
"grad_norm": 0.6743062734603882,
"learning_rate": 8.473291852294987e-05,
"loss": 2.0744,
"step": 33
},
{
"epoch": 0.2042042042042042,
"grad_norm": 0.7264645099639893,
"learning_rate": 8.345653031794292e-05,
"loss": 2.1957,
"step": 34
},
{
"epoch": 0.21021021021021022,
"grad_norm": 0.7725199460983276,
"learning_rate": 8.213938048432697e-05,
"loss": 2.2592,
"step": 35
},
{
"epoch": 0.21621621621621623,
"grad_norm": 0.6977213025093079,
"learning_rate": 8.07830737662829e-05,
"loss": 2.1857,
"step": 36
},
{
"epoch": 0.21621621621621623,
"eval_loss": 1.095693588256836,
"eval_runtime": 20.957,
"eval_samples_per_second": 6.68,
"eval_steps_per_second": 0.859,
"step": 36
},
{
"epoch": 0.2222222222222222,
"grad_norm": 0.5948267579078674,
"learning_rate": 7.938926261462366e-05,
"loss": 2.1454,
"step": 37
},
{
"epoch": 0.22822822822822822,
"grad_norm": 0.5648744106292725,
"learning_rate": 7.795964517353735e-05,
"loss": 2.0381,
"step": 38
},
{
"epoch": 0.23423423423423423,
"grad_norm": 0.6770027279853821,
"learning_rate": 7.649596321166024e-05,
"loss": 2.2106,
"step": 39
},
{
"epoch": 0.24024024024024024,
"grad_norm": 0.6534614562988281,
"learning_rate": 7.500000000000001e-05,
"loss": 2.1378,
"step": 40
},
{
"epoch": 0.24624624624624625,
"grad_norm": 0.5991220474243164,
"learning_rate": 7.347357813929454e-05,
"loss": 2.2575,
"step": 41
},
{
"epoch": 0.25225225225225223,
"grad_norm": 0.6903526186943054,
"learning_rate": 7.191855733945387e-05,
"loss": 2.1784,
"step": 42
},
{
"epoch": 0.25825825825825827,
"grad_norm": 0.5913608074188232,
"learning_rate": 7.033683215379002e-05,
"loss": 2.1488,
"step": 43
},
{
"epoch": 0.26426426426426425,
"grad_norm": 0.6624292135238647,
"learning_rate": 6.873032967079561e-05,
"loss": 2.1639,
"step": 44
},
{
"epoch": 0.2702702702702703,
"grad_norm": 0.8672691583633423,
"learning_rate": 6.710100716628344e-05,
"loss": 2.2637,
"step": 45
},
{
"epoch": 0.2702702702702703,
"eval_loss": 1.072501540184021,
"eval_runtime": 20.9263,
"eval_samples_per_second": 6.69,
"eval_steps_per_second": 0.86,
"step": 45
},
{
"epoch": 0.27627627627627627,
"grad_norm": 0.6573371887207031,
"learning_rate": 6.545084971874738e-05,
"loss": 2.1309,
"step": 46
},
{
"epoch": 0.2822822822822823,
"grad_norm": 0.7428563833236694,
"learning_rate": 6.378186779084995e-05,
"loss": 2.1847,
"step": 47
},
{
"epoch": 0.2882882882882883,
"grad_norm": 0.5846123695373535,
"learning_rate": 6.209609477998338e-05,
"loss": 2.2138,
"step": 48
},
{
"epoch": 0.29429429429429427,
"grad_norm": 0.6415207982063293,
"learning_rate": 6.0395584540887963e-05,
"loss": 2.2448,
"step": 49
},
{
"epoch": 0.3003003003003003,
"grad_norm": 0.5973365902900696,
"learning_rate": 5.868240888334653e-05,
"loss": 2.0774,
"step": 50
},
{
"epoch": 0.3063063063063063,
"grad_norm": 0.8249204754829407,
"learning_rate": 5.695865504800327e-05,
"loss": 2.0988,
"step": 51
},
{
"epoch": 0.3123123123123123,
"grad_norm": 0.630953848361969,
"learning_rate": 5.522642316338268e-05,
"loss": 2.0808,
"step": 52
},
{
"epoch": 0.3183183183183183,
"grad_norm": 0.6519582867622375,
"learning_rate": 5.348782368720626e-05,
"loss": 2.0949,
"step": 53
},
{
"epoch": 0.32432432432432434,
"grad_norm": 0.6942300200462341,
"learning_rate": 5.174497483512506e-05,
"loss": 2.1692,
"step": 54
},
{
"epoch": 0.32432432432432434,
"eval_loss": 1.0634726285934448,
"eval_runtime": 20.9032,
"eval_samples_per_second": 6.698,
"eval_steps_per_second": 0.861,
"step": 54
},
{
"epoch": 0.3303303303303303,
"grad_norm": 0.6861055493354797,
"learning_rate": 5e-05,
"loss": 2.1522,
"step": 55
},
{
"epoch": 0.33633633633633636,
"grad_norm": 0.776831865310669,
"learning_rate": 4.825502516487497e-05,
"loss": 2.1213,
"step": 56
},
{
"epoch": 0.34234234234234234,
"grad_norm": 0.747948944568634,
"learning_rate": 4.6512176312793736e-05,
"loss": 2.3263,
"step": 57
},
{
"epoch": 0.3483483483483483,
"grad_norm": 0.7790858149528503,
"learning_rate": 4.477357683661734e-05,
"loss": 2.1892,
"step": 58
},
{
"epoch": 0.35435435435435436,
"grad_norm": 0.6770322918891907,
"learning_rate": 4.3041344951996746e-05,
"loss": 2.1512,
"step": 59
},
{
"epoch": 0.36036036036036034,
"grad_norm": 0.7639994621276855,
"learning_rate": 4.131759111665349e-05,
"loss": 2.1758,
"step": 60
},
{
"epoch": 0.3663663663663664,
"grad_norm": 0.7350506782531738,
"learning_rate": 3.960441545911204e-05,
"loss": 2.2394,
"step": 61
},
{
"epoch": 0.37237237237237236,
"grad_norm": 0.7804369926452637,
"learning_rate": 3.790390522001662e-05,
"loss": 2.0869,
"step": 62
},
{
"epoch": 0.3783783783783784,
"grad_norm": 0.6832085847854614,
"learning_rate": 3.6218132209150045e-05,
"loss": 2.1623,
"step": 63
},
{
"epoch": 0.3783783783783784,
"eval_loss": 1.0578171014785767,
"eval_runtime": 20.8929,
"eval_samples_per_second": 6.701,
"eval_steps_per_second": 0.862,
"step": 63
},
{
"epoch": 0.3843843843843844,
"grad_norm": 0.6266286969184875,
"learning_rate": 3.4549150281252636e-05,
"loss": 1.9084,
"step": 64
},
{
"epoch": 0.39039039039039036,
"grad_norm": 0.7004335522651672,
"learning_rate": 3.289899283371657e-05,
"loss": 2.0813,
"step": 65
},
{
"epoch": 0.3963963963963964,
"grad_norm": 0.7086822390556335,
"learning_rate": 3.12696703292044e-05,
"loss": 2.0428,
"step": 66
},
{
"epoch": 0.4024024024024024,
"grad_norm": 0.6715360879898071,
"learning_rate": 2.9663167846209998e-05,
"loss": 2.237,
"step": 67
},
{
"epoch": 0.4084084084084084,
"grad_norm": 0.681358814239502,
"learning_rate": 2.8081442660546125e-05,
"loss": 2.2593,
"step": 68
},
{
"epoch": 0.4144144144144144,
"grad_norm": 0.9270521998405457,
"learning_rate": 2.6526421860705473e-05,
"loss": 2.0822,
"step": 69
},
{
"epoch": 0.42042042042042044,
"grad_norm": 0.6439203023910522,
"learning_rate": 2.500000000000001e-05,
"loss": 2.0663,
"step": 70
},
{
"epoch": 0.4264264264264264,
"grad_norm": 0.6978247761726379,
"learning_rate": 2.350403678833976e-05,
"loss": 2.1257,
"step": 71
},
{
"epoch": 0.43243243243243246,
"grad_norm": 0.7858160734176636,
"learning_rate": 2.2040354826462668e-05,
"loss": 2.1499,
"step": 72
},
{
"epoch": 0.43243243243243246,
"eval_loss": 1.0535194873809814,
"eval_runtime": 20.8455,
"eval_samples_per_second": 6.716,
"eval_steps_per_second": 0.863,
"step": 72
},
{
"epoch": 0.43843843843843844,
"grad_norm": 0.7479972839355469,
"learning_rate": 2.061073738537635e-05,
"loss": 2.1146,
"step": 73
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.6571378111839294,
"learning_rate": 1.9216926233717085e-05,
"loss": 2.1681,
"step": 74
},
{
"epoch": 0.45045045045045046,
"grad_norm": 0.771787166595459,
"learning_rate": 1.7860619515673033e-05,
"loss": 2.1283,
"step": 75
},
{
"epoch": 0.45645645645645644,
"grad_norm": 0.7262662649154663,
"learning_rate": 1.6543469682057106e-05,
"loss": 2.069,
"step": 76
},
{
"epoch": 0.4624624624624625,
"grad_norm": 0.7589470744132996,
"learning_rate": 1.526708147705013e-05,
"loss": 2.1764,
"step": 77
},
{
"epoch": 0.46846846846846846,
"grad_norm": 0.807276725769043,
"learning_rate": 1.4033009983067452e-05,
"loss": 2.2972,
"step": 78
},
{
"epoch": 0.4744744744744745,
"grad_norm": 0.7238224148750305,
"learning_rate": 1.2842758726130283e-05,
"loss": 2.209,
"step": 79
},
{
"epoch": 0.4804804804804805,
"grad_norm": 0.6892739534378052,
"learning_rate": 1.1697777844051105e-05,
"loss": 2.1741,
"step": 80
},
{
"epoch": 0.4864864864864865,
"grad_norm": 0.7180541157722473,
"learning_rate": 1.0599462319663905e-05,
"loss": 2.035,
"step": 81
},
{
"epoch": 0.4864864864864865,
"eval_loss": 1.0486092567443848,
"eval_runtime": 20.8685,
"eval_samples_per_second": 6.709,
"eval_steps_per_second": 0.863,
"step": 81
},
{
"epoch": 0.4924924924924925,
"grad_norm": 0.8617774844169617,
"learning_rate": 9.549150281252633e-06,
"loss": 2.1781,
"step": 82
},
{
"epoch": 0.4984984984984985,
"grad_norm": 0.6612197160720825,
"learning_rate": 8.548121372247918e-06,
"loss": 2.1554,
"step": 83
},
{
"epoch": 0.5045045045045045,
"grad_norm": 0.7757105231285095,
"learning_rate": 7.597595192178702e-06,
"loss": 2.1361,
"step": 84
},
{
"epoch": 0.5105105105105106,
"grad_norm": 0.5905804634094238,
"learning_rate": 6.698729810778065e-06,
"loss": 2.0213,
"step": 85
},
{
"epoch": 0.5165165165165165,
"grad_norm": 0.7919793725013733,
"learning_rate": 5.852620357053651e-06,
"loss": 2.0551,
"step": 86
},
{
"epoch": 0.5225225225225225,
"grad_norm": 0.8845735788345337,
"learning_rate": 5.060297685041659e-06,
"loss": 2.1245,
"step": 87
},
{
"epoch": 0.5285285285285285,
"grad_norm": 0.7294681072235107,
"learning_rate": 4.322727117869951e-06,
"loss": 2.2661,
"step": 88
},
{
"epoch": 0.5345345345345346,
"grad_norm": 0.6653247475624084,
"learning_rate": 3.6408072716606346e-06,
"loss": 2.089,
"step": 89
},
{
"epoch": 0.5405405405405406,
"grad_norm": 0.7532846331596375,
"learning_rate": 3.0153689607045845e-06,
"loss": 2.2887,
"step": 90
},
{
"epoch": 0.5405405405405406,
"eval_loss": 1.0472609996795654,
"eval_runtime": 20.9061,
"eval_samples_per_second": 6.697,
"eval_steps_per_second": 0.861,
"step": 90
},
{
"epoch": 0.5465465465465466,
"grad_norm": 0.7485697865486145,
"learning_rate": 2.4471741852423237e-06,
"loss": 2.0987,
"step": 91
},
{
"epoch": 0.5525525525525525,
"grad_norm": 0.7291666269302368,
"learning_rate": 1.9369152030840556e-06,
"loss": 2.155,
"step": 92
},
{
"epoch": 0.5585585585585585,
"grad_norm": 0.6621387600898743,
"learning_rate": 1.4852136862001764e-06,
"loss": 2.1415,
"step": 93
},
{
"epoch": 0.5645645645645646,
"grad_norm": 0.7771839499473572,
"learning_rate": 1.0926199633097157e-06,
"loss": 2.0344,
"step": 94
},
{
"epoch": 0.5705705705705706,
"grad_norm": 0.6351449489593506,
"learning_rate": 7.596123493895991e-07,
"loss": 1.9787,
"step": 95
},
{
"epoch": 0.5765765765765766,
"grad_norm": 0.733138918876648,
"learning_rate": 4.865965629214819e-07,
"loss": 2.1232,
"step": 96
},
{
"epoch": 0.5825825825825826,
"grad_norm": 0.6741092205047607,
"learning_rate": 2.7390523158633554e-07,
"loss": 2.1437,
"step": 97
},
{
"epoch": 0.5885885885885885,
"grad_norm": 0.6701088547706604,
"learning_rate": 1.2179748700879012e-07,
"loss": 2.0486,
"step": 98
},
{
"epoch": 0.5945945945945946,
"grad_norm": 0.6762816905975342,
"learning_rate": 3.04586490452119e-08,
"loss": 2.2005,
"step": 99
},
{
"epoch": 0.5945945945945946,
"eval_loss": 1.0468921661376953,
"eval_runtime": 20.8659,
"eval_samples_per_second": 6.71,
"eval_steps_per_second": 0.863,
"step": 99
},
{
"epoch": 0.6006006006006006,
"grad_norm": 0.8482312560081482,
"learning_rate": 0.0,
"loss": 2.2367,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.5391158788096e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}