{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.3986013986013985,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.013986013986013986,
"grad_norm": 0.018208099529147148,
"learning_rate": 1e-05,
"loss": 10.3794,
"step": 1
},
{
"epoch": 0.013986013986013986,
"eval_loss": 10.378853797912598,
"eval_runtime": 0.2269,
"eval_samples_per_second": 1057.848,
"eval_steps_per_second": 35.262,
"step": 1
},
{
"epoch": 0.027972027972027972,
"grad_norm": 0.019829176366329193,
"learning_rate": 2e-05,
"loss": 10.3791,
"step": 2
},
{
"epoch": 0.04195804195804196,
"grad_norm": 0.015240170061588287,
"learning_rate": 3e-05,
"loss": 10.3763,
"step": 3
},
{
"epoch": 0.055944055944055944,
"grad_norm": 0.015529603697359562,
"learning_rate": 4e-05,
"loss": 10.3774,
"step": 4
},
{
"epoch": 0.06993006993006994,
"grad_norm": 0.01788165047764778,
"learning_rate": 5e-05,
"loss": 10.378,
"step": 5
},
{
"epoch": 0.08391608391608392,
"grad_norm": 0.01706252060830593,
"learning_rate": 6e-05,
"loss": 10.3777,
"step": 6
},
{
"epoch": 0.0979020979020979,
"grad_norm": 0.017586465924978256,
"learning_rate": 7e-05,
"loss": 10.3778,
"step": 7
},
{
"epoch": 0.11188811188811189,
"grad_norm": 0.019204173237085342,
"learning_rate": 8e-05,
"loss": 10.3803,
"step": 8
},
{
"epoch": 0.1258741258741259,
"grad_norm": 0.01797935739159584,
"learning_rate": 9e-05,
"loss": 10.3784,
"step": 9
},
{
"epoch": 0.1258741258741259,
"eval_loss": 10.378623008728027,
"eval_runtime": 0.2291,
"eval_samples_per_second": 1047.352,
"eval_steps_per_second": 34.912,
"step": 9
},
{
"epoch": 0.13986013986013987,
"grad_norm": 0.01941474713385105,
"learning_rate": 0.0001,
"loss": 10.3797,
"step": 10
},
{
"epoch": 0.15384615384615385,
"grad_norm": 0.01960493065416813,
"learning_rate": 9.99695413509548e-05,
"loss": 10.3767,
"step": 11
},
{
"epoch": 0.16783216783216784,
"grad_norm": 0.020062170922756195,
"learning_rate": 9.987820251299122e-05,
"loss": 10.3783,
"step": 12
},
{
"epoch": 0.18181818181818182,
"grad_norm": 0.01915978081524372,
"learning_rate": 9.972609476841367e-05,
"loss": 10.3771,
"step": 13
},
{
"epoch": 0.1958041958041958,
"grad_norm": 0.016005827113986015,
"learning_rate": 9.951340343707852e-05,
"loss": 10.3767,
"step": 14
},
{
"epoch": 0.2097902097902098,
"grad_norm": 0.016981465741991997,
"learning_rate": 9.924038765061042e-05,
"loss": 10.377,
"step": 15
},
{
"epoch": 0.22377622377622378,
"grad_norm": 0.019123638048768044,
"learning_rate": 9.890738003669029e-05,
"loss": 10.378,
"step": 16
},
{
"epoch": 0.23776223776223776,
"grad_norm": 0.017938820645213127,
"learning_rate": 9.851478631379982e-05,
"loss": 10.3774,
"step": 17
},
{
"epoch": 0.2517482517482518,
"grad_norm": 0.017268581315875053,
"learning_rate": 9.806308479691595e-05,
"loss": 10.3773,
"step": 18
},
{
"epoch": 0.2517482517482518,
"eval_loss": 10.378046989440918,
"eval_runtime": 0.2254,
"eval_samples_per_second": 1064.655,
"eval_steps_per_second": 35.488,
"step": 18
},
{
"epoch": 0.26573426573426573,
"grad_norm": 0.017280064523220062,
"learning_rate": 9.755282581475769e-05,
"loss": 10.3762,
"step": 19
},
{
"epoch": 0.27972027972027974,
"grad_norm": 0.019195018336176872,
"learning_rate": 9.698463103929542e-05,
"loss": 10.3777,
"step": 20
},
{
"epoch": 0.2937062937062937,
"grad_norm": 0.019608568400144577,
"learning_rate": 9.635919272833938e-05,
"loss": 10.3781,
"step": 21
},
{
"epoch": 0.3076923076923077,
"grad_norm": 0.018666790798306465,
"learning_rate": 9.567727288213005e-05,
"loss": 10.3768,
"step": 22
},
{
"epoch": 0.32167832167832167,
"grad_norm": 0.018083766102790833,
"learning_rate": 9.493970231495835e-05,
"loss": 10.3779,
"step": 23
},
{
"epoch": 0.3356643356643357,
"grad_norm": 0.017882829532027245,
"learning_rate": 9.414737964294636e-05,
"loss": 10.376,
"step": 24
},
{
"epoch": 0.34965034965034963,
"grad_norm": 0.020201081410050392,
"learning_rate": 9.330127018922194e-05,
"loss": 10.3759,
"step": 25
},
{
"epoch": 0.36363636363636365,
"grad_norm": 0.024009106680750847,
"learning_rate": 9.24024048078213e-05,
"loss": 10.3777,
"step": 26
},
{
"epoch": 0.3776223776223776,
"grad_norm": 0.021636882796883583,
"learning_rate": 9.145187862775209e-05,
"loss": 10.3774,
"step": 27
},
{
"epoch": 0.3776223776223776,
"eval_loss": 10.37741756439209,
"eval_runtime": 0.227,
"eval_samples_per_second": 1057.248,
"eval_steps_per_second": 35.242,
"step": 27
},
{
"epoch": 0.3916083916083916,
"grad_norm": 0.021926555782556534,
"learning_rate": 9.045084971874738e-05,
"loss": 10.377,
"step": 28
},
{
"epoch": 0.40559440559440557,
"grad_norm": 0.02251768298447132,
"learning_rate": 8.940053768033609e-05,
"loss": 10.375,
"step": 29
},
{
"epoch": 0.4195804195804196,
"grad_norm": 0.019395161420106888,
"learning_rate": 8.83022221559489e-05,
"loss": 10.3764,
"step": 30
},
{
"epoch": 0.43356643356643354,
"grad_norm": 0.02481013350188732,
"learning_rate": 8.715724127386972e-05,
"loss": 10.3767,
"step": 31
},
{
"epoch": 0.44755244755244755,
"grad_norm": 0.024715891107916832,
"learning_rate": 8.596699001693255e-05,
"loss": 10.3753,
"step": 32
},
{
"epoch": 0.46153846153846156,
"grad_norm": 0.02524632401764393,
"learning_rate": 8.473291852294987e-05,
"loss": 10.3773,
"step": 33
},
{
"epoch": 0.4755244755244755,
"grad_norm": 0.02561429888010025,
"learning_rate": 8.345653031794292e-05,
"loss": 10.3758,
"step": 34
},
{
"epoch": 0.48951048951048953,
"grad_norm": 0.025069493800401688,
"learning_rate": 8.213938048432697e-05,
"loss": 10.3755,
"step": 35
},
{
"epoch": 0.5034965034965035,
"grad_norm": 0.0294085294008255,
"learning_rate": 8.07830737662829e-05,
"loss": 10.3763,
"step": 36
},
{
"epoch": 0.5034965034965035,
"eval_loss": 10.376723289489746,
"eval_runtime": 0.2223,
"eval_samples_per_second": 1079.725,
"eval_steps_per_second": 35.991,
"step": 36
},
{
"epoch": 0.5174825174825175,
"grad_norm": 0.026126094162464142,
"learning_rate": 7.938926261462366e-05,
"loss": 10.3763,
"step": 37
},
{
"epoch": 0.5314685314685315,
"grad_norm": 0.02763998508453369,
"learning_rate": 7.795964517353735e-05,
"loss": 10.3742,
"step": 38
},
{
"epoch": 0.5454545454545454,
"grad_norm": 0.031683456152677536,
"learning_rate": 7.649596321166024e-05,
"loss": 10.3766,
"step": 39
},
{
"epoch": 0.5594405594405595,
"grad_norm": 0.028375782072544098,
"learning_rate": 7.500000000000001e-05,
"loss": 10.3754,
"step": 40
},
{
"epoch": 0.5734265734265734,
"grad_norm": 0.02905913069844246,
"learning_rate": 7.347357813929454e-05,
"loss": 10.3767,
"step": 41
},
{
"epoch": 0.5874125874125874,
"grad_norm": 0.02657276764512062,
"learning_rate": 7.191855733945387e-05,
"loss": 10.3752,
"step": 42
},
{
"epoch": 0.6013986013986014,
"grad_norm": 0.030018672347068787,
"learning_rate": 7.033683215379002e-05,
"loss": 10.3747,
"step": 43
},
{
"epoch": 0.6153846153846154,
"grad_norm": 0.02869054302573204,
"learning_rate": 6.873032967079561e-05,
"loss": 10.3757,
"step": 44
},
{
"epoch": 0.6293706293706294,
"grad_norm": 0.030864108353853226,
"learning_rate": 6.710100716628344e-05,
"loss": 10.3759,
"step": 45
},
{
"epoch": 0.6293706293706294,
"eval_loss": 10.375964164733887,
"eval_runtime": 0.2391,
"eval_samples_per_second": 1003.749,
"eval_steps_per_second": 33.458,
"step": 45
},
{
"epoch": 0.6433566433566433,
"grad_norm": 0.02986125275492668,
"learning_rate": 6.545084971874738e-05,
"loss": 10.3764,
"step": 46
},
{
"epoch": 0.6573426573426573,
"grad_norm": 0.03223278000950813,
"learning_rate": 6.378186779084995e-05,
"loss": 10.3748,
"step": 47
},
{
"epoch": 0.6713286713286714,
"grad_norm": 0.03329155221581459,
"learning_rate": 6.209609477998338e-05,
"loss": 10.3759,
"step": 48
},
{
"epoch": 0.6853146853146853,
"grad_norm": 0.03491205349564552,
"learning_rate": 6.0395584540887963e-05,
"loss": 10.3753,
"step": 49
},
{
"epoch": 0.6993006993006993,
"grad_norm": 0.03453943133354187,
"learning_rate": 5.868240888334653e-05,
"loss": 10.3749,
"step": 50
},
{
"epoch": 0.7132867132867133,
"grad_norm": 0.031059524044394493,
"learning_rate": 5.695865504800327e-05,
"loss": 10.3751,
"step": 51
},
{
"epoch": 0.7272727272727273,
"grad_norm": 0.037967417389154434,
"learning_rate": 5.522642316338268e-05,
"loss": 10.3764,
"step": 52
},
{
"epoch": 0.7412587412587412,
"grad_norm": 0.03357519581913948,
"learning_rate": 5.348782368720626e-05,
"loss": 10.3734,
"step": 53
},
{
"epoch": 0.7552447552447552,
"grad_norm": 0.03755338490009308,
"learning_rate": 5.174497483512506e-05,
"loss": 10.3767,
"step": 54
},
{
"epoch": 0.7552447552447552,
"eval_loss": 10.375212669372559,
"eval_runtime": 0.2375,
"eval_samples_per_second": 1010.492,
"eval_steps_per_second": 33.683,
"step": 54
},
{
"epoch": 0.7692307692307693,
"grad_norm": 0.036303672939538956,
"learning_rate": 5e-05,
"loss": 10.3755,
"step": 55
},
{
"epoch": 0.7832167832167832,
"grad_norm": 0.035945743322372437,
"learning_rate": 4.825502516487497e-05,
"loss": 10.3737,
"step": 56
},
{
"epoch": 0.7972027972027972,
"grad_norm": 0.03665671497583389,
"learning_rate": 4.6512176312793736e-05,
"loss": 10.3742,
"step": 57
},
{
"epoch": 0.8111888111888111,
"grad_norm": 0.03903292864561081,
"learning_rate": 4.477357683661734e-05,
"loss": 10.3754,
"step": 58
},
{
"epoch": 0.8251748251748252,
"grad_norm": 0.037477292120456696,
"learning_rate": 4.3041344951996746e-05,
"loss": 10.3737,
"step": 59
},
{
"epoch": 0.8391608391608392,
"grad_norm": 0.03761836886405945,
"learning_rate": 4.131759111665349e-05,
"loss": 10.3753,
"step": 60
},
{
"epoch": 0.8531468531468531,
"grad_norm": 0.03947959467768669,
"learning_rate": 3.960441545911204e-05,
"loss": 10.375,
"step": 61
},
{
"epoch": 0.8671328671328671,
"grad_norm": 0.04931560531258583,
"learning_rate": 3.790390522001662e-05,
"loss": 10.3738,
"step": 62
},
{
"epoch": 0.8811188811188811,
"grad_norm": 0.04289368912577629,
"learning_rate": 3.6218132209150045e-05,
"loss": 10.3736,
"step": 63
},
{
"epoch": 0.8811188811188811,
"eval_loss": 10.374542236328125,
"eval_runtime": 0.2332,
"eval_samples_per_second": 1029.334,
"eval_steps_per_second": 34.311,
"step": 63
},
{
"epoch": 0.8951048951048951,
"grad_norm": 0.04646448791027069,
"learning_rate": 3.4549150281252636e-05,
"loss": 10.374,
"step": 64
},
{
"epoch": 0.9090909090909091,
"grad_norm": 0.041442107409238815,
"learning_rate": 3.289899283371657e-05,
"loss": 10.3741,
"step": 65
},
{
"epoch": 0.9230769230769231,
"grad_norm": 0.0464664064347744,
"learning_rate": 3.12696703292044e-05,
"loss": 10.3735,
"step": 66
},
{
"epoch": 0.9370629370629371,
"grad_norm": 0.045736297965049744,
"learning_rate": 2.9663167846209998e-05,
"loss": 10.3745,
"step": 67
},
{
"epoch": 0.951048951048951,
"grad_norm": 0.04584592953324318,
"learning_rate": 2.8081442660546125e-05,
"loss": 10.3769,
"step": 68
},
{
"epoch": 0.965034965034965,
"grad_norm": 0.04951049014925957,
"learning_rate": 2.6526421860705473e-05,
"loss": 10.3726,
"step": 69
},
{
"epoch": 0.9790209790209791,
"grad_norm": 0.04482301324605942,
"learning_rate": 2.500000000000001e-05,
"loss": 10.3733,
"step": 70
},
{
"epoch": 0.993006993006993,
"grad_norm": 0.035766538232564926,
"learning_rate": 2.350403678833976e-05,
"loss": 10.3749,
"step": 71
},
{
"epoch": 1.006993006993007,
"grad_norm": 0.0610295832157135,
"learning_rate": 2.2040354826462668e-05,
"loss": 15.5208,
"step": 72
},
{
"epoch": 1.006993006993007,
"eval_loss": 10.374032020568848,
"eval_runtime": 0.2214,
"eval_samples_per_second": 1083.829,
"eval_steps_per_second": 36.128,
"step": 72
},
{
"epoch": 1.020979020979021,
"grad_norm": 0.05227012559771538,
"learning_rate": 2.061073738537635e-05,
"loss": 10.218,
"step": 73
},
{
"epoch": 1.034965034965035,
"grad_norm": 0.050558362156152725,
"learning_rate": 1.9216926233717085e-05,
"loss": 10.4773,
"step": 74
},
{
"epoch": 1.048951048951049,
"grad_norm": 0.04764160141348839,
"learning_rate": 1.7860619515673033e-05,
"loss": 10.0928,
"step": 75
},
{
"epoch": 1.062937062937063,
"grad_norm": 0.04595685005187988,
"learning_rate": 1.6543469682057106e-05,
"loss": 10.9266,
"step": 76
},
{
"epoch": 1.0769230769230769,
"grad_norm": 0.049153126776218414,
"learning_rate": 1.526708147705013e-05,
"loss": 10.3148,
"step": 77
},
{
"epoch": 1.0909090909090908,
"grad_norm": 0.05073670297861099,
"learning_rate": 1.4033009983067452e-05,
"loss": 10.641,
"step": 78
},
{
"epoch": 1.104895104895105,
"grad_norm": 0.044325049966573715,
"learning_rate": 1.2842758726130283e-05,
"loss": 10.0433,
"step": 79
},
{
"epoch": 1.118881118881119,
"grad_norm": 0.04824868589639664,
"learning_rate": 1.1697777844051105e-05,
"loss": 10.0382,
"step": 80
},
{
"epoch": 1.132867132867133,
"grad_norm": 0.050049543380737305,
"learning_rate": 1.0599462319663905e-05,
"loss": 10.8138,
"step": 81
},
{
"epoch": 1.132867132867133,
"eval_loss": 10.373720169067383,
"eval_runtime": 0.2474,
"eval_samples_per_second": 970.223,
"eval_steps_per_second": 32.341,
"step": 81
},
{
"epoch": 1.1468531468531469,
"grad_norm": 0.05412643030285835,
"learning_rate": 9.549150281252633e-06,
"loss": 10.0682,
"step": 82
},
{
"epoch": 1.1608391608391608,
"grad_norm": 0.04766753688454628,
"learning_rate": 8.548121372247918e-06,
"loss": 9.9111,
"step": 83
},
{
"epoch": 1.1748251748251748,
"grad_norm": 0.049063920974731445,
"learning_rate": 7.597595192178702e-06,
"loss": 10.8495,
"step": 84
},
{
"epoch": 1.1888111888111887,
"grad_norm": 0.05320300906896591,
"learning_rate": 6.698729810778065e-06,
"loss": 10.4015,
"step": 85
},
{
"epoch": 1.2027972027972027,
"grad_norm": 0.052162427455186844,
"learning_rate": 5.852620357053651e-06,
"loss": 10.1188,
"step": 86
},
{
"epoch": 1.2167832167832167,
"grad_norm": 0.05713256821036339,
"learning_rate": 5.060297685041659e-06,
"loss": 11.3542,
"step": 87
},
{
"epoch": 1.2307692307692308,
"grad_norm": 0.03999221324920654,
"learning_rate": 4.322727117869951e-06,
"loss": 10.3934,
"step": 88
},
{
"epoch": 1.2447552447552448,
"grad_norm": 0.04331766068935394,
"learning_rate": 3.6408072716606346e-06,
"loss": 8.8227,
"step": 89
},
{
"epoch": 1.2587412587412588,
"grad_norm": 0.061157867312431335,
"learning_rate": 3.0153689607045845e-06,
"loss": 11.7923,
"step": 90
},
{
"epoch": 1.2587412587412588,
"eval_loss": 10.373579025268555,
"eval_runtime": 0.227,
"eval_samples_per_second": 1057.372,
"eval_steps_per_second": 35.246,
"step": 90
},
{
"epoch": 1.2727272727272727,
"grad_norm": 0.04732262343168259,
"learning_rate": 2.4471741852423237e-06,
"loss": 9.998,
"step": 91
},
{
"epoch": 1.2867132867132867,
"grad_norm": 0.04708394780755043,
"learning_rate": 1.9369152030840556e-06,
"loss": 9.6389,
"step": 92
},
{
"epoch": 1.3006993006993006,
"grad_norm": 0.05312468856573105,
"learning_rate": 1.4852136862001764e-06,
"loss": 11.1191,
"step": 93
},
{
"epoch": 1.3146853146853146,
"grad_norm": 0.05056404694914818,
"learning_rate": 1.0926199633097157e-06,
"loss": 10.2935,
"step": 94
},
{
"epoch": 1.3286713286713288,
"grad_norm": 0.0460284948348999,
"learning_rate": 7.596123493895991e-07,
"loss": 10.076,
"step": 95
},
{
"epoch": 1.3426573426573427,
"grad_norm": 0.04575815796852112,
"learning_rate": 4.865965629214819e-07,
"loss": 9.8108,
"step": 96
},
{
"epoch": 1.3566433566433567,
"grad_norm": 0.05185329541563988,
"learning_rate": 2.7390523158633554e-07,
"loss": 11.363,
"step": 97
},
{
"epoch": 1.3706293706293706,
"grad_norm": 0.04732576757669449,
"learning_rate": 1.2179748700879012e-07,
"loss": 10.0063,
"step": 98
},
{
"epoch": 1.3846153846153846,
"grad_norm": 0.046071410179138184,
"learning_rate": 3.04586490452119e-08,
"loss": 10.6642,
"step": 99
},
{
"epoch": 1.3846153846153846,
"eval_loss": 10.373554229736328,
"eval_runtime": 0.2254,
"eval_samples_per_second": 1065.008,
"eval_steps_per_second": 35.5,
"step": 99
},
{
"epoch": 1.3986013986013985,
"grad_norm": 0.055754274129867554,
"learning_rate": 0.0,
"loss": 10.3559,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 31381467955200.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}