{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.014971180477580658,
"eval_steps": 4,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0002994236095516131,
"grad_norm": 16.735450744628906,
"learning_rate": 2e-05,
"loss": 10.969,
"step": 1
},
{
"epoch": 0.0002994236095516131,
"eval_loss": 11.57262897491455,
"eval_runtime": 82.9715,
"eval_samples_per_second": 8.485,
"eval_steps_per_second": 8.485,
"step": 1
},
{
"epoch": 0.0005988472191032262,
"grad_norm": 10.167157173156738,
"learning_rate": 4e-05,
"loss": 10.7129,
"step": 2
},
{
"epoch": 0.0008982708286548395,
"grad_norm": 15.215925216674805,
"learning_rate": 6e-05,
"loss": 10.434,
"step": 3
},
{
"epoch": 0.0011976944382064525,
"grad_norm": 15.04845142364502,
"learning_rate": 8e-05,
"loss": 9.5194,
"step": 4
},
{
"epoch": 0.0011976944382064525,
"eval_loss": 11.429813385009766,
"eval_runtime": 85.0662,
"eval_samples_per_second": 8.276,
"eval_steps_per_second": 8.276,
"step": 4
},
{
"epoch": 0.0014971180477580657,
"grad_norm": 17.0161075592041,
"learning_rate": 0.0001,
"loss": 10.8734,
"step": 5
},
{
"epoch": 0.001796541657309679,
"grad_norm": 17.905702590942383,
"learning_rate": 0.00012,
"loss": 11.8788,
"step": 6
},
{
"epoch": 0.002095965266861292,
"grad_norm": 21.928312301635742,
"learning_rate": 0.00014,
"loss": 10.4667,
"step": 7
},
{
"epoch": 0.002395388876412905,
"grad_norm": 13.790472030639648,
"learning_rate": 0.00016,
"loss": 9.8745,
"step": 8
},
{
"epoch": 0.002395388876412905,
"eval_loss": 9.637001037597656,
"eval_runtime": 85.1568,
"eval_samples_per_second": 8.267,
"eval_steps_per_second": 8.267,
"step": 8
},
{
"epoch": 0.002694812485964518,
"grad_norm": 12.036263465881348,
"learning_rate": 0.00018,
"loss": 7.8039,
"step": 9
},
{
"epoch": 0.0029942360955161314,
"grad_norm": 13.046463012695312,
"learning_rate": 0.0002,
"loss": 7.8042,
"step": 10
},
{
"epoch": 0.0032936597050677446,
"grad_norm": 24.538808822631836,
"learning_rate": 0.0001996917333733128,
"loss": 8.7757,
"step": 11
},
{
"epoch": 0.003593083314619358,
"grad_norm": 26.114961624145508,
"learning_rate": 0.00019876883405951377,
"loss": 5.6577,
"step": 12
},
{
"epoch": 0.003593083314619358,
"eval_loss": 5.31056022644043,
"eval_runtime": 85.1933,
"eval_samples_per_second": 8.264,
"eval_steps_per_second": 8.264,
"step": 12
},
{
"epoch": 0.003892506924170971,
"grad_norm": 17.937646865844727,
"learning_rate": 0.00019723699203976766,
"loss": 4.7382,
"step": 13
},
{
"epoch": 0.004191930533722584,
"grad_norm": 21.441980361938477,
"learning_rate": 0.00019510565162951537,
"loss": 5.3976,
"step": 14
},
{
"epoch": 0.0044913541432741975,
"grad_norm": 32.05086898803711,
"learning_rate": 0.0001923879532511287,
"loss": 3.6275,
"step": 15
},
{
"epoch": 0.00479077775282581,
"grad_norm": 43.66167449951172,
"learning_rate": 0.0001891006524188368,
"loss": 2.7349,
"step": 16
},
{
"epoch": 0.00479077775282581,
"eval_loss": 4.855716705322266,
"eval_runtime": 83.6415,
"eval_samples_per_second": 8.417,
"eval_steps_per_second": 8.417,
"step": 16
},
{
"epoch": 0.005090201362377423,
"grad_norm": 26.169757843017578,
"learning_rate": 0.00018526401643540922,
"loss": 4.8565,
"step": 17
},
{
"epoch": 0.005389624971929036,
"grad_norm": 27.142152786254883,
"learning_rate": 0.00018090169943749476,
"loss": 4.9723,
"step": 18
},
{
"epoch": 0.00568904858148065,
"grad_norm": 18.188777923583984,
"learning_rate": 0.0001760405965600031,
"loss": 2.6307,
"step": 19
},
{
"epoch": 0.005988472191032263,
"grad_norm": 21.539011001586914,
"learning_rate": 0.00017071067811865476,
"loss": 3.5522,
"step": 20
},
{
"epoch": 0.005988472191032263,
"eval_loss": 4.069602012634277,
"eval_runtime": 83.15,
"eval_samples_per_second": 8.467,
"eval_steps_per_second": 8.467,
"step": 20
},
{
"epoch": 0.006287895800583876,
"grad_norm": 18.60074806213379,
"learning_rate": 0.00016494480483301836,
"loss": 4.4333,
"step": 21
},
{
"epoch": 0.006587319410135489,
"grad_norm": 15.545724868774414,
"learning_rate": 0.00015877852522924732,
"loss": 3.6316,
"step": 22
},
{
"epoch": 0.0068867430196871025,
"grad_norm": 23.216941833496094,
"learning_rate": 0.0001522498564715949,
"loss": 3.2189,
"step": 23
},
{
"epoch": 0.007186166629238716,
"grad_norm": 16.716583251953125,
"learning_rate": 0.00014539904997395468,
"loss": 3.0378,
"step": 24
},
{
"epoch": 0.007186166629238716,
"eval_loss": 3.7071402072906494,
"eval_runtime": 83.2372,
"eval_samples_per_second": 8.458,
"eval_steps_per_second": 8.458,
"step": 24
},
{
"epoch": 0.007485590238790329,
"grad_norm": 18.92926597595215,
"learning_rate": 0.000138268343236509,
"loss": 2.2366,
"step": 25
},
{
"epoch": 0.007785013848341942,
"grad_norm": 30.20500946044922,
"learning_rate": 0.00013090169943749476,
"loss": 3.3568,
"step": 26
},
{
"epoch": 0.008084437457893555,
"grad_norm": 15.139212608337402,
"learning_rate": 0.00012334453638559057,
"loss": 3.0559,
"step": 27
},
{
"epoch": 0.008383861067445169,
"grad_norm": 20.157346725463867,
"learning_rate": 0.0001156434465040231,
"loss": 3.2366,
"step": 28
},
{
"epoch": 0.008383861067445169,
"eval_loss": 3.518523931503296,
"eval_runtime": 83.1412,
"eval_samples_per_second": 8.468,
"eval_steps_per_second": 8.468,
"step": 28
},
{
"epoch": 0.008683284676996781,
"grad_norm": 16.283584594726562,
"learning_rate": 0.0001078459095727845,
"loss": 3.5697,
"step": 29
},
{
"epoch": 0.008982708286548395,
"grad_norm": 32.41582489013672,
"learning_rate": 0.0001,
"loss": 3.4125,
"step": 30
},
{
"epoch": 0.009282131896100007,
"grad_norm": 26.877944946289062,
"learning_rate": 9.215409042721552e-05,
"loss": 3.3671,
"step": 31
},
{
"epoch": 0.00958155550565162,
"grad_norm": 27.856897354125977,
"learning_rate": 8.435655349597689e-05,
"loss": 3.5875,
"step": 32
},
{
"epoch": 0.00958155550565162,
"eval_loss": 3.401846408843994,
"eval_runtime": 83.4215,
"eval_samples_per_second": 8.439,
"eval_steps_per_second": 8.439,
"step": 32
},
{
"epoch": 0.009880979115203234,
"grad_norm": 41.4135627746582,
"learning_rate": 7.66554636144095e-05,
"loss": 3.9928,
"step": 33
},
{
"epoch": 0.010180402724754846,
"grad_norm": 29.63922882080078,
"learning_rate": 6.909830056250527e-05,
"loss": 2.1258,
"step": 34
},
{
"epoch": 0.01047982633430646,
"grad_norm": 34.11370086669922,
"learning_rate": 6.173165676349103e-05,
"loss": 2.8219,
"step": 35
},
{
"epoch": 0.010779249943858073,
"grad_norm": 19.921571731567383,
"learning_rate": 5.4600950026045326e-05,
"loss": 4.0769,
"step": 36
},
{
"epoch": 0.010779249943858073,
"eval_loss": 3.3533554077148438,
"eval_runtime": 83.2266,
"eval_samples_per_second": 8.459,
"eval_steps_per_second": 8.459,
"step": 36
},
{
"epoch": 0.011078673553409687,
"grad_norm": 21.9058895111084,
"learning_rate": 4.7750143528405126e-05,
"loss": 2.9877,
"step": 37
},
{
"epoch": 0.0113780971629613,
"grad_norm": 20.251365661621094,
"learning_rate": 4.12214747707527e-05,
"loss": 2.4354,
"step": 38
},
{
"epoch": 0.011677520772512913,
"grad_norm": 31.047348022460938,
"learning_rate": 3.5055195166981645e-05,
"loss": 3.9592,
"step": 39
},
{
"epoch": 0.011976944382064526,
"grad_norm": 20.160734176635742,
"learning_rate": 2.9289321881345254e-05,
"loss": 2.8831,
"step": 40
},
{
"epoch": 0.011976944382064526,
"eval_loss": 3.2888846397399902,
"eval_runtime": 83.3941,
"eval_samples_per_second": 8.442,
"eval_steps_per_second": 8.442,
"step": 40
},
{
"epoch": 0.01227636799161614,
"grad_norm": 22.80976104736328,
"learning_rate": 2.3959403439996907e-05,
"loss": 3.3648,
"step": 41
},
{
"epoch": 0.012575791601167752,
"grad_norm": 40.02175521850586,
"learning_rate": 1.9098300562505266e-05,
"loss": 3.0131,
"step": 42
},
{
"epoch": 0.012875215210719364,
"grad_norm": 40.88200759887695,
"learning_rate": 1.4735983564590783e-05,
"loss": 4.2676,
"step": 43
},
{
"epoch": 0.013174638820270979,
"grad_norm": 44.29335021972656,
"learning_rate": 1.0899347581163221e-05,
"loss": 3.5078,
"step": 44
},
{
"epoch": 0.013174638820270979,
"eval_loss": 3.2594785690307617,
"eval_runtime": 83.3165,
"eval_samples_per_second": 8.45,
"eval_steps_per_second": 8.45,
"step": 44
},
{
"epoch": 0.013474062429822591,
"grad_norm": 34.80168151855469,
"learning_rate": 7.612046748871327e-06,
"loss": 4.7818,
"step": 45
},
{
"epoch": 0.013773486039374205,
"grad_norm": 25.594444274902344,
"learning_rate": 4.8943483704846475e-06,
"loss": 3.0296,
"step": 46
},
{
"epoch": 0.014072909648925817,
"grad_norm": 39.9898567199707,
"learning_rate": 2.7630079602323442e-06,
"loss": 1.9811,
"step": 47
},
{
"epoch": 0.014372333258477431,
"grad_norm": 58.41755294799805,
"learning_rate": 1.231165940486234e-06,
"loss": 2.8259,
"step": 48
},
{
"epoch": 0.014372333258477431,
"eval_loss": 3.2478675842285156,
"eval_runtime": 83.2404,
"eval_samples_per_second": 8.457,
"eval_steps_per_second": 8.457,
"step": 48
},
{
"epoch": 0.014671756868029044,
"grad_norm": 47.00809860229492,
"learning_rate": 3.0826662668720364e-07,
"loss": 3.4149,
"step": 49
},
{
"epoch": 0.014971180477580658,
"grad_norm": 37.111209869384766,
"learning_rate": 0.0,
"loss": 3.3272,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 4,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.85471820890112e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
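
Note: the learning_rate values logged above are consistent with a linear warmup over the first 10 optimizer steps (to a peak of 2e-4) followed by cosine decay to zero over the remaining 40 steps (max_steps = 50). Below is a minimal Python sketch that reproduces those values; the function name and the warmup/total-step constants are inferred from the log itself, not read from the original training config.

```python
import math

def lr_at_step(step: int, peak_lr: float = 2e-4,
               warmup_steps: int = 10, total_steps: int = 50) -> float:
    """Reproduce the learning_rate values in log_history: linear warmup
    to peak_lr over the first warmup_steps, then cosine decay to zero."""
    if step <= warmup_steps:
        return peak_lr * step / warmup_steps
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return peak_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

# lr_at_step(5)  -> 1e-04, lr_at_step(11) -> ~1.9969e-04,
# lr_at_step(30) -> 1e-04, lr_at_step(50) -> 0.0,
# matching the corresponding entries in log_history.
```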