Training in progress, step 50, checkpoint (commit 697d323)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.01345985597954102,
"eval_steps": 4,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00026919711959082036,
"grad_norm": 7.305245876312256,
"learning_rate": 2e-05,
"loss": 6.0527,
"step": 1
},
{
"epoch": 0.00026919711959082036,
"eval_loss": 6.004375457763672,
"eval_runtime": 186.7841,
"eval_samples_per_second": 8.379,
"eval_steps_per_second": 4.192,
"step": 1
},
{
"epoch": 0.0005383942391816407,
"grad_norm": 6.773212432861328,
"learning_rate": 4e-05,
"loss": 6.151,
"step": 2
},
{
"epoch": 0.0008075913587724611,
"grad_norm": 7.554437160491943,
"learning_rate": 6e-05,
"loss": 6.398,
"step": 3
},
{
"epoch": 0.0010767884783632815,
"grad_norm": 6.408179759979248,
"learning_rate": 8e-05,
"loss": 5.7049,
"step": 4
},
{
"epoch": 0.0010767884783632815,
"eval_loss": 5.3220696449279785,
"eval_runtime": 187.1601,
"eval_samples_per_second": 8.362,
"eval_steps_per_second": 4.184,
"step": 4
},
{
"epoch": 0.0013459855979541019,
"grad_norm": 6.113898754119873,
"learning_rate": 0.0001,
"loss": 5.5139,
"step": 5
},
{
"epoch": 0.0016151827175449223,
"grad_norm": 4.3670973777771,
"learning_rate": 0.00012,
"loss": 4.8011,
"step": 6
},
{
"epoch": 0.0018843798371357427,
"grad_norm": 8.779766082763672,
"learning_rate": 0.00014,
"loss": 4.9951,
"step": 7
},
{
"epoch": 0.002153576956726563,
"grad_norm": 8.467183113098145,
"learning_rate": 0.00016,
"loss": 4.8208,
"step": 8
},
{
"epoch": 0.002153576956726563,
"eval_loss": 4.797426700592041,
"eval_runtime": 187.0275,
"eval_samples_per_second": 8.368,
"eval_steps_per_second": 4.187,
"step": 8
},
{
"epoch": 0.0024227740763173833,
"grad_norm": 6.028713226318359,
"learning_rate": 0.00018,
"loss": 4.9807,
"step": 9
},
{
"epoch": 0.0026919711959082038,
"grad_norm": 3.5283517837524414,
"learning_rate": 0.0002,
"loss": 4.4461,
"step": 10
},
{
"epoch": 0.002961168315499024,
"grad_norm": 3.948577880859375,
"learning_rate": 0.0001996917333733128,
"loss": 4.0834,
"step": 11
},
{
"epoch": 0.0032303654350898446,
"grad_norm": 3.531851053237915,
"learning_rate": 0.00019876883405951377,
"loss": 4.6573,
"step": 12
},
{
"epoch": 0.0032303654350898446,
"eval_loss": 4.350089073181152,
"eval_runtime": 186.8405,
"eval_samples_per_second": 8.376,
"eval_steps_per_second": 4.191,
"step": 12
},
{
"epoch": 0.003499562554680665,
"grad_norm": 3.562119483947754,
"learning_rate": 0.00019723699203976766,
"loss": 4.4314,
"step": 13
},
{
"epoch": 0.0037687596742714854,
"grad_norm": 3.721893548965454,
"learning_rate": 0.00019510565162951537,
"loss": 4.4538,
"step": 14
},
{
"epoch": 0.004037956793862305,
"grad_norm": 2.8506362438201904,
"learning_rate": 0.0001923879532511287,
"loss": 4.2002,
"step": 15
},
{
"epoch": 0.004307153913453126,
"grad_norm": 2.9634995460510254,
"learning_rate": 0.0001891006524188368,
"loss": 4.3413,
"step": 16
},
{
"epoch": 0.004307153913453126,
"eval_loss": 4.165503978729248,
"eval_runtime": 187.0045,
"eval_samples_per_second": 8.369,
"eval_steps_per_second": 4.187,
"step": 16
},
{
"epoch": 0.004576351033043946,
"grad_norm": 3.4099552631378174,
"learning_rate": 0.00018526401643540922,
"loss": 4.133,
"step": 17
},
{
"epoch": 0.004845548152634767,
"grad_norm": 3.074971914291382,
"learning_rate": 0.00018090169943749476,
"loss": 3.9521,
"step": 18
},
{
"epoch": 0.005114745272225587,
"grad_norm": 2.5733466148376465,
"learning_rate": 0.0001760405965600031,
"loss": 4.1701,
"step": 19
},
{
"epoch": 0.0053839423918164075,
"grad_norm": 3.7706105709075928,
"learning_rate": 0.00017071067811865476,
"loss": 3.9503,
"step": 20
},
{
"epoch": 0.0053839423918164075,
"eval_loss": 4.0563483238220215,
"eval_runtime": 187.0013,
"eval_samples_per_second": 8.369,
"eval_steps_per_second": 4.187,
"step": 20
},
{
"epoch": 0.005653139511407228,
"grad_norm": 2.7933688163757324,
"learning_rate": 0.00016494480483301836,
"loss": 3.8037,
"step": 21
},
{
"epoch": 0.005922336630998048,
"grad_norm": 2.6206228733062744,
"learning_rate": 0.00015877852522924732,
"loss": 4.1224,
"step": 22
},
{
"epoch": 0.006191533750588869,
"grad_norm": 2.956394910812378,
"learning_rate": 0.0001522498564715949,
"loss": 3.8059,
"step": 23
},
{
"epoch": 0.006460730870179689,
"grad_norm": 2.445277690887451,
"learning_rate": 0.00014539904997395468,
"loss": 3.8613,
"step": 24
},
{
"epoch": 0.006460730870179689,
"eval_loss": 3.9950814247131348,
"eval_runtime": 186.8191,
"eval_samples_per_second": 8.377,
"eval_steps_per_second": 4.191,
"step": 24
},
{
"epoch": 0.00672992798977051,
"grad_norm": 2.6795260906219482,
"learning_rate": 0.000138268343236509,
"loss": 3.646,
"step": 25
},
{
"epoch": 0.00699912510936133,
"grad_norm": 2.925718307495117,
"learning_rate": 0.00013090169943749476,
"loss": 4.1437,
"step": 26
},
{
"epoch": 0.0072683222289521504,
"grad_norm": 2.81306529045105,
"learning_rate": 0.00012334453638559057,
"loss": 3.9201,
"step": 27
},
{
"epoch": 0.007537519348542971,
"grad_norm": 2.4982492923736572,
"learning_rate": 0.0001156434465040231,
"loss": 3.8325,
"step": 28
},
{
"epoch": 0.007537519348542971,
"eval_loss": 3.943394660949707,
"eval_runtime": 187.1247,
"eval_samples_per_second": 8.363,
"eval_steps_per_second": 4.184,
"step": 28
},
{
"epoch": 0.007806716468133791,
"grad_norm": 2.46905255317688,
"learning_rate": 0.0001078459095727845,
"loss": 4.1957,
"step": 29
},
{
"epoch": 0.00807591358772461,
"grad_norm": 2.6972992420196533,
"learning_rate": 0.0001,
"loss": 3.7234,
"step": 30
},
{
"epoch": 0.008345110707315431,
"grad_norm": 2.946199655532837,
"learning_rate": 9.215409042721552e-05,
"loss": 3.8905,
"step": 31
},
{
"epoch": 0.008614307826906252,
"grad_norm": 2.631840944290161,
"learning_rate": 8.435655349597689e-05,
"loss": 3.8763,
"step": 32
},
{
"epoch": 0.008614307826906252,
"eval_loss": 3.9116568565368652,
"eval_runtime": 187.6034,
"eval_samples_per_second": 8.342,
"eval_steps_per_second": 4.174,
"step": 32
},
{
"epoch": 0.008883504946497072,
"grad_norm": 2.5778937339782715,
"learning_rate": 7.66554636144095e-05,
"loss": 3.8609,
"step": 33
},
{
"epoch": 0.009152702066087893,
"grad_norm": 2.673618793487549,
"learning_rate": 6.909830056250527e-05,
"loss": 3.7293,
"step": 34
},
{
"epoch": 0.009421899185678713,
"grad_norm": 2.699422597885132,
"learning_rate": 6.173165676349103e-05,
"loss": 4.1321,
"step": 35
},
{
"epoch": 0.009691096305269533,
"grad_norm": 2.799333095550537,
"learning_rate": 5.4600950026045326e-05,
"loss": 3.8541,
"step": 36
},
{
"epoch": 0.009691096305269533,
"eval_loss": 3.8941519260406494,
"eval_runtime": 187.6155,
"eval_samples_per_second": 8.342,
"eval_steps_per_second": 4.173,
"step": 36
},
{
"epoch": 0.009960293424860354,
"grad_norm": 2.519052267074585,
"learning_rate": 4.7750143528405126e-05,
"loss": 3.8805,
"step": 37
},
{
"epoch": 0.010229490544451174,
"grad_norm": 2.59364652633667,
"learning_rate": 4.12214747707527e-05,
"loss": 4.0401,
"step": 38
},
{
"epoch": 0.010498687664041995,
"grad_norm": 2.6932904720306396,
"learning_rate": 3.5055195166981645e-05,
"loss": 3.6313,
"step": 39
},
{
"epoch": 0.010767884783632815,
"grad_norm": 2.4584312438964844,
"learning_rate": 2.9289321881345254e-05,
"loss": 3.9238,
"step": 40
},
{
"epoch": 0.010767884783632815,
"eval_loss": 3.8838305473327637,
"eval_runtime": 188.8048,
"eval_samples_per_second": 8.289,
"eval_steps_per_second": 4.147,
"step": 40
},
{
"epoch": 0.011037081903223635,
"grad_norm": 2.763805389404297,
"learning_rate": 2.3959403439996907e-05,
"loss": 3.7855,
"step": 41
},
{
"epoch": 0.011306279022814456,
"grad_norm": 2.3128108978271484,
"learning_rate": 1.9098300562505266e-05,
"loss": 3.494,
"step": 42
},
{
"epoch": 0.011575476142405276,
"grad_norm": 3.1583259105682373,
"learning_rate": 1.4735983564590783e-05,
"loss": 4.1824,
"step": 43
},
{
"epoch": 0.011844673261996097,
"grad_norm": 2.4732134342193604,
"learning_rate": 1.0899347581163221e-05,
"loss": 3.8976,
"step": 44
},
{
"epoch": 0.011844673261996097,
"eval_loss": 3.8724188804626465,
"eval_runtime": 187.5642,
"eval_samples_per_second": 8.344,
"eval_steps_per_second": 4.175,
"step": 44
},
{
"epoch": 0.012113870381586917,
"grad_norm": 2.592027187347412,
"learning_rate": 7.612046748871327e-06,
"loss": 3.7669,
"step": 45
},
{
"epoch": 0.012383067501177738,
"grad_norm": 2.7980010509490967,
"learning_rate": 4.8943483704846475e-06,
"loss": 4.1847,
"step": 46
},
{
"epoch": 0.012652264620768558,
"grad_norm": 2.249249219894409,
"learning_rate": 2.7630079602323442e-06,
"loss": 3.7236,
"step": 47
},
{
"epoch": 0.012921461740359378,
"grad_norm": 2.2937920093536377,
"learning_rate": 1.231165940486234e-06,
"loss": 4.0901,
"step": 48
},
{
"epoch": 0.012921461740359378,
"eval_loss": 3.869504928588867,
"eval_runtime": 186.9808,
"eval_samples_per_second": 8.37,
"eval_steps_per_second": 4.188,
"step": 48
},
{
"epoch": 0.013190658859950199,
"grad_norm": 2.3129959106445312,
"learning_rate": 3.0826662668720364e-07,
"loss": 3.8164,
"step": 49
},
{
"epoch": 0.01345985597954102,
"grad_norm": 2.5126748085021973,
"learning_rate": 0.0,
"loss": 3.8391,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 4,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.83447180771328e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
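
Below is a minimal sketch (not part of the checkpoint itself) showing how this trainer state could be inspected with Python's standard json module. The file name "trainer_state.json", and the inference that the logged learning_rate values follow 10 linear warmup steps and then a cosine decay to zero, are assumptions read off the numbers above, not facts recorded in the file.

import json
import math

# Assumption: the JSON above is saved as "trainer_state.json" next to this script.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training entries (with "loss") and eval entries (with "eval_loss").
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"global_step: {state['global_step']}  (epoch {state['epoch']:.5f})")
print(f"first/last train loss: {train[0][1]} -> {train[-1][1]}")
print(f"first/last eval loss:  {evals[0][1]} -> {evals[-1][1]}")

# The logged learning rates are consistent with 10 linear warmup steps followed
# by cosine decay to zero over the remaining 40 steps (inferred from the values,
# not stated in the checkpoint):
def expected_lr(step, peak=2e-4, warmup=10, total=50):
    if step <= warmup:
        return peak * step / warmup
    progress = (step - warmup) / (total - warmup)
    return peak * 0.5 * (1.0 + math.cos(math.pi * progress))

for entry in state["log_history"]:
    if "learning_rate" in entry:
        assert math.isclose(entry["learning_rate"], expected_lr(entry["step"]),
                            rel_tol=1e-6, abs_tol=1e-9)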