{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.36496350364963503,
"eval_steps": 25,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004866180048661801,
"grad_norm": 0.6534222364425659,
"learning_rate": 3.3333333333333335e-05,
"loss": 9.6643,
"step": 1
},
{
"epoch": 0.004866180048661801,
"eval_loss": 1.717971920967102,
"eval_runtime": 99.4449,
"eval_samples_per_second": 1.75,
"eval_steps_per_second": 0.875,
"step": 1
},
{
"epoch": 0.009732360097323601,
"grad_norm": 0.7636905908584595,
"learning_rate": 6.666666666666667e-05,
"loss": 10.4849,
"step": 2
},
{
"epoch": 0.014598540145985401,
"grad_norm": 0.8352400660514832,
"learning_rate": 0.0001,
"loss": 10.5691,
"step": 3
},
{
"epoch": 0.019464720194647202,
"grad_norm": 1.1163321733474731,
"learning_rate": 9.99524110790929e-05,
"loss": 10.8908,
"step": 4
},
{
"epoch": 0.024330900243309004,
"grad_norm": 1.1510467529296875,
"learning_rate": 9.980973490458728e-05,
"loss": 10.8367,
"step": 5
},
{
"epoch": 0.029197080291970802,
"grad_norm": 1.3899632692337036,
"learning_rate": 9.957224306869053e-05,
"loss": 10.3491,
"step": 6
},
{
"epoch": 0.0340632603406326,
"grad_norm": 1.2129096984863281,
"learning_rate": 9.924038765061042e-05,
"loss": 11.0093,
"step": 7
},
{
"epoch": 0.038929440389294405,
"grad_norm": 1.380240797996521,
"learning_rate": 9.881480035599667e-05,
"loss": 11.1512,
"step": 8
},
{
"epoch": 0.043795620437956206,
"grad_norm": 1.4904532432556152,
"learning_rate": 9.829629131445342e-05,
"loss": 11.2217,
"step": 9
},
{
"epoch": 0.04866180048661801,
"grad_norm": 1.634533166885376,
"learning_rate": 9.768584753741134e-05,
"loss": 10.5503,
"step": 10
},
{
"epoch": 0.0535279805352798,
"grad_norm": 1.5645819902420044,
"learning_rate": 9.698463103929542e-05,
"loss": 10.7389,
"step": 11
},
{
"epoch": 0.058394160583941604,
"grad_norm": 1.600929856300354,
"learning_rate": 9.619397662556435e-05,
"loss": 10.5976,
"step": 12
},
{
"epoch": 0.06326034063260341,
"grad_norm": 1.604222059249878,
"learning_rate": 9.53153893518325e-05,
"loss": 10.7785,
"step": 13
},
{
"epoch": 0.0681265206812652,
"grad_norm": 1.727508306503296,
"learning_rate": 9.435054165891109e-05,
"loss": 10.7356,
"step": 14
},
{
"epoch": 0.072992700729927,
"grad_norm": 1.7615082263946533,
"learning_rate": 9.330127018922194e-05,
"loss": 10.9871,
"step": 15
},
{
"epoch": 0.07785888077858881,
"grad_norm": 1.8271732330322266,
"learning_rate": 9.21695722906443e-05,
"loss": 11.0153,
"step": 16
},
{
"epoch": 0.0827250608272506,
"grad_norm": 1.8473275899887085,
"learning_rate": 9.09576022144496e-05,
"loss": 11.4744,
"step": 17
},
{
"epoch": 0.08759124087591241,
"grad_norm": 2.002676010131836,
"learning_rate": 8.966766701456177e-05,
"loss": 11.1621,
"step": 18
},
{
"epoch": 0.09245742092457421,
"grad_norm": 2.036090850830078,
"learning_rate": 8.83022221559489e-05,
"loss": 11.6886,
"step": 19
},
{
"epoch": 0.09732360097323602,
"grad_norm": 2.1093671321868896,
"learning_rate": 8.68638668405062e-05,
"loss": 11.2061,
"step": 20
},
{
"epoch": 0.10218978102189781,
"grad_norm": 2.150874614715576,
"learning_rate": 8.535533905932738e-05,
"loss": 11.1087,
"step": 21
},
{
"epoch": 0.1070559610705596,
"grad_norm": 2.380944013595581,
"learning_rate": 8.377951038078302e-05,
"loss": 10.9267,
"step": 22
},
{
"epoch": 0.11192214111922141,
"grad_norm": 2.3676857948303223,
"learning_rate": 8.213938048432697e-05,
"loss": 11.7442,
"step": 23
},
{
"epoch": 0.11678832116788321,
"grad_norm": 2.6119561195373535,
"learning_rate": 8.043807145043604e-05,
"loss": 11.3183,
"step": 24
},
{
"epoch": 0.12165450121654502,
"grad_norm": 2.365917921066284,
"learning_rate": 7.86788218175523e-05,
"loss": 10.8457,
"step": 25
},
{
"epoch": 0.12165450121654502,
"eval_loss": 1.371411919593811,
"eval_runtime": 99.9993,
"eval_samples_per_second": 1.74,
"eval_steps_per_second": 0.87,
"step": 25
},
{
"epoch": 0.12652068126520682,
"grad_norm": 2.573071241378784,
"learning_rate": 7.68649804173412e-05,
"loss": 10.9635,
"step": 26
},
{
"epoch": 0.13138686131386862,
"grad_norm": 2.5956778526306152,
"learning_rate": 7.500000000000001e-05,
"loss": 11.7278,
"step": 27
},
{
"epoch": 0.1362530413625304,
"grad_norm": 2.9840710163116455,
"learning_rate": 7.308743066175172e-05,
"loss": 11.3258,
"step": 28
},
{
"epoch": 0.1411192214111922,
"grad_norm": 2.692122459411621,
"learning_rate": 7.113091308703498e-05,
"loss": 11.1816,
"step": 29
},
{
"epoch": 0.145985401459854,
"grad_norm": 2.821690559387207,
"learning_rate": 6.91341716182545e-05,
"loss": 10.7083,
"step": 30
},
{
"epoch": 0.15085158150851583,
"grad_norm": 2.8995065689086914,
"learning_rate": 6.710100716628344e-05,
"loss": 11.7289,
"step": 31
},
{
"epoch": 0.15571776155717762,
"grad_norm": 2.86710524559021,
"learning_rate": 6.503528997521366e-05,
"loss": 11.5014,
"step": 32
},
{
"epoch": 0.16058394160583941,
"grad_norm": 3.1569883823394775,
"learning_rate": 6.294095225512603e-05,
"loss": 10.8144,
"step": 33
},
{
"epoch": 0.1654501216545012,
"grad_norm": 2.958052396774292,
"learning_rate": 6.0821980696905146e-05,
"loss": 11.5663,
"step": 34
},
{
"epoch": 0.170316301703163,
"grad_norm": 2.971606731414795,
"learning_rate": 5.868240888334653e-05,
"loss": 11.8629,
"step": 35
},
{
"epoch": 0.17518248175182483,
"grad_norm": 3.4037623405456543,
"learning_rate": 5.6526309611002594e-05,
"loss": 11.2805,
"step": 36
},
{
"epoch": 0.18004866180048662,
"grad_norm": 3.7285900115966797,
"learning_rate": 5.435778713738292e-05,
"loss": 11.6089,
"step": 37
},
{
"epoch": 0.18491484184914841,
"grad_norm": 3.6044297218322754,
"learning_rate": 5.218096936826681e-05,
"loss": 12.6734,
"step": 38
},
{
"epoch": 0.1897810218978102,
"grad_norm": 3.859436511993408,
"learning_rate": 5e-05,
"loss": 11.3451,
"step": 39
},
{
"epoch": 0.19464720194647203,
"grad_norm": 4.121180534362793,
"learning_rate": 4.781903063173321e-05,
"loss": 11.4139,
"step": 40
},
{
"epoch": 0.19951338199513383,
"grad_norm": 4.352361679077148,
"learning_rate": 4.564221286261709e-05,
"loss": 13.3278,
"step": 41
},
{
"epoch": 0.20437956204379562,
"grad_norm": 3.7588999271392822,
"learning_rate": 4.347369038899744e-05,
"loss": 10.9387,
"step": 42
},
{
"epoch": 0.20924574209245742,
"grad_norm": 4.562860012054443,
"learning_rate": 4.131759111665349e-05,
"loss": 12.128,
"step": 43
},
{
"epoch": 0.2141119221411192,
"grad_norm": 4.854923725128174,
"learning_rate": 3.917801930309486e-05,
"loss": 11.4224,
"step": 44
},
{
"epoch": 0.21897810218978103,
"grad_norm": 4.410684585571289,
"learning_rate": 3.705904774487396e-05,
"loss": 11.0198,
"step": 45
},
{
"epoch": 0.22384428223844283,
"grad_norm": 4.752728462219238,
"learning_rate": 3.4964710024786354e-05,
"loss": 11.2316,
"step": 46
},
{
"epoch": 0.22871046228710462,
"grad_norm": 4.889558792114258,
"learning_rate": 3.289899283371657e-05,
"loss": 10.6528,
"step": 47
},
{
"epoch": 0.23357664233576642,
"grad_norm": 6.103560924530029,
"learning_rate": 3.086582838174551e-05,
"loss": 11.6173,
"step": 48
},
{
"epoch": 0.2384428223844282,
"grad_norm": 6.686156749725342,
"learning_rate": 2.886908691296504e-05,
"loss": 11.7325,
"step": 49
},
{
"epoch": 0.24330900243309003,
"grad_norm": 8.535616874694824,
"learning_rate": 2.6912569338248315e-05,
"loss": 10.9532,
"step": 50
},
{
"epoch": 0.24330900243309003,
"eval_loss": 1.3366206884384155,
"eval_runtime": 100.0065,
"eval_samples_per_second": 1.74,
"eval_steps_per_second": 0.87,
"step": 50
},
{
"epoch": 0.24817518248175183,
"grad_norm": 1.754204511642456,
"learning_rate": 2.500000000000001e-05,
"loss": 8.2569,
"step": 51
},
{
"epoch": 0.25304136253041365,
"grad_norm": 2.188532829284668,
"learning_rate": 2.3135019582658802e-05,
"loss": 8.7464,
"step": 52
},
{
"epoch": 0.25790754257907544,
"grad_norm": 2.390721559524536,
"learning_rate": 2.132117818244771e-05,
"loss": 9.9108,
"step": 53
},
{
"epoch": 0.26277372262773724,
"grad_norm": 2.576392889022827,
"learning_rate": 1.9561928549563968e-05,
"loss": 10.1054,
"step": 54
},
{
"epoch": 0.26763990267639903,
"grad_norm": 2.147494077682495,
"learning_rate": 1.7860619515673033e-05,
"loss": 9.8218,
"step": 55
},
{
"epoch": 0.2725060827250608,
"grad_norm": 2.0287535190582275,
"learning_rate": 1.622048961921699e-05,
"loss": 10.3552,
"step": 56
},
{
"epoch": 0.2773722627737226,
"grad_norm": 2.4717705249786377,
"learning_rate": 1.4644660940672627e-05,
"loss": 10.4594,
"step": 57
},
{
"epoch": 0.2822384428223844,
"grad_norm": 2.5552027225494385,
"learning_rate": 1.3136133159493802e-05,
"loss": 10.1801,
"step": 58
},
{
"epoch": 0.2871046228710462,
"grad_norm": 2.0711898803710938,
"learning_rate": 1.1697777844051105e-05,
"loss": 10.1532,
"step": 59
},
{
"epoch": 0.291970802919708,
"grad_norm": 2.061006546020508,
"learning_rate": 1.0332332985438248e-05,
"loss": 10.4756,
"step": 60
},
{
"epoch": 0.29683698296836986,
"grad_norm": 2.3004233837127686,
"learning_rate": 9.042397785550405e-06,
"loss": 9.6621,
"step": 61
},
{
"epoch": 0.30170316301703165,
"grad_norm": 2.1852986812591553,
"learning_rate": 7.830427709355725e-06,
"loss": 10.3212,
"step": 62
},
{
"epoch": 0.30656934306569344,
"grad_norm": 2.386976957321167,
"learning_rate": 6.698729810778065e-06,
"loss": 10.2014,
"step": 63
},
{
"epoch": 0.31143552311435524,
"grad_norm": 2.3682126998901367,
"learning_rate": 5.649458341088915e-06,
"loss": 10.4178,
"step": 64
},
{
"epoch": 0.31630170316301703,
"grad_norm": 2.1567893028259277,
"learning_rate": 4.684610648167503e-06,
"loss": 9.9836,
"step": 65
},
{
"epoch": 0.32116788321167883,
"grad_norm": 2.226053476333618,
"learning_rate": 3.8060233744356633e-06,
"loss": 10.4578,
"step": 66
},
{
"epoch": 0.3260340632603406,
"grad_norm": 2.296086072921753,
"learning_rate": 3.0153689607045845e-06,
"loss": 10.5194,
"step": 67
},
{
"epoch": 0.3309002433090024,
"grad_norm": 5.2668776512146,
"learning_rate": 2.314152462588659e-06,
"loss": 10.2628,
"step": 68
},
{
"epoch": 0.3357664233576642,
"grad_norm": 2.316244602203369,
"learning_rate": 1.70370868554659e-06,
"loss": 9.9051,
"step": 69
},
{
"epoch": 0.340632603406326,
"grad_norm": 2.4800219535827637,
"learning_rate": 1.1851996440033319e-06,
"loss": 10.9728,
"step": 70
},
{
"epoch": 0.34549878345498786,
"grad_norm": 2.387953281402588,
"learning_rate": 7.596123493895991e-07,
"loss": 10.6898,
"step": 71
},
{
"epoch": 0.35036496350364965,
"grad_norm": 2.5870020389556885,
"learning_rate": 4.277569313094809e-07,
"loss": 11.2093,
"step": 72
},
{
"epoch": 0.35523114355231145,
"grad_norm": 2.600900888442993,
"learning_rate": 1.9026509541272275e-07,
"loss": 10.8837,
"step": 73
},
{
"epoch": 0.36009732360097324,
"grad_norm": 2.623093366622925,
"learning_rate": 4.7588920907110094e-08,
"loss": 10.6526,
"step": 74
},
{
"epoch": 0.36496350364963503,
"grad_norm": 2.7400941848754883,
"learning_rate": 0.0,
"loss": 11.0227,
"step": 75
},
{
"epoch": 0.36496350364963503,
"eval_loss": 1.3052926063537598,
"eval_runtime": 100.089,
"eval_samples_per_second": 1.738,
"eval_steps_per_second": 0.869,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 75,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.9422814666752e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}