{
"best_metric": 1.425083041191101,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 0.24848578971890045,
"eval_steps": 25,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0024848578971890046,
"grad_norm": 2.2064015865325928,
"learning_rate": 2e-05,
"loss": 1.3872,
"step": 1
},
{
"epoch": 0.0024848578971890046,
"eval_loss": 2.3529415130615234,
"eval_runtime": 0.5026,
"eval_samples_per_second": 99.477,
"eval_steps_per_second": 25.864,
"step": 1
},
{
"epoch": 0.004969715794378009,
"grad_norm": 2.8333449363708496,
"learning_rate": 4e-05,
"loss": 1.4444,
"step": 2
},
{
"epoch": 0.007454573691567014,
"grad_norm": 1.8522814512252808,
"learning_rate": 6e-05,
"loss": 1.5417,
"step": 3
},
{
"epoch": 0.009939431588756018,
"grad_norm": 0.7707993388175964,
"learning_rate": 8e-05,
"loss": 1.5648,
"step": 4
},
{
"epoch": 0.012424289485945022,
"grad_norm": 0.8538352251052856,
"learning_rate": 0.0001,
"loss": 1.6446,
"step": 5
},
{
"epoch": 0.014909147383134027,
"grad_norm": 1.292691946029663,
"learning_rate": 9.997539658034168e-05,
"loss": 1.8205,
"step": 6
},
{
"epoch": 0.017394005280323033,
"grad_norm": 1.3566771745681763,
"learning_rate": 9.990161322484486e-05,
"loss": 1.888,
"step": 7
},
{
"epoch": 0.019878863177512036,
"grad_norm": 1.3478772640228271,
"learning_rate": 9.977873061452552e-05,
"loss": 2.0025,
"step": 8
},
{
"epoch": 0.02236372107470104,
"grad_norm": 2.3724148273468018,
"learning_rate": 9.96068831197139e-05,
"loss": 2.3861,
"step": 9
},
{
"epoch": 0.024848578971890044,
"grad_norm": 2.5542187690734863,
"learning_rate": 9.938625865312251e-05,
"loss": 2.4441,
"step": 10
},
{
"epoch": 0.02733343686907905,
"grad_norm": 2.9612715244293213,
"learning_rate": 9.911709846436641e-05,
"loss": 2.4473,
"step": 11
},
{
"epoch": 0.029818294766268055,
"grad_norm": 3.946715831756592,
"learning_rate": 9.879969687616027e-05,
"loss": 2.796,
"step": 12
},
{
"epoch": 0.03230315266345706,
"grad_norm": 0.6914235949516296,
"learning_rate": 9.84344009624807e-05,
"loss": 1.2153,
"step": 13
},
{
"epoch": 0.034788010560646065,
"grad_norm": 0.5879468321800232,
"learning_rate": 9.80216101690461e-05,
"loss": 1.3014,
"step": 14
},
{
"epoch": 0.03727286845783507,
"grad_norm": 0.4194982051849365,
"learning_rate": 9.756177587652856e-05,
"loss": 1.3002,
"step": 15
},
{
"epoch": 0.03975772635502407,
"grad_norm": 0.43755513429641724,
"learning_rate": 9.705540090697575e-05,
"loss": 1.3616,
"step": 16
},
{
"epoch": 0.042242584252213077,
"grad_norm": 0.35957348346710205,
"learning_rate": 9.650303897398232e-05,
"loss": 1.3768,
"step": 17
},
{
"epoch": 0.04472744214940208,
"grad_norm": 0.4144904911518097,
"learning_rate": 9.590529407721231e-05,
"loss": 1.4022,
"step": 18
},
{
"epoch": 0.047212300046591084,
"grad_norm": 0.46732762455940247,
"learning_rate": 9.526281984193436e-05,
"loss": 1.4593,
"step": 19
},
{
"epoch": 0.04969715794378009,
"grad_norm": 0.5609594583511353,
"learning_rate": 9.4576318804292e-05,
"loss": 1.5165,
"step": 20
},
{
"epoch": 0.05218201584096909,
"grad_norm": 0.7970541715621948,
"learning_rate": 9.384654164309083e-05,
"loss": 1.7275,
"step": 21
},
{
"epoch": 0.0546668737381581,
"grad_norm": 1.0460487604141235,
"learning_rate": 9.30742863589421e-05,
"loss": 1.8141,
"step": 22
},
{
"epoch": 0.057151731635347106,
"grad_norm": 1.3908549547195435,
"learning_rate": 9.226039740166091e-05,
"loss": 1.8613,
"step": 23
},
{
"epoch": 0.05963658953253611,
"grad_norm": 1.8160243034362793,
"learning_rate": 9.140576474687264e-05,
"loss": 2.2734,
"step": 24
},
{
"epoch": 0.06212144742972511,
"grad_norm": 2.9735653400421143,
"learning_rate": 9.051132292283771e-05,
"loss": 2.7456,
"step": 25
},
{
"epoch": 0.06212144742972511,
"eval_loss": 1.5912128686904907,
"eval_runtime": 0.502,
"eval_samples_per_second": 99.599,
"eval_steps_per_second": 25.896,
"step": 25
},
{
"epoch": 0.06460630532691412,
"grad_norm": 0.4997400939464569,
"learning_rate": 8.957804998855866e-05,
"loss": 1.1591,
"step": 26
},
{
"epoch": 0.06709116322410312,
"grad_norm": 0.6131523847579956,
"learning_rate": 8.860696646428693e-05,
"loss": 1.2452,
"step": 27
},
{
"epoch": 0.06957602112129213,
"grad_norm": 0.5031818747520447,
"learning_rate": 8.759913421559902e-05,
"loss": 1.2703,
"step": 28
},
{
"epoch": 0.07206087901848113,
"grad_norm": 0.5141401886940002,
"learning_rate": 8.655565529226198e-05,
"loss": 1.3225,
"step": 29
},
{
"epoch": 0.07454573691567014,
"grad_norm": 0.4961668848991394,
"learning_rate": 8.547767072315835e-05,
"loss": 1.3459,
"step": 30
},
{
"epoch": 0.07703059481285914,
"grad_norm": 0.4645865857601166,
"learning_rate": 8.436635926858759e-05,
"loss": 1.3681,
"step": 31
},
{
"epoch": 0.07951545271004815,
"grad_norm": 0.4720369577407837,
"learning_rate": 8.322293613130917e-05,
"loss": 1.4398,
"step": 32
},
{
"epoch": 0.08200031060723714,
"grad_norm": 0.5064677596092224,
"learning_rate": 8.204865162773613e-05,
"loss": 1.4973,
"step": 33
},
{
"epoch": 0.08448516850442615,
"grad_norm": 0.7844948172569275,
"learning_rate": 8.084478982073247e-05,
"loss": 1.6429,
"step": 34
},
{
"epoch": 0.08697002640161516,
"grad_norm": 1.0937235355377197,
"learning_rate": 7.961266711550922e-05,
"loss": 1.655,
"step": 35
},
{
"epoch": 0.08945488429880416,
"grad_norm": 1.5023084878921509,
"learning_rate": 7.835363082015468e-05,
"loss": 1.9494,
"step": 36
},
{
"epoch": 0.09193974219599317,
"grad_norm": 1.9704169034957886,
"learning_rate": 7.706905767237288e-05,
"loss": 2.0052,
"step": 37
},
{
"epoch": 0.09442460009318217,
"grad_norm": 0.3317461609840393,
"learning_rate": 7.576035233404096e-05,
"loss": 1.1168,
"step": 38
},
{
"epoch": 0.09690945799037118,
"grad_norm": 0.3098306357860565,
"learning_rate": 7.442894585523218e-05,
"loss": 1.2064,
"step": 39
},
{
"epoch": 0.09939431588756018,
"grad_norm": 0.2434069812297821,
"learning_rate": 7.307629410938363e-05,
"loss": 1.2017,
"step": 40
},
{
"epoch": 0.10187917378474919,
"grad_norm": 0.2517208158969879,
"learning_rate": 7.170387620131993e-05,
"loss": 1.2654,
"step": 41
},
{
"epoch": 0.10436403168193818,
"grad_norm": 0.2552037537097931,
"learning_rate": 7.031319284987394e-05,
"loss": 1.3208,
"step": 42
},
{
"epoch": 0.1068488895791272,
"grad_norm": 0.2989708483219147,
"learning_rate": 6.890576474687263e-05,
"loss": 1.3169,
"step": 43
},
{
"epoch": 0.1093337474763162,
"grad_norm": 0.38415053486824036,
"learning_rate": 6.7483130894283e-05,
"loss": 1.3684,
"step": 44
},
{
"epoch": 0.1118186053735052,
"grad_norm": 0.40901339054107666,
"learning_rate": 6.604684692133597e-05,
"loss": 1.464,
"step": 45
},
{
"epoch": 0.11430346327069421,
"grad_norm": 0.5247498750686646,
"learning_rate": 6.459848338346861e-05,
"loss": 1.5311,
"step": 46
},
{
"epoch": 0.11678832116788321,
"grad_norm": 0.756410539150238,
"learning_rate": 6.313962404494496e-05,
"loss": 1.6284,
"step": 47
},
{
"epoch": 0.11927317906507222,
"grad_norm": 0.9791819453239441,
"learning_rate": 6.167186414703289e-05,
"loss": 1.6159,
"step": 48
},
{
"epoch": 0.12175803696226122,
"grad_norm": 1.455810546875,
"learning_rate": 6.019680866363139e-05,
"loss": 1.939,
"step": 49
},
{
"epoch": 0.12424289485945023,
"grad_norm": 2.2369606494903564,
"learning_rate": 5.8716070546254966e-05,
"loss": 2.421,
"step": 50
},
{
"epoch": 0.12424289485945023,
"eval_loss": 1.4801632165908813,
"eval_runtime": 0.5016,
"eval_samples_per_second": 99.69,
"eval_steps_per_second": 25.919,
"step": 50
},
{
"epoch": 0.12672775275663922,
"grad_norm": 0.2692347466945648,
"learning_rate": 5.7231268960295e-05,
"loss": 1.1845,
"step": 51
},
{
"epoch": 0.12921261065382825,
"grad_norm": 0.2334340512752533,
"learning_rate": 5.574402751448614e-05,
"loss": 1.2396,
"step": 52
},
{
"epoch": 0.13169746855101724,
"grad_norm": 0.23105919361114502,
"learning_rate": 5.425597248551387e-05,
"loss": 1.1817,
"step": 53
},
{
"epoch": 0.13418232644820624,
"grad_norm": 0.2562418580055237,
"learning_rate": 5.2768731039705e-05,
"loss": 1.2478,
"step": 54
},
{
"epoch": 0.13666718434539524,
"grad_norm": 0.2922269403934479,
"learning_rate": 5.128392945374505e-05,
"loss": 1.3124,
"step": 55
},
{
"epoch": 0.13915204224258426,
"grad_norm": 0.3430224061012268,
"learning_rate": 4.980319133636863e-05,
"loss": 1.3298,
"step": 56
},
{
"epoch": 0.14163690013977326,
"grad_norm": 0.37545251846313477,
"learning_rate": 4.83281358529671e-05,
"loss": 1.3167,
"step": 57
},
{
"epoch": 0.14412175803696226,
"grad_norm": 0.4372154474258423,
"learning_rate": 4.686037595505507e-05,
"loss": 1.5611,
"step": 58
},
{
"epoch": 0.14660661593415125,
"grad_norm": 0.675260603427887,
"learning_rate": 4.54015166165314e-05,
"loss": 1.5241,
"step": 59
},
{
"epoch": 0.14909147383134028,
"grad_norm": 0.8273709416389465,
"learning_rate": 4.395315307866405e-05,
"loss": 1.5969,
"step": 60
},
{
"epoch": 0.15157633172852927,
"grad_norm": 1.0075353384017944,
"learning_rate": 4.2516869105717004e-05,
"loss": 1.7115,
"step": 61
},
{
"epoch": 0.15406118962571827,
"grad_norm": 1.6686378717422485,
"learning_rate": 4.109423525312738e-05,
"loss": 1.9804,
"step": 62
},
{
"epoch": 0.1565460475229073,
"grad_norm": 0.19904345273971558,
"learning_rate": 3.968680715012606e-05,
"loss": 1.1648,
"step": 63
},
{
"epoch": 0.1590309054200963,
"grad_norm": 0.1799841821193695,
"learning_rate": 3.829612379868006e-05,
"loss": 1.1689,
"step": 64
},
{
"epoch": 0.1615157633172853,
"grad_norm": 0.19480910897254944,
"learning_rate": 3.692370589061639e-05,
"loss": 1.1933,
"step": 65
},
{
"epoch": 0.16400062121447428,
"grad_norm": 0.19751323759555817,
"learning_rate": 3.557105414476782e-05,
"loss": 1.2421,
"step": 66
},
{
"epoch": 0.1664854791116633,
"grad_norm": 0.21054387092590332,
"learning_rate": 3.423964766595906e-05,
"loss": 1.2749,
"step": 67
},
{
"epoch": 0.1689703370088523,
"grad_norm": 0.25906527042388916,
"learning_rate": 3.293094232762715e-05,
"loss": 1.3187,
"step": 68
},
{
"epoch": 0.1714551949060413,
"grad_norm": 0.3064195513725281,
"learning_rate": 3.164636917984534e-05,
"loss": 1.3584,
"step": 69
},
{
"epoch": 0.17394005280323033,
"grad_norm": 0.38761624693870544,
"learning_rate": 3.0387332884490805e-05,
"loss": 1.4219,
"step": 70
},
{
"epoch": 0.17642491070041932,
"grad_norm": 0.4902038276195526,
"learning_rate": 2.9155210179267546e-05,
"loss": 1.4855,
"step": 71
},
{
"epoch": 0.17890976859760832,
"grad_norm": 0.674696147441864,
"learning_rate": 2.7951348372263875e-05,
"loss": 1.611,
"step": 72
},
{
"epoch": 0.18139462649479732,
"grad_norm": 1.0593940019607544,
"learning_rate": 2.677706386869083e-05,
"loss": 1.71,
"step": 73
},
{
"epoch": 0.18387948439198634,
"grad_norm": 1.1859633922576904,
"learning_rate": 2.5633640731412412e-05,
"loss": 1.6977,
"step": 74
},
{
"epoch": 0.18636434228917534,
"grad_norm": 2.17313551902771,
"learning_rate": 2.4522329276841663e-05,
"loss": 2.2829,
"step": 75
},
{
"epoch": 0.18636434228917534,
"eval_loss": 1.4373173713684082,
"eval_runtime": 0.5019,
"eval_samples_per_second": 99.622,
"eval_steps_per_second": 25.902,
"step": 75
},
{
"epoch": 0.18884920018636434,
"grad_norm": 0.15144409239292145,
"learning_rate": 2.3444344707738015e-05,
"loss": 1.1409,
"step": 76
},
{
"epoch": 0.19133405808355336,
"grad_norm": 0.1701895296573639,
"learning_rate": 2.2400865784401e-05,
"loss": 1.2128,
"step": 77
},
{
"epoch": 0.19381891598074236,
"grad_norm": 0.1913847178220749,
"learning_rate": 2.1393033535713093e-05,
"loss": 1.2066,
"step": 78
},
{
"epoch": 0.19630377387793135,
"grad_norm": 0.209688201546669,
"learning_rate": 2.0421950011441354e-05,
"loss": 1.2567,
"step": 79
},
{
"epoch": 0.19878863177512035,
"grad_norm": 0.2626892328262329,
"learning_rate": 1.9488677077162295e-05,
"loss": 1.2534,
"step": 80
},
{
"epoch": 0.20127348967230937,
"grad_norm": 0.28468379378318787,
"learning_rate": 1.8594235253127375e-05,
"loss": 1.3711,
"step": 81
},
{
"epoch": 0.20375834756949837,
"grad_norm": 0.4000290036201477,
"learning_rate": 1.77396025983391e-05,
"loss": 1.3722,
"step": 82
},
{
"epoch": 0.20624320546668737,
"grad_norm": 0.5061511397361755,
"learning_rate": 1.6925713641057904e-05,
"loss": 1.5405,
"step": 83
},
{
"epoch": 0.20872806336387636,
"grad_norm": 0.615593671798706,
"learning_rate": 1.6153458356909176e-05,
"loss": 1.5413,
"step": 84
},
{
"epoch": 0.2112129212610654,
"grad_norm": 0.8124218583106995,
"learning_rate": 1.5423681195707997e-05,
"loss": 1.5224,
"step": 85
},
{
"epoch": 0.2136977791582544,
"grad_norm": 0.9472981691360474,
"learning_rate": 1.4737180158065644e-05,
"loss": 1.6382,
"step": 86
},
{
"epoch": 0.21618263705544338,
"grad_norm": 1.6688803434371948,
"learning_rate": 1.4094705922787687e-05,
"loss": 2.0258,
"step": 87
},
{
"epoch": 0.2186674949526324,
"grad_norm": 0.21361282467842102,
"learning_rate": 1.3496961026017687e-05,
"loss": 1.1669,
"step": 88
},
{
"epoch": 0.2211523528498214,
"grad_norm": 0.16145868599414825,
"learning_rate": 1.2944599093024267e-05,
"loss": 1.1676,
"step": 89
},
{
"epoch": 0.2236372107470104,
"grad_norm": 0.16978983581066132,
"learning_rate": 1.2438224123471442e-05,
"loss": 1.1627,
"step": 90
},
{
"epoch": 0.2261220686441994,
"grad_norm": 0.20442353188991547,
"learning_rate": 1.1978389830953907e-05,
"loss": 1.24,
"step": 91
},
{
"epoch": 0.22860692654138842,
"grad_norm": 0.20694726705551147,
"learning_rate": 1.1565599037519316e-05,
"loss": 1.2345,
"step": 92
},
{
"epoch": 0.23109178443857742,
"grad_norm": 0.26132482290267944,
"learning_rate": 1.1200303123839742e-05,
"loss": 1.2939,
"step": 93
},
{
"epoch": 0.23357664233576642,
"grad_norm": 0.2953537702560425,
"learning_rate": 1.088290153563358e-05,
"loss": 1.3983,
"step": 94
},
{
"epoch": 0.23606150023295544,
"grad_norm": 0.3531050980091095,
"learning_rate": 1.0613741346877497e-05,
"loss": 1.3998,
"step": 95
},
{
"epoch": 0.23854635813014444,
"grad_norm": 0.4567805230617523,
"learning_rate": 1.0393116880286118e-05,
"loss": 1.5085,
"step": 96
},
{
"epoch": 0.24103121602733343,
"grad_norm": 0.6118177771568298,
"learning_rate": 1.0221269385474488e-05,
"loss": 1.5085,
"step": 97
},
{
"epoch": 0.24351607392452243,
"grad_norm": 0.8797485828399658,
"learning_rate": 1.0098386775155147e-05,
"loss": 1.7005,
"step": 98
},
{
"epoch": 0.24600093182171145,
"grad_norm": 1.1694400310516357,
"learning_rate": 1.0024603419658329e-05,
"loss": 1.6887,
"step": 99
},
{
"epoch": 0.24848578971890045,
"grad_norm": 2.3600339889526367,
"learning_rate": 1e-05,
"loss": 2.1697,
"step": 100
},
{
"epoch": 0.24848578971890045,
"eval_loss": 1.425083041191101,
"eval_runtime": 0.5024,
"eval_samples_per_second": 99.514,
"eval_steps_per_second": 25.874,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.666832784162816e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}