{
"best_metric": 1.6852511167526245,
"best_model_checkpoint": "miner_id_24/checkpoint-75",
"epoch": 1.300256629597947,
"eval_steps": 25,
"global_step": 95,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.013686911890504704,
"grad_norm": 3.500505208969116,
"learning_rate": 3.3333333333333335e-05,
"loss": 2.6238,
"step": 1
},
{
"epoch": 0.013686911890504704,
"eval_loss": 2.510066032409668,
"eval_runtime": 1.4348,
"eval_samples_per_second": 34.848,
"eval_steps_per_second": 9.06,
"step": 1
},
{
"epoch": 0.02737382378100941,
"grad_norm": 3.837956428527832,
"learning_rate": 6.666666666666667e-05,
"loss": 2.5922,
"step": 2
},
{
"epoch": 0.041060735671514116,
"grad_norm": 3.278623342514038,
"learning_rate": 0.0001,
"loss": 2.5207,
"step": 3
},
{
"epoch": 0.05474764756201882,
"grad_norm": 2.177558183670044,
"learning_rate": 9.997376600647783e-05,
"loss": 2.3067,
"step": 4
},
{
"epoch": 0.06843455945252352,
"grad_norm": 1.2352246046066284,
"learning_rate": 9.989509461357426e-05,
"loss": 2.1573,
"step": 5
},
{
"epoch": 0.08212147134302823,
"grad_norm": 1.4600045680999756,
"learning_rate": 9.976407754861426e-05,
"loss": 2.1698,
"step": 6
},
{
"epoch": 0.09580838323353294,
"grad_norm": 1.4000813961029053,
"learning_rate": 9.958086757163489e-05,
"loss": 2.0715,
"step": 7
},
{
"epoch": 0.10949529512403763,
"grad_norm": 1.240330696105957,
"learning_rate": 9.934567829727386e-05,
"loss": 2.1,
"step": 8
},
{
"epoch": 0.12318220701454234,
"grad_norm": 1.0595756769180298,
"learning_rate": 9.905878394570453e-05,
"loss": 1.972,
"step": 9
},
{
"epoch": 0.13686911890504705,
"grad_norm": 1.0356624126434326,
"learning_rate": 9.872051902290737e-05,
"loss": 1.869,
"step": 10
},
{
"epoch": 0.15055603079555174,
"grad_norm": 1.1129814386367798,
"learning_rate": 9.833127793065098e-05,
"loss": 1.9284,
"step": 11
},
{
"epoch": 0.16424294268605646,
"grad_norm": 1.6165825128555298,
"learning_rate": 9.789151450663723e-05,
"loss": 1.9648,
"step": 12
},
{
"epoch": 0.17792985457656116,
"grad_norm": 1.1226437091827393,
"learning_rate": 9.740174149534693e-05,
"loss": 2.1428,
"step": 13
},
{
"epoch": 0.19161676646706588,
"grad_norm": 1.0472556352615356,
"learning_rate": 9.686252995020249e-05,
"loss": 2.1569,
"step": 14
},
{
"epoch": 0.20530367835757057,
"grad_norm": 0.9411678314208984,
"learning_rate": 9.627450856774539e-05,
"loss": 2.0623,
"step": 15
},
{
"epoch": 0.21899059024807527,
"grad_norm": 0.8103951811790466,
"learning_rate": 9.563836295460398e-05,
"loss": 1.9841,
"step": 16
},
{
"epoch": 0.23267750213858,
"grad_norm": 0.6999959349632263,
"learning_rate": 9.495483482810688e-05,
"loss": 1.9943,
"step": 17
},
{
"epoch": 0.24636441402908468,
"grad_norm": 0.6278564929962158,
"learning_rate": 9.422472115147382e-05,
"loss": 1.875,
"step": 18
},
{
"epoch": 0.2600513259195894,
"grad_norm": 0.7192858457565308,
"learning_rate": 9.3448873204592e-05,
"loss": 1.9008,
"step": 19
},
{
"epoch": 0.2737382378100941,
"grad_norm": 0.8828967809677124,
"learning_rate": 9.2628195591462e-05,
"loss": 1.888,
"step": 20
},
{
"epoch": 0.2874251497005988,
"grad_norm": 0.8793849349021912,
"learning_rate": 9.176364518546989e-05,
"loss": 1.8238,
"step": 21
},
{
"epoch": 0.3011120615911035,
"grad_norm": 0.9618884921073914,
"learning_rate": 9.08562300137157e-05,
"loss": 1.8574,
"step": 22
},
{
"epoch": 0.31479897348160824,
"grad_norm": 0.9185750484466553,
"learning_rate": 8.990700808169889e-05,
"loss": 1.7576,
"step": 23
},
{
"epoch": 0.32848588537211293,
"grad_norm": 1.0817373991012573,
"learning_rate": 8.891708613973126e-05,
"loss": 1.6355,
"step": 24
},
{
"epoch": 0.3421727972626176,
"grad_norm": 2.1657047271728516,
"learning_rate": 8.788761839251559e-05,
"loss": 1.7874,
"step": 25
},
{
"epoch": 0.3421727972626176,
"eval_loss": 1.7843682765960693,
"eval_runtime": 1.4813,
"eval_samples_per_second": 33.755,
"eval_steps_per_second": 8.776,
"step": 25
},
{
"epoch": 0.3558597091531223,
"grad_norm": 0.9993261098861694,
"learning_rate": 8.681980515339464e-05,
"loss": 2.0195,
"step": 26
},
{
"epoch": 0.369546621043627,
"grad_norm": 1.1792598962783813,
"learning_rate": 8.571489144483944e-05,
"loss": 1.9742,
"step": 27
},
{
"epoch": 0.38323353293413176,
"grad_norm": 1.1005427837371826,
"learning_rate": 8.457416554680877e-05,
"loss": 1.9183,
"step": 28
},
{
"epoch": 0.39692044482463645,
"grad_norm": 0.9050849676132202,
"learning_rate": 8.339895749467238e-05,
"loss": 1.8983,
"step": 29
},
{
"epoch": 0.41060735671514115,
"grad_norm": 0.7925953269004822,
"learning_rate": 8.219063752844926e-05,
"loss": 1.8989,
"step": 30
},
{
"epoch": 0.42429426860564584,
"grad_norm": 0.6936097741127014,
"learning_rate": 8.095061449516903e-05,
"loss": 1.8831,
"step": 31
},
{
"epoch": 0.43798118049615054,
"grad_norm": 0.7707492709159851,
"learning_rate": 7.968033420621935e-05,
"loss": 1.8627,
"step": 32
},
{
"epoch": 0.4516680923866553,
"grad_norm": 0.9204763770103455,
"learning_rate": 7.838127775159452e-05,
"loss": 1.8111,
"step": 33
},
{
"epoch": 0.46535500427716,
"grad_norm": 0.8897156715393066,
"learning_rate": 7.705495977301078e-05,
"loss": 1.7376,
"step": 34
},
{
"epoch": 0.47904191616766467,
"grad_norm": 0.9612038135528564,
"learning_rate": 7.570292669790186e-05,
"loss": 1.7255,
"step": 35
},
{
"epoch": 0.49272882805816937,
"grad_norm": 1.1348515748977661,
"learning_rate": 7.43267549363537e-05,
"loss": 1.6393,
"step": 36
},
{
"epoch": 0.5064157399486741,
"grad_norm": 1.6333129405975342,
"learning_rate": 7.292804904308087e-05,
"loss": 1.7315,
"step": 37
},
{
"epoch": 0.5201026518391788,
"grad_norm": 0.8447318077087402,
"learning_rate": 7.150843984658754e-05,
"loss": 1.9327,
"step": 38
},
{
"epoch": 0.5337895637296834,
"grad_norm": 0.766656756401062,
"learning_rate": 7.006958254769438e-05,
"loss": 1.9587,
"step": 39
},
{
"epoch": 0.5474764756201882,
"grad_norm": 0.7099741101264954,
"learning_rate": 6.861315478964841e-05,
"loss": 1.8917,
"step": 40
},
{
"epoch": 0.561163387510693,
"grad_norm": 0.7500272393226624,
"learning_rate": 6.714085470206609e-05,
"loss": 1.9018,
"step": 41
},
{
"epoch": 0.5748502994011976,
"grad_norm": 0.6390820145606995,
"learning_rate": 6.56543989209901e-05,
"loss": 1.8825,
"step": 42
},
{
"epoch": 0.5885372112917023,
"grad_norm": 0.6714113354682922,
"learning_rate": 6.415552058736854e-05,
"loss": 1.8106,
"step": 43
},
{
"epoch": 0.602224123182207,
"grad_norm": 0.7266939282417297,
"learning_rate": 6.264596732629e-05,
"loss": 1.8261,
"step": 44
},
{
"epoch": 0.6159110350727117,
"grad_norm": 0.8883388638496399,
"learning_rate": 6.112749920933111e-05,
"loss": 1.7769,
"step": 45
},
{
"epoch": 0.6295979469632165,
"grad_norm": 0.8744785785675049,
"learning_rate": 5.960188670239154e-05,
"loss": 1.8136,
"step": 46
},
{
"epoch": 0.6432848588537211,
"grad_norm": 0.8936694860458374,
"learning_rate": 5.80709086014102e-05,
"loss": 1.7252,
"step": 47
},
{
"epoch": 0.6569717707442259,
"grad_norm": 0.9276703596115112,
"learning_rate": 5.653634995836856e-05,
"loss": 1.7283,
"step": 48
},
{
"epoch": 0.6706586826347305,
"grad_norm": 1.1337720155715942,
"learning_rate": 5.500000000000001e-05,
"loss": 1.664,
"step": 49
},
{
"epoch": 0.6843455945252352,
"grad_norm": 1.886711597442627,
"learning_rate": 5.346365004163145e-05,
"loss": 1.7076,
"step": 50
},
{
"epoch": 0.6843455945252352,
"eval_loss": 1.7058221101760864,
"eval_runtime": 1.4721,
"eval_samples_per_second": 33.966,
"eval_steps_per_second": 8.831,
"step": 50
},
{
"epoch": 0.69803250641574,
"grad_norm": 0.6420477628707886,
"learning_rate": 5.192909139858981e-05,
"loss": 1.9832,
"step": 51
},
{
"epoch": 0.7117194183062446,
"grad_norm": 0.6704874634742737,
"learning_rate": 5.0398113297608465e-05,
"loss": 1.962,
"step": 52
},
{
"epoch": 0.7254063301967494,
"grad_norm": 0.6822187304496765,
"learning_rate": 4.887250079066892e-05,
"loss": 1.8799,
"step": 53
},
{
"epoch": 0.739093242087254,
"grad_norm": 0.6840056777000427,
"learning_rate": 4.7354032673710005e-05,
"loss": 1.8171,
"step": 54
},
{
"epoch": 0.7527801539777588,
"grad_norm": 0.7427605390548706,
"learning_rate": 4.584447941263149e-05,
"loss": 1.8624,
"step": 55
},
{
"epoch": 0.7664670658682635,
"grad_norm": 0.6732311844825745,
"learning_rate": 4.43456010790099e-05,
"loss": 1.79,
"step": 56
},
{
"epoch": 0.7801539777587682,
"grad_norm": 0.7461451292037964,
"learning_rate": 4.285914529793391e-05,
"loss": 1.7852,
"step": 57
},
{
"epoch": 0.7938408896492729,
"grad_norm": 0.768389880657196,
"learning_rate": 4.13868452103516e-05,
"loss": 1.7806,
"step": 58
},
{
"epoch": 0.8075278015397775,
"grad_norm": 0.7873702049255371,
"learning_rate": 3.9930417452305626e-05,
"loss": 1.7472,
"step": 59
},
{
"epoch": 0.8212147134302823,
"grad_norm": 0.9594922661781311,
"learning_rate": 3.8491560153412466e-05,
"loss": 1.6708,
"step": 60
},
{
"epoch": 0.834901625320787,
"grad_norm": 1.1094688177108765,
"learning_rate": 3.707195095691913e-05,
"loss": 1.603,
"step": 61
},
{
"epoch": 0.8485885372112917,
"grad_norm": 1.5007215738296509,
"learning_rate": 3.567324506364632e-05,
"loss": 1.5338,
"step": 62
},
{
"epoch": 0.8622754491017964,
"grad_norm": 0.779221773147583,
"learning_rate": 3.4297073302098156e-05,
"loss": 1.9614,
"step": 63
},
{
"epoch": 0.8759623609923011,
"grad_norm": 0.6528869271278381,
"learning_rate": 3.2945040226989244e-05,
"loss": 1.9508,
"step": 64
},
{
"epoch": 0.8896492728828058,
"grad_norm": 0.652769923210144,
"learning_rate": 3.16187222484055e-05,
"loss": 1.8841,
"step": 65
},
{
"epoch": 0.9033361847733106,
"grad_norm": 0.6397238373756409,
"learning_rate": 3.0319665793780648e-05,
"loss": 1.8114,
"step": 66
},
{
"epoch": 0.9170230966638152,
"grad_norm": 0.704137921333313,
"learning_rate": 2.9049385504830985e-05,
"loss": 1.7991,
"step": 67
},
{
"epoch": 0.93071000855432,
"grad_norm": 0.7300670742988586,
"learning_rate": 2.7809362471550748e-05,
"loss": 1.7123,
"step": 68
},
{
"epoch": 0.9443969204448246,
"grad_norm": 0.7672079205513,
"learning_rate": 2.660104250532764e-05,
"loss": 1.758,
"step": 69
},
{
"epoch": 0.9580838323353293,
"grad_norm": 0.8238626718521118,
"learning_rate": 2.5425834453191232e-05,
"loss": 1.6884,
"step": 70
},
{
"epoch": 0.9717707442258341,
"grad_norm": 0.9403167366981506,
"learning_rate": 2.4285108555160577e-05,
"loss": 1.7006,
"step": 71
},
{
"epoch": 0.9854576561163387,
"grad_norm": 1.0409222841262817,
"learning_rate": 2.3180194846605367e-05,
"loss": 1.633,
"step": 72
},
{
"epoch": 0.9991445680068435,
"grad_norm": 1.7192370891571045,
"learning_rate": 2.2112381607484417e-05,
"loss": 1.5341,
"step": 73
},
{
"epoch": 1.0128314798973481,
"grad_norm": 6.818806171417236,
"learning_rate": 2.1082913860268765e-05,
"loss": 3.8448,
"step": 74
},
{
"epoch": 1.0265183917878529,
"grad_norm": 0.607759952545166,
"learning_rate": 2.0092991918301108e-05,
"loss": 1.8502,
"step": 75
},
{
"epoch": 1.0265183917878529,
"eval_loss": 1.6852511167526245,
"eval_runtime": 1.4684,
"eval_samples_per_second": 34.051,
"eval_steps_per_second": 8.853,
"step": 75
},
{
"epoch": 1.0402053036783576,
"grad_norm": 0.6126305460929871,
"learning_rate": 1.91437699862843e-05,
"loss": 1.7548,
"step": 76
},
{
"epoch": 1.0538922155688624,
"grad_norm": 0.6389468312263489,
"learning_rate": 1.8236354814530112e-05,
"loss": 1.8156,
"step": 77
},
{
"epoch": 1.067579127459367,
"grad_norm": 0.6474320888519287,
"learning_rate": 1.7371804408538024e-05,
"loss": 1.7414,
"step": 78
},
{
"epoch": 1.0812660393498716,
"grad_norm": 0.6839907169342041,
"learning_rate": 1.6551126795408016e-05,
"loss": 1.6932,
"step": 79
},
{
"epoch": 1.0949529512403764,
"grad_norm": 0.6910409331321716,
"learning_rate": 1.577527884852619e-05,
"loss": 1.6342,
"step": 80
},
{
"epoch": 1.1086398631308811,
"grad_norm": 0.736857533454895,
"learning_rate": 1.5045165171893116e-05,
"loss": 1.6396,
"step": 81
},
{
"epoch": 1.122326775021386,
"grad_norm": 0.7820208072662354,
"learning_rate": 1.4361637045396029e-05,
"loss": 1.6122,
"step": 82
},
{
"epoch": 1.1360136869118904,
"grad_norm": 0.8468031287193298,
"learning_rate": 1.3725491432254624e-05,
"loss": 1.5167,
"step": 83
},
{
"epoch": 1.1497005988023952,
"grad_norm": 0.8978099822998047,
"learning_rate": 1.313747004979751e-05,
"loss": 1.4598,
"step": 84
},
{
"epoch": 1.1633875106929,
"grad_norm": 1.1949200630187988,
"learning_rate": 1.2598258504653081e-05,
"loss": 1.3726,
"step": 85
},
{
"epoch": 1.1770744225834047,
"grad_norm": 0.7065590023994446,
"learning_rate": 1.2108485493362765e-05,
"loss": 1.7266,
"step": 86
},
{
"epoch": 1.1907613344739094,
"grad_norm": 0.6318903565406799,
"learning_rate": 1.1668722069349041e-05,
"loss": 1.9521,
"step": 87
},
{
"epoch": 1.204448246364414,
"grad_norm": 0.6263776421546936,
"learning_rate": 1.1279480977092635e-05,
"loss": 1.78,
"step": 88
},
{
"epoch": 1.2181351582549187,
"grad_norm": 0.6598425507545471,
"learning_rate": 1.094121605429547e-05,
"loss": 1.7473,
"step": 89
},
{
"epoch": 1.2318220701454234,
"grad_norm": 0.6533094644546509,
"learning_rate": 1.0654321702726141e-05,
"loss": 1.7707,
"step": 90
},
{
"epoch": 1.2455089820359282,
"grad_norm": 0.6790942549705505,
"learning_rate": 1.0419132428365116e-05,
"loss": 1.6519,
"step": 91
},
{
"epoch": 1.259195893926433,
"grad_norm": 0.7855159640312195,
"learning_rate": 1.0235922451385733e-05,
"loss": 1.6926,
"step": 92
},
{
"epoch": 1.2728828058169377,
"grad_norm": 0.8199930787086487,
"learning_rate": 1.0104905386425733e-05,
"loss": 1.703,
"step": 93
},
{
"epoch": 1.2865697177074422,
"grad_norm": 0.8154268860816956,
"learning_rate": 1.002623399352217e-05,
"loss": 1.5764,
"step": 94
},
{
"epoch": 1.300256629597947,
"grad_norm": 0.8530352711677551,
"learning_rate": 1e-05,
"loss": 1.5592,
"step": 95
}
],
"logging_steps": 1,
"max_steps": 95,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.0199811823108096e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}