{
"best_metric": 0.48128727078437805,
"best_model_checkpoint": "data/Llama-31-8B_task-3_180-samples_config-3/checkpoint-340",
"epoch": 27.0,
"eval_steps": 500,
"global_step": 459,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.058823529411764705,
"grad_norm": 2.980715036392212,
"learning_rate": 3.9215686274509804e-08,
"loss": 2.6305,
"step": 1
},
{
"epoch": 0.11764705882352941,
"grad_norm": 1.5815191268920898,
"learning_rate": 7.843137254901961e-08,
"loss": 2.0227,
"step": 2
},
{
"epoch": 0.23529411764705882,
"grad_norm": 3.0497844219207764,
"learning_rate": 1.5686274509803921e-07,
"loss": 2.8101,
"step": 4
},
{
"epoch": 0.35294117647058826,
"grad_norm": 1.5842593908309937,
"learning_rate": 2.3529411764705883e-07,
"loss": 2.5918,
"step": 6
},
{
"epoch": 0.47058823529411764,
"grad_norm": 2.9371345043182373,
"learning_rate": 3.1372549019607843e-07,
"loss": 2.3864,
"step": 8
},
{
"epoch": 0.5882352941176471,
"grad_norm": 2.9547200202941895,
"learning_rate": 3.921568627450981e-07,
"loss": 2.6168,
"step": 10
},
{
"epoch": 0.7058823529411765,
"grad_norm": 2.5302109718322754,
"learning_rate": 4.7058823529411767e-07,
"loss": 2.2036,
"step": 12
},
{
"epoch": 0.8235294117647058,
"grad_norm": 3.466365337371826,
"learning_rate": 5.490196078431373e-07,
"loss": 2.7139,
"step": 14
},
{
"epoch": 0.9411764705882353,
"grad_norm": 1.7582124471664429,
"learning_rate": 6.274509803921569e-07,
"loss": 2.3708,
"step": 16
},
{
"epoch": 1.0,
"eval_loss": 2.4981932640075684,
"eval_runtime": 31.8694,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 17
},
{
"epoch": 1.0588235294117647,
"grad_norm": 1.3897138833999634,
"learning_rate": 7.058823529411766e-07,
"loss": 2.2296,
"step": 18
},
{
"epoch": 1.1764705882352942,
"grad_norm": 2.683563470840454,
"learning_rate": 7.843137254901962e-07,
"loss": 2.5726,
"step": 20
},
{
"epoch": 1.2941176470588236,
"grad_norm": 2.160468816757202,
"learning_rate": 8.627450980392157e-07,
"loss": 2.4421,
"step": 22
},
{
"epoch": 1.4117647058823528,
"grad_norm": 3.092349052429199,
"learning_rate": 9.411764705882353e-07,
"loss": 2.7441,
"step": 24
},
{
"epoch": 1.5294117647058822,
"grad_norm": 3.0546014308929443,
"learning_rate": 1.019607843137255e-06,
"loss": 2.6233,
"step": 26
},
{
"epoch": 1.6470588235294117,
"grad_norm": 2.6419599056243896,
"learning_rate": 1.0980392156862745e-06,
"loss": 2.6529,
"step": 28
},
{
"epoch": 1.7647058823529411,
"grad_norm": 1.2905603647232056,
"learning_rate": 1.1764705882352942e-06,
"loss": 2.1026,
"step": 30
},
{
"epoch": 1.8823529411764706,
"grad_norm": 2.6626884937286377,
"learning_rate": 1.2549019607843137e-06,
"loss": 2.5973,
"step": 32
},
{
"epoch": 2.0,
"grad_norm": 3.445452928543091,
"learning_rate": 1.3333333333333334e-06,
"loss": 2.4065,
"step": 34
},
{
"epoch": 2.0,
"eval_loss": 2.439713954925537,
"eval_runtime": 31.87,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 34
},
{
"epoch": 2.1176470588235294,
"grad_norm": 2.4814882278442383,
"learning_rate": 1.4117647058823531e-06,
"loss": 2.3601,
"step": 36
},
{
"epoch": 2.235294117647059,
"grad_norm": 3.0113446712493896,
"learning_rate": 1.4901960784313726e-06,
"loss": 2.3377,
"step": 38
},
{
"epoch": 2.3529411764705883,
"grad_norm": 2.5856285095214844,
"learning_rate": 1.5686274509803923e-06,
"loss": 2.24,
"step": 40
},
{
"epoch": 2.4705882352941178,
"grad_norm": 3.1034915447235107,
"learning_rate": 1.6470588235294118e-06,
"loss": 2.5231,
"step": 42
},
{
"epoch": 2.588235294117647,
"grad_norm": 3.499271869659424,
"learning_rate": 1.7254901960784315e-06,
"loss": 2.4463,
"step": 44
},
{
"epoch": 2.7058823529411766,
"grad_norm": 2.990689992904663,
"learning_rate": 1.8039215686274512e-06,
"loss": 2.4596,
"step": 46
},
{
"epoch": 2.8235294117647056,
"grad_norm": 2.712009906768799,
"learning_rate": 1.8823529411764707e-06,
"loss": 2.2987,
"step": 48
},
{
"epoch": 2.9411764705882355,
"grad_norm": 3.5523183345794678,
"learning_rate": 1.96078431372549e-06,
"loss": 2.3549,
"step": 50
},
{
"epoch": 3.0,
"eval_loss": 2.31467604637146,
"eval_runtime": 31.8743,
"eval_samples_per_second": 1.129,
"eval_steps_per_second": 1.129,
"step": 51
},
{
"epoch": 3.0588235294117645,
"grad_norm": 3.0498082637786865,
"learning_rate": 2.03921568627451e-06,
"loss": 2.4567,
"step": 52
},
{
"epoch": 3.176470588235294,
"grad_norm": 2.239677906036377,
"learning_rate": 2.1176470588235296e-06,
"loss": 2.0235,
"step": 54
},
{
"epoch": 3.2941176470588234,
"grad_norm": 2.1120095252990723,
"learning_rate": 2.196078431372549e-06,
"loss": 2.2331,
"step": 56
},
{
"epoch": 3.411764705882353,
"grad_norm": 3.549705743789673,
"learning_rate": 2.274509803921569e-06,
"loss": 2.1111,
"step": 58
},
{
"epoch": 3.5294117647058822,
"grad_norm": 4.140646934509277,
"learning_rate": 2.3529411764705885e-06,
"loss": 2.4917,
"step": 60
},
{
"epoch": 3.6470588235294117,
"grad_norm": 3.1374101638793945,
"learning_rate": 2.431372549019608e-06,
"loss": 2.2287,
"step": 62
},
{
"epoch": 3.764705882352941,
"grad_norm": 2.198146104812622,
"learning_rate": 2.5098039215686274e-06,
"loss": 1.9659,
"step": 64
},
{
"epoch": 3.8823529411764706,
"grad_norm": 2.7118916511535645,
"learning_rate": 2.5882352941176473e-06,
"loss": 2.3314,
"step": 66
},
{
"epoch": 4.0,
"grad_norm": 3.456117868423462,
"learning_rate": 2.666666666666667e-06,
"loss": 2.0578,
"step": 68
},
{
"epoch": 4.0,
"eval_loss": 2.085047483444214,
"eval_runtime": 31.8792,
"eval_samples_per_second": 1.129,
"eval_steps_per_second": 1.129,
"step": 68
},
{
"epoch": 4.117647058823529,
"grad_norm": 2.2668004035949707,
"learning_rate": 2.7450980392156867e-06,
"loss": 2.1099,
"step": 70
},
{
"epoch": 4.235294117647059,
"grad_norm": 2.0983450412750244,
"learning_rate": 2.8235294117647062e-06,
"loss": 2.0241,
"step": 72
},
{
"epoch": 4.352941176470588,
"grad_norm": 1.6723711490631104,
"learning_rate": 2.901960784313726e-06,
"loss": 1.8905,
"step": 74
},
{
"epoch": 4.470588235294118,
"grad_norm": 3.0844905376434326,
"learning_rate": 2.980392156862745e-06,
"loss": 1.8637,
"step": 76
},
{
"epoch": 4.588235294117647,
"grad_norm": 3.0651304721832275,
"learning_rate": 3.058823529411765e-06,
"loss": 1.8883,
"step": 78
},
{
"epoch": 4.705882352941177,
"grad_norm": 2.8657171726226807,
"learning_rate": 3.1372549019607846e-06,
"loss": 1.8309,
"step": 80
},
{
"epoch": 4.823529411764706,
"grad_norm": 2.580749750137329,
"learning_rate": 3.2156862745098045e-06,
"loss": 1.7815,
"step": 82
},
{
"epoch": 4.9411764705882355,
"grad_norm": 2.4597244262695312,
"learning_rate": 3.2941176470588236e-06,
"loss": 1.8089,
"step": 84
},
{
"epoch": 5.0,
"eval_loss": 1.7080037593841553,
"eval_runtime": 31.8717,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 85
},
{
"epoch": 5.0588235294117645,
"grad_norm": 1.954211950302124,
"learning_rate": 3.3725490196078435e-06,
"loss": 1.6625,
"step": 86
},
{
"epoch": 5.176470588235294,
"grad_norm": 1.9714858531951904,
"learning_rate": 3.450980392156863e-06,
"loss": 1.5611,
"step": 88
},
{
"epoch": 5.294117647058823,
"grad_norm": 1.9127044677734375,
"learning_rate": 3.529411764705883e-06,
"loss": 1.5702,
"step": 90
},
{
"epoch": 5.411764705882353,
"grad_norm": 4.0653977394104,
"learning_rate": 3.6078431372549024e-06,
"loss": 1.5627,
"step": 92
},
{
"epoch": 5.529411764705882,
"grad_norm": 1.8669339418411255,
"learning_rate": 3.6862745098039223e-06,
"loss": 1.4645,
"step": 94
},
{
"epoch": 5.647058823529412,
"grad_norm": 2.6541359424591064,
"learning_rate": 3.7647058823529414e-06,
"loss": 1.3323,
"step": 96
},
{
"epoch": 5.764705882352941,
"grad_norm": 1.7167555093765259,
"learning_rate": 3.843137254901962e-06,
"loss": 1.3833,
"step": 98
},
{
"epoch": 5.882352941176471,
"grad_norm": 2.953925132751465,
"learning_rate": 3.92156862745098e-06,
"loss": 1.333,
"step": 100
},
{
"epoch": 6.0,
"grad_norm": 1.2611362934112549,
"learning_rate": 4.000000000000001e-06,
"loss": 1.3018,
"step": 102
},
{
"epoch": 6.0,
"eval_loss": 1.2346911430358887,
"eval_runtime": 31.8703,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 102
},
{
"epoch": 6.117647058823529,
"grad_norm": 1.9918830394744873,
"learning_rate": 4.07843137254902e-06,
"loss": 1.1196,
"step": 104
},
{
"epoch": 6.235294117647059,
"grad_norm": 1.8971362113952637,
"learning_rate": 4.15686274509804e-06,
"loss": 1.1518,
"step": 106
},
{
"epoch": 6.352941176470588,
"grad_norm": 2.2451581954956055,
"learning_rate": 4.235294117647059e-06,
"loss": 1.0684,
"step": 108
},
{
"epoch": 6.470588235294118,
"grad_norm": 2.1816394329071045,
"learning_rate": 4.313725490196079e-06,
"loss": 0.9634,
"step": 110
},
{
"epoch": 6.588235294117647,
"grad_norm": 1.7623378038406372,
"learning_rate": 4.392156862745098e-06,
"loss": 0.9115,
"step": 112
},
{
"epoch": 6.705882352941177,
"grad_norm": 2.0872268676757812,
"learning_rate": 4.4705882352941184e-06,
"loss": 0.7509,
"step": 114
},
{
"epoch": 6.823529411764706,
"grad_norm": 1.5808390378952026,
"learning_rate": 4.549019607843138e-06,
"loss": 0.8618,
"step": 116
},
{
"epoch": 6.9411764705882355,
"grad_norm": 1.1898610591888428,
"learning_rate": 4.627450980392157e-06,
"loss": 1.0212,
"step": 118
},
{
"epoch": 7.0,
"eval_loss": 0.8016352653503418,
"eval_runtime": 31.8718,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 119
},
{
"epoch": 7.0588235294117645,
"grad_norm": 1.153451919555664,
"learning_rate": 4.705882352941177e-06,
"loss": 0.9469,
"step": 120
},
{
"epoch": 7.176470588235294,
"grad_norm": 1.1448363065719604,
"learning_rate": 4.784313725490196e-06,
"loss": 0.9065,
"step": 122
},
{
"epoch": 7.294117647058823,
"grad_norm": 1.0351287126541138,
"learning_rate": 4.862745098039216e-06,
"loss": 0.6538,
"step": 124
},
{
"epoch": 7.411764705882353,
"grad_norm": 0.9266816973686218,
"learning_rate": 4.941176470588236e-06,
"loss": 0.7569,
"step": 126
},
{
"epoch": 7.529411764705882,
"grad_norm": 0.869621992111206,
"learning_rate": 5.019607843137255e-06,
"loss": 0.6096,
"step": 128
},
{
"epoch": 7.647058823529412,
"grad_norm": 0.8324370980262756,
"learning_rate": 5.098039215686274e-06,
"loss": 0.4993,
"step": 130
},
{
"epoch": 7.764705882352941,
"grad_norm": 0.8301700353622437,
"learning_rate": 5.176470588235295e-06,
"loss": 0.4293,
"step": 132
},
{
"epoch": 7.882352941176471,
"grad_norm": 0.3732304275035858,
"learning_rate": 5.254901960784314e-06,
"loss": 0.632,
"step": 134
},
{
"epoch": 8.0,
"grad_norm": 0.5899373888969421,
"learning_rate": 5.333333333333334e-06,
"loss": 0.4899,
"step": 136
},
{
"epoch": 8.0,
"eval_loss": 0.6475194096565247,
"eval_runtime": 31.8748,
"eval_samples_per_second": 1.129,
"eval_steps_per_second": 1.129,
"step": 136
},
{
"epoch": 8.117647058823529,
"grad_norm": 0.6668117642402649,
"learning_rate": 5.411764705882353e-06,
"loss": 0.468,
"step": 138
},
{
"epoch": 8.235294117647058,
"grad_norm": 1.0659523010253906,
"learning_rate": 5.4901960784313735e-06,
"loss": 0.6599,
"step": 140
},
{
"epoch": 8.352941176470589,
"grad_norm": 0.787837028503418,
"learning_rate": 5.568627450980393e-06,
"loss": 0.8816,
"step": 142
},
{
"epoch": 8.470588235294118,
"grad_norm": 0.9166098237037659,
"learning_rate": 5.6470588235294125e-06,
"loss": 0.4608,
"step": 144
},
{
"epoch": 8.588235294117647,
"grad_norm": 0.8993943929672241,
"learning_rate": 5.725490196078431e-06,
"loss": 0.6013,
"step": 146
},
{
"epoch": 8.705882352941176,
"grad_norm": 0.5819871425628662,
"learning_rate": 5.803921568627452e-06,
"loss": 0.2645,
"step": 148
},
{
"epoch": 8.823529411764707,
"grad_norm": 0.6202341318130493,
"learning_rate": 5.882352941176471e-06,
"loss": 0.5653,
"step": 150
},
{
"epoch": 8.941176470588236,
"grad_norm": 0.737612247467041,
"learning_rate": 5.96078431372549e-06,
"loss": 0.6106,
"step": 152
},
{
"epoch": 9.0,
"eval_loss": 0.5890491008758545,
"eval_runtime": 31.8673,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 153
},
{
"epoch": 9.058823529411764,
"grad_norm": 0.8201161623001099,
"learning_rate": 6.03921568627451e-06,
"loss": 0.556,
"step": 154
},
{
"epoch": 9.176470588235293,
"grad_norm": 0.5118715167045593,
"learning_rate": 6.11764705882353e-06,
"loss": 0.521,
"step": 156
},
{
"epoch": 9.294117647058824,
"grad_norm": 2.472463369369507,
"learning_rate": 6.19607843137255e-06,
"loss": 0.5874,
"step": 158
},
{
"epoch": 9.411764705882353,
"grad_norm": 0.6849164366722107,
"learning_rate": 6.274509803921569e-06,
"loss": 0.5052,
"step": 160
},
{
"epoch": 9.529411764705882,
"grad_norm": 0.5705221891403198,
"learning_rate": 6.352941176470589e-06,
"loss": 0.4448,
"step": 162
},
{
"epoch": 9.647058823529411,
"grad_norm": 0.9986915588378906,
"learning_rate": 6.431372549019609e-06,
"loss": 0.4622,
"step": 164
},
{
"epoch": 9.764705882352942,
"grad_norm": 0.5936263203620911,
"learning_rate": 6.5098039215686285e-06,
"loss": 0.5221,
"step": 166
},
{
"epoch": 9.882352941176471,
"grad_norm": 0.4011266231536865,
"learning_rate": 6.588235294117647e-06,
"loss": 0.3669,
"step": 168
},
{
"epoch": 10.0,
"grad_norm": 1.4033674001693726,
"learning_rate": 6.666666666666667e-06,
"loss": 0.5388,
"step": 170
},
{
"epoch": 10.0,
"eval_loss": 0.5729286670684814,
"eval_runtime": 31.8736,
"eval_samples_per_second": 1.129,
"eval_steps_per_second": 1.129,
"step": 170
},
{
"epoch": 10.117647058823529,
"grad_norm": 0.7268795371055603,
"learning_rate": 6.745098039215687e-06,
"loss": 0.4123,
"step": 172
},
{
"epoch": 10.235294117647058,
"grad_norm": 0.534287691116333,
"learning_rate": 6.8235294117647065e-06,
"loss": 0.3577,
"step": 174
},
{
"epoch": 10.352941176470589,
"grad_norm": 0.46323052048683167,
"learning_rate": 6.901960784313726e-06,
"loss": 0.3157,
"step": 176
},
{
"epoch": 10.470588235294118,
"grad_norm": 0.4801871180534363,
"learning_rate": 6.9803921568627454e-06,
"loss": 0.5767,
"step": 178
},
{
"epoch": 10.588235294117647,
"grad_norm": 0.47244688868522644,
"learning_rate": 7.058823529411766e-06,
"loss": 0.5139,
"step": 180
},
{
"epoch": 10.705882352941176,
"grad_norm": 0.41083067655563354,
"learning_rate": 7.137254901960785e-06,
"loss": 0.5551,
"step": 182
},
{
"epoch": 10.823529411764707,
"grad_norm": 0.6211682558059692,
"learning_rate": 7.215686274509805e-06,
"loss": 0.5971,
"step": 184
},
{
"epoch": 10.941176470588236,
"grad_norm": 0.4872354567050934,
"learning_rate": 7.294117647058823e-06,
"loss": 0.7245,
"step": 186
},
{
"epoch": 11.0,
"eval_loss": 0.5584969520568848,
"eval_runtime": 31.8629,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 187
},
{
"epoch": 11.058823529411764,
"grad_norm": 0.41652852296829224,
"learning_rate": 7.372549019607845e-06,
"loss": 0.3599,
"step": 188
},
{
"epoch": 11.176470588235293,
"grad_norm": 0.5096337795257568,
"learning_rate": 7.450980392156863e-06,
"loss": 0.5491,
"step": 190
},
{
"epoch": 11.294117647058824,
"grad_norm": 0.5131371021270752,
"learning_rate": 7.529411764705883e-06,
"loss": 0.6327,
"step": 192
},
{
"epoch": 11.411764705882353,
"grad_norm": 0.33539438247680664,
"learning_rate": 7.607843137254902e-06,
"loss": 0.4431,
"step": 194
},
{
"epoch": 11.529411764705882,
"grad_norm": 1.0883010625839233,
"learning_rate": 7.686274509803923e-06,
"loss": 0.5964,
"step": 196
},
{
"epoch": 11.647058823529411,
"grad_norm": 0.13403330743312836,
"learning_rate": 7.764705882352941e-06,
"loss": 0.1832,
"step": 198
},
{
"epoch": 11.764705882352942,
"grad_norm": 0.38453081250190735,
"learning_rate": 7.84313725490196e-06,
"loss": 0.5463,
"step": 200
},
{
"epoch": 11.882352941176471,
"grad_norm": 0.4325025677680969,
"learning_rate": 7.92156862745098e-06,
"loss": 0.3552,
"step": 202
},
{
"epoch": 12.0,
"grad_norm": 0.26013830304145813,
"learning_rate": 8.000000000000001e-06,
"loss": 0.3568,
"step": 204
},
{
"epoch": 12.0,
"eval_loss": 0.5533129572868347,
"eval_runtime": 31.8552,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 204
},
{
"epoch": 12.117647058823529,
"grad_norm": 0.5007168054580688,
"learning_rate": 8.07843137254902e-06,
"loss": 0.3615,
"step": 206
},
{
"epoch": 12.235294117647058,
"grad_norm": 0.4056757986545563,
"learning_rate": 8.15686274509804e-06,
"loss": 0.7152,
"step": 208
},
{
"epoch": 12.352941176470589,
"grad_norm": 0.2833678424358368,
"learning_rate": 8.23529411764706e-06,
"loss": 0.4289,
"step": 210
},
{
"epoch": 12.470588235294118,
"grad_norm": 0.3110792934894562,
"learning_rate": 8.31372549019608e-06,
"loss": 0.4519,
"step": 212
},
{
"epoch": 12.588235294117647,
"grad_norm": 0.3763163387775421,
"learning_rate": 8.392156862745099e-06,
"loss": 0.405,
"step": 214
},
{
"epoch": 12.705882352941176,
"grad_norm": 0.24176886677742004,
"learning_rate": 8.470588235294118e-06,
"loss": 0.4032,
"step": 216
},
{
"epoch": 12.823529411764707,
"grad_norm": 0.3594723343849182,
"learning_rate": 8.549019607843138e-06,
"loss": 0.3982,
"step": 218
},
{
"epoch": 12.941176470588236,
"grad_norm": 0.50929194688797,
"learning_rate": 8.627450980392157e-06,
"loss": 0.4165,
"step": 220
},
{
"epoch": 13.0,
"eval_loss": 0.5352616310119629,
"eval_runtime": 31.8489,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 221
},
{
"epoch": 13.058823529411764,
"grad_norm": 0.32427483797073364,
"learning_rate": 8.705882352941177e-06,
"loss": 0.49,
"step": 222
},
{
"epoch": 13.176470588235293,
"grad_norm": 0.24497142434120178,
"learning_rate": 8.784313725490196e-06,
"loss": 0.3279,
"step": 224
},
{
"epoch": 13.294117647058824,
"grad_norm": 0.34544771909713745,
"learning_rate": 8.862745098039216e-06,
"loss": 0.4668,
"step": 226
},
{
"epoch": 13.411764705882353,
"grad_norm": 0.2216784805059433,
"learning_rate": 8.941176470588237e-06,
"loss": 0.3077,
"step": 228
},
{
"epoch": 13.529411764705882,
"grad_norm": 0.2850175201892853,
"learning_rate": 9.019607843137256e-06,
"loss": 0.3767,
"step": 230
},
{
"epoch": 13.647058823529411,
"grad_norm": 0.4109225571155548,
"learning_rate": 9.098039215686276e-06,
"loss": 0.4402,
"step": 232
},
{
"epoch": 13.764705882352942,
"grad_norm": 0.24095743894577026,
"learning_rate": 9.176470588235294e-06,
"loss": 0.3823,
"step": 234
},
{
"epoch": 13.882352941176471,
"grad_norm": 0.5017974972724915,
"learning_rate": 9.254901960784315e-06,
"loss": 0.5895,
"step": 236
},
{
"epoch": 14.0,
"grad_norm": 0.45756474137306213,
"learning_rate": 9.333333333333334e-06,
"loss": 0.6226,
"step": 238
},
{
"epoch": 14.0,
"eval_loss": 0.5420221090316772,
"eval_runtime": 31.849,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 238
},
{
"epoch": 14.117647058823529,
"grad_norm": 0.4417901039123535,
"learning_rate": 9.411764705882354e-06,
"loss": 0.545,
"step": 240
},
{
"epoch": 14.235294117647058,
"grad_norm": 0.3756580054759979,
"learning_rate": 9.490196078431373e-06,
"loss": 0.4739,
"step": 242
},
{
"epoch": 14.352941176470589,
"grad_norm": 0.4785820543766022,
"learning_rate": 9.568627450980393e-06,
"loss": 0.4149,
"step": 244
},
{
"epoch": 14.470588235294118,
"grad_norm": 0.444455087184906,
"learning_rate": 9.647058823529412e-06,
"loss": 0.508,
"step": 246
},
{
"epoch": 14.588235294117647,
"grad_norm": 0.3459950387477875,
"learning_rate": 9.725490196078432e-06,
"loss": 0.3752,
"step": 248
},
{
"epoch": 14.705882352941176,
"grad_norm": 0.276597797870636,
"learning_rate": 9.803921568627451e-06,
"loss": 0.3527,
"step": 250
},
{
"epoch": 14.823529411764707,
"grad_norm": 0.31315046548843384,
"learning_rate": 9.882352941176472e-06,
"loss": 0.396,
"step": 252
},
{
"epoch": 14.941176470588236,
"grad_norm": 0.009531126357614994,
"learning_rate": 9.960784313725492e-06,
"loss": 0.3284,
"step": 254
},
{
"epoch": 15.0,
"eval_loss": 0.5025795102119446,
"eval_runtime": 31.8517,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 255
},
{
"epoch": 15.058823529411764,
"grad_norm": 0.5613787174224854,
"learning_rate": 9.999995315380667e-06,
"loss": 0.502,
"step": 256
},
{
"epoch": 15.176470588235293,
"grad_norm": 0.467040091753006,
"learning_rate": 9.99995783847866e-06,
"loss": 0.4182,
"step": 258
},
{
"epoch": 15.294117647058824,
"grad_norm": 0.41092485189437866,
"learning_rate": 9.999882884955554e-06,
"loss": 0.245,
"step": 260
},
{
"epoch": 15.411764705882353,
"grad_norm": 0.36648833751678467,
"learning_rate": 9.99977045537315e-06,
"loss": 0.3851,
"step": 262
},
{
"epoch": 15.529411764705882,
"grad_norm": 0.31992214918136597,
"learning_rate": 9.999620550574155e-06,
"loss": 0.4584,
"step": 264
},
{
"epoch": 15.647058823529411,
"grad_norm": 0.34673774242401123,
"learning_rate": 9.999433171682158e-06,
"loss": 0.3905,
"step": 266
},
{
"epoch": 15.764705882352942,
"grad_norm": 0.46017172932624817,
"learning_rate": 9.999208320101643e-06,
"loss": 0.4964,
"step": 268
},
{
"epoch": 15.882352941176471,
"grad_norm": 0.5312222838401794,
"learning_rate": 9.998945997517957e-06,
"loss": 0.3088,
"step": 270
},
{
"epoch": 16.0,
"grad_norm": 0.5483973026275635,
"learning_rate": 9.99864620589731e-06,
"loss": 0.4813,
"step": 272
},
{
"epoch": 16.0,
"eval_loss": 0.5214402079582214,
"eval_runtime": 31.8457,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 272
},
{
"epoch": 16.11764705882353,
"grad_norm": 0.40592437982559204,
"learning_rate": 9.998308947486753e-06,
"loss": 0.274,
"step": 274
},
{
"epoch": 16.235294117647058,
"grad_norm": 0.4894661009311676,
"learning_rate": 9.997934224814173e-06,
"loss": 0.361,
"step": 276
},
{
"epoch": 16.352941176470587,
"grad_norm": 0.5340806841850281,
"learning_rate": 9.997522040688258e-06,
"loss": 0.4403,
"step": 278
},
{
"epoch": 16.470588235294116,
"grad_norm": 0.6398447751998901,
"learning_rate": 9.997072398198492e-06,
"loss": 0.6416,
"step": 280
},
{
"epoch": 16.58823529411765,
"grad_norm": 0.4408299922943115,
"learning_rate": 9.996585300715117e-06,
"loss": 0.3814,
"step": 282
},
{
"epoch": 16.705882352941178,
"grad_norm": 0.45829370617866516,
"learning_rate": 9.996060751889114e-06,
"loss": 0.4494,
"step": 284
},
{
"epoch": 16.823529411764707,
"grad_norm": 0.5563424229621887,
"learning_rate": 9.995498755652186e-06,
"loss": 0.297,
"step": 286
},
{
"epoch": 16.941176470588236,
"grad_norm": 0.423210084438324,
"learning_rate": 9.994899316216709e-06,
"loss": 0.3015,
"step": 288
},
{
"epoch": 17.0,
"eval_loss": 0.511587381362915,
"eval_runtime": 31.8593,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 289
},
{
"epoch": 17.058823529411764,
"grad_norm": 0.46335744857788086,
"learning_rate": 9.994262438075713e-06,
"loss": 0.4301,
"step": 290
},
{
"epoch": 17.176470588235293,
"grad_norm": 0.5282275676727295,
"learning_rate": 9.993588126002848e-06,
"loss": 0.3083,
"step": 292
},
{
"epoch": 17.294117647058822,
"grad_norm": 0.6960487961769104,
"learning_rate": 9.992876385052346e-06,
"loss": 0.5187,
"step": 294
},
{
"epoch": 17.41176470588235,
"grad_norm": 0.47753918170928955,
"learning_rate": 9.992127220558976e-06,
"loss": 0.2761,
"step": 296
},
{
"epoch": 17.529411764705884,
"grad_norm": 0.49851253628730774,
"learning_rate": 9.991340638138022e-06,
"loss": 0.2777,
"step": 298
},
{
"epoch": 17.647058823529413,
"grad_norm": 0.5102773904800415,
"learning_rate": 9.990516643685222e-06,
"loss": 0.4131,
"step": 300
},
{
"epoch": 17.764705882352942,
"grad_norm": 0.47633931040763855,
"learning_rate": 9.98965524337673e-06,
"loss": 0.5281,
"step": 302
},
{
"epoch": 17.88235294117647,
"grad_norm": 0.33498552441596985,
"learning_rate": 9.988756443669081e-06,
"loss": 0.2696,
"step": 304
},
{
"epoch": 18.0,
"grad_norm": 0.5153418183326721,
"learning_rate": 9.987820251299121e-06,
"loss": 0.3513,
"step": 306
},
{
"epoch": 18.0,
"eval_loss": 0.5070950388908386,
"eval_runtime": 31.8477,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 306
},
{
"epoch": 18.11764705882353,
"grad_norm": 0.3366803824901581,
"learning_rate": 9.98684667328398e-06,
"loss": 0.1873,
"step": 308
},
{
"epoch": 18.235294117647058,
"grad_norm": 0.6154054403305054,
"learning_rate": 9.985835716921e-06,
"loss": 0.391,
"step": 310
},
{
"epoch": 18.352941176470587,
"grad_norm": 0.6151386499404907,
"learning_rate": 9.984787389787689e-06,
"loss": 0.5524,
"step": 312
},
{
"epoch": 18.470588235294116,
"grad_norm": 0.34717944264411926,
"learning_rate": 9.983701699741668e-06,
"loss": 0.214,
"step": 314
},
{
"epoch": 18.58823529411765,
"grad_norm": 0.5958353877067566,
"learning_rate": 9.982578654920601e-06,
"loss": 0.4503,
"step": 316
},
{
"epoch": 18.705882352941178,
"grad_norm": 0.45278429985046387,
"learning_rate": 9.981418263742148e-06,
"loss": 0.2803,
"step": 318
},
{
"epoch": 18.823529411764707,
"grad_norm": 1.5949257612228394,
"learning_rate": 9.980220534903889e-06,
"loss": 0.3574,
"step": 320
},
{
"epoch": 18.941176470588236,
"grad_norm": 0.6842171549797058,
"learning_rate": 9.978985477383264e-06,
"loss": 0.3638,
"step": 322
},
{
"epoch": 19.0,
"eval_loss": 0.5485844016075134,
"eval_runtime": 31.8529,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 323
},
{
"epoch": 19.058823529411764,
"grad_norm": 0.5384336113929749,
"learning_rate": 9.97771310043751e-06,
"loss": 0.4372,
"step": 324
},
{
"epoch": 19.176470588235293,
"grad_norm": 18.550317764282227,
"learning_rate": 9.97640341360358e-06,
"loss": 0.3407,
"step": 326
},
{
"epoch": 19.294117647058822,
"grad_norm": 2.0581817626953125,
"learning_rate": 9.975056426698094e-06,
"loss": 0.2623,
"step": 328
},
{
"epoch": 19.41176470588235,
"grad_norm": 0.6005405187606812,
"learning_rate": 9.973672149817232e-06,
"loss": 0.258,
"step": 330
},
{
"epoch": 19.529411764705884,
"grad_norm": 0.8219459056854248,
"learning_rate": 9.972250593336689e-06,
"loss": 0.3986,
"step": 332
},
{
"epoch": 19.647058823529413,
"grad_norm": 1.8439754247665405,
"learning_rate": 9.970791767911581e-06,
"loss": 0.2887,
"step": 334
},
{
"epoch": 19.764705882352942,
"grad_norm": 2.892220973968506,
"learning_rate": 9.96929568447637e-06,
"loss": 0.3433,
"step": 336
},
{
"epoch": 19.88235294117647,
"grad_norm": 0.6804599761962891,
"learning_rate": 9.967762354244778e-06,
"loss": 0.3686,
"step": 338
},
{
"epoch": 20.0,
"grad_norm": 0.8408982753753662,
"learning_rate": 9.966191788709716e-06,
"loss": 0.5246,
"step": 340
},
{
"epoch": 20.0,
"eval_loss": 0.48128727078437805,
"eval_runtime": 31.8515,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 340
},
{
"epoch": 20.11764705882353,
"grad_norm": 0.5616675019264221,
"learning_rate": 9.964583999643174e-06,
"loss": 0.2985,
"step": 342
},
{
"epoch": 20.235294117647058,
"grad_norm": 0.4580283463001251,
"learning_rate": 9.962938999096159e-06,
"loss": 0.2537,
"step": 344
},
{
"epoch": 20.352941176470587,
"grad_norm": 0.857369601726532,
"learning_rate": 9.961256799398584e-06,
"loss": 0.2804,
"step": 346
},
{
"epoch": 20.470588235294116,
"grad_norm": 1.1619380712509155,
"learning_rate": 9.95953741315919e-06,
"loss": 0.519,
"step": 348
},
{
"epoch": 20.58823529411765,
"grad_norm": 0.5293999314308167,
"learning_rate": 9.957780853265441e-06,
"loss": 0.2812,
"step": 350
},
{
"epoch": 20.705882352941178,
"grad_norm": 0.6322659850120544,
"learning_rate": 9.955987132883435e-06,
"loss": 0.2476,
"step": 352
},
{
"epoch": 20.823529411764707,
"grad_norm": 0.6491472125053406,
"learning_rate": 9.954156265457801e-06,
"loss": 0.2483,
"step": 354
},
{
"epoch": 20.941176470588236,
"grad_norm": 0.869428813457489,
"learning_rate": 9.952288264711601e-06,
"loss": 0.4751,
"step": 356
},
{
"epoch": 21.0,
"eval_loss": 0.536905825138092,
"eval_runtime": 31.8461,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 357
},
{
"epoch": 21.058823529411764,
"grad_norm": 0.7692728042602539,
"learning_rate": 9.950383144646221e-06,
"loss": 0.4331,
"step": 358
},
{
"epoch": 21.176470588235293,
"grad_norm": 0.6134036779403687,
"learning_rate": 9.948440919541277e-06,
"loss": 0.3302,
"step": 360
},
{
"epoch": 21.294117647058822,
"grad_norm": 0.8637880682945251,
"learning_rate": 9.946461603954499e-06,
"loss": 0.5403,
"step": 362
},
{
"epoch": 21.41176470588235,
"grad_norm": 0.7665501832962036,
"learning_rate": 9.944445212721619e-06,
"loss": 0.2682,
"step": 364
},
{
"epoch": 21.529411764705884,
"grad_norm": 0.6994332671165466,
"learning_rate": 9.942391760956277e-06,
"loss": 0.4364,
"step": 366
},
{
"epoch": 21.647058823529413,
"grad_norm": 0.8093287944793701,
"learning_rate": 9.940301264049885e-06,
"loss": 0.2887,
"step": 368
},
{
"epoch": 21.764705882352942,
"grad_norm": 0.7530141472816467,
"learning_rate": 9.938173737671531e-06,
"loss": 0.2479,
"step": 370
},
{
"epoch": 21.88235294117647,
"grad_norm": 0.35253098607063293,
"learning_rate": 9.936009197767847e-06,
"loss": 0.1859,
"step": 372
},
{
"epoch": 22.0,
"grad_norm": 0.8288949728012085,
"learning_rate": 9.933807660562898e-06,
"loss": 0.2074,
"step": 374
},
{
"epoch": 22.0,
"eval_loss": 0.5176519751548767,
"eval_runtime": 31.8467,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 374
},
{
"epoch": 22.11764705882353,
"grad_norm": 0.7651407122612,
"learning_rate": 9.931569142558057e-06,
"loss": 0.3298,
"step": 376
},
{
"epoch": 22.235294117647058,
"grad_norm": 0.7411403059959412,
"learning_rate": 9.929293660531889e-06,
"loss": 0.3866,
"step": 378
},
{
"epoch": 22.352941176470587,
"grad_norm": 0.8898152709007263,
"learning_rate": 9.926981231540007e-06,
"loss": 0.3071,
"step": 380
},
{
"epoch": 22.470588235294116,
"grad_norm": 1.235049843788147,
"learning_rate": 9.924631872914967e-06,
"loss": 0.3644,
"step": 382
},
{
"epoch": 22.58823529411765,
"grad_norm": 0.9050986766815186,
"learning_rate": 9.922245602266119e-06,
"loss": 0.2616,
"step": 384
},
{
"epoch": 22.705882352941178,
"grad_norm": 0.986419141292572,
"learning_rate": 9.919822437479488e-06,
"loss": 0.2003,
"step": 386
},
{
"epoch": 22.823529411764707,
"grad_norm": 1.1197755336761475,
"learning_rate": 9.91736239671763e-06,
"loss": 0.3103,
"step": 388
},
{
"epoch": 22.941176470588236,
"grad_norm": 1.1346862316131592,
"learning_rate": 9.91486549841951e-06,
"loss": 0.2513,
"step": 390
},
{
"epoch": 23.0,
"eval_loss": 0.5108680129051208,
"eval_runtime": 31.8499,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 391
},
{
"epoch": 23.058823529411764,
"grad_norm": 0.3815372884273529,
"learning_rate": 9.912331761300341e-06,
"loss": 0.1238,
"step": 392
},
{
"epoch": 23.176470588235293,
"grad_norm": 0.004847167991101742,
"learning_rate": 9.909761204351469e-06,
"loss": 0.0666,
"step": 394
},
{
"epoch": 23.294117647058822,
"grad_norm": 0.9460958242416382,
"learning_rate": 9.90715384684021e-06,
"loss": 0.4942,
"step": 396
},
{
"epoch": 23.41176470588235,
"grad_norm": 1.0769481658935547,
"learning_rate": 9.904509708309723e-06,
"loss": 0.2983,
"step": 398
},
{
"epoch": 23.529411764705884,
"grad_norm": 0.9719020128250122,
"learning_rate": 9.901828808578846e-06,
"loss": 0.2081,
"step": 400
},
{
"epoch": 23.647058823529413,
"grad_norm": 1.7906395196914673,
"learning_rate": 9.899111167741966e-06,
"loss": 0.3697,
"step": 402
},
{
"epoch": 23.764705882352942,
"grad_norm": 1.5768994092941284,
"learning_rate": 9.896356806168851e-06,
"loss": 0.3432,
"step": 404
},
{
"epoch": 23.88235294117647,
"grad_norm": 0.9857346415519714,
"learning_rate": 9.89356574450451e-06,
"loss": 0.203,
"step": 406
},
{
"epoch": 24.0,
"grad_norm": 1.2476699352264404,
"learning_rate": 9.890738003669029e-06,
"loss": 0.3019,
"step": 408
},
{
"epoch": 24.0,
"eval_loss": 0.5099675059318542,
"eval_runtime": 31.8826,
"eval_samples_per_second": 1.129,
"eval_steps_per_second": 1.129,
"step": 408
},
{
"epoch": 24.11764705882353,
"grad_norm": 1.1214749813079834,
"learning_rate": 9.887873604857424e-06,
"loss": 0.3208,
"step": 410
},
{
"epoch": 24.235294117647058,
"grad_norm": 1.2565882205963135,
"learning_rate": 9.884972569539471e-06,
"loss": 0.2897,
"step": 412
},
{
"epoch": 24.352941176470587,
"grad_norm": 0.9853368401527405,
"learning_rate": 9.882034919459556e-06,
"loss": 0.2316,
"step": 414
},
{
"epoch": 24.470588235294116,
"grad_norm": 1.3978081941604614,
"learning_rate": 9.879060676636502e-06,
"loss": 0.3282,
"step": 416
},
{
"epoch": 24.58823529411765,
"grad_norm": 1.6501458883285522,
"learning_rate": 9.876049863363415e-06,
"loss": 0.2489,
"step": 418
},
{
"epoch": 24.705882352941178,
"grad_norm": 1.0865875482559204,
"learning_rate": 9.873002502207502e-06,
"loss": 0.1472,
"step": 420
},
{
"epoch": 24.823529411764707,
"grad_norm": 1.1791576147079468,
"learning_rate": 9.86991861600992e-06,
"loss": 0.1987,
"step": 422
},
{
"epoch": 24.941176470588236,
"grad_norm": 1.4849443435668945,
"learning_rate": 9.866798227885588e-06,
"loss": 0.2039,
"step": 424
},
{
"epoch": 25.0,
"eval_loss": 0.542856752872467,
"eval_runtime": 31.8504,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 425
},
{
"epoch": 25.058823529411764,
"grad_norm": 1.064898133277893,
"learning_rate": 9.863641361223025e-06,
"loss": 0.2573,
"step": 426
},
{
"epoch": 25.176470588235293,
"grad_norm": 1.3044614791870117,
"learning_rate": 9.860448039684169e-06,
"loss": 0.2652,
"step": 428
},
{
"epoch": 25.294117647058822,
"grad_norm": 1.1536026000976562,
"learning_rate": 9.857218287204204e-06,
"loss": 0.1707,
"step": 430
},
{
"epoch": 25.41176470588235,
"grad_norm": 1.575131893157959,
"learning_rate": 9.853952127991374e-06,
"loss": 0.2226,
"step": 432
},
{
"epoch": 25.529411764705884,
"grad_norm": 1.8576124906539917,
"learning_rate": 9.850649586526808e-06,
"loss": 0.1639,
"step": 434
},
{
"epoch": 25.647058823529413,
"grad_norm": 1.8986772298812866,
"learning_rate": 9.847310687564335e-06,
"loss": 0.367,
"step": 436
},
{
"epoch": 25.764705882352942,
"grad_norm": 1.0811642408370972,
"learning_rate": 9.843935456130295e-06,
"loss": 0.2656,
"step": 438
},
{
"epoch": 25.88235294117647,
"grad_norm": 1.5638922452926636,
"learning_rate": 9.840523917523354e-06,
"loss": 0.1376,
"step": 440
},
{
"epoch": 26.0,
"grad_norm": 1.4766254425048828,
"learning_rate": 9.83707609731432e-06,
"loss": 0.228,
"step": 442
},
{
"epoch": 26.0,
"eval_loss": 0.5160675644874573,
"eval_runtime": 31.8614,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 442
},
{
"epoch": 26.11764705882353,
"grad_norm": 0.983343780040741,
"learning_rate": 9.833592021345938e-06,
"loss": 0.1742,
"step": 444
},
{
"epoch": 26.235294117647058,
"grad_norm": 1.3315422534942627,
"learning_rate": 9.830071715732708e-06,
"loss": 0.1853,
"step": 446
},
{
"epoch": 26.352941176470587,
"grad_norm": 0.6733668446540833,
"learning_rate": 9.826515206860683e-06,
"loss": 0.1493,
"step": 448
},
{
"epoch": 26.470588235294116,
"grad_norm": 1.4821362495422363,
"learning_rate": 9.822922521387277e-06,
"loss": 0.2319,
"step": 450
},
{
"epoch": 26.58823529411765,
"grad_norm": 3.0113203525543213,
"learning_rate": 9.819293686241057e-06,
"loss": 0.253,
"step": 452
},
{
"epoch": 26.705882352941178,
"grad_norm": 2.0522031784057617,
"learning_rate": 9.81562872862155e-06,
"loss": 0.1756,
"step": 454
},
{
"epoch": 26.823529411764707,
"grad_norm": 2.2314300537109375,
"learning_rate": 9.811927675999035e-06,
"loss": 0.1977,
"step": 456
},
{
"epoch": 26.941176470588236,
"grad_norm": 1.571890115737915,
"learning_rate": 9.808190556114333e-06,
"loss": 0.2127,
"step": 458
},
{
"epoch": 27.0,
"eval_loss": 0.5205699801445007,
"eval_runtime": 31.8543,
"eval_samples_per_second": 1.13,
"eval_steps_per_second": 1.13,
"step": 459
},
{
"epoch": 27.0,
"step": 459,
"total_flos": 2.8798901486092288e+17,
"train_loss": 0.8010637635823689,
"train_runtime": 10567.3661,
"train_samples_per_second": 1.93,
"train_steps_per_second": 0.241
}
],
"logging_steps": 2,
"max_steps": 2550,
"num_input_tokens_seen": 0,
"num_train_epochs": 150,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 7,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.8798901486092288e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}