{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 155,
"global_step": 620,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 5.8125,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.7582,
"step": 1
},
{
"epoch": 0.0,
"eval_loss": 2.128222703933716,
"eval_runtime": 1.7527,
"eval_samples_per_second": 19.398,
"eval_steps_per_second": 19.398,
"step": 1
},
{
"epoch": 0.0,
"grad_norm": 8.0625,
"learning_rate": 4.000000000000001e-06,
"loss": 2.0767,
"step": 2
},
{
"epoch": 0.0,
"grad_norm": 5.875,
"learning_rate": 6e-06,
"loss": 2.074,
"step": 3
},
{
"epoch": 0.01,
"grad_norm": 6.21875,
"learning_rate": 8.000000000000001e-06,
"loss": 1.9594,
"step": 4
},
{
"epoch": 0.01,
"grad_norm": 7.75,
"learning_rate": 1e-05,
"loss": 2.1593,
"step": 5
},
{
"epoch": 0.01,
"grad_norm": 7.15625,
"learning_rate": 1.2e-05,
"loss": 1.5345,
"step": 6
},
{
"epoch": 0.01,
"grad_norm": 6.5625,
"learning_rate": 1.4000000000000001e-05,
"loss": 1.7973,
"step": 7
},
{
"epoch": 0.01,
"grad_norm": 6.875,
"learning_rate": 1.6000000000000003e-05,
"loss": 2.0634,
"step": 8
},
{
"epoch": 0.01,
"grad_norm": 5.4375,
"learning_rate": 1.8e-05,
"loss": 1.5362,
"step": 9
},
{
"epoch": 0.02,
"grad_norm": 7.125,
"learning_rate": 2e-05,
"loss": 2.2476,
"step": 10
},
{
"epoch": 0.02,
"grad_norm": 5.90625,
"learning_rate": 2.2000000000000003e-05,
"loss": 1.8936,
"step": 11
},
{
"epoch": 0.02,
"grad_norm": 6.0625,
"learning_rate": 2.4e-05,
"loss": 1.5785,
"step": 12
},
{
"epoch": 0.02,
"grad_norm": 5.90625,
"learning_rate": 2.6000000000000002e-05,
"loss": 1.5667,
"step": 13
},
{
"epoch": 0.02,
"grad_norm": 6.28125,
"learning_rate": 2.8000000000000003e-05,
"loss": 1.9219,
"step": 14
},
{
"epoch": 0.02,
"grad_norm": 7.78125,
"learning_rate": 3e-05,
"loss": 2.0164,
"step": 15
},
{
"epoch": 0.03,
"grad_norm": 5.53125,
"learning_rate": 3.2000000000000005e-05,
"loss": 2.0587,
"step": 16
},
{
"epoch": 0.03,
"grad_norm": 5.84375,
"learning_rate": 3.4000000000000007e-05,
"loss": 1.9962,
"step": 17
},
{
"epoch": 0.03,
"grad_norm": 28.25,
"learning_rate": 3.6e-05,
"loss": 2.2685,
"step": 18
},
{
"epoch": 0.03,
"grad_norm": 7.5,
"learning_rate": 3.8e-05,
"loss": 2.3435,
"step": 19
},
{
"epoch": 0.03,
"grad_norm": 8.1875,
"learning_rate": 4e-05,
"loss": 2.3058,
"step": 20
},
{
"epoch": 0.03,
"grad_norm": 5.6875,
"learning_rate": 4.2e-05,
"loss": 2.1123,
"step": 21
},
{
"epoch": 0.04,
"grad_norm": 9.1875,
"learning_rate": 4.4000000000000006e-05,
"loss": 2.8498,
"step": 22
},
{
"epoch": 0.04,
"grad_norm": 6.0,
"learning_rate": 4.600000000000001e-05,
"loss": 1.9676,
"step": 23
},
{
"epoch": 0.04,
"grad_norm": 7.625,
"learning_rate": 4.8e-05,
"loss": 2.3663,
"step": 24
},
{
"epoch": 0.04,
"grad_norm": 5.34375,
"learning_rate": 5e-05,
"loss": 1.8029,
"step": 25
},
{
"epoch": 0.04,
"grad_norm": 17.125,
"learning_rate": 5.2000000000000004e-05,
"loss": 2.0883,
"step": 26
},
{
"epoch": 0.04,
"grad_norm": 5.4375,
"learning_rate": 5.4000000000000005e-05,
"loss": 1.8337,
"step": 27
},
{
"epoch": 0.05,
"grad_norm": 7.40625,
"learning_rate": 5.6000000000000006e-05,
"loss": 2.0822,
"step": 28
},
{
"epoch": 0.05,
"grad_norm": 8.1875,
"learning_rate": 5.8e-05,
"loss": 2.3772,
"step": 29
},
{
"epoch": 0.05,
"grad_norm": 5.875,
"learning_rate": 6e-05,
"loss": 1.8399,
"step": 30
},
{
"epoch": 0.05,
"grad_norm": 6.84375,
"learning_rate": 6.2e-05,
"loss": 1.7067,
"step": 31
},
{
"epoch": 0.05,
"grad_norm": 5.6875,
"learning_rate": 6.400000000000001e-05,
"loss": 1.8348,
"step": 32
},
{
"epoch": 0.05,
"grad_norm": 7.15625,
"learning_rate": 6.6e-05,
"loss": 1.9547,
"step": 33
},
{
"epoch": 0.05,
"grad_norm": 7.40625,
"learning_rate": 6.800000000000001e-05,
"loss": 2.3197,
"step": 34
},
{
"epoch": 0.06,
"grad_norm": 7.5,
"learning_rate": 7e-05,
"loss": 2.2976,
"step": 35
},
{
"epoch": 0.06,
"grad_norm": 6.6875,
"learning_rate": 7.2e-05,
"loss": 2.2088,
"step": 36
},
{
"epoch": 0.06,
"grad_norm": 5.4375,
"learning_rate": 7.4e-05,
"loss": 1.8717,
"step": 37
},
{
"epoch": 0.06,
"grad_norm": 9.0625,
"learning_rate": 7.6e-05,
"loss": 2.8409,
"step": 38
},
{
"epoch": 0.06,
"grad_norm": 7.875,
"learning_rate": 7.800000000000001e-05,
"loss": 2.1854,
"step": 39
},
{
"epoch": 0.06,
"grad_norm": 5.375,
"learning_rate": 8e-05,
"loss": 1.741,
"step": 40
},
{
"epoch": 0.07,
"grad_norm": 10.4375,
"learning_rate": 8.2e-05,
"loss": 2.3502,
"step": 41
},
{
"epoch": 0.07,
"grad_norm": 5.9375,
"learning_rate": 8.4e-05,
"loss": 1.6967,
"step": 42
},
{
"epoch": 0.07,
"grad_norm": 5.8125,
"learning_rate": 8.6e-05,
"loss": 1.9294,
"step": 43
},
{
"epoch": 0.07,
"grad_norm": 6.3125,
"learning_rate": 8.800000000000001e-05,
"loss": 1.834,
"step": 44
},
{
"epoch": 0.07,
"grad_norm": 11.125,
"learning_rate": 9e-05,
"loss": 1.4147,
"step": 45
},
{
"epoch": 0.07,
"grad_norm": 5.96875,
"learning_rate": 9.200000000000001e-05,
"loss": 1.6542,
"step": 46
},
{
"epoch": 0.08,
"grad_norm": 9.0,
"learning_rate": 9.4e-05,
"loss": 2.4612,
"step": 47
},
{
"epoch": 0.08,
"grad_norm": 5.8125,
"learning_rate": 9.6e-05,
"loss": 1.8718,
"step": 48
},
{
"epoch": 0.08,
"grad_norm": 7.59375,
"learning_rate": 9.8e-05,
"loss": 2.3953,
"step": 49
},
{
"epoch": 0.08,
"grad_norm": 8.0625,
"learning_rate": 0.0001,
"loss": 2.0245,
"step": 50
},
{
"epoch": 0.08,
"grad_norm": 8.875,
"learning_rate": 0.00010200000000000001,
"loss": 2.251,
"step": 51
},
{
"epoch": 0.08,
"grad_norm": 7.4375,
"learning_rate": 0.00010400000000000001,
"loss": 2.2304,
"step": 52
},
{
"epoch": 0.09,
"grad_norm": 8.625,
"learning_rate": 0.00010600000000000002,
"loss": 2.0407,
"step": 53
},
{
"epoch": 0.09,
"grad_norm": 8.25,
"learning_rate": 0.00010800000000000001,
"loss": 2.4401,
"step": 54
},
{
"epoch": 0.09,
"grad_norm": 7.5625,
"learning_rate": 0.00011000000000000002,
"loss": 1.9831,
"step": 55
},
{
"epoch": 0.09,
"grad_norm": 8.5625,
"learning_rate": 0.00011200000000000001,
"loss": 1.9876,
"step": 56
},
{
"epoch": 0.09,
"grad_norm": 6.1875,
"learning_rate": 0.00011399999999999999,
"loss": 2.0943,
"step": 57
},
{
"epoch": 0.09,
"grad_norm": 5.34375,
"learning_rate": 0.000116,
"loss": 2.0148,
"step": 58
},
{
"epoch": 0.1,
"grad_norm": 7.65625,
"learning_rate": 0.000118,
"loss": 1.8346,
"step": 59
},
{
"epoch": 0.1,
"grad_norm": 5.4375,
"learning_rate": 0.00012,
"loss": 2.0344,
"step": 60
},
{
"epoch": 0.1,
"grad_norm": 6.96875,
"learning_rate": 0.000122,
"loss": 1.8906,
"step": 61
},
{
"epoch": 0.1,
"grad_norm": 14.4375,
"learning_rate": 0.000124,
"loss": 2.4039,
"step": 62
},
{
"epoch": 0.1,
"grad_norm": 10.3125,
"learning_rate": 0.000126,
"loss": 2.052,
"step": 63
},
{
"epoch": 0.1,
"grad_norm": 13.3125,
"learning_rate": 0.00012800000000000002,
"loss": 2.7162,
"step": 64
},
{
"epoch": 0.1,
"grad_norm": 6.75,
"learning_rate": 0.00013000000000000002,
"loss": 1.6805,
"step": 65
},
{
"epoch": 0.11,
"grad_norm": 13.375,
"learning_rate": 0.000132,
"loss": 2.9215,
"step": 66
},
{
"epoch": 0.11,
"grad_norm": 6.90625,
"learning_rate": 0.000134,
"loss": 2.0593,
"step": 67
},
{
"epoch": 0.11,
"grad_norm": 8.1875,
"learning_rate": 0.00013600000000000003,
"loss": 2.1428,
"step": 68
},
{
"epoch": 0.11,
"grad_norm": 7.0,
"learning_rate": 0.000138,
"loss": 2.4703,
"step": 69
},
{
"epoch": 0.11,
"grad_norm": 10.75,
"learning_rate": 0.00014,
"loss": 2.5697,
"step": 70
},
{
"epoch": 0.11,
"grad_norm": 9.625,
"learning_rate": 0.000142,
"loss": 2.8569,
"step": 71
},
{
"epoch": 0.12,
"grad_norm": 8.5625,
"learning_rate": 0.000144,
"loss": 2.3486,
"step": 72
},
{
"epoch": 0.12,
"grad_norm": 7.90625,
"learning_rate": 0.000146,
"loss": 2.2554,
"step": 73
},
{
"epoch": 0.12,
"grad_norm": 10.0,
"learning_rate": 0.000148,
"loss": 2.2662,
"step": 74
},
{
"epoch": 0.12,
"grad_norm": 12.8125,
"learning_rate": 0.00015000000000000001,
"loss": 2.4359,
"step": 75
},
{
"epoch": 0.12,
"grad_norm": 9.4375,
"learning_rate": 0.000152,
"loss": 3.3211,
"step": 76
},
{
"epoch": 0.12,
"grad_norm": 13.375,
"learning_rate": 0.000154,
"loss": 2.9317,
"step": 77
},
{
"epoch": 0.13,
"grad_norm": 8.9375,
"learning_rate": 0.00015600000000000002,
"loss": 2.7471,
"step": 78
},
{
"epoch": 0.13,
"grad_norm": 6.5,
"learning_rate": 0.00015800000000000002,
"loss": 2.2128,
"step": 79
},
{
"epoch": 0.13,
"grad_norm": 15.25,
"learning_rate": 0.00016,
"loss": 2.613,
"step": 80
},
{
"epoch": 0.13,
"grad_norm": 14.0625,
"learning_rate": 0.000162,
"loss": 3.2154,
"step": 81
},
{
"epoch": 0.13,
"grad_norm": 7.5,
"learning_rate": 0.000164,
"loss": 2.3345,
"step": 82
},
{
"epoch": 0.13,
"grad_norm": 13.25,
"learning_rate": 0.000166,
"loss": 2.1628,
"step": 83
},
{
"epoch": 0.14,
"grad_norm": 11.0,
"learning_rate": 0.000168,
"loss": 2.4309,
"step": 84
},
{
"epoch": 0.14,
"grad_norm": 9.25,
"learning_rate": 0.00017,
"loss": 2.8301,
"step": 85
},
{
"epoch": 0.14,
"grad_norm": 6.09375,
"learning_rate": 0.000172,
"loss": 2.3429,
"step": 86
},
{
"epoch": 0.14,
"grad_norm": 20.0,
"learning_rate": 0.000174,
"loss": 3.0507,
"step": 87
},
{
"epoch": 0.14,
"grad_norm": 9.75,
"learning_rate": 0.00017600000000000002,
"loss": 2.5522,
"step": 88
},
{
"epoch": 0.14,
"grad_norm": 6.84375,
"learning_rate": 0.00017800000000000002,
"loss": 2.22,
"step": 89
},
{
"epoch": 0.15,
"grad_norm": 11.5,
"learning_rate": 0.00018,
"loss": 2.4792,
"step": 90
},
{
"epoch": 0.15,
"grad_norm": 19.375,
"learning_rate": 0.000182,
"loss": 2.8168,
"step": 91
},
{
"epoch": 0.15,
"grad_norm": 10.0,
"learning_rate": 0.00018400000000000003,
"loss": 2.7499,
"step": 92
},
{
"epoch": 0.15,
"grad_norm": 11.125,
"learning_rate": 0.00018600000000000002,
"loss": 2.12,
"step": 93
},
{
"epoch": 0.15,
"grad_norm": 11.0625,
"learning_rate": 0.000188,
"loss": 2.5596,
"step": 94
},
{
"epoch": 0.15,
"grad_norm": 9.0625,
"learning_rate": 0.00019,
"loss": 2.2917,
"step": 95
},
{
"epoch": 0.15,
"grad_norm": 8.375,
"learning_rate": 0.000192,
"loss": 2.5138,
"step": 96
},
{
"epoch": 0.16,
"grad_norm": 14.5625,
"learning_rate": 0.000194,
"loss": 2.6197,
"step": 97
},
{
"epoch": 0.16,
"grad_norm": 13.6875,
"learning_rate": 0.000196,
"loss": 2.4397,
"step": 98
},
{
"epoch": 0.16,
"grad_norm": 9.375,
"learning_rate": 0.00019800000000000002,
"loss": 2.3659,
"step": 99
},
{
"epoch": 0.16,
"grad_norm": 46.5,
"learning_rate": 0.0002,
"loss": 2.0891,
"step": 100
},
{
"epoch": 0.16,
"grad_norm": 7.65625,
"learning_rate": 0.00019999817500473724,
"loss": 2.3711,
"step": 101
},
{
"epoch": 0.16,
"grad_norm": 11.8125,
"learning_rate": 0.00019999270008556108,
"loss": 2.5669,
"step": 102
},
{
"epoch": 0.17,
"grad_norm": 9.375,
"learning_rate": 0.00019998357544230558,
"loss": 2.7071,
"step": 103
},
{
"epoch": 0.17,
"grad_norm": 9.0,
"learning_rate": 0.00019997080140801932,
"loss": 2.0942,
"step": 104
},
{
"epoch": 0.17,
"grad_norm": 14.0625,
"learning_rate": 0.00019995437844895334,
"loss": 2.6619,
"step": 105
},
{
"epoch": 0.17,
"grad_norm": 9.375,
"learning_rate": 0.00019993430716454413,
"loss": 2.0561,
"step": 106
},
{
"epoch": 0.17,
"grad_norm": 9.0625,
"learning_rate": 0.00019991058828739165,
"loss": 2.5201,
"step": 107
},
{
"epoch": 0.17,
"grad_norm": 17.375,
"learning_rate": 0.00019988322268323268,
"loss": 2.6833,
"step": 108
},
{
"epoch": 0.18,
"grad_norm": 8.1875,
"learning_rate": 0.00019985221135090914,
"loss": 2.3537,
"step": 109
},
{
"epoch": 0.18,
"grad_norm": 9.1875,
"learning_rate": 0.00019981755542233177,
"loss": 2.2725,
"step": 110
},
{
"epoch": 0.18,
"grad_norm": 44.25,
"learning_rate": 0.00019977925616243862,
"loss": 2.5759,
"step": 111
},
{
"epoch": 0.18,
"grad_norm": 18.125,
"learning_rate": 0.00019973731496914914,
"loss": 2.7988,
"step": 112
},
{
"epoch": 0.18,
"grad_norm": 10.0,
"learning_rate": 0.0001996917333733128,
"loss": 2.4719,
"step": 113
},
{
"epoch": 0.18,
"grad_norm": 12.3125,
"learning_rate": 0.00019964251303865362,
"loss": 3.1389,
"step": 114
},
{
"epoch": 0.19,
"grad_norm": 16.25,
"learning_rate": 0.00019958965576170908,
"loss": 2.5832,
"step": 115
},
{
"epoch": 0.19,
"grad_norm": 11.125,
"learning_rate": 0.00019953316347176488,
"loss": 2.8191,
"step": 116
},
{
"epoch": 0.19,
"grad_norm": 8.1875,
"learning_rate": 0.00019947303823078416,
"loss": 2.6941,
"step": 117
},
{
"epoch": 0.19,
"grad_norm": 11.875,
"learning_rate": 0.00019940928223333252,
"loss": 2.3098,
"step": 118
},
{
"epoch": 0.19,
"grad_norm": 11.375,
"learning_rate": 0.0001993418978064979,
"loss": 2.6137,
"step": 119
},
{
"epoch": 0.19,
"grad_norm": 16.375,
"learning_rate": 0.0001992708874098054,
"loss": 4.0113,
"step": 120
},
{
"epoch": 0.2,
"grad_norm": 7.25,
"learning_rate": 0.00019919625363512786,
"loss": 2.4582,
"step": 121
},
{
"epoch": 0.2,
"grad_norm": 19.5,
"learning_rate": 0.00019911799920659093,
"loss": 2.1897,
"step": 122
},
{
"epoch": 0.2,
"grad_norm": 20.25,
"learning_rate": 0.00019903612698047383,
"loss": 2.9714,
"step": 123
},
{
"epoch": 0.2,
"grad_norm": 18.375,
"learning_rate": 0.0001989506399451051,
"loss": 3.2223,
"step": 124
},
{
"epoch": 0.2,
"grad_norm": 189.0,
"learning_rate": 0.00019886154122075343,
"loss": 8.0224,
"step": 125
},
{
"epoch": 0.2,
"grad_norm": 19.75,
"learning_rate": 0.00019876883405951377,
"loss": 2.276,
"step": 126
},
{
"epoch": 0.2,
"grad_norm": 8.0625,
"learning_rate": 0.00019867252184518878,
"loss": 2.263,
"step": 127
},
{
"epoch": 0.21,
"grad_norm": 129.0,
"learning_rate": 0.0001985726080931651,
"loss": 9.2328,
"step": 128
},
{
"epoch": 0.21,
"grad_norm": 134.0,
"learning_rate": 0.00019846909645028523,
"loss": 7.3449,
"step": 129
},
{
"epoch": 0.21,
"grad_norm": 25.625,
"learning_rate": 0.00019836199069471437,
"loss": 2.7,
"step": 130
},
{
"epoch": 0.21,
"grad_norm": 12.625,
"learning_rate": 0.0001982512947358024,
"loss": 2.18,
"step": 131
},
{
"epoch": 0.21,
"grad_norm": 27.0,
"learning_rate": 0.00019813701261394136,
"loss": 2.9489,
"step": 132
},
{
"epoch": 0.21,
"grad_norm": 13.4375,
"learning_rate": 0.00019801914850041784,
"loss": 2.2871,
"step": 133
},
{
"epoch": 0.22,
"grad_norm": 16.625,
"learning_rate": 0.00019789770669726087,
"loss": 2.6805,
"step": 134
},
{
"epoch": 0.22,
"grad_norm": 136.0,
"learning_rate": 0.00019777269163708468,
"loss": 2.0789,
"step": 135
},
{
"epoch": 0.22,
"grad_norm": 23.25,
"learning_rate": 0.00019764410788292722,
"loss": 2.7603,
"step": 136
},
{
"epoch": 0.22,
"grad_norm": 53.75,
"learning_rate": 0.00019751196012808325,
"loss": 2.6992,
"step": 137
},
{
"epoch": 0.22,
"grad_norm": 84.5,
"learning_rate": 0.00019737625319593335,
"loss": 8.1541,
"step": 138
},
{
"epoch": 0.22,
"grad_norm": 320.0,
"learning_rate": 0.00019723699203976766,
"loss": 8.7045,
"step": 139
},
{
"epoch": 0.23,
"grad_norm": 1192.0,
"learning_rate": 0.0001970941817426052,
"loss": 3.8857,
"step": 140
},
{
"epoch": 0.23,
"grad_norm": 202.0,
"learning_rate": 0.00019694782751700828,
"loss": 3.4135,
"step": 141
},
{
"epoch": 0.23,
"grad_norm": 14.1875,
"learning_rate": 0.00019679793470489228,
"loss": 2.4865,
"step": 142
},
{
"epoch": 0.23,
"grad_norm": 18.625,
"learning_rate": 0.00019664450877733062,
"loss": 2.2073,
"step": 143
},
{
"epoch": 0.23,
"grad_norm": 22.625,
"learning_rate": 0.00019648755533435518,
"loss": 2.4245,
"step": 144
},
{
"epoch": 0.23,
"grad_norm": 92.0,
"learning_rate": 0.00019632708010475165,
"loss": 2.2213,
"step": 145
},
{
"epoch": 0.24,
"grad_norm": 25.875,
"learning_rate": 0.00019616308894585078,
"loss": 2.3082,
"step": 146
},
{
"epoch": 0.24,
"grad_norm": 4512.0,
"learning_rate": 0.0001959955878433143,
"loss": 7.3644,
"step": 147
},
{
"epoch": 0.24,
"grad_norm": 12.0625,
"learning_rate": 0.00019582458291091663,
"loss": 2.4085,
"step": 148
},
{
"epoch": 0.24,
"grad_norm": 50.25,
"learning_rate": 0.00019565008039032158,
"loss": 9.1058,
"step": 149
},
{
"epoch": 0.24,
"grad_norm": 12.5625,
"learning_rate": 0.00019547208665085457,
"loss": 2.3966,
"step": 150
},
{
"epoch": 0.24,
"grad_norm": 15.4375,
"learning_rate": 0.0001952906081892703,
"loss": 2.4106,
"step": 151
},
{
"epoch": 0.25,
"grad_norm": 1448.0,
"learning_rate": 0.00019510565162951537,
"loss": 3.339,
"step": 152
},
{
"epoch": 0.25,
"grad_norm": 161.0,
"learning_rate": 0.0001949172237224867,
"loss": 3.5134,
"step": 153
},
{
"epoch": 0.25,
"grad_norm": 486.0,
"learning_rate": 0.00019472533134578507,
"loss": 7.866,
"step": 154
},
{
"epoch": 0.25,
"grad_norm": 17.75,
"learning_rate": 0.00019452998150346401,
"loss": 2.6905,
"step": 155
},
{
"epoch": 0.25,
"eval_loss": 4.079593658447266,
"eval_runtime": 1.7475,
"eval_samples_per_second": 19.457,
"eval_steps_per_second": 19.457,
"step": 155
},
{
"epoch": 0.25,
"grad_norm": 296.0,
"learning_rate": 0.0001943311813257743,
"loss": 2.2737,
"step": 156
},
{
"epoch": 0.25,
"grad_norm": 93.5,
"learning_rate": 0.00019412893806890357,
"loss": 2.3946,
"step": 157
},
{
"epoch": 0.25,
"grad_norm": 36.0,
"learning_rate": 0.00019392325911471155,
"loss": 2.5526,
"step": 158
},
{
"epoch": 0.26,
"grad_norm": 114.0,
"learning_rate": 0.00019371415197046052,
"loss": 2.3686,
"step": 159
},
{
"epoch": 0.26,
"grad_norm": 1016.0,
"learning_rate": 0.0001935016242685415,
"loss": 4.7788,
"step": 160
},
{
"epoch": 0.26,
"grad_norm": 86.5,
"learning_rate": 0.00019328568376619543,
"loss": 2.9732,
"step": 161
},
{
"epoch": 0.26,
"grad_norm": 205.0,
"learning_rate": 0.00019306633834523024,
"loss": 2.886,
"step": 162
},
{
"epoch": 0.26,
"grad_norm": 41.0,
"learning_rate": 0.00019284359601173294,
"loss": 2.3179,
"step": 163
},
{
"epoch": 0.26,
"grad_norm": 46.0,
"learning_rate": 0.00019261746489577765,
"loss": 2.6493,
"step": 164
},
{
"epoch": 0.27,
"grad_norm": 16.625,
"learning_rate": 0.0001923879532511287,
"loss": 3.2452,
"step": 165
},
{
"epoch": 0.27,
"grad_norm": 17.375,
"learning_rate": 0.0001921550694549393,
"loss": 2.8346,
"step": 166
},
{
"epoch": 0.27,
"grad_norm": 16.5,
"learning_rate": 0.000191918822007446,
"loss": 2.4778,
"step": 167
},
{
"epoch": 0.27,
"grad_norm": 262.0,
"learning_rate": 0.00019167921953165825,
"loss": 3.5156,
"step": 168
},
{
"epoch": 0.27,
"grad_norm": 59.5,
"learning_rate": 0.0001914362707730437,
"loss": 2.7138,
"step": 169
},
{
"epoch": 0.27,
"grad_norm": 25.625,
"learning_rate": 0.00019118998459920902,
"loss": 2.9188,
"step": 170
},
{
"epoch": 0.28,
"grad_norm": 432.0,
"learning_rate": 0.00019094036999957624,
"loss": 4.9694,
"step": 171
},
{
"epoch": 0.28,
"grad_norm": 43.0,
"learning_rate": 0.00019068743608505455,
"loss": 2.2251,
"step": 172
},
{
"epoch": 0.28,
"grad_norm": 36.0,
"learning_rate": 0.00019043119208770793,
"loss": 2.3021,
"step": 173
},
{
"epoch": 0.28,
"grad_norm": 11.5,
"learning_rate": 0.00019017164736041795,
"loss": 2.6769,
"step": 174
},
{
"epoch": 0.28,
"grad_norm": 21.875,
"learning_rate": 0.00018990881137654258,
"loss": 2.9547,
"step": 175
},
{
"epoch": 0.28,
"grad_norm": 12.5,
"learning_rate": 0.00018964269372957038,
"loss": 2.5707,
"step": 176
},
{
"epoch": 0.29,
"grad_norm": 235.0,
"learning_rate": 0.0001893733041327702,
"loss": 2.3129,
"step": 177
},
{
"epoch": 0.29,
"grad_norm": 10.875,
"learning_rate": 0.0001891006524188368,
"loss": 2.3162,
"step": 178
},
{
"epoch": 0.29,
"grad_norm": 32.75,
"learning_rate": 0.0001888247485395319,
"loss": 2.3063,
"step": 179
},
{
"epoch": 0.29,
"grad_norm": 17.75,
"learning_rate": 0.000188545602565321,
"loss": 3.1659,
"step": 180
},
{
"epoch": 0.29,
"grad_norm": 14.4375,
"learning_rate": 0.00018826322468500566,
"loss": 2.4882,
"step": 181
},
{
"epoch": 0.29,
"grad_norm": 35.25,
"learning_rate": 0.00018797762520535177,
"loss": 2.8105,
"step": 182
},
{
"epoch": 0.3,
"grad_norm": 9.0,
"learning_rate": 0.00018768881455071332,
"loss": 2.2759,
"step": 183
},
{
"epoch": 0.3,
"grad_norm": 70.0,
"learning_rate": 0.0001873968032626518,
"loss": 2.8899,
"step": 184
},
{
"epoch": 0.3,
"grad_norm": 7.28125,
"learning_rate": 0.00018710160199955156,
"loss": 2.6619,
"step": 185
},
{
"epoch": 0.3,
"grad_norm": 9.125,
"learning_rate": 0.00018680322153623075,
"loss": 2.6123,
"step": 186
},
{
"epoch": 0.3,
"grad_norm": 17.375,
"learning_rate": 0.000186501672763548,
"loss": 2.5625,
"step": 187
},
{
"epoch": 0.3,
"grad_norm": 11.3125,
"learning_rate": 0.00018619696668800492,
"loss": 2.6897,
"step": 188
},
{
"epoch": 0.3,
"grad_norm": 11.9375,
"learning_rate": 0.00018588911443134448,
"loss": 3.0509,
"step": 189
},
{
"epoch": 0.31,
"grad_norm": 27.75,
"learning_rate": 0.00018557812723014476,
"loss": 2.5462,
"step": 190
},
{
"epoch": 0.31,
"grad_norm": 9.5,
"learning_rate": 0.00018526401643540922,
"loss": 2.0449,
"step": 191
},
{
"epoch": 0.31,
"grad_norm": 15.9375,
"learning_rate": 0.0001849467935121521,
"loss": 3.0498,
"step": 192
},
{
"epoch": 0.31,
"grad_norm": 11.8125,
"learning_rate": 0.00018462647003898006,
"loss": 3.1474,
"step": 193
},
{
"epoch": 0.31,
"grad_norm": 7.0625,
"learning_rate": 0.00018430305770766948,
"loss": 2.4962,
"step": 194
},
{
"epoch": 0.31,
"grad_norm": 2464.0,
"learning_rate": 0.0001839765683227398,
"loss": 12.6715,
"step": 195
},
{
"epoch": 0.32,
"grad_norm": 13.0,
"learning_rate": 0.00018364701380102266,
"loss": 2.5459,
"step": 196
},
{
"epoch": 0.32,
"grad_norm": 43.25,
"learning_rate": 0.00018331440617122696,
"loss": 2.8457,
"step": 197
},
{
"epoch": 0.32,
"grad_norm": 10.4375,
"learning_rate": 0.00018297875757349952,
"loss": 2.9238,
"step": 198
},
{
"epoch": 0.32,
"grad_norm": 19.5,
"learning_rate": 0.00018264008025898248,
"loss": 2.4803,
"step": 199
},
{
"epoch": 0.32,
"grad_norm": 16.0,
"learning_rate": 0.00018229838658936564,
"loss": 2.7507,
"step": 200
},
{
"epoch": 0.32,
"grad_norm": 17.0,
"learning_rate": 0.00018195368903643563,
"loss": 2.4951,
"step": 201
},
{
"epoch": 0.33,
"grad_norm": 7.4375,
"learning_rate": 0.0001816060001816205,
"loss": 1.9929,
"step": 202
},
{
"epoch": 0.33,
"grad_norm": 14.25,
"learning_rate": 0.00018125533271553043,
"loss": 2.7251,
"step": 203
},
{
"epoch": 0.33,
"grad_norm": 45.5,
"learning_rate": 0.00018090169943749476,
"loss": 2.2007,
"step": 204
},
{
"epoch": 0.33,
"grad_norm": 8.5,
"learning_rate": 0.0001805451132550946,
"loss": 2.6761,
"step": 205
},
{
"epoch": 0.33,
"grad_norm": 5.78125,
"learning_rate": 0.00018018558718369186,
"loss": 2.1643,
"step": 206
},
{
"epoch": 0.33,
"grad_norm": 13.75,
"learning_rate": 0.00017982313434595406,
"loss": 1.9931,
"step": 207
},
{
"epoch": 0.34,
"grad_norm": 7.875,
"learning_rate": 0.00017945776797137543,
"loss": 2.9199,
"step": 208
},
{
"epoch": 0.34,
"grad_norm": 24.75,
"learning_rate": 0.00017908950139579406,
"loss": 3.1686,
"step": 209
},
{
"epoch": 0.34,
"grad_norm": 6240.0,
"learning_rate": 0.00017871834806090501,
"loss": 4.8329,
"step": 210
},
{
"epoch": 0.34,
"grad_norm": 8.6875,
"learning_rate": 0.0001783443215137699,
"loss": 2.3425,
"step": 211
},
{
"epoch": 0.34,
"grad_norm": 11.125,
"learning_rate": 0.00017796743540632223,
"loss": 7.5991,
"step": 212
},
{
"epoch": 0.34,
"grad_norm": 7.03125,
"learning_rate": 0.00017758770349486923,
"loss": 2.5081,
"step": 213
},
{
"epoch": 0.35,
"grad_norm": 12.25,
"learning_rate": 0.00017720513963958968,
"loss": 2.3669,
"step": 214
},
{
"epoch": 0.35,
"grad_norm": 31.875,
"learning_rate": 0.00017681975780402807,
"loss": 3.2162,
"step": 215
},
{
"epoch": 0.35,
"grad_norm": 13.8125,
"learning_rate": 0.00017643157205458483,
"loss": 2.8832,
"step": 216
},
{
"epoch": 0.35,
"grad_norm": 9.1875,
"learning_rate": 0.0001760405965600031,
"loss": 2.1019,
"step": 217
},
{
"epoch": 0.35,
"grad_norm": 6.96875,
"learning_rate": 0.00017564684559085136,
"loss": 2.1741,
"step": 218
},
{
"epoch": 0.35,
"grad_norm": 88.5,
"learning_rate": 0.00017525033351900268,
"loss": 2.875,
"step": 219
},
{
"epoch": 0.35,
"grad_norm": 15.5,
"learning_rate": 0.00017485107481711012,
"loss": 2.9173,
"step": 220
},
{
"epoch": 0.36,
"grad_norm": 18.875,
"learning_rate": 0.00017444908405807845,
"loss": 2.77,
"step": 221
},
{
"epoch": 0.36,
"grad_norm": 114.5,
"learning_rate": 0.00017404437591453235,
"loss": 2.5471,
"step": 222
},
{
"epoch": 0.36,
"grad_norm": 19.5,
"learning_rate": 0.00017363696515828062,
"loss": 2.4391,
"step": 223
},
{
"epoch": 0.36,
"grad_norm": 14.0625,
"learning_rate": 0.00017322686665977737,
"loss": 2.4229,
"step": 224
},
{
"epoch": 0.36,
"grad_norm": 33.5,
"learning_rate": 0.00017281409538757883,
"loss": 2.9198,
"step": 225
},
{
"epoch": 0.36,
"grad_norm": 25.125,
"learning_rate": 0.00017239866640779745,
"loss": 2.8782,
"step": 226
},
{
"epoch": 0.37,
"grad_norm": 50.25,
"learning_rate": 0.0001719805948835515,
"loss": 3.3913,
"step": 227
},
{
"epoch": 0.37,
"grad_norm": 8.8125,
"learning_rate": 0.00017155989607441213,
"loss": 2.4202,
"step": 228
},
{
"epoch": 0.37,
"grad_norm": 398.0,
"learning_rate": 0.00017113658533584594,
"loss": 2.6242,
"step": 229
},
{
"epoch": 0.37,
"grad_norm": 22.0,
"learning_rate": 0.00017071067811865476,
"loss": 2.3384,
"step": 230
},
{
"epoch": 0.37,
"grad_norm": 9.875,
"learning_rate": 0.00017028218996841172,
"loss": 2.4174,
"step": 231
},
{
"epoch": 0.37,
"grad_norm": 8.0625,
"learning_rate": 0.00016985113652489374,
"loss": 2.3576,
"step": 232
},
{
"epoch": 0.38,
"grad_norm": 20.0,
"learning_rate": 0.00016941753352151055,
"loss": 2.7567,
"step": 233
},
{
"epoch": 0.38,
"grad_norm": 19.75,
"learning_rate": 0.00016898139678473076,
"loss": 2.6665,
"step": 234
},
{
"epoch": 0.38,
"grad_norm": 8.125,
"learning_rate": 0.00016854274223350397,
"loss": 2.5973,
"step": 235
},
{
"epoch": 0.38,
"grad_norm": 9.4375,
"learning_rate": 0.00016810158587867973,
"loss": 2.7815,
"step": 236
},
{
"epoch": 0.38,
"grad_norm": 10.25,
"learning_rate": 0.00016765794382242314,
"loss": 2.6786,
"step": 237
},
{
"epoch": 0.38,
"grad_norm": 5.75,
"learning_rate": 0.00016721183225762727,
"loss": 1.876,
"step": 238
},
{
"epoch": 0.39,
"grad_norm": 12.6875,
"learning_rate": 0.00016676326746732195,
"loss": 1.9815,
"step": 239
},
{
"epoch": 0.39,
"grad_norm": 7.40625,
"learning_rate": 0.00016631226582407952,
"loss": 2.2768,
"step": 240
},
{
"epoch": 0.39,
"grad_norm": 9.0,
"learning_rate": 0.00016585884378941725,
"loss": 2.6424,
"step": 241
},
{
"epoch": 0.39,
"grad_norm": 7.8125,
"learning_rate": 0.00016540301791319645,
"loss": 2.2763,
"step": 242
},
{
"epoch": 0.39,
"grad_norm": 9.0,
"learning_rate": 0.00016494480483301836,
"loss": 2.6912,
"step": 243
},
{
"epoch": 0.39,
"grad_norm": 9.25,
"learning_rate": 0.00016448422127361706,
"loss": 2.0026,
"step": 244
},
{
"epoch": 0.4,
"grad_norm": 13.875,
"learning_rate": 0.00016402128404624882,
"loss": 2.4934,
"step": 245
},
{
"epoch": 0.4,
"grad_norm": 7.53125,
"learning_rate": 0.00016355601004807856,
"loss": 2.2699,
"step": 246
},
{
"epoch": 0.4,
"grad_norm": 9.25,
"learning_rate": 0.00016308841626156307,
"loss": 2.2252,
"step": 247
},
{
"epoch": 0.4,
"grad_norm": 8.3125,
"learning_rate": 0.00016261851975383137,
"loss": 2.9339,
"step": 248
},
{
"epoch": 0.4,
"grad_norm": 8.625,
"learning_rate": 0.00016214633767606143,
"loss": 3.2039,
"step": 249
},
{
"epoch": 0.4,
"grad_norm": 6.53125,
"learning_rate": 0.00016167188726285434,
"loss": 2.7348,
"step": 250
},
{
"epoch": 0.4,
"grad_norm": 6.34375,
"learning_rate": 0.0001611951858316052,
"loss": 2.8089,
"step": 251
},
{
"epoch": 0.41,
"grad_norm": 22.75,
"learning_rate": 0.00016071625078187114,
"loss": 3.457,
"step": 252
},
{
"epoch": 0.41,
"grad_norm": 7.875,
"learning_rate": 0.00016023509959473605,
"loss": 2.8804,
"step": 253
},
{
"epoch": 0.41,
"grad_norm": 11.25,
"learning_rate": 0.00015975174983217275,
"loss": 2.3933,
"step": 254
},
{
"epoch": 0.41,
"grad_norm": 6.59375,
"learning_rate": 0.0001592662191364017,
"loss": 2.4153,
"step": 255
},
{
"epoch": 0.41,
"grad_norm": 8.375,
"learning_rate": 0.00015877852522924732,
"loss": 2.9298,
"step": 256
},
{
"epoch": 0.41,
"grad_norm": 9.25,
"learning_rate": 0.00015828868591149104,
"loss": 2.7037,
"step": 257
},
{
"epoch": 0.42,
"grad_norm": 7.34375,
"learning_rate": 0.0001577967190622215,
"loss": 2.8552,
"step": 258
},
{
"epoch": 0.42,
"grad_norm": 83.0,
"learning_rate": 0.00015730264263818212,
"loss": 3.3196,
"step": 259
},
{
"epoch": 0.42,
"grad_norm": 11.3125,
"learning_rate": 0.00015680647467311557,
"loss": 2.6933,
"step": 260
},
{
"epoch": 0.42,
"grad_norm": 9.875,
"learning_rate": 0.00015630823327710558,
"loss": 2.8295,
"step": 261
},
{
"epoch": 0.42,
"grad_norm": 400.0,
"learning_rate": 0.00015580793663591585,
"loss": 7.8334,
"step": 262
},
{
"epoch": 0.42,
"grad_norm": 9.1875,
"learning_rate": 0.0001553056030103264,
"loss": 3.0095,
"step": 263
},
{
"epoch": 0.43,
"grad_norm": 14.25,
"learning_rate": 0.00015480125073546704,
"loss": 2.8319,
"step": 264
},
{
"epoch": 0.43,
"grad_norm": 10.6875,
"learning_rate": 0.0001542948982201479,
"loss": 2.7952,
"step": 265
},
{
"epoch": 0.43,
"grad_norm": 63.5,
"learning_rate": 0.00015378656394618787,
"loss": 3.3161,
"step": 266
},
{
"epoch": 0.43,
"grad_norm": 10.6875,
"learning_rate": 0.00015327626646773976,
"loss": 2.3066,
"step": 267
},
{
"epoch": 0.43,
"grad_norm": 13.4375,
"learning_rate": 0.0001527640244106133,
"loss": 2.9634,
"step": 268
},
{
"epoch": 0.43,
"grad_norm": 9.6875,
"learning_rate": 0.0001522498564715949,
"loss": 2.9318,
"step": 269
},
{
"epoch": 0.44,
"grad_norm": 6.5625,
"learning_rate": 0.00015173378141776568,
"loss": 2.6438,
"step": 270
},
{
"epoch": 0.44,
"grad_norm": 520.0,
"learning_rate": 0.00015121581808581622,
"loss": 10.9186,
"step": 271
},
{
"epoch": 0.44,
"grad_norm": 8.375,
"learning_rate": 0.00015069598538135906,
"loss": 2.6323,
"step": 272
},
{
"epoch": 0.44,
"grad_norm": 8.125,
"learning_rate": 0.00015017430227823864,
"loss": 2.9288,
"step": 273
},
{
"epoch": 0.44,
"grad_norm": 14336.0,
"learning_rate": 0.0001496507878178388,
"loss": 16.6016,
"step": 274
},
{
"epoch": 0.44,
"grad_norm": 11.0,
"learning_rate": 0.00014912546110838775,
"loss": 3.2856,
"step": 275
},
{
"epoch": 0.45,
"grad_norm": 13.0,
"learning_rate": 0.0001485983413242606,
"loss": 2.1977,
"step": 276
},
{
"epoch": 0.45,
"grad_norm": 70.5,
"learning_rate": 0.00014806944770527958,
"loss": 13.3843,
"step": 277
},
{
"epoch": 0.45,
"grad_norm": 11.1875,
"learning_rate": 0.00014753879955601163,
"loss": 3.659,
"step": 278
},
{
"epoch": 0.45,
"grad_norm": 7.3125,
"learning_rate": 0.00014700641624506392,
"loss": 2.1687,
"step": 279
},
{
"epoch": 0.45,
"grad_norm": 10.75,
"learning_rate": 0.00014647231720437686,
"loss": 2.7034,
"step": 280
},
{
"epoch": 0.45,
"grad_norm": 100.0,
"learning_rate": 0.00014593652192851486,
"loss": 8.6095,
"step": 281
},
{
"epoch": 0.45,
"grad_norm": 8.375,
"learning_rate": 0.00014539904997395468,
"loss": 2.0712,
"step": 282
},
{
"epoch": 0.46,
"grad_norm": 414.0,
"learning_rate": 0.00014485992095837177,
"loss": 1.984,
"step": 283
},
{
"epoch": 0.46,
"grad_norm": 6.53125,
"learning_rate": 0.00014431915455992414,
"loss": 2.2412,
"step": 284
},
{
"epoch": 0.46,
"grad_norm": 6.84375,
"learning_rate": 0.00014377677051653404,
"loss": 2.4154,
"step": 285
},
{
"epoch": 0.46,
"grad_norm": 10.1875,
"learning_rate": 0.00014323278862516775,
"loss": 2.6303,
"step": 286
},
{
"epoch": 0.46,
"grad_norm": 9.375,
"learning_rate": 0.00014268722874111265,
"loss": 2.42,
"step": 287
},
{
"epoch": 0.46,
"grad_norm": 9.0625,
"learning_rate": 0.00014214011077725292,
"loss": 2.403,
"step": 288
},
{
"epoch": 0.47,
"grad_norm": 10.125,
"learning_rate": 0.00014159145470334235,
"loss": 2.8993,
"step": 289
},
{
"epoch": 0.47,
"grad_norm": 9.6875,
"learning_rate": 0.0001410412805452757,
"loss": 2.2136,
"step": 290
},
{
"epoch": 0.47,
"grad_norm": 5.1875,
"learning_rate": 0.00014048960838435753,
"loss": 1.9811,
"step": 291
},
{
"epoch": 0.47,
"grad_norm": 7.625,
"learning_rate": 0.00013993645835656953,
"loss": 2.8393,
"step": 292
},
{
"epoch": 0.47,
"grad_norm": 6.875,
"learning_rate": 0.00013938185065183532,
"loss": 2.2938,
"step": 293
},
{
"epoch": 0.47,
"grad_norm": 10.75,
"learning_rate": 0.0001388258055132835,
"loss": 2.9148,
"step": 294
},
{
"epoch": 0.48,
"grad_norm": 7.96875,
"learning_rate": 0.000138268343236509,
"loss": 2.8022,
"step": 295
},
{
"epoch": 0.48,
"grad_norm": 10.125,
"learning_rate": 0.00013770948416883205,
"loss": 3.0303,
"step": 296
},
{
"epoch": 0.48,
"grad_norm": 7.0625,
"learning_rate": 0.00013714924870855571,
"loss": 2.5784,
"step": 297
},
{
"epoch": 0.48,
"grad_norm": 5.9375,
"learning_rate": 0.00013658765730422125,
"loss": 2.2308,
"step": 298
},
{
"epoch": 0.48,
"grad_norm": 13.6875,
"learning_rate": 0.00013602473045386165,
"loss": 2.5253,
"step": 299
},
{
"epoch": 0.48,
"grad_norm": 14.9375,
"learning_rate": 0.00013546048870425356,
"loss": 2.809,
"step": 300
},
{
"epoch": 0.49,
"grad_norm": 11.375,
"learning_rate": 0.0001348949526501675,
"loss": 3.2099,
"step": 301
},
{
"epoch": 0.49,
"grad_norm": 14.75,
"learning_rate": 0.00013432814293361584,
"loss": 2.0988,
"step": 302
},
{
"epoch": 0.49,
"grad_norm": 106.5,
"learning_rate": 0.00013376008024309948,
"loss": 2.4118,
"step": 303
},
{
"epoch": 0.49,
"grad_norm": 9.5625,
"learning_rate": 0.00013319078531285285,
"loss": 3.1705,
"step": 304
},
{
"epoch": 0.49,
"grad_norm": 7.5,
"learning_rate": 0.00013262027892208694,
"loss": 2.5198,
"step": 305
},
{
"epoch": 0.49,
"grad_norm": 8.0625,
"learning_rate": 0.00013204858189423097,
"loss": 2.875,
"step": 306
},
{
"epoch": 0.5,
"grad_norm": 10.6875,
"learning_rate": 0.00013147571509617228,
"loss": 2.9605,
"step": 307
},
{
"epoch": 0.5,
"grad_norm": 6.3125,
"learning_rate": 0.00013090169943749476,
"loss": 2.2224,
"step": 308
},
{
"epoch": 0.5,
"grad_norm": 6.1875,
"learning_rate": 0.00013032655586971552,
"loss": 2.577,
"step": 309
},
{
"epoch": 0.5,
"grad_norm": 14.25,
"learning_rate": 0.00012975030538552032,
"loss": 2.9887,
"step": 310
},
{
"epoch": 0.5,
"eval_loss": 2.833022356033325,
"eval_runtime": 1.7613,
"eval_samples_per_second": 19.304,
"eval_steps_per_second": 19.304,
"step": 310
},
{
"epoch": 0.5,
"grad_norm": 7.90625,
"learning_rate": 0.0001291729690179972,
"loss": 2.6105,
"step": 311
},
{
"epoch": 0.5,
"grad_norm": 25.125,
"learning_rate": 0.00012859456783986893,
"loss": 2.7618,
"step": 312
},
{
"epoch": 0.5,
"grad_norm": 8.0,
"learning_rate": 0.00012801512296272368,
"loss": 3.0351,
"step": 313
},
{
"epoch": 0.51,
"grad_norm": 6.1875,
"learning_rate": 0.0001274346555362446,
"loss": 2.5821,
"step": 314
},
{
"epoch": 0.51,
"grad_norm": 8.375,
"learning_rate": 0.0001268531867474377,
"loss": 2.5642,
"step": 315
},
{
"epoch": 0.51,
"grad_norm": 24.5,
"learning_rate": 0.0001262707378198587,
"loss": 2.1907,
"step": 316
},
{
"epoch": 0.51,
"grad_norm": 5.75,
"learning_rate": 0.00012568733001283827,
"loss": 2.3074,
"step": 317
},
{
"epoch": 0.51,
"grad_norm": 11.25,
"learning_rate": 0.00012510298462070619,
"loss": 2.5847,
"step": 318
},
{
"epoch": 0.51,
"grad_norm": 8.375,
"learning_rate": 0.00012451772297201376,
"loss": 2.3077,
"step": 319
},
{
"epoch": 0.52,
"grad_norm": 6.15625,
"learning_rate": 0.0001239315664287558,
"loss": 2.4337,
"step": 320
},
{
"epoch": 0.52,
"grad_norm": 8.8125,
"learning_rate": 0.00012334453638559057,
"loss": 2.3457,
"step": 321
},
{
"epoch": 0.52,
"grad_norm": 6.34375,
"learning_rate": 0.000122756654269059,
"loss": 2.7569,
"step": 322
},
{
"epoch": 0.52,
"grad_norm": 7.84375,
"learning_rate": 0.00012216794153680274,
"loss": 2.8255,
"step": 323
},
{
"epoch": 0.52,
"grad_norm": 6.875,
"learning_rate": 0.00012157841967678063,
"loss": 2.4809,
"step": 324
},
{
"epoch": 0.52,
"grad_norm": 8.375,
"learning_rate": 0.00012098811020648475,
"loss": 2.7188,
"step": 325
},
{
"epoch": 0.53,
"grad_norm": 5.9375,
"learning_rate": 0.00012039703467215488,
"loss": 2.7395,
"step": 326
},
{
"epoch": 0.53,
"grad_norm": 6.9375,
"learning_rate": 0.00011980521464799198,
"loss": 2.5989,
"step": 327
},
{
"epoch": 0.53,
"grad_norm": 6.90625,
"learning_rate": 0.00011921267173537086,
"loss": 2.5155,
"step": 328
},
{
"epoch": 0.53,
"grad_norm": 7.40625,
"learning_rate": 0.00011861942756205169,
"loss": 2.4556,
"step": 329
},
{
"epoch": 0.53,
"grad_norm": 6.96875,
"learning_rate": 0.0001180255037813906,
"loss": 2.2609,
"step": 330
},
{
"epoch": 0.53,
"grad_norm": 9.0625,
"learning_rate": 0.00011743092207154929,
"loss": 2.6818,
"step": 331
},
{
"epoch": 0.54,
"grad_norm": 6.0625,
"learning_rate": 0.00011683570413470383,
"loss": 2.5213,
"step": 332
},
{
"epoch": 0.54,
"grad_norm": 6.15625,
"learning_rate": 0.00011623987169625261,
"loss": 2.513,
"step": 333
},
{
"epoch": 0.54,
"grad_norm": 6.65625,
"learning_rate": 0.0001156434465040231,
"loss": 2.8551,
"step": 334
},
{
"epoch": 0.54,
"grad_norm": 5.65625,
"learning_rate": 0.00011504645032747832,
"loss": 2.336,
"step": 335
},
{
"epoch": 0.54,
"grad_norm": 6.5,
"learning_rate": 0.00011444890495692213,
"loss": 2.4159,
"step": 336
},
{
"epoch": 0.54,
"grad_norm": 7.59375,
"learning_rate": 0.00011385083220270401,
"loss": 2.8159,
"step": 337
},
{
"epoch": 0.55,
"grad_norm": 8.6875,
"learning_rate": 0.00011325225389442277,
"loss": 3.1515,
"step": 338
},
{
"epoch": 0.55,
"grad_norm": 5.9375,
"learning_rate": 0.00011265319188012994,
"loss": 2.5154,
"step": 339
},
{
"epoch": 0.55,
"grad_norm": 9.1875,
"learning_rate": 0.0001120536680255323,
"loss": 2.9747,
"step": 340
},
{
"epoch": 0.55,
"grad_norm": 5.75,
"learning_rate": 0.00011145370421319377,
"loss": 2.2161,
"step": 341
},
{
"epoch": 0.55,
"grad_norm": 5.0,
"learning_rate": 0.00011085332234173664,
"loss": 2.8727,
"step": 342
},
{
"epoch": 0.55,
"grad_norm": 6.5,
"learning_rate": 0.00011025254432504233,
"loss": 2.5946,
"step": 343
},
{
"epoch": 0.55,
"grad_norm": 12.5625,
"learning_rate": 0.00010965139209145152,
"loss": 2.898,
"step": 344
},
{
"epoch": 0.56,
"grad_norm": 5.15625,
"learning_rate": 0.0001090498875829638,
"loss": 2.4455,
"step": 345
},
{
"epoch": 0.56,
"grad_norm": 6.25,
"learning_rate": 0.00010844805275443673,
"loss": 2.4731,
"step": 346
},
{
"epoch": 0.56,
"grad_norm": 5.71875,
"learning_rate": 0.0001078459095727845,
"loss": 2.1894,
"step": 347
},
{
"epoch": 0.56,
"grad_norm": 13.125,
"learning_rate": 0.00010724348001617625,
"loss": 2.7624,
"step": 348
},
{
"epoch": 0.56,
"grad_norm": 35.5,
"learning_rate": 0.00010664078607323367,
"loss": 2.284,
"step": 349
},
{
"epoch": 0.56,
"grad_norm": 6.46875,
"learning_rate": 0.00010603784974222861,
"loss": 2.2169,
"step": 350
},
{
"epoch": 0.57,
"grad_norm": 10.875,
"learning_rate": 0.00010543469303028002,
"loss": 2.6135,
"step": 351
},
{
"epoch": 0.57,
"grad_norm": 5.78125,
"learning_rate": 0.00010483133795255071,
"loss": 2.6733,
"step": 352
},
{
"epoch": 0.57,
"grad_norm": 5.875,
"learning_rate": 0.0001042278065314439,
"loss": 2.2963,
"step": 353
},
{
"epoch": 0.57,
"grad_norm": 5.625,
"learning_rate": 0.00010362412079579924,
"loss": 2.2819,
"step": 354
},
{
"epoch": 0.57,
"grad_norm": 6.0,
"learning_rate": 0.0001030203027800889,
"loss": 2.7232,
"step": 355
},
{
"epoch": 0.57,
"grad_norm": 17.875,
"learning_rate": 0.00010241637452361323,
"loss": 2.8421,
"step": 356
},
{
"epoch": 0.58,
"grad_norm": 24.875,
"learning_rate": 0.0001018123580696964,
"loss": 2.554,
"step": 357
},
{
"epoch": 0.58,
"grad_norm": 5.5,
"learning_rate": 0.00010120827546488174,
"loss": 2.4021,
"step": 358
},
{
"epoch": 0.58,
"grad_norm": 5.09375,
"learning_rate": 0.00010060414875812709,
"loss": 2.0252,
"step": 359
},
{
"epoch": 0.58,
"grad_norm": 11.125,
"learning_rate": 0.0001,
"loss": 2.2328,
"step": 360
},
{
"epoch": 0.58,
"grad_norm": 6.875,
"learning_rate": 9.939585124187292e-05,
"loss": 2.6733,
"step": 361
},
{
"epoch": 0.58,
"grad_norm": 4.59375,
"learning_rate": 9.879172453511827e-05,
"loss": 2.2601,
"step": 362
},
{
"epoch": 0.59,
"grad_norm": 7.09375,
"learning_rate": 9.818764193030363e-05,
"loss": 2.1966,
"step": 363
},
{
"epoch": 0.59,
"grad_norm": 33.25,
"learning_rate": 9.75836254763868e-05,
"loss": 2.4876,
"step": 364
},
{
"epoch": 0.59,
"grad_norm": 15.75,
"learning_rate": 9.697969721991114e-05,
"loss": 2.2825,
"step": 365
},
{
"epoch": 0.59,
"grad_norm": 6.53125,
"learning_rate": 9.63758792042008e-05,
"loss": 2.8742,
"step": 366
},
{
"epoch": 0.59,
"grad_norm": 7.0,
"learning_rate": 9.577219346855613e-05,
"loss": 2.3392,
"step": 367
},
{
"epoch": 0.59,
"grad_norm": 7.4375,
"learning_rate": 9.516866204744931e-05,
"loss": 2.4111,
"step": 368
},
{
"epoch": 0.6,
"grad_norm": 7.71875,
"learning_rate": 9.456530696971999e-05,
"loss": 2.2599,
"step": 369
},
{
"epoch": 0.6,
"grad_norm": 5.71875,
"learning_rate": 9.396215025777139e-05,
"loss": 2.8202,
"step": 370
},
{
"epoch": 0.6,
"grad_norm": 4.6875,
"learning_rate": 9.335921392676631e-05,
"loss": 2.3614,
"step": 371
},
{
"epoch": 0.6,
"grad_norm": 5.21875,
"learning_rate": 9.275651998382377e-05,
"loss": 1.7052,
"step": 372
},
{
"epoch": 0.6,
"grad_norm": 6.15625,
"learning_rate": 9.215409042721552e-05,
"loss": 1.8829,
"step": 373
},
{
"epoch": 0.6,
"grad_norm": 5.0625,
"learning_rate": 9.155194724556331e-05,
"loss": 2.2968,
"step": 374
},
{
"epoch": 0.6,
"grad_norm": 5.4375,
"learning_rate": 9.095011241703623e-05,
"loss": 2.1253,
"step": 375
},
{
"epoch": 0.61,
"grad_norm": 5.125,
"learning_rate": 9.034860790854849e-05,
"loss": 2.3356,
"step": 376
},
{
"epoch": 0.61,
"grad_norm": 6.46875,
"learning_rate": 8.974745567495768e-05,
"loss": 2.1902,
"step": 377
},
{
"epoch": 0.61,
"grad_norm": 8.1875,
"learning_rate": 8.914667765826338e-05,
"loss": 2.7202,
"step": 378
},
{
"epoch": 0.61,
"grad_norm": 7.8125,
"learning_rate": 8.854629578680624e-05,
"loss": 2.4657,
"step": 379
},
{
"epoch": 0.61,
"grad_norm": 9.9375,
"learning_rate": 8.79463319744677e-05,
"loss": 2.5267,
"step": 380
},
{
"epoch": 0.61,
"grad_norm": 5.21875,
"learning_rate": 8.73468081198701e-05,
"loss": 2.4513,
"step": 381
},
{
"epoch": 0.62,
"grad_norm": 5.6875,
"learning_rate": 8.674774610557728e-05,
"loss": 2.2212,
"step": 382
},
{
"epoch": 0.62,
"grad_norm": 6.0,
"learning_rate": 8.614916779729603e-05,
"loss": 2.2433,
"step": 383
},
{
"epoch": 0.62,
"grad_norm": 5.15625,
"learning_rate": 8.55510950430779e-05,
"loss": 2.5887,
"step": 384
},
{
"epoch": 0.62,
"grad_norm": 6.28125,
"learning_rate": 8.495354967252169e-05,
"loss": 1.5424,
"step": 385
},
{
"epoch": 0.62,
"grad_norm": 5.34375,
"learning_rate": 8.435655349597689e-05,
"loss": 2.4985,
"step": 386
},
{
"epoch": 0.62,
"grad_norm": 7.0,
"learning_rate": 8.37601283037474e-05,
"loss": 3.0591,
"step": 387
},
{
"epoch": 0.63,
"grad_norm": 5.25,
"learning_rate": 8.316429586529615e-05,
"loss": 2.5296,
"step": 388
},
{
"epoch": 0.63,
"grad_norm": 4.71875,
"learning_rate": 8.256907792845072e-05,
"loss": 2.4503,
"step": 389
},
{
"epoch": 0.63,
"grad_norm": 5.53125,
"learning_rate": 8.197449621860943e-05,
"loss": 1.951,
"step": 390
},
{
"epoch": 0.63,
"grad_norm": 7.0,
"learning_rate": 8.138057243794833e-05,
"loss": 2.2415,
"step": 391
},
{
"epoch": 0.63,
"grad_norm": 6.53125,
"learning_rate": 8.078732826462915e-05,
"loss": 3.0526,
"step": 392
},
{
"epoch": 0.63,
"grad_norm": 4.40625,
"learning_rate": 8.019478535200806e-05,
"loss": 2.0695,
"step": 393
},
{
"epoch": 0.64,
"grad_norm": 4.78125,
"learning_rate": 7.960296532784515e-05,
"loss": 1.9639,
"step": 394
},
{
"epoch": 0.64,
"grad_norm": 5.09375,
"learning_rate": 7.901188979351526e-05,
"loss": 2.5458,
"step": 395
},
{
"epoch": 0.64,
"grad_norm": 5.875,
"learning_rate": 7.84215803232194e-05,
"loss": 2.6561,
"step": 396
},
{
"epoch": 0.64,
"grad_norm": 4.09375,
"learning_rate": 7.78320584631973e-05,
"loss": 1.9434,
"step": 397
},
{
"epoch": 0.64,
"grad_norm": 5.375,
"learning_rate": 7.7243345730941e-05,
"loss": 2.291,
"step": 398
},
{
"epoch": 0.64,
"grad_norm": 6.09375,
"learning_rate": 7.66554636144095e-05,
"loss": 1.9882,
"step": 399
},
{
"epoch": 0.65,
"grad_norm": 6.40625,
"learning_rate": 7.606843357124426e-05,
"loss": 2.5872,
"step": 400
},
{
"epoch": 0.65,
"grad_norm": 4.5625,
"learning_rate": 7.548227702798624e-05,
"loss": 2.3354,
"step": 401
},
{
"epoch": 0.65,
"grad_norm": 10.6875,
"learning_rate": 7.489701537929384e-05,
"loss": 2.9243,
"step": 402
},
{
"epoch": 0.65,
"grad_norm": 4.875,
"learning_rate": 7.431266998716171e-05,
"loss": 2.5853,
"step": 403
},
{
"epoch": 0.65,
"grad_norm": 10.9375,
"learning_rate": 7.372926218014131e-05,
"loss": 2.3285,
"step": 404
},
{
"epoch": 0.65,
"grad_norm": 4.34375,
"learning_rate": 7.314681325256232e-05,
"loss": 2.222,
"step": 405
},
{
"epoch": 0.65,
"grad_norm": 4.28125,
"learning_rate": 7.256534446375542e-05,
"loss": 2.165,
"step": 406
},
{
"epoch": 0.66,
"grad_norm": 5.59375,
"learning_rate": 7.198487703727632e-05,
"loss": 2.1,
"step": 407
},
{
"epoch": 0.66,
"grad_norm": 6.75,
"learning_rate": 7.14054321601311e-05,
"loss": 2.7438,
"step": 408
},
{
"epoch": 0.66,
"grad_norm": 5.1875,
"learning_rate": 7.082703098200282e-05,
"loss": 1.7364,
"step": 409
},
{
"epoch": 0.66,
"grad_norm": 8.3125,
"learning_rate": 7.024969461447972e-05,
"loss": 3.0458,
"step": 410
},
{
"epoch": 0.66,
"grad_norm": 4.71875,
"learning_rate": 6.967344413028452e-05,
"loss": 2.2354,
"step": 411
},
{
"epoch": 0.66,
"grad_norm": 5.90625,
"learning_rate": 6.909830056250527e-05,
"loss": 2.6881,
"step": 412
},
{
"epoch": 0.67,
"grad_norm": 141.0,
"learning_rate": 6.852428490382773e-05,
"loss": 10.2024,
"step": 413
},
{
"epoch": 0.67,
"grad_norm": 35.5,
"learning_rate": 6.795141810576906e-05,
"loss": 2.0233,
"step": 414
},
{
"epoch": 0.67,
"grad_norm": 5.625,
"learning_rate": 6.73797210779131e-05,
"loss": 2.0324,
"step": 415
},
{
"epoch": 0.67,
"grad_norm": 4.59375,
"learning_rate": 6.680921468714719e-05,
"loss": 1.9068,
"step": 416
},
{
"epoch": 0.67,
"grad_norm": 5.90625,
"learning_rate": 6.623991975690051e-05,
"loss": 2.4164,
"step": 417
},
{
"epoch": 0.67,
"grad_norm": 5.46875,
"learning_rate": 6.567185706638417e-05,
"loss": 2.4648,
"step": 418
},
{
"epoch": 0.68,
"grad_norm": 4.0625,
"learning_rate": 6.510504734983249e-05,
"loss": 1.9487,
"step": 419
},
{
"epoch": 0.68,
"grad_norm": 4.03125,
"learning_rate": 6.453951129574644e-05,
"loss": 1.5947,
"step": 420
},
{
"epoch": 0.68,
"grad_norm": 4.21875,
"learning_rate": 6.397526954613839e-05,
"loss": 2.0581,
"step": 421
},
{
"epoch": 0.68,
"grad_norm": 4.78125,
"learning_rate": 6.341234269577879e-05,
"loss": 2.4201,
"step": 422
},
{
"epoch": 0.68,
"grad_norm": 38.25,
"learning_rate": 6.285075129144428e-05,
"loss": 9.8769,
"step": 423
},
{
"epoch": 0.68,
"grad_norm": 5.125,
"learning_rate": 6.229051583116796e-05,
"loss": 2.0862,
"step": 424
},
{
"epoch": 0.69,
"grad_norm": 4.875,
"learning_rate": 6.173165676349103e-05,
"loss": 2.2542,
"step": 425
},
{
"epoch": 0.69,
"grad_norm": 5.15625,
"learning_rate": 6.117419448671651e-05,
"loss": 1.8006,
"step": 426
},
{
"epoch": 0.69,
"grad_norm": 9.4375,
"learning_rate": 6.0618149348164696e-05,
"loss": 2.324,
"step": 427
},
{
"epoch": 0.69,
"grad_norm": 4.21875,
"learning_rate": 6.006354164343046e-05,
"loss": 2.3711,
"step": 428
},
{
"epoch": 0.69,
"grad_norm": 4.21875,
"learning_rate": 5.9510391615642466e-05,
"loss": 2.1159,
"step": 429
},
{
"epoch": 0.69,
"grad_norm": 4.3125,
"learning_rate": 5.8958719454724346e-05,
"loss": 2.1294,
"step": 430
},
{
"epoch": 0.7,
"grad_norm": 37.25,
"learning_rate": 5.840854529665767e-05,
"loss": 1.9822,
"step": 431
},
{
"epoch": 0.7,
"grad_norm": 31.875,
"learning_rate": 5.785988922274711e-05,
"loss": 2.5124,
"step": 432
},
{
"epoch": 0.7,
"grad_norm": 5.375,
"learning_rate": 5.7312771258887386e-05,
"loss": 2.2204,
"step": 433
},
{
"epoch": 0.7,
"grad_norm": 4.40625,
"learning_rate": 5.676721137483225e-05,
"loss": 2.1498,
"step": 434
},
{
"epoch": 0.7,
"grad_norm": 4.9375,
"learning_rate": 5.622322948346594e-05,
"loss": 2.4613,
"step": 435
},
{
"epoch": 0.7,
"grad_norm": 5.6875,
"learning_rate": 5.568084544007588e-05,
"loss": 2.8178,
"step": 436
},
{
"epoch": 0.7,
"grad_norm": 6.15625,
"learning_rate": 5.5140079041628214e-05,
"loss": 2.6615,
"step": 437
},
{
"epoch": 0.71,
"grad_norm": 2960.0,
"learning_rate": 5.4600950026045326e-05,
"loss": 4.0852,
"step": 438
},
{
"epoch": 0.71,
"grad_norm": 6.0,
"learning_rate": 5.406347807148515e-05,
"loss": 2.5659,
"step": 439
},
{
"epoch": 0.71,
"grad_norm": 4.15625,
"learning_rate": 5.3527682795623146e-05,
"loss": 2.181,
"step": 440
},
{
"epoch": 0.71,
"grad_norm": 4.15625,
"learning_rate": 5.2993583754936126e-05,
"loss": 1.9782,
"step": 441
},
{
"epoch": 0.71,
"grad_norm": 4.34375,
"learning_rate": 5.246120044398839e-05,
"loss": 2.0542,
"step": 442
},
{
"epoch": 0.71,
"grad_norm": 4.25,
"learning_rate": 5.193055229472045e-05,
"loss": 2.3066,
"step": 443
},
{
"epoch": 0.72,
"grad_norm": 4.53125,
"learning_rate": 5.14016586757394e-05,
"loss": 2.1146,
"step": 444
},
{
"epoch": 0.72,
"grad_norm": 5.125,
"learning_rate": 5.087453889161229e-05,
"loss": 2.7877,
"step": 445
},
{
"epoch": 0.72,
"grad_norm": 4.90625,
"learning_rate": 5.0349212182161254e-05,
"loss": 2.4127,
"step": 446
},
{
"epoch": 0.72,
"grad_norm": 5.34375,
"learning_rate": 4.98256977217614e-05,
"loss": 2.1602,
"step": 447
},
{
"epoch": 0.72,
"grad_norm": 4.0625,
"learning_rate": 4.9304014618640995e-05,
"loss": 1.8885,
"step": 448
},
{
"epoch": 0.72,
"grad_norm": 28.75,
"learning_rate": 4.87841819141838e-05,
"loss": 8.8349,
"step": 449
},
{
"epoch": 0.73,
"grad_norm": 3.828125,
"learning_rate": 4.826621858223431e-05,
"loss": 1.738,
"step": 450
},
{
"epoch": 0.73,
"grad_norm": 5.25,
"learning_rate": 4.7750143528405126e-05,
"loss": 2.5058,
"step": 451
},
{
"epoch": 0.73,
"grad_norm": 4.0625,
"learning_rate": 4.723597558938672e-05,
"loss": 2.0976,
"step": 452
},
{
"epoch": 0.73,
"grad_norm": 4.53125,
"learning_rate": 4.672373353226023e-05,
"loss": 2.1503,
"step": 453
},
{
"epoch": 0.73,
"grad_norm": 4.03125,
"learning_rate": 4.6213436053812144e-05,
"loss": 1.9976,
"step": 454
},
{
"epoch": 0.73,
"grad_norm": 10.1875,
"learning_rate": 4.5705101779852135e-05,
"loss": 3.1302,
"step": 455
},
{
"epoch": 0.74,
"grad_norm": 27.875,
"learning_rate": 4.519874926453302e-05,
"loss": 8.4862,
"step": 456
},
{
"epoch": 0.74,
"grad_norm": 5.75,
"learning_rate": 4.469439698967359e-05,
"loss": 1.7939,
"step": 457
},
{
"epoch": 0.74,
"grad_norm": 13.125,
"learning_rate": 4.419206336408418e-05,
"loss": 2.2682,
"step": 458
},
{
"epoch": 0.74,
"grad_norm": 5.1875,
"learning_rate": 4.3691766722894435e-05,
"loss": 2.3877,
"step": 459
},
{
"epoch": 0.74,
"grad_norm": 5.5,
"learning_rate": 4.3193525326884435e-05,
"loss": 2.703,
"step": 460
},
{
"epoch": 0.74,
"grad_norm": 5.15625,
"learning_rate": 4.26973573618179e-05,
"loss": 2.3439,
"step": 461
},
{
"epoch": 0.75,
"grad_norm": 5.125,
"learning_rate": 4.220328093777851e-05,
"loss": 2.6788,
"step": 462
},
{
"epoch": 0.75,
"grad_norm": 4.3125,
"learning_rate": 4.1711314088509e-05,
"loss": 2.2618,
"step": 463
},
{
"epoch": 0.75,
"grad_norm": 4.4375,
"learning_rate": 4.12214747707527e-05,
"loss": 2.3582,
"step": 464
},
{
"epoch": 0.75,
"grad_norm": 4.78125,
"learning_rate": 4.0733780863598335e-05,
"loss": 2.6398,
"step": 465
},
{
"epoch": 0.75,
"eval_loss": 2.7038376331329346,
"eval_runtime": 1.7504,
"eval_samples_per_second": 19.424,
"eval_steps_per_second": 19.424,
"step": 465
},
{
"epoch": 0.75,
"grad_norm": 5.21875,
"learning_rate": 4.0248250167827275e-05,
"loss": 2.1894,
"step": 466
},
{
"epoch": 0.75,
"grad_norm": 11.0625,
"learning_rate": 3.976490040526394e-05,
"loss": 2.2171,
"step": 467
},
{
"epoch": 0.75,
"grad_norm": 4.5,
"learning_rate": 3.9283749218128885e-05,
"loss": 2.514,
"step": 468
},
{
"epoch": 0.76,
"grad_norm": 75.5,
"learning_rate": 3.88048141683948e-05,
"loss": 2.8149,
"step": 469
},
{
"epoch": 0.76,
"grad_norm": 11.0625,
"learning_rate": 3.832811273714569e-05,
"loss": 2.3044,
"step": 470
},
{
"epoch": 0.76,
"grad_norm": 5.5625,
"learning_rate": 3.785366232393861e-05,
"loss": 2.2342,
"step": 471
},
{
"epoch": 0.76,
"grad_norm": 4.34375,
"learning_rate": 3.738148024616863e-05,
"loss": 2.2901,
"step": 472
},
{
"epoch": 0.76,
"grad_norm": 4.46875,
"learning_rate": 3.691158373843694e-05,
"loss": 2.0687,
"step": 473
},
{
"epoch": 0.76,
"grad_norm": 5.90625,
"learning_rate": 3.644398995192147e-05,
"loss": 2.7686,
"step": 474
},
{
"epoch": 0.77,
"grad_norm": 4.03125,
"learning_rate": 3.597871595375121e-05,
"loss": 2.3497,
"step": 475
},
{
"epoch": 0.77,
"grad_norm": 3.984375,
"learning_rate": 3.5515778726382966e-05,
"loss": 2.0393,
"step": 476
},
{
"epoch": 0.77,
"grad_norm": 41.0,
"learning_rate": 3.5055195166981645e-05,
"loss": 2.5633,
"step": 477
},
{
"epoch": 0.77,
"grad_norm": 5.375,
"learning_rate": 3.459698208680359e-05,
"loss": 2.3496,
"step": 478
},
{
"epoch": 0.77,
"grad_norm": 10.625,
"learning_rate": 3.4141156210582756e-05,
"loss": 4.4862,
"step": 479
},
{
"epoch": 0.77,
"grad_norm": 6.28125,
"learning_rate": 3.36877341759205e-05,
"loss": 2.6964,
"step": 480
},
{
"epoch": 0.78,
"grad_norm": 4.28125,
"learning_rate": 3.3236732532678096e-05,
"loss": 2.3155,
"step": 481
},
{
"epoch": 0.78,
"grad_norm": 4.5,
"learning_rate": 3.2788167742372725e-05,
"loss": 2.3611,
"step": 482
},
{
"epoch": 0.78,
"grad_norm": 3.9375,
"learning_rate": 3.234205617757686e-05,
"loss": 1.846,
"step": 483
},
{
"epoch": 0.78,
"grad_norm": 4.09375,
"learning_rate": 3.1898414121320276e-05,
"loss": 1.8453,
"step": 484
},
{
"epoch": 0.78,
"grad_norm": 107.0,
"learning_rate": 3.1457257766496015e-05,
"loss": 2.3292,
"step": 485
},
{
"epoch": 0.78,
"grad_norm": 5.90625,
"learning_rate": 3.101860321526924e-05,
"loss": 2.655,
"step": 486
},
{
"epoch": 0.79,
"grad_norm": 5.0625,
"learning_rate": 3.0582466478489455e-05,
"loss": 2.2701,
"step": 487
},
{
"epoch": 0.79,
"grad_norm": 4.625,
"learning_rate": 3.0148863475106314e-05,
"loss": 2.1766,
"step": 488
},
{
"epoch": 0.79,
"grad_norm": 4.96875,
"learning_rate": 2.9717810031588277e-05,
"loss": 2.3862,
"step": 489
},
{
"epoch": 0.79,
"grad_norm": 4.6875,
"learning_rate": 2.9289321881345254e-05,
"loss": 2.2309,
"step": 490
},
{
"epoch": 0.79,
"grad_norm": 5.9375,
"learning_rate": 2.886341466415412e-05,
"loss": 2.2681,
"step": 491
},
{
"epoch": 0.79,
"grad_norm": 4.5625,
"learning_rate": 2.84401039255879e-05,
"loss": 1.7952,
"step": 492
},
{
"epoch": 0.8,
"grad_norm": 4.59375,
"learning_rate": 2.8019405116448516e-05,
"loss": 2.6097,
"step": 493
},
{
"epoch": 0.8,
"grad_norm": 3.65625,
"learning_rate": 2.7601333592202583e-05,
"loss": 1.8333,
"step": 494
},
{
"epoch": 0.8,
"grad_norm": 4.5625,
"learning_rate": 2.7185904612421176e-05,
"loss": 1.6016,
"step": 495
},
{
"epoch": 0.8,
"grad_norm": 5.1875,
"learning_rate": 2.677313334022268e-05,
"loss": 2.4065,
"step": 496
},
{
"epoch": 0.8,
"grad_norm": 4.34375,
"learning_rate": 2.6363034841719392e-05,
"loss": 2.3659,
"step": 497
},
{
"epoch": 0.8,
"grad_norm": 4.5,
"learning_rate": 2.59556240854677e-05,
"loss": 2.1504,
"step": 498
},
{
"epoch": 0.8,
"grad_norm": 4.375,
"learning_rate": 2.5550915941921526e-05,
"loss": 2.4407,
"step": 499
},
{
"epoch": 0.81,
"grad_norm": 3.71875,
"learning_rate": 2.514892518288988e-05,
"loss": 1.938,
"step": 500
},
{
"epoch": 0.81,
"grad_norm": 4.28125,
"learning_rate": 2.4749666480997337e-05,
"loss": 1.6936,
"step": 501
},
{
"epoch": 0.81,
"grad_norm": 4.40625,
"learning_rate": 2.4353154409148637e-05,
"loss": 2.4835,
"step": 502
},
{
"epoch": 0.81,
"grad_norm": 4.25,
"learning_rate": 2.3959403439996907e-05,
"loss": 2.2736,
"step": 503
},
{
"epoch": 0.81,
"grad_norm": 54.75,
"learning_rate": 2.356842794541516e-05,
"loss": 1.9168,
"step": 504
},
{
"epoch": 0.81,
"grad_norm": 5.46875,
"learning_rate": 2.318024219597196e-05,
"loss": 2.3022,
"step": 505
},
{
"epoch": 0.82,
"grad_norm": 3.609375,
"learning_rate": 2.2794860360410342e-05,
"loss": 1.7,
"step": 506
},
{
"epoch": 0.82,
"grad_norm": 9.6875,
"learning_rate": 2.241229650513077e-05,
"loss": 1.8489,
"step": 507
},
{
"epoch": 0.82,
"grad_norm": 3.953125,
"learning_rate": 2.2032564593677774e-05,
"loss": 2.0878,
"step": 508
},
{
"epoch": 0.82,
"grad_norm": 4.0,
"learning_rate": 2.165567848623009e-05,
"loss": 2.169,
"step": 509
},
{
"epoch": 0.82,
"grad_norm": 4.4375,
"learning_rate": 2.1281651939094992e-05,
"loss": 2.2711,
"step": 510
},
{
"epoch": 0.82,
"grad_norm": 4.40625,
"learning_rate": 2.0910498604205986e-05,
"loss": 2.1268,
"step": 511
},
{
"epoch": 0.83,
"grad_norm": 3.6875,
"learning_rate": 2.0542232028624586e-05,
"loss": 1.8266,
"step": 512
},
{
"epoch": 0.83,
"grad_norm": 6.25,
"learning_rate": 2.0176865654045974e-05,
"loss": 2.7551,
"step": 513
},
{
"epoch": 0.83,
"grad_norm": 4.1875,
"learning_rate": 1.981441281630816e-05,
"loss": 2.5119,
"step": 514
},
{
"epoch": 0.83,
"grad_norm": 14.9375,
"learning_rate": 1.94548867449054e-05,
"loss": 2.2502,
"step": 515
},
{
"epoch": 0.83,
"grad_norm": 6.375,
"learning_rate": 1.9098300562505266e-05,
"loss": 1.5596,
"step": 516
},
{
"epoch": 0.83,
"grad_norm": 4.59375,
"learning_rate": 1.8744667284469575e-05,
"loss": 2.0902,
"step": 517
},
{
"epoch": 0.84,
"grad_norm": 5.6875,
"learning_rate": 1.8393999818379525e-05,
"loss": 2.5071,
"step": 518
},
{
"epoch": 0.84,
"grad_norm": 5.09375,
"learning_rate": 1.804631096356435e-05,
"loss": 1.9177,
"step": 519
},
{
"epoch": 0.84,
"grad_norm": 4.21875,
"learning_rate": 1.7701613410634365e-05,
"loss": 1.7456,
"step": 520
},
{
"epoch": 0.84,
"grad_norm": 9.3125,
"learning_rate": 1.735991974101756e-05,
"loss": 3.7113,
"step": 521
},
{
"epoch": 0.84,
"grad_norm": 3.90625,
"learning_rate": 1.7021242426500493e-05,
"loss": 1.8664,
"step": 522
},
{
"epoch": 0.84,
"grad_norm": 14.6875,
"learning_rate": 1.6685593828773095e-05,
"loss": 8.2793,
"step": 523
},
{
"epoch": 0.85,
"grad_norm": 4.3125,
"learning_rate": 1.6352986198977325e-05,
"loss": 2.4292,
"step": 524
},
{
"epoch": 0.85,
"grad_norm": 3.84375,
"learning_rate": 1.6023431677260214e-05,
"loss": 1.657,
"step": 525
},
{
"epoch": 0.85,
"grad_norm": 5.125,
"learning_rate": 1.5696942292330576e-05,
"loss": 2.4565,
"step": 526
},
{
"epoch": 0.85,
"grad_norm": 4.6875,
"learning_rate": 1.5373529961019974e-05,
"loss": 2.3324,
"step": 527
},
{
"epoch": 0.85,
"grad_norm": 4.90625,
"learning_rate": 1.5053206487847914e-05,
"loss": 2.8164,
"step": 528
},
{
"epoch": 0.85,
"grad_norm": 4.65625,
"learning_rate": 1.4735983564590783e-05,
"loss": 2.2615,
"step": 529
},
{
"epoch": 0.85,
"grad_norm": 4.4375,
"learning_rate": 1.442187276985526e-05,
"loss": 2.0414,
"step": 530
},
{
"epoch": 0.86,
"grad_norm": 6.59375,
"learning_rate": 1.4110885568655564e-05,
"loss": 2.8476,
"step": 531
},
{
"epoch": 0.86,
"grad_norm": 3.828125,
"learning_rate": 1.3803033311995072e-05,
"loss": 1.8397,
"step": 532
},
{
"epoch": 0.86,
"grad_norm": 7.25,
"learning_rate": 1.3498327236452013e-05,
"loss": 2.8825,
"step": 533
},
{
"epoch": 0.86,
"grad_norm": 4.6875,
"learning_rate": 1.3196778463769255e-05,
"loss": 2.5735,
"step": 534
},
{
"epoch": 0.86,
"grad_norm": 4.3125,
"learning_rate": 1.2898398000448443e-05,
"loss": 2.2819,
"step": 535
},
{
"epoch": 0.86,
"grad_norm": 4.53125,
"learning_rate": 1.260319673734821e-05,
"loss": 1.9974,
"step": 536
},
{
"epoch": 0.87,
"grad_norm": 4.09375,
"learning_rate": 1.2311185449286677e-05,
"loss": 1.935,
"step": 537
},
{
"epoch": 0.87,
"grad_norm": 4.46875,
"learning_rate": 1.2022374794648228e-05,
"loss": 2.685,
"step": 538
},
{
"epoch": 0.87,
"grad_norm": 3.765625,
"learning_rate": 1.1736775314994342e-05,
"loss": 1.77,
"step": 539
},
{
"epoch": 0.87,
"grad_norm": 4.4375,
"learning_rate": 1.1454397434679021e-05,
"loss": 2.3842,
"step": 540
},
{
"epoch": 0.87,
"grad_norm": 4.46875,
"learning_rate": 1.1175251460468117e-05,
"loss": 1.6934,
"step": 541
},
{
"epoch": 0.87,
"grad_norm": 4.8125,
"learning_rate": 1.0899347581163221e-05,
"loss": 2.4064,
"step": 542
},
{
"epoch": 0.88,
"grad_norm": 4.0,
"learning_rate": 1.062669586722983e-05,
"loss": 2.1773,
"step": 543
},
{
"epoch": 0.88,
"grad_norm": 4.0625,
"learning_rate": 1.0357306270429624e-05,
"loss": 2.1731,
"step": 544
},
{
"epoch": 0.88,
"grad_norm": 4.0,
"learning_rate": 1.0091188623457415e-05,
"loss": 2.1143,
"step": 545
},
{
"epoch": 0.88,
"grad_norm": 3.875,
"learning_rate": 9.828352639582072e-06,
"loss": 1.8268,
"step": 546
},
{
"epoch": 0.88,
"grad_norm": 4.28125,
"learning_rate": 9.568807912292077e-06,
"loss": 2.2509,
"step": 547
},
{
"epoch": 0.88,
"grad_norm": 4.8125,
"learning_rate": 9.31256391494546e-06,
"loss": 2.2418,
"step": 548
},
{
"epoch": 0.89,
"grad_norm": 5.5,
"learning_rate": 9.05963000042378e-06,
"loss": 2.7369,
"step": 549
},
{
"epoch": 0.89,
"grad_norm": 4.1875,
"learning_rate": 8.810015400790994e-06,
"loss": 2.2038,
"step": 550
},
{
"epoch": 0.89,
"grad_norm": 4.4375,
"learning_rate": 8.563729226956319e-06,
"loss": 2.3315,
"step": 551
},
{
"epoch": 0.89,
"grad_norm": 4.28125,
"learning_rate": 8.32078046834176e-06,
"loss": 2.4199,
"step": 552
},
{
"epoch": 0.89,
"grad_norm": 1096.0,
"learning_rate": 8.081177992554013e-06,
"loss": 2.5443,
"step": 553
},
{
"epoch": 0.89,
"grad_norm": 3.90625,
"learning_rate": 7.844930545060703e-06,
"loss": 2.1696,
"step": 554
},
{
"epoch": 0.9,
"grad_norm": 5.0,
"learning_rate": 7.612046748871327e-06,
"loss": 1.9471,
"step": 555
},
{
"epoch": 0.9,
"grad_norm": 5.5,
"learning_rate": 7.382535104222366e-06,
"loss": 2.8064,
"step": 556
},
{
"epoch": 0.9,
"grad_norm": 5.65625,
"learning_rate": 7.156403988267069e-06,
"loss": 2.7012,
"step": 557
},
{
"epoch": 0.9,
"grad_norm": 4.84375,
"learning_rate": 6.9336616547697965e-06,
"loss": 2.7909,
"step": 558
},
{
"epoch": 0.9,
"grad_norm": 6.09375,
"learning_rate": 6.714316233804574e-06,
"loss": 2.2768,
"step": 559
},
{
"epoch": 0.9,
"grad_norm": 5.03125,
"learning_rate": 6.498375731458528e-06,
"loss": 2.1573,
"step": 560
},
{
"epoch": 0.9,
"grad_norm": 4.5,
"learning_rate": 6.28584802953951e-06,
"loss": 2.0637,
"step": 561
},
{
"epoch": 0.91,
"grad_norm": 4.96875,
"learning_rate": 6.076740885288479e-06,
"loss": 2.5967,
"step": 562
},
{
"epoch": 0.91,
"grad_norm": 4.625,
"learning_rate": 5.8710619310964445e-06,
"loss": 2.5432,
"step": 563
},
{
"epoch": 0.91,
"grad_norm": 4.34375,
"learning_rate": 5.668818674225685e-06,
"loss": 2.2381,
"step": 564
},
{
"epoch": 0.91,
"grad_norm": 18.625,
"learning_rate": 5.470018496535967e-06,
"loss": 2.029,
"step": 565
},
{
"epoch": 0.91,
"grad_norm": 4.0625,
"learning_rate": 5.274668654214932e-06,
"loss": 2.1447,
"step": 566
},
{
"epoch": 0.91,
"grad_norm": 6.5,
"learning_rate": 5.08277627751329e-06,
"loss": 2.4182,
"step": 567
},
{
"epoch": 0.92,
"grad_norm": 3.984375,
"learning_rate": 4.8943483704846475e-06,
"loss": 2.0494,
"step": 568
},
{
"epoch": 0.92,
"grad_norm": 3.703125,
"learning_rate": 4.709391810729713e-06,
"loss": 1.7768,
"step": 569
},
{
"epoch": 0.92,
"grad_norm": 3.6875,
"learning_rate": 4.527913349145441e-06,
"loss": 2.0692,
"step": 570
},
{
"epoch": 0.92,
"grad_norm": 4.9375,
"learning_rate": 4.349919609678455e-06,
"loss": 2.5842,
"step": 571
},
{
"epoch": 0.92,
"grad_norm": 4.1875,
"learning_rate": 4.175417089083378e-06,
"loss": 2.354,
"step": 572
},
{
"epoch": 0.92,
"grad_norm": 26.875,
"learning_rate": 4.004412156685711e-06,
"loss": 2.6345,
"step": 573
},
{
"epoch": 0.93,
"grad_norm": 3.546875,
"learning_rate": 3.836911054149239e-06,
"loss": 1.7726,
"step": 574
},
{
"epoch": 0.93,
"grad_norm": 4.4375,
"learning_rate": 3.6729198952483724e-06,
"loss": 1.7874,
"step": 575
},
{
"epoch": 0.93,
"grad_norm": 4.84375,
"learning_rate": 3.512444665644865e-06,
"loss": 2.2945,
"step": 576
},
{
"epoch": 0.93,
"grad_norm": 4.46875,
"learning_rate": 3.355491222669371e-06,
"loss": 1.9434,
"step": 577
},
{
"epoch": 0.93,
"grad_norm": 5.1875,
"learning_rate": 3.202065295107726e-06,
"loss": 2.1977,
"step": 578
},
{
"epoch": 0.93,
"grad_norm": 4.59375,
"learning_rate": 3.052172482991711e-06,
"loss": 1.7724,
"step": 579
},
{
"epoch": 0.94,
"grad_norm": 4.03125,
"learning_rate": 2.905818257394799e-06,
"loss": 2.1582,
"step": 580
},
{
"epoch": 0.94,
"grad_norm": 3.421875,
"learning_rate": 2.7630079602323442e-06,
"loss": 1.8277,
"step": 581
},
{
"epoch": 0.94,
"grad_norm": 4.15625,
"learning_rate": 2.6237468040666512e-06,
"loss": 2.0602,
"step": 582
},
{
"epoch": 0.94,
"grad_norm": 4.59375,
"learning_rate": 2.4880398719167586e-06,
"loss": 2.0597,
"step": 583
},
{
"epoch": 0.94,
"grad_norm": 4.28125,
"learning_rate": 2.3558921170727888e-06,
"loss": 2.1979,
"step": 584
},
{
"epoch": 0.94,
"grad_norm": 5.25,
"learning_rate": 2.2273083629153147e-06,
"loss": 1.8302,
"step": 585
},
{
"epoch": 0.95,
"grad_norm": 5.125,
"learning_rate": 2.1022933027391555e-06,
"loss": 2.534,
"step": 586
},
{
"epoch": 0.95,
"grad_norm": 3.921875,
"learning_rate": 1.9808514995821593e-06,
"loss": 1.9609,
"step": 587
},
{
"epoch": 0.95,
"grad_norm": 4.53125,
"learning_rate": 1.8629873860586566e-06,
"loss": 2.2896,
"step": 588
},
{
"epoch": 0.95,
"grad_norm": 4.09375,
"learning_rate": 1.7487052641976032e-06,
"loss": 1.7238,
"step": 589
},
{
"epoch": 0.95,
"grad_norm": 3.84375,
"learning_rate": 1.6380093052856483e-06,
"loss": 2.0665,
"step": 590
},
{
"epoch": 0.95,
"grad_norm": 5.125,
"learning_rate": 1.5309035497147684e-06,
"loss": 2.7144,
"step": 591
},
{
"epoch": 0.95,
"grad_norm": 4.5,
"learning_rate": 1.4273919068349184e-06,
"loss": 2.633,
"step": 592
},
{
"epoch": 0.96,
"grad_norm": 8.125,
"learning_rate": 1.3274781548112458e-06,
"loss": 2.5471,
"step": 593
},
{
"epoch": 0.96,
"grad_norm": 4.90625,
"learning_rate": 1.231165940486234e-06,
"loss": 2.6535,
"step": 594
},
{
"epoch": 0.96,
"grad_norm": 4.46875,
"learning_rate": 1.1384587792465872e-06,
"loss": 2.3933,
"step": 595
},
{
"epoch": 0.96,
"grad_norm": 4.875,
"learning_rate": 1.0493600548948878e-06,
"loss": 2.4272,
"step": 596
},
{
"epoch": 0.96,
"grad_norm": 3.921875,
"learning_rate": 9.638730195261625e-07,
"loss": 1.8742,
"step": 597
},
{
"epoch": 0.96,
"grad_norm": 4.9375,
"learning_rate": 8.820007934090879e-07,
"loss": 2.5128,
"step": 598
},
{
"epoch": 0.97,
"grad_norm": 6.0625,
"learning_rate": 8.037463648721488e-07,
"loss": 2.5476,
"step": 599
},
{
"epoch": 0.97,
"grad_norm": 4.4375,
"learning_rate": 7.291125901946027e-07,
"loss": 2.1254,
"step": 600
},
{
"epoch": 0.97,
"grad_norm": 4.375,
"learning_rate": 6.581021935021304e-07,
"loss": 1.8256,
"step": 601
},
{
"epoch": 0.97,
"grad_norm": 4.6875,
"learning_rate": 5.907177666674812e-07,
"loss": 2.4575,
"step": 602
},
{
"epoch": 0.97,
"grad_norm": 4.0625,
"learning_rate": 5.269617692158613e-07,
"loss": 1.8101,
"step": 603
},
{
"epoch": 0.97,
"grad_norm": 5.0625,
"learning_rate": 4.668365282351372e-07,
"loss": 2.6114,
"step": 604
},
{
"epoch": 0.98,
"grad_norm": 4.0625,
"learning_rate": 4.103442382909051e-07,
"loss": 2.2765,
"step": 605
},
{
"epoch": 0.98,
"grad_norm": 4.15625,
"learning_rate": 3.5748696134639825e-07,
"loss": 2.2232,
"step": 606
},
{
"epoch": 0.98,
"grad_norm": 3.921875,
"learning_rate": 3.0826662668720364e-07,
"loss": 2.0377,
"step": 607
},
{
"epoch": 0.98,
"grad_norm": 6.0,
"learning_rate": 2.6268503085089547e-07,
"loss": 2.5245,
"step": 608
},
{
"epoch": 0.98,
"grad_norm": 6.65625,
"learning_rate": 2.2074383756137686e-07,
"loss": 2.4988,
"step": 609
},
{
"epoch": 0.98,
"grad_norm": 12.8125,
"learning_rate": 1.824445776682504e-07,
"loss": 2.7772,
"step": 610
},
{
"epoch": 0.99,
"grad_norm": 3.890625,
"learning_rate": 1.477886490908742e-07,
"loss": 1.8278,
"step": 611
},
{
"epoch": 0.99,
"grad_norm": 4.03125,
"learning_rate": 1.1677731676733584e-07,
"loss": 2.0586,
"step": 612
},
{
"epoch": 0.99,
"grad_norm": 4.09375,
"learning_rate": 8.941171260835601e-08,
"loss": 1.8343,
"step": 613
},
{
"epoch": 0.99,
"grad_norm": 13.1875,
"learning_rate": 6.569283545587724e-08,
"loss": 8.1851,
"step": 614
},
{
"epoch": 0.99,
"grad_norm": 3.859375,
"learning_rate": 4.562155104665955e-08,
"loss": 2.291,
"step": 615
},
{
"epoch": 0.99,
"grad_norm": 3.875,
"learning_rate": 2.9198591980705848e-08,
"loss": 2.1164,
"step": 616
},
{
"epoch": 1.0,
"grad_norm": 4.375,
"learning_rate": 1.642455769444995e-08,
"loss": 2.2408,
"step": 617
},
{
"epoch": 1.0,
"grad_norm": 4.0625,
"learning_rate": 7.2999144389296335e-09,
"loss": 2.0169,
"step": 618
},
{
"epoch": 1.0,
"grad_norm": 4.03125,
"learning_rate": 1.8249952627669154e-09,
"loss": 2.4849,
"step": 619
},
{
"epoch": 1.0,
"grad_norm": 3.8125,
"learning_rate": 0.0,
"loss": 1.7458,
"step": 620
},
{
"epoch": 1.0,
"eval_loss": 2.632516384124756,
"eval_runtime": 1.7594,
"eval_samples_per_second": 19.325,
"eval_steps_per_second": 19.325,
"step": 620
}
],
"logging_steps": 1,
"max_steps": 620,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 7881494668247040.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}