{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 155,
"global_step": 620,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 5.8125,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.7582,
"step": 1
},
{
"epoch": 0.0,
"eval_loss": 2.128222703933716,
"eval_runtime": 1.7853,
"eval_samples_per_second": 19.044,
"eval_steps_per_second": 19.044,
"step": 1
},
{
"epoch": 0.0,
"grad_norm": 8.0625,
"learning_rate": 4.000000000000001e-06,
"loss": 2.0767,
"step": 2
},
{
"epoch": 0.0,
"grad_norm": 5.875,
"learning_rate": 6e-06,
"loss": 2.0746,
"step": 3
},
{
"epoch": 0.01,
"grad_norm": 6.1875,
"learning_rate": 8.000000000000001e-06,
"loss": 1.9598,
"step": 4
},
{
"epoch": 0.01,
"grad_norm": 7.75,
"learning_rate": 1e-05,
"loss": 2.1597,
"step": 5
},
{
"epoch": 0.01,
"grad_norm": 7.15625,
"learning_rate": 1.2e-05,
"loss": 1.5354,
"step": 6
},
{
"epoch": 0.01,
"grad_norm": 6.53125,
"learning_rate": 1.4000000000000001e-05,
"loss": 1.7962,
"step": 7
},
{
"epoch": 0.01,
"grad_norm": 6.875,
"learning_rate": 1.6000000000000003e-05,
"loss": 2.0638,
"step": 8
},
{
"epoch": 0.01,
"grad_norm": 5.4375,
"learning_rate": 1.8e-05,
"loss": 1.5363,
"step": 9
},
{
"epoch": 0.02,
"grad_norm": 7.125,
"learning_rate": 2e-05,
"loss": 2.2466,
"step": 10
},
{
"epoch": 0.02,
"grad_norm": 5.9375,
"learning_rate": 2.2000000000000003e-05,
"loss": 1.8931,
"step": 11
},
{
"epoch": 0.02,
"grad_norm": 6.0625,
"learning_rate": 2.4e-05,
"loss": 1.579,
"step": 12
},
{
"epoch": 0.02,
"grad_norm": 5.9375,
"learning_rate": 2.6000000000000002e-05,
"loss": 1.5665,
"step": 13
},
{
"epoch": 0.02,
"grad_norm": 6.25,
"learning_rate": 2.8000000000000003e-05,
"loss": 1.9216,
"step": 14
},
{
"epoch": 0.02,
"grad_norm": 7.78125,
"learning_rate": 3e-05,
"loss": 2.0173,
"step": 15
},
{
"epoch": 0.03,
"grad_norm": 5.5,
"learning_rate": 3.2000000000000005e-05,
"loss": 2.0586,
"step": 16
},
{
"epoch": 0.03,
"grad_norm": 5.84375,
"learning_rate": 3.4000000000000007e-05,
"loss": 1.9965,
"step": 17
},
{
"epoch": 0.03,
"grad_norm": 28.125,
"learning_rate": 3.6e-05,
"loss": 2.2671,
"step": 18
},
{
"epoch": 0.03,
"grad_norm": 7.5,
"learning_rate": 3.8e-05,
"loss": 2.3432,
"step": 19
},
{
"epoch": 0.03,
"grad_norm": 8.1875,
"learning_rate": 4e-05,
"loss": 2.3057,
"step": 20
},
{
"epoch": 0.03,
"grad_norm": 5.6875,
"learning_rate": 4.2e-05,
"loss": 2.1117,
"step": 21
},
{
"epoch": 0.04,
"grad_norm": 9.25,
"learning_rate": 4.4000000000000006e-05,
"loss": 2.8508,
"step": 22
},
{
"epoch": 0.04,
"grad_norm": 6.0,
"learning_rate": 4.600000000000001e-05,
"loss": 1.9681,
"step": 23
},
{
"epoch": 0.04,
"grad_norm": 7.59375,
"learning_rate": 4.8e-05,
"loss": 2.3675,
"step": 24
},
{
"epoch": 0.04,
"grad_norm": 5.40625,
"learning_rate": 5e-05,
"loss": 1.8022,
"step": 25
},
{
"epoch": 0.04,
"grad_norm": 17.125,
"learning_rate": 5.2000000000000004e-05,
"loss": 2.0862,
"step": 26
},
{
"epoch": 0.04,
"grad_norm": 5.40625,
"learning_rate": 5.4000000000000005e-05,
"loss": 1.8344,
"step": 27
},
{
"epoch": 0.05,
"grad_norm": 7.40625,
"learning_rate": 5.6000000000000006e-05,
"loss": 2.0822,
"step": 28
},
{
"epoch": 0.05,
"grad_norm": 8.25,
"learning_rate": 5.8e-05,
"loss": 2.3784,
"step": 29
},
{
"epoch": 0.05,
"grad_norm": 5.875,
"learning_rate": 6e-05,
"loss": 1.8403,
"step": 30
},
{
"epoch": 0.05,
"grad_norm": 6.8125,
"learning_rate": 6.2e-05,
"loss": 1.7077,
"step": 31
},
{
"epoch": 0.05,
"grad_norm": 5.65625,
"learning_rate": 6.400000000000001e-05,
"loss": 1.8347,
"step": 32
},
{
"epoch": 0.05,
"grad_norm": 7.15625,
"learning_rate": 6.6e-05,
"loss": 1.9535,
"step": 33
},
{
"epoch": 0.05,
"grad_norm": 7.34375,
"learning_rate": 6.800000000000001e-05,
"loss": 2.3192,
"step": 34
},
{
"epoch": 0.06,
"grad_norm": 7.46875,
"learning_rate": 7e-05,
"loss": 2.2975,
"step": 35
},
{
"epoch": 0.06,
"grad_norm": 6.6875,
"learning_rate": 7.2e-05,
"loss": 2.2092,
"step": 36
},
{
"epoch": 0.06,
"grad_norm": 5.4375,
"learning_rate": 7.4e-05,
"loss": 1.872,
"step": 37
},
{
"epoch": 0.06,
"grad_norm": 9.0625,
"learning_rate": 7.6e-05,
"loss": 2.8407,
"step": 38
},
{
"epoch": 0.06,
"grad_norm": 7.8125,
"learning_rate": 7.800000000000001e-05,
"loss": 2.1849,
"step": 39
},
{
"epoch": 0.06,
"grad_norm": 5.375,
"learning_rate": 8e-05,
"loss": 1.7414,
"step": 40
},
{
"epoch": 0.07,
"grad_norm": 10.375,
"learning_rate": 8.2e-05,
"loss": 2.3472,
"step": 41
},
{
"epoch": 0.07,
"grad_norm": 5.9375,
"learning_rate": 8.4e-05,
"loss": 1.6975,
"step": 42
},
{
"epoch": 0.07,
"grad_norm": 5.78125,
"learning_rate": 8.6e-05,
"loss": 1.9298,
"step": 43
},
{
"epoch": 0.07,
"grad_norm": 6.34375,
"learning_rate": 8.800000000000001e-05,
"loss": 1.8339,
"step": 44
},
{
"epoch": 0.07,
"grad_norm": 11.0,
"learning_rate": 9e-05,
"loss": 1.4134,
"step": 45
},
{
"epoch": 0.07,
"grad_norm": 6.0,
"learning_rate": 9.200000000000001e-05,
"loss": 1.656,
"step": 46
},
{
"epoch": 0.08,
"grad_norm": 9.0,
"learning_rate": 9.4e-05,
"loss": 2.4637,
"step": 47
},
{
"epoch": 0.08,
"grad_norm": 5.8125,
"learning_rate": 9.6e-05,
"loss": 1.8719,
"step": 48
},
{
"epoch": 0.08,
"grad_norm": 7.5625,
"learning_rate": 9.8e-05,
"loss": 2.3955,
"step": 49
},
{
"epoch": 0.08,
"grad_norm": 8.0625,
"learning_rate": 0.0001,
"loss": 2.024,
"step": 50
},
{
"epoch": 0.08,
"grad_norm": 9.0625,
"learning_rate": 0.00010200000000000001,
"loss": 2.2503,
"step": 51
},
{
"epoch": 0.08,
"grad_norm": 7.4375,
"learning_rate": 0.00010400000000000001,
"loss": 2.2305,
"step": 52
},
{
"epoch": 0.09,
"grad_norm": 8.6875,
"learning_rate": 0.00010600000000000002,
"loss": 2.0418,
"step": 53
},
{
"epoch": 0.09,
"grad_norm": 8.25,
"learning_rate": 0.00010800000000000001,
"loss": 2.4381,
"step": 54
},
{
"epoch": 0.09,
"grad_norm": 7.53125,
"learning_rate": 0.00011000000000000002,
"loss": 1.9802,
"step": 55
},
{
"epoch": 0.09,
"grad_norm": 8.75,
"learning_rate": 0.00011200000000000001,
"loss": 1.9867,
"step": 56
},
{
"epoch": 0.09,
"grad_norm": 6.21875,
"learning_rate": 0.00011399999999999999,
"loss": 2.0959,
"step": 57
},
{
"epoch": 0.09,
"grad_norm": 5.34375,
"learning_rate": 0.000116,
"loss": 2.0148,
"step": 58
},
{
"epoch": 0.1,
"grad_norm": 7.75,
"learning_rate": 0.000118,
"loss": 1.8376,
"step": 59
},
{
"epoch": 0.1,
"grad_norm": 5.59375,
"learning_rate": 0.00012,
"loss": 2.0351,
"step": 60
},
{
"epoch": 0.1,
"grad_norm": 147.0,
"learning_rate": 0.000122,
"loss": 7.0141,
"step": 61
},
{
"epoch": 0.1,
"grad_norm": 15.3125,
"learning_rate": 0.000124,
"loss": 2.4206,
"step": 62
},
{
"epoch": 0.1,
"grad_norm": 9.9375,
"learning_rate": 0.000126,
"loss": 2.049,
"step": 63
},
{
"epoch": 0.1,
"grad_norm": 11.875,
"learning_rate": 0.00012800000000000002,
"loss": 2.7425,
"step": 64
},
{
"epoch": 0.1,
"grad_norm": 6.90625,
"learning_rate": 0.00013000000000000002,
"loss": 1.6744,
"step": 65
},
{
"epoch": 0.11,
"grad_norm": 12.25,
"learning_rate": 0.000132,
"loss": 2.9067,
"step": 66
},
{
"epoch": 0.11,
"grad_norm": 6.78125,
"learning_rate": 0.000134,
"loss": 2.0443,
"step": 67
},
{
"epoch": 0.11,
"grad_norm": 7.8125,
"learning_rate": 0.00013600000000000003,
"loss": 2.1493,
"step": 68
},
{
"epoch": 0.11,
"grad_norm": 7.4375,
"learning_rate": 0.000138,
"loss": 2.4634,
"step": 69
},
{
"epoch": 0.11,
"grad_norm": 10.125,
"learning_rate": 0.00014,
"loss": 2.5528,
"step": 70
},
{
"epoch": 0.11,
"grad_norm": 9.625,
"learning_rate": 0.000142,
"loss": 2.8141,
"step": 71
},
{
"epoch": 0.12,
"grad_norm": 8.625,
"learning_rate": 0.000144,
"loss": 2.3318,
"step": 72
},
{
"epoch": 0.12,
"grad_norm": 7.78125,
"learning_rate": 0.000146,
"loss": 2.2342,
"step": 73
},
{
"epoch": 0.12,
"grad_norm": 440.0,
"learning_rate": 0.000148,
"loss": 11.3354,
"step": 74
},
{
"epoch": 0.12,
"grad_norm": 16.75,
"learning_rate": 0.00015000000000000001,
"loss": 2.4637,
"step": 75
},
{
"epoch": 0.12,
"grad_norm": 10.3125,
"learning_rate": 0.000152,
"loss": 3.3555,
"step": 76
},
{
"epoch": 0.12,
"grad_norm": 11.5625,
"learning_rate": 0.000154,
"loss": 2.9384,
"step": 77
},
{
"epoch": 0.13,
"grad_norm": 10.5625,
"learning_rate": 0.00015600000000000002,
"loss": 2.7772,
"step": 78
},
{
"epoch": 0.13,
"grad_norm": 6.375,
"learning_rate": 0.00015800000000000002,
"loss": 2.2272,
"step": 79
},
{
"epoch": 0.13,
"grad_norm": 8.9375,
"learning_rate": 0.00016,
"loss": 2.5675,
"step": 80
},
{
"epoch": 0.13,
"grad_norm": 10.0,
"learning_rate": 0.000162,
"loss": 3.1618,
"step": 81
},
{
"epoch": 0.13,
"grad_norm": 5.9375,
"learning_rate": 0.000164,
"loss": 2.2929,
"step": 82
},
{
"epoch": 0.13,
"grad_norm": 10.125,
"learning_rate": 0.000166,
"loss": 2.1003,
"step": 83
},
{
"epoch": 0.14,
"grad_norm": 8.6875,
"learning_rate": 0.000168,
"loss": 2.3771,
"step": 84
},
{
"epoch": 0.14,
"grad_norm": 12.25,
"learning_rate": 0.00017,
"loss": 2.8536,
"step": 85
},
{
"epoch": 0.14,
"grad_norm": 6.0625,
"learning_rate": 0.000172,
"loss": 2.3478,
"step": 86
},
{
"epoch": 0.14,
"grad_norm": 23.875,
"learning_rate": 0.000174,
"loss": 3.0549,
"step": 87
},
{
"epoch": 0.14,
"grad_norm": 9.25,
"learning_rate": 0.00017600000000000002,
"loss": 2.5753,
"step": 88
},
{
"epoch": 0.14,
"grad_norm": 7.5,
"learning_rate": 0.00017800000000000002,
"loss": 2.2508,
"step": 89
},
{
"epoch": 0.15,
"grad_norm": 8.875,
"learning_rate": 0.00018,
"loss": 2.476,
"step": 90
},
{
"epoch": 0.15,
"grad_norm": 11.3125,
"learning_rate": 0.000182,
"loss": 2.7688,
"step": 91
},
{
"epoch": 0.15,
"grad_norm": 9.0625,
"learning_rate": 0.00018400000000000003,
"loss": 2.7879,
"step": 92
},
{
"epoch": 0.15,
"grad_norm": 10.25,
"learning_rate": 0.00018600000000000002,
"loss": 2.1463,
"step": 93
},
{
"epoch": 0.15,
"grad_norm": 16.5,
"learning_rate": 0.000188,
"loss": 2.5949,
"step": 94
},
{
"epoch": 0.15,
"grad_norm": 10.8125,
"learning_rate": 0.00019,
"loss": 2.381,
"step": 95
},
{
"epoch": 0.15,
"grad_norm": 10.0625,
"learning_rate": 0.000192,
"loss": 2.592,
"step": 96
},
{
"epoch": 0.16,
"grad_norm": 16.375,
"learning_rate": 0.000194,
"loss": 2.6753,
"step": 97
},
{
"epoch": 0.16,
"grad_norm": 55.0,
"learning_rate": 0.000196,
"loss": 2.5502,
"step": 98
},
{
"epoch": 0.16,
"grad_norm": 24.5,
"learning_rate": 0.00019800000000000002,
"loss": 2.4627,
"step": 99
},
{
"epoch": 0.16,
"grad_norm": 10.875,
"learning_rate": 0.0002,
"loss": 2.0743,
"step": 100
},
{
"epoch": 0.16,
"grad_norm": 7.34375,
"learning_rate": 0.00019999817500473724,
"loss": 2.3793,
"step": 101
},
{
"epoch": 0.16,
"grad_norm": 9.125,
"learning_rate": 0.00019999270008556108,
"loss": 2.6009,
"step": 102
},
{
"epoch": 0.17,
"grad_norm": 10.125,
"learning_rate": 0.00019998357544230558,
"loss": 2.7316,
"step": 103
},
{
"epoch": 0.17,
"grad_norm": 10.0625,
"learning_rate": 0.00019997080140801932,
"loss": 2.0909,
"step": 104
},
{
"epoch": 0.17,
"grad_norm": 9.0625,
"learning_rate": 0.00019995437844895334,
"loss": 2.6797,
"step": 105
},
{
"epoch": 0.17,
"grad_norm": 23.375,
"learning_rate": 0.00019993430716454413,
"loss": 2.1525,
"step": 106
},
{
"epoch": 0.17,
"grad_norm": 13.25,
"learning_rate": 0.00019991058828739165,
"loss": 2.6369,
"step": 107
},
{
"epoch": 0.17,
"grad_norm": 11.0,
"learning_rate": 0.00019988322268323268,
"loss": 2.7311,
"step": 108
},
{
"epoch": 0.18,
"grad_norm": 8.9375,
"learning_rate": 0.00019985221135090914,
"loss": 2.3733,
"step": 109
},
{
"epoch": 0.18,
"grad_norm": 8.875,
"learning_rate": 0.00019981755542233177,
"loss": 2.2644,
"step": 110
},
{
"epoch": 0.18,
"grad_norm": 12.4375,
"learning_rate": 0.00019977925616243862,
"loss": 2.5241,
"step": 111
},
{
"epoch": 0.18,
"grad_norm": 15.875,
"learning_rate": 0.00019973731496914914,
"loss": 2.8353,
"step": 112
},
{
"epoch": 0.18,
"grad_norm": 11.6875,
"learning_rate": 0.0001996917333733128,
"loss": 2.5239,
"step": 113
},
{
"epoch": 0.18,
"grad_norm": 17.75,
"learning_rate": 0.00019964251303865362,
"loss": 3.1981,
"step": 114
},
{
"epoch": 0.19,
"grad_norm": 10.9375,
"learning_rate": 0.00019958965576170908,
"loss": 2.609,
"step": 115
},
{
"epoch": 0.19,
"grad_norm": 98.0,
"learning_rate": 0.00019953316347176488,
"loss": 2.8462,
"step": 116
},
{
"epoch": 0.19,
"grad_norm": 11.125,
"learning_rate": 0.00019947303823078416,
"loss": 2.6904,
"step": 117
},
{
"epoch": 0.19,
"grad_norm": 8.6875,
"learning_rate": 0.00019940928223333252,
"loss": 2.2545,
"step": 118
},
{
"epoch": 0.19,
"grad_norm": 10.1875,
"learning_rate": 0.0001993418978064979,
"loss": 2.6032,
"step": 119
},
{
"epoch": 0.19,
"grad_norm": 13.25,
"learning_rate": 0.0001992708874098054,
"loss": 3.9633,
"step": 120
},
{
"epoch": 0.2,
"grad_norm": 9.25,
"learning_rate": 0.00019919625363512786,
"loss": 2.4602,
"step": 121
},
{
"epoch": 0.2,
"grad_norm": 71.5,
"learning_rate": 0.00019911799920659093,
"loss": 2.213,
"step": 122
},
{
"epoch": 0.2,
"grad_norm": 16.625,
"learning_rate": 0.00019903612698047383,
"loss": 2.9735,
"step": 123
},
{
"epoch": 0.2,
"grad_norm": 19.5,
"learning_rate": 0.0001989506399451051,
"loss": 3.2784,
"step": 124
},
{
"epoch": 0.2,
"grad_norm": 26.875,
"learning_rate": 0.00019886154122075343,
"loss": 2.0087,
"step": 125
},
{
"epoch": 0.2,
"grad_norm": 8.1875,
"learning_rate": 0.00019876883405951377,
"loss": 2.2212,
"step": 126
},
{
"epoch": 0.2,
"grad_norm": 10.375,
"learning_rate": 0.00019867252184518878,
"loss": 2.2147,
"step": 127
},
{
"epoch": 0.21,
"grad_norm": 7.625,
"learning_rate": 0.0001985726080931651,
"loss": 2.1791,
"step": 128
},
{
"epoch": 0.21,
"grad_norm": 180.0,
"learning_rate": 0.00019846909645028523,
"loss": 2.6698,
"step": 129
},
{
"epoch": 0.21,
"grad_norm": 10.8125,
"learning_rate": 0.00019836199069471437,
"loss": 2.5204,
"step": 130
},
{
"epoch": 0.21,
"grad_norm": 17.5,
"learning_rate": 0.0001982512947358024,
"loss": 2.1382,
"step": 131
},
{
"epoch": 0.21,
"grad_norm": 13.6875,
"learning_rate": 0.00019813701261394136,
"loss": 2.9256,
"step": 132
},
{
"epoch": 0.21,
"grad_norm": 7.875,
"learning_rate": 0.00019801914850041784,
"loss": 2.2253,
"step": 133
},
{
"epoch": 0.22,
"grad_norm": 7.40625,
"learning_rate": 0.00019789770669726087,
"loss": 2.5591,
"step": 134
},
{
"epoch": 0.22,
"grad_norm": 7.0,
"learning_rate": 0.00019777269163708468,
"loss": 1.7038,
"step": 135
},
{
"epoch": 0.22,
"grad_norm": 11.5,
"learning_rate": 0.00019764410788292722,
"loss": 2.7535,
"step": 136
},
{
"epoch": 0.22,
"grad_norm": 19.0,
"learning_rate": 0.00019751196012808325,
"loss": 2.727,
"step": 137
},
{
"epoch": 0.22,
"grad_norm": 9.9375,
"learning_rate": 0.00019737625319593335,
"loss": 2.671,
"step": 138
},
{
"epoch": 0.22,
"grad_norm": 115.0,
"learning_rate": 0.00019723699203976766,
"loss": 9.1845,
"step": 139
},
{
"epoch": 0.23,
"grad_norm": 59.0,
"learning_rate": 0.0001970941817426052,
"loss": 9.8542,
"step": 140
},
{
"epoch": 0.23,
"grad_norm": 1632.0,
"learning_rate": 0.00019694782751700828,
"loss": 7.3106,
"step": 141
},
{
"epoch": 0.23,
"grad_norm": 40.75,
"learning_rate": 0.00019679793470489228,
"loss": 9.6883,
"step": 142
},
{
"epoch": 0.23,
"grad_norm": 816.0,
"learning_rate": 0.00019664450877733062,
"loss": 7.9838,
"step": 143
},
{
"epoch": 0.23,
"grad_norm": 632.0,
"learning_rate": 0.00019648755533435518,
"loss": 4.5732,
"step": 144
},
{
"epoch": 0.23,
"grad_norm": 580.0,
"learning_rate": 0.00019632708010475165,
"loss": 3.554,
"step": 145
},
{
"epoch": 0.24,
"grad_norm": 872.0,
"learning_rate": 0.00019616308894585078,
"loss": 3.2136,
"step": 146
},
{
"epoch": 0.24,
"grad_norm": 852.0,
"learning_rate": 0.0001959955878433143,
"loss": 4.1706,
"step": 147
},
{
"epoch": 0.24,
"grad_norm": 204.0,
"learning_rate": 0.00019582458291091663,
"loss": 3.1317,
"step": 148
},
{
"epoch": 0.24,
"grad_norm": 123.5,
"learning_rate": 0.00019565008039032158,
"loss": 3.9938,
"step": 149
},
{
"epoch": 0.24,
"grad_norm": 1448.0,
"learning_rate": 0.00019547208665085457,
"loss": 7.1668,
"step": 150
},
{
"epoch": 0.24,
"grad_norm": 2192.0,
"learning_rate": 0.0001952906081892703,
"loss": 5.1547,
"step": 151
},
{
"epoch": 0.25,
"grad_norm": 39.0,
"learning_rate": 0.00019510565162951537,
"loss": 3.9528,
"step": 152
},
{
"epoch": 0.25,
"grad_norm": 18.125,
"learning_rate": 0.0001949172237224867,
"loss": 3.2722,
"step": 153
},
{
"epoch": 0.25,
"grad_norm": 1920.0,
"learning_rate": 0.00019472533134578507,
"loss": 8.9408,
"step": 154
},
{
"epoch": 0.25,
"grad_norm": 2304.0,
"learning_rate": 0.00019452998150346401,
"loss": 9.7725,
"step": 155
},
{
"epoch": 0.25,
"eval_loss": 8.848782539367676,
"eval_runtime": 1.7872,
"eval_samples_per_second": 19.024,
"eval_steps_per_second": 19.024,
"step": 155
},
{
"epoch": 0.25,
"grad_norm": 33.25,
"learning_rate": 0.0001943311813257743,
"loss": 9.3742,
"step": 156
},
{
"epoch": 0.25,
"grad_norm": 232.0,
"learning_rate": 0.00019412893806890357,
"loss": 8.3919,
"step": 157
},
{
"epoch": 0.25,
"grad_norm": 492.0,
"learning_rate": 0.00019392325911471155,
"loss": 7.3422,
"step": 158
},
{
"epoch": 0.26,
"grad_norm": 872.0,
"learning_rate": 0.00019371415197046052,
"loss": 9.4552,
"step": 159
},
{
"epoch": 0.26,
"grad_norm": 227.0,
"learning_rate": 0.0001935016242685415,
"loss": 3.9771,
"step": 160
},
{
"epoch": 0.26,
"grad_norm": 116.0,
"learning_rate": 0.00019328568376619543,
"loss": 13.2183,
"step": 161
},
{
"epoch": 0.26,
"grad_norm": 2720.0,
"learning_rate": 0.00019306633834523024,
"loss": 3.3559,
"step": 162
},
{
"epoch": 0.26,
"grad_norm": 137.0,
"learning_rate": 0.00019284359601173294,
"loss": 3.1494,
"step": 163
},
{
"epoch": 0.26,
"grad_norm": 161.0,
"learning_rate": 0.00019261746489577765,
"loss": 7.2183,
"step": 164
},
{
"epoch": 0.27,
"grad_norm": 24.75,
"learning_rate": 0.0001923879532511287,
"loss": 8.7516,
"step": 165
},
{
"epoch": 0.27,
"grad_norm": 856.0,
"learning_rate": 0.0001921550694549393,
"loss": 5.0249,
"step": 166
},
{
"epoch": 0.27,
"grad_norm": 1096.0,
"learning_rate": 0.000191918822007446,
"loss": 5.1011,
"step": 167
},
{
"epoch": 0.27,
"grad_norm": 760.0,
"learning_rate": 0.00019167921953165825,
"loss": 7.5649,
"step": 168
},
{
"epoch": 0.27,
"grad_norm": 58.75,
"learning_rate": 0.0001914362707730437,
"loss": 3.2093,
"step": 169
},
{
"epoch": 0.27,
"grad_norm": 208.0,
"learning_rate": 0.00019118998459920902,
"loss": 5.9444,
"step": 170
},
{
"epoch": 0.28,
"grad_norm": 16.875,
"learning_rate": 0.00019094036999957624,
"loss": 8.3708,
"step": 171
},
{
"epoch": 0.28,
"grad_norm": 324.0,
"learning_rate": 0.00019068743608505455,
"loss": 8.2079,
"step": 172
},
{
"epoch": 0.28,
"grad_norm": 394.0,
"learning_rate": 0.00019043119208770793,
"loss": 7.2584,
"step": 173
},
{
"epoch": 0.28,
"grad_norm": 8.4375,
"learning_rate": 0.00019017164736041795,
"loss": 7.9704,
"step": 174
},
{
"epoch": 0.28,
"grad_norm": 972.0,
"learning_rate": 0.00018990881137654258,
"loss": 9.457,
"step": 175
},
{
"epoch": 0.28,
"grad_norm": 648.0,
"learning_rate": 0.00018964269372957038,
"loss": 5.1865,
"step": 176
},
{
"epoch": 0.29,
"grad_norm": 12.25,
"learning_rate": 0.0001893733041327702,
"loss": 7.7786,
"step": 177
},
{
"epoch": 0.29,
"grad_norm": 11.875,
"learning_rate": 0.0001891006524188368,
"loss": 7.6235,
"step": 178
},
{
"epoch": 0.29,
"grad_norm": 10.5625,
"learning_rate": 0.0001888247485395319,
"loss": 7.4106,
"step": 179
},
{
"epoch": 0.29,
"grad_norm": 13.0625,
"learning_rate": 0.000188545602565321,
"loss": 7.8207,
"step": 180
},
{
"epoch": 0.29,
"grad_norm": 8.75,
"learning_rate": 0.00018826322468500566,
"loss": 7.7207,
"step": 181
},
{
"epoch": 0.29,
"grad_norm": 264.0,
"learning_rate": 0.00018797762520535177,
"loss": 6.7754,
"step": 182
},
{
"epoch": 0.3,
"grad_norm": 6.28125,
"learning_rate": 0.00018768881455071332,
"loss": 7.586,
"step": 183
},
{
"epoch": 0.3,
"grad_norm": 143.0,
"learning_rate": 0.0001873968032626518,
"loss": 5.2382,
"step": 184
},
{
"epoch": 0.3,
"grad_norm": 73.0,
"learning_rate": 0.00018710160199955156,
"loss": 3.6274,
"step": 185
},
{
"epoch": 0.3,
"grad_norm": 11.625,
"learning_rate": 0.00018680322153623075,
"loss": 7.4444,
"step": 186
},
{
"epoch": 0.3,
"grad_norm": 11.3125,
"learning_rate": 0.000186501672763548,
"loss": 7.4017,
"step": 187
},
{
"epoch": 0.3,
"grad_norm": 125.0,
"learning_rate": 0.00018619696668800492,
"loss": 3.8111,
"step": 188
},
{
"epoch": 0.3,
"grad_norm": 8.5,
"learning_rate": 0.00018588911443134448,
"loss": 7.5166,
"step": 189
},
{
"epoch": 0.31,
"grad_norm": 79.5,
"learning_rate": 0.00018557812723014476,
"loss": 7.4288,
"step": 190
},
{
"epoch": 0.31,
"grad_norm": 398.0,
"learning_rate": 0.00018526401643540922,
"loss": 4.29,
"step": 191
},
{
"epoch": 0.31,
"grad_norm": 180.0,
"learning_rate": 0.0001849467935121521,
"loss": 6.6381,
"step": 192
},
{
"epoch": 0.31,
"grad_norm": 6.875,
"learning_rate": 0.00018462647003898006,
"loss": 7.5981,
"step": 193
},
{
"epoch": 0.31,
"grad_norm": 12.5625,
"learning_rate": 0.00018430305770766948,
"loss": 7.3688,
"step": 194
},
{
"epoch": 0.31,
"grad_norm": 500.0,
"learning_rate": 0.0001839765683227398,
"loss": 4.5837,
"step": 195
},
{
"epoch": 0.32,
"grad_norm": 13504.0,
"learning_rate": 0.00018364701380102266,
"loss": 6.6357,
"step": 196
},
{
"epoch": 0.32,
"grad_norm": 6.09375,
"learning_rate": 0.00018331440617122696,
"loss": 7.4372,
"step": 197
},
{
"epoch": 0.32,
"grad_norm": 8.0,
"learning_rate": 0.00018297875757349952,
"loss": 7.3396,
"step": 198
},
{
"epoch": 0.32,
"grad_norm": 5.34375,
"learning_rate": 0.00018264008025898248,
"loss": 7.2063,
"step": 199
},
{
"epoch": 0.32,
"grad_norm": 180.0,
"learning_rate": 0.00018229838658936564,
"loss": 6.5597,
"step": 200
},
{
"epoch": 0.32,
"grad_norm": 6.53125,
"learning_rate": 0.00018195368903643563,
"loss": 7.4723,
"step": 201
},
{
"epoch": 0.33,
"grad_norm": 7.8125,
"learning_rate": 0.0001816060001816205,
"loss": 7.2379,
"step": 202
},
{
"epoch": 0.33,
"grad_norm": 5.0,
"learning_rate": 0.00018125533271553043,
"loss": 7.4611,
"step": 203
},
{
"epoch": 0.33,
"grad_norm": 86.0,
"learning_rate": 0.00018090169943749476,
"loss": 7.1709,
"step": 204
},
{
"epoch": 0.33,
"grad_norm": 7.59375,
"learning_rate": 0.0001805451132550946,
"loss": 7.2826,
"step": 205
},
{
"epoch": 0.33,
"grad_norm": 6.53125,
"learning_rate": 0.00018018558718369186,
"loss": 7.304,
"step": 206
},
{
"epoch": 0.33,
"grad_norm": 10.1875,
"learning_rate": 0.00017982313434595406,
"loss": 7.1153,
"step": 207
},
{
"epoch": 0.34,
"grad_norm": 8.625,
"learning_rate": 0.00017945776797137543,
"loss": 7.4778,
"step": 208
},
{
"epoch": 0.34,
"grad_norm": 10.1875,
"learning_rate": 0.00017908950139579406,
"loss": 7.2837,
"step": 209
},
{
"epoch": 0.34,
"grad_norm": 10.3125,
"learning_rate": 0.00017871834806090501,
"loss": 7.0416,
"step": 210
},
{
"epoch": 0.34,
"grad_norm": 6.78125,
"learning_rate": 0.0001783443215137699,
"loss": 7.2605,
"step": 211
},
{
"epoch": 0.34,
"grad_norm": 6.15625,
"learning_rate": 0.00017796743540632223,
"loss": 7.0665,
"step": 212
},
{
"epoch": 0.34,
"grad_norm": 6.90625,
"learning_rate": 0.00017758770349486923,
"loss": 7.1735,
"step": 213
},
{
"epoch": 0.35,
"grad_norm": 7.8125,
"learning_rate": 0.00017720513963958968,
"loss": 7.0647,
"step": 214
},
{
"epoch": 0.35,
"grad_norm": 5.5,
"learning_rate": 0.00017681975780402807,
"loss": 7.7996,
"step": 215
},
{
"epoch": 0.35,
"grad_norm": 5.15625,
"learning_rate": 0.00017643157205458483,
"loss": 7.4716,
"step": 216
},
{
"epoch": 0.35,
"grad_norm": 14.6875,
"learning_rate": 0.0001760405965600031,
"loss": 7.1858,
"step": 217
},
{
"epoch": 0.35,
"grad_norm": 9.125,
"learning_rate": 0.00017564684559085136,
"loss": 7.2222,
"step": 218
},
{
"epoch": 0.35,
"grad_norm": 7.3125,
"learning_rate": 0.00017525033351900268,
"loss": 7.4119,
"step": 219
},
{
"epoch": 0.35,
"grad_norm": 7.25,
"learning_rate": 0.00017485107481711012,
"loss": 7.3965,
"step": 220
},
{
"epoch": 0.36,
"grad_norm": 11.125,
"learning_rate": 0.00017444908405807845,
"loss": 7.2064,
"step": 221
},
{
"epoch": 0.36,
"grad_norm": 7.5,
"learning_rate": 0.00017404437591453235,
"loss": 7.6118,
"step": 222
},
{
"epoch": 0.36,
"grad_norm": 7.6875,
"learning_rate": 0.00017363696515828062,
"loss": 7.1017,
"step": 223
},
{
"epoch": 0.36,
"grad_norm": 9.0625,
"learning_rate": 0.00017322686665977737,
"loss": 6.9061,
"step": 224
},
{
"epoch": 0.36,
"grad_norm": 12.5,
"learning_rate": 0.00017281409538757883,
"loss": 7.2821,
"step": 225
},
{
"epoch": 0.36,
"grad_norm": 8.0,
"learning_rate": 0.00017239866640779745,
"loss": 7.6545,
"step": 226
},
{
"epoch": 0.37,
"grad_norm": 12.125,
"learning_rate": 0.0001719805948835515,
"loss": 7.4611,
"step": 227
},
{
"epoch": 0.37,
"grad_norm": 6.875,
"learning_rate": 0.00017155989607441213,
"loss": 6.9964,
"step": 228
},
{
"epoch": 0.37,
"grad_norm": 7.4375,
"learning_rate": 0.00017113658533584594,
"loss": 7.3558,
"step": 229
},
{
"epoch": 0.37,
"grad_norm": 8.8125,
"learning_rate": 0.00017071067811865476,
"loss": 7.2276,
"step": 230
},
{
"epoch": 0.37,
"grad_norm": 5.625,
"learning_rate": 0.00017028218996841172,
"loss": 7.2778,
"step": 231
},
{
"epoch": 0.37,
"grad_norm": 6.28125,
"learning_rate": 0.00016985113652489374,
"loss": 7.1171,
"step": 232
},
{
"epoch": 0.38,
"grad_norm": 7.90625,
"learning_rate": 0.00016941753352151055,
"loss": 7.5252,
"step": 233
},
{
"epoch": 0.38,
"grad_norm": 5.71875,
"learning_rate": 0.00016898139678473076,
"loss": 7.3942,
"step": 234
},
{
"epoch": 0.38,
"grad_norm": 6.375,
"learning_rate": 0.00016854274223350397,
"loss": 7.2534,
"step": 235
},
{
"epoch": 0.38,
"grad_norm": 5.65625,
"learning_rate": 0.00016810158587867973,
"loss": 7.3688,
"step": 236
},
{
"epoch": 0.38,
"grad_norm": 4.96875,
"learning_rate": 0.00016765794382242314,
"loss": 7.3569,
"step": 237
},
{
"epoch": 0.38,
"grad_norm": 7.34375,
"learning_rate": 0.00016721183225762727,
"loss": 6.9082,
"step": 238
},
{
"epoch": 0.39,
"grad_norm": 8.5,
"learning_rate": 0.00016676326746732195,
"loss": 7.2176,
"step": 239
},
{
"epoch": 0.39,
"grad_norm": 6.75,
"learning_rate": 0.00016631226582407952,
"loss": 6.8296,
"step": 240
},
{
"epoch": 0.39,
"grad_norm": 6.4375,
"learning_rate": 0.00016585884378941725,
"loss": 7.4089,
"step": 241
},
{
"epoch": 0.39,
"grad_norm": 7.9375,
"learning_rate": 0.00016540301791319645,
"loss": 7.2357,
"step": 242
},
{
"epoch": 0.39,
"grad_norm": 4.84375,
"learning_rate": 0.00016494480483301836,
"loss": 7.3099,
"step": 243
},
{
"epoch": 0.39,
"grad_norm": 7.1875,
"learning_rate": 0.00016448422127361706,
"loss": 6.7821,
"step": 244
},
{
"epoch": 0.4,
"grad_norm": 7.03125,
"learning_rate": 0.00016402128404624882,
"loss": 7.4242,
"step": 245
},
{
"epoch": 0.4,
"grad_norm": 6.84375,
"learning_rate": 0.00016355601004807856,
"loss": 7.0436,
"step": 246
},
{
"epoch": 0.4,
"grad_norm": 7.46875,
"learning_rate": 0.00016308841626156307,
"loss": 7.2183,
"step": 247
},
{
"epoch": 0.4,
"grad_norm": 13.6875,
"learning_rate": 0.00016261851975383137,
"loss": 7.2762,
"step": 248
},
{
"epoch": 0.4,
"grad_norm": 9.25,
"learning_rate": 0.00016214633767606143,
"loss": 7.6098,
"step": 249
},
{
"epoch": 0.4,
"grad_norm": 10.3125,
"learning_rate": 0.00016167188726285434,
"loss": 7.2284,
"step": 250
},
{
"epoch": 0.4,
"grad_norm": 18.625,
"learning_rate": 0.0001611951858316052,
"loss": 7.4437,
"step": 251
},
{
"epoch": 0.41,
"grad_norm": 6.34375,
"learning_rate": 0.00016071625078187114,
"loss": 7.2706,
"step": 252
},
{
"epoch": 0.41,
"grad_norm": 5.375,
"learning_rate": 0.00016023509959473605,
"loss": 7.1823,
"step": 253
},
{
"epoch": 0.41,
"grad_norm": 6.8125,
"learning_rate": 0.00015975174983217275,
"loss": 7.4077,
"step": 254
},
{
"epoch": 0.41,
"grad_norm": 4.90625,
"learning_rate": 0.0001592662191364017,
"loss": 7.3115,
"step": 255
},
{
"epoch": 0.41,
"grad_norm": 6.375,
"learning_rate": 0.00015877852522924732,
"loss": 7.6855,
"step": 256
},
{
"epoch": 0.41,
"grad_norm": 5.5625,
"learning_rate": 0.00015828868591149104,
"loss": 7.3519,
"step": 257
},
{
"epoch": 0.42,
"grad_norm": 6.90625,
"learning_rate": 0.0001577967190622215,
"loss": 7.4478,
"step": 258
},
{
"epoch": 0.42,
"grad_norm": 7.59375,
"learning_rate": 0.00015730264263818212,
"loss": 7.4721,
"step": 259
},
{
"epoch": 0.42,
"grad_norm": 5.375,
"learning_rate": 0.00015680647467311557,
"loss": 7.25,
"step": 260
},
{
"epoch": 0.42,
"grad_norm": 6.28125,
"learning_rate": 0.00015630823327710558,
"loss": 7.278,
"step": 261
},
{
"epoch": 0.42,
"grad_norm": 932.0,
"learning_rate": 0.00015580793663591585,
"loss": 7.8209,
"step": 262
},
{
"epoch": 0.42,
"grad_norm": 13.4375,
"learning_rate": 0.0001553056030103264,
"loss": 7.4705,
"step": 263
},
{
"epoch": 0.43,
"grad_norm": 5.34375,
"learning_rate": 0.00015480125073546704,
"loss": 7.2755,
"step": 264
},
{
"epoch": 0.43,
"grad_norm": 628.0,
"learning_rate": 0.0001542948982201479,
"loss": 7.3964,
"step": 265
},
{
"epoch": 0.43,
"grad_norm": 4.4375,
"learning_rate": 0.00015378656394618787,
"loss": 7.259,
"step": 266
},
{
"epoch": 0.43,
"grad_norm": 29.25,
"learning_rate": 0.00015327626646773976,
"loss": 8.2839,
"step": 267
},
{
"epoch": 0.43,
"grad_norm": 6.78125,
"learning_rate": 0.0001527640244106133,
"loss": 7.4219,
"step": 268
},
{
"epoch": 0.43,
"grad_norm": 6.21875,
"learning_rate": 0.0001522498564715949,
"loss": 7.4376,
"step": 269
},
{
"epoch": 0.44,
"grad_norm": 5.875,
"learning_rate": 0.00015173378141776568,
"loss": 7.3749,
"step": 270
},
{
"epoch": 0.44,
"grad_norm": 5.21875,
"learning_rate": 0.00015121581808581622,
"loss": 7.1775,
"step": 271
},
{
"epoch": 0.44,
"grad_norm": 9.4375,
"learning_rate": 0.00015069598538135906,
"loss": 7.2296,
"step": 272
},
{
"epoch": 0.44,
"grad_norm": 4.46875,
"learning_rate": 0.00015017430227823864,
"loss": 7.3572,
"step": 273
},
{
"epoch": 0.44,
"grad_norm": 5.8125,
"learning_rate": 0.0001496507878178388,
"loss": 7.2276,
"step": 274
},
{
"epoch": 0.44,
"grad_norm": 4.75,
"learning_rate": 0.00014912546110838775,
"loss": 7.5138,
"step": 275
},
{
"epoch": 0.45,
"grad_norm": 10.125,
"learning_rate": 0.0001485983413242606,
"loss": 7.0377,
"step": 276
},
{
"epoch": 0.45,
"grad_norm": 4.65625,
"learning_rate": 0.00014806944770527958,
"loss": 7.1247,
"step": 277
},
{
"epoch": 0.45,
"grad_norm": 7.1875,
"learning_rate": 0.00014753879955601163,
"loss": 7.3482,
"step": 278
},
{
"epoch": 0.45,
"grad_norm": 8.5,
"learning_rate": 0.00014700641624506392,
"loss": 6.9676,
"step": 279
},
{
"epoch": 0.45,
"grad_norm": 6.03125,
"learning_rate": 0.00014647231720437686,
"loss": 7.2884,
"step": 280
},
{
"epoch": 0.45,
"grad_norm": 8.0,
"learning_rate": 0.00014593652192851486,
"loss": 7.558,
"step": 281
},
{
"epoch": 0.45,
"grad_norm": 7.65625,
"learning_rate": 0.00014539904997395468,
"loss": 7.1275,
"step": 282
},
{
"epoch": 0.46,
"grad_norm": 10.875,
"learning_rate": 0.00014485992095837177,
"loss": 6.9928,
"step": 283
},
{
"epoch": 0.46,
"grad_norm": 14.6875,
"learning_rate": 0.00014431915455992414,
"loss": 7.2045,
"step": 284
},
{
"epoch": 0.46,
"grad_norm": 5.96875,
"learning_rate": 0.00014377677051653404,
"loss": 7.1944,
"step": 285
},
{
"epoch": 0.46,
"grad_norm": 7.5,
"learning_rate": 0.00014323278862516775,
"loss": 7.8461,
"step": 286
},
{
"epoch": 0.46,
"grad_norm": 6.40625,
"learning_rate": 0.00014268722874111265,
"loss": 7.1609,
"step": 287
},
{
"epoch": 0.46,
"grad_norm": 1232.0,
"learning_rate": 0.00014214011077725292,
"loss": 8.6244,
"step": 288
},
{
"epoch": 0.47,
"grad_norm": 422.0,
"learning_rate": 0.00014159145470334235,
"loss": 8.2071,
"step": 289
},
{
"epoch": 0.47,
"grad_norm": 6.28125,
"learning_rate": 0.0001410412805452757,
"loss": 7.1693,
"step": 290
},
{
"epoch": 0.47,
"grad_norm": 24.25,
"learning_rate": 0.00014048960838435753,
"loss": 7.1325,
"step": 291
},
{
"epoch": 0.47,
"grad_norm": 5.5,
"learning_rate": 0.00013993645835656953,
"loss": 7.2788,
"step": 292
},
{
"epoch": 0.47,
"grad_norm": 6.5625,
"learning_rate": 0.00013938185065183532,
"loss": 7.3263,
"step": 293
},
{
"epoch": 0.47,
"grad_norm": 72.5,
"learning_rate": 0.0001388258055132835,
"loss": 7.4291,
"step": 294
},
{
"epoch": 0.48,
"grad_norm": 11.125,
"learning_rate": 0.000138268343236509,
"loss": 7.3335,
"step": 295
},
{
"epoch": 0.48,
"grad_norm": 7.09375,
"learning_rate": 0.00013770948416883205,
"loss": 7.4262,
"step": 296
},
{
"epoch": 0.48,
"grad_norm": 5.5,
"learning_rate": 0.00013714924870855571,
"loss": 7.3771,
"step": 297
},
{
"epoch": 0.48,
"grad_norm": 16.375,
"learning_rate": 0.00013658765730422125,
"loss": 7.5395,
"step": 298
},
{
"epoch": 0.48,
"grad_norm": 15.0625,
"learning_rate": 0.00013602473045386165,
"loss": 7.6106,
"step": 299
},
{
"epoch": 0.48,
"grad_norm": 7.90625,
"learning_rate": 0.00013546048870425356,
"loss": 7.3483,
"step": 300
},
{
"epoch": 0.49,
"grad_norm": 12.5625,
"learning_rate": 0.0001348949526501675,
"loss": 8.0617,
"step": 301
},
{
"epoch": 0.49,
"grad_norm": 25.25,
"learning_rate": 0.00013432814293361584,
"loss": 6.8243,
"step": 302
},
{
"epoch": 0.49,
"grad_norm": 6.8125,
"learning_rate": 0.00013376008024309948,
"loss": 7.3006,
"step": 303
},
{
"epoch": 0.49,
"grad_norm": 5.375,
"learning_rate": 0.00013319078531285285,
"loss": 7.3865,
"step": 304
},
{
"epoch": 0.49,
"grad_norm": 7.0,
"learning_rate": 0.00013262027892208694,
"loss": 7.676,
"step": 305
},
{
"epoch": 0.49,
"grad_norm": 13.75,
"learning_rate": 0.00013204858189423097,
"loss": 7.3239,
"step": 306
},
{
"epoch": 0.5,
"grad_norm": 3120.0,
"learning_rate": 0.00013147571509617228,
"loss": 8.0707,
"step": 307
},
{
"epoch": 0.5,
"grad_norm": 6.96875,
"learning_rate": 0.00013090169943749476,
"loss": 7.0195,
"step": 308
},
{
"epoch": 0.5,
"grad_norm": 6.1875,
"learning_rate": 0.00013032655586971552,
"loss": 7.367,
"step": 309
},
{
"epoch": 0.5,
"grad_norm": 18.5,
"learning_rate": 0.00012975030538552032,
"loss": 7.687,
"step": 310
},
{
"epoch": 0.5,
"eval_loss": 7.375539302825928,
"eval_runtime": 1.796,
"eval_samples_per_second": 18.931,
"eval_steps_per_second": 18.931,
"step": 310
},
{
"epoch": 0.5,
"grad_norm": 8.0,
"learning_rate": 0.0001291729690179972,
"loss": 7.4606,
"step": 311
},
{
"epoch": 0.5,
"grad_norm": 8.375,
"learning_rate": 0.00012859456783986893,
"loss": 7.5164,
"step": 312
},
{
"epoch": 0.5,
"grad_norm": 5.65625,
"learning_rate": 0.00012801512296272368,
"loss": 7.685,
"step": 313
},
{
"epoch": 0.51,
"grad_norm": 6.5625,
"learning_rate": 0.0001274346555362446,
"loss": 7.2255,
"step": 314
},
{
"epoch": 0.51,
"grad_norm": 17.125,
"learning_rate": 0.0001268531867474377,
"loss": 7.8333,
"step": 315
},
{
"epoch": 0.51,
"grad_norm": 8.125,
"learning_rate": 0.0001262707378198587,
"loss": 7.4813,
"step": 316
},
{
"epoch": 0.51,
"grad_norm": 7.375,
"learning_rate": 0.00012568733001283827,
"loss": 7.2433,
"step": 317
},
{
"epoch": 0.51,
"grad_norm": 5.90625,
"learning_rate": 0.00012510298462070619,
"loss": 7.4513,
"step": 318
},
{
"epoch": 0.51,
"grad_norm": 5.53125,
"learning_rate": 0.00012451772297201376,
"loss": 7.31,
"step": 319
},
{
"epoch": 0.52,
"grad_norm": 30.0,
"learning_rate": 0.0001239315664287558,
"loss": 7.9245,
"step": 320
},
{
"epoch": 0.52,
"grad_norm": 7.03125,
"learning_rate": 0.00012334453638559057,
"loss": 7.2468,
"step": 321
},
{
"epoch": 0.52,
"grad_norm": 28.5,
"learning_rate": 0.000122756654269059,
"loss": 8.0352,
"step": 322
},
{
"epoch": 0.52,
"grad_norm": 5.34375,
"learning_rate": 0.00012216794153680274,
"loss": 7.2967,
"step": 323
},
{
"epoch": 0.52,
"grad_norm": 8.375,
"learning_rate": 0.00012157841967678063,
"loss": 7.2094,
"step": 324
},
{
"epoch": 0.52,
"grad_norm": 9.4375,
"learning_rate": 0.00012098811020648475,
"loss": 7.2978,
"step": 325
},
{
"epoch": 0.53,
"grad_norm": 11.625,
"learning_rate": 0.00012039703467215488,
"loss": 7.3662,
"step": 326
},
{
"epoch": 0.53,
"grad_norm": 5.28125,
"learning_rate": 0.00011980521464799198,
"loss": 7.2527,
"step": 327
},
{
"epoch": 0.53,
"grad_norm": 9.5625,
"learning_rate": 0.00011921267173537086,
"loss": 7.616,
"step": 328
},
{
"epoch": 0.53,
"grad_norm": 8.0625,
"learning_rate": 0.00011861942756205169,
"loss": 7.1239,
"step": 329
},
{
"epoch": 0.53,
"grad_norm": 6.78125,
"learning_rate": 0.0001180255037813906,
"loss": 7.1963,
"step": 330
},
{
"epoch": 0.53,
"grad_norm": 47.0,
"learning_rate": 0.00011743092207154929,
"loss": 7.1912,
"step": 331
},
{
"epoch": 0.54,
"grad_norm": 7.25,
"learning_rate": 0.00011683570413470383,
"loss": 7.3671,
"step": 332
},
{
"epoch": 0.54,
"grad_norm": 7.09375,
"learning_rate": 0.00011623987169625261,
"loss": 7.2449,
"step": 333
},
{
"epoch": 0.54,
"grad_norm": 7.0625,
"learning_rate": 0.0001156434465040231,
"loss": 7.3294,
"step": 334
},
{
"epoch": 0.54,
"grad_norm": 7.375,
"learning_rate": 0.00011504645032747832,
"loss": 7.3717,
"step": 335
},
{
"epoch": 0.54,
"grad_norm": 5.65625,
"learning_rate": 0.00011444890495692213,
"loss": 7.1599,
"step": 336
},
{
"epoch": 0.54,
"grad_norm": 28.25,
"learning_rate": 0.00011385083220270401,
"loss": 7.4863,
"step": 337
},
{
"epoch": 0.55,
"grad_norm": 5.78125,
"learning_rate": 0.00011325225389442277,
"loss": 7.7702,
"step": 338
},
{
"epoch": 0.55,
"grad_norm": 5.3125,
"learning_rate": 0.00011265319188012994,
"loss": 7.0122,
"step": 339
},
{
"epoch": 0.55,
"grad_norm": 7.15625,
"learning_rate": 0.0001120536680255323,
"loss": 7.4499,
"step": 340
},
{
"epoch": 0.55,
"grad_norm": 5.4375,
"learning_rate": 0.00011145370421319377,
"loss": 7.1818,
"step": 341
},
{
"epoch": 0.55,
"grad_norm": 7.09375,
"learning_rate": 0.00011085332234173664,
"loss": 7.589,
"step": 342
},
{
"epoch": 0.55,
"grad_norm": 12.3125,
"learning_rate": 0.00011025254432504233,
"loss": 7.8762,
"step": 343
},
{
"epoch": 0.55,
"grad_norm": 6.5625,
"learning_rate": 0.00010965139209145152,
"loss": 7.3293,
"step": 344
},
{
"epoch": 0.56,
"grad_norm": 5.71875,
"learning_rate": 0.0001090498875829638,
"loss": 7.4994,
"step": 345
},
{
"epoch": 0.56,
"grad_norm": 5.625,
"learning_rate": 0.00010844805275443673,
"loss": 7.185,
"step": 346
},
{
"epoch": 0.56,
"grad_norm": 6.6875,
"learning_rate": 0.0001078459095727845,
"loss": 7.1749,
"step": 347
},
{
"epoch": 0.56,
"grad_norm": 17.25,
"learning_rate": 0.00010724348001617625,
"loss": 7.6093,
"step": 348
},
{
"epoch": 0.56,
"grad_norm": 7.0625,
"learning_rate": 0.00010664078607323367,
"loss": 7.1092,
"step": 349
},
{
"epoch": 0.56,
"grad_norm": 5.34375,
"learning_rate": 0.00010603784974222861,
"loss": 7.2545,
"step": 350
},
{
"epoch": 0.57,
"grad_norm": 5.28125,
"learning_rate": 0.00010543469303028002,
"loss": 7.2538,
"step": 351
},
{
"epoch": 0.57,
"grad_norm": 17.25,
"learning_rate": 0.00010483133795255071,
"loss": 7.9843,
"step": 352
},
{
"epoch": 0.57,
"grad_norm": 9.875,
"learning_rate": 0.0001042278065314439,
"loss": 7.931,
"step": 353
},
{
"epoch": 0.57,
"grad_norm": 5.5,
"learning_rate": 0.00010362412079579924,
"loss": 7.1528,
"step": 354
},
{
"epoch": 0.57,
"grad_norm": 7.875,
"learning_rate": 0.0001030203027800889,
"loss": 7.2796,
"step": 355
},
{
"epoch": 0.57,
"grad_norm": 5.84375,
"learning_rate": 0.00010241637452361323,
"loss": 7.4538,
"step": 356
},
{
"epoch": 0.58,
"grad_norm": 4.15625,
"learning_rate": 0.0001018123580696964,
"loss": 7.3754,
"step": 357
},
{
"epoch": 0.58,
"grad_norm": 4.03125,
"learning_rate": 0.00010120827546488174,
"loss": 7.5391,
"step": 358
},
{
"epoch": 0.58,
"grad_norm": 9.375,
"learning_rate": 0.00010060414875812709,
"loss": 7.0525,
"step": 359
},
{
"epoch": 0.58,
"grad_norm": 5.28125,
"learning_rate": 0.0001,
"loss": 7.2558,
"step": 360
},
{
"epoch": 0.58,
"grad_norm": 11.6875,
"learning_rate": 9.939585124187292e-05,
"loss": 7.33,
"step": 361
},
{
"epoch": 0.58,
"grad_norm": 5.5625,
"learning_rate": 9.879172453511827e-05,
"loss": 7.0643,
"step": 362
},
{
"epoch": 0.59,
"grad_norm": 5.90625,
"learning_rate": 9.818764193030363e-05,
"loss": 7.3601,
"step": 363
},
{
"epoch": 0.59,
"grad_norm": 8.1875,
"learning_rate": 9.75836254763868e-05,
"loss": 7.1155,
"step": 364
},
{
"epoch": 0.59,
"grad_norm": 4.125,
"learning_rate": 9.697969721991114e-05,
"loss": 7.1084,
"step": 365
},
{
"epoch": 0.59,
"grad_norm": 5.8125,
"learning_rate": 9.63758792042008e-05,
"loss": 7.2947,
"step": 366
},
{
"epoch": 0.59,
"grad_norm": 4.1875,
"learning_rate": 9.577219346855613e-05,
"loss": 7.1426,
"step": 367
},
{
"epoch": 0.59,
"grad_norm": 4.8125,
"learning_rate": 9.516866204744931e-05,
"loss": 7.317,
"step": 368
},
{
"epoch": 0.6,
"grad_norm": 4.0625,
"learning_rate": 9.456530696971999e-05,
"loss": 7.3881,
"step": 369
},
{
"epoch": 0.6,
"grad_norm": 8.625,
"learning_rate": 9.396215025777139e-05,
"loss": 7.28,
"step": 370
},
{
"epoch": 0.6,
"grad_norm": 5.40625,
"learning_rate": 9.335921392676631e-05,
"loss": 7.2141,
"step": 371
},
{
"epoch": 0.6,
"grad_norm": 8.25,
"learning_rate": 9.275651998382377e-05,
"loss": 7.2653,
"step": 372
},
{
"epoch": 0.6,
"grad_norm": 81.5,
"learning_rate": 9.215409042721552e-05,
"loss": 7.1614,
"step": 373
},
{
"epoch": 0.6,
"grad_norm": 3.65625,
"learning_rate": 9.155194724556331e-05,
"loss": 7.1929,
"step": 374
},
{
"epoch": 0.6,
"grad_norm": 6.1875,
"learning_rate": 9.095011241703623e-05,
"loss": 7.0878,
"step": 375
},
{
"epoch": 0.61,
"grad_norm": 4.5,
"learning_rate": 9.034860790854849e-05,
"loss": 7.4207,
"step": 376
},
{
"epoch": 0.61,
"grad_norm": 4.65625,
"learning_rate": 8.974745567495768e-05,
"loss": 7.1018,
"step": 377
},
{
"epoch": 0.61,
"grad_norm": 7.65625,
"learning_rate": 8.914667765826338e-05,
"loss": 7.3069,
"step": 378
},
{
"epoch": 0.61,
"grad_norm": 6.125,
"learning_rate": 8.854629578680624e-05,
"loss": 7.4862,
"step": 379
},
{
"epoch": 0.61,
"grad_norm": 4.71875,
"learning_rate": 8.79463319744677e-05,
"loss": 7.2446,
"step": 380
},
{
"epoch": 0.61,
"grad_norm": 188.0,
"learning_rate": 8.73468081198701e-05,
"loss": 7.2829,
"step": 381
},
{
"epoch": 0.62,
"grad_norm": 4.5625,
"learning_rate": 8.674774610557728e-05,
"loss": 7.3516,
"step": 382
},
{
"epoch": 0.62,
"grad_norm": 7.78125,
"learning_rate": 8.614916779729603e-05,
"loss": 7.1598,
"step": 383
},
{
"epoch": 0.62,
"grad_norm": 6.65625,
"learning_rate": 8.55510950430779e-05,
"loss": 7.307,
"step": 384
},
{
"epoch": 0.62,
"grad_norm": 5.5,
"learning_rate": 8.495354967252169e-05,
"loss": 7.3374,
"step": 385
},
{
"epoch": 0.62,
"grad_norm": 6.75,
"learning_rate": 8.435655349597689e-05,
"loss": 7.5147,
"step": 386
},
{
"epoch": 0.62,
"grad_norm": 3.9375,
"learning_rate": 8.37601283037474e-05,
"loss": 7.6806,
"step": 387
},
{
"epoch": 0.63,
"grad_norm": 8.1875,
"learning_rate": 8.316429586529615e-05,
"loss": 7.2063,
"step": 388
},
{
"epoch": 0.63,
"grad_norm": 3.703125,
"learning_rate": 8.256907792845072e-05,
"loss": 7.2237,
"step": 389
},
{
"epoch": 0.63,
"grad_norm": 7.375,
"learning_rate": 8.197449621860943e-05,
"loss": 6.9902,
"step": 390
},
{
"epoch": 0.63,
"grad_norm": 8.5,
"learning_rate": 8.138057243794833e-05,
"loss": 7.2775,
"step": 391
},
{
"epoch": 0.63,
"grad_norm": 5.65625,
"learning_rate": 8.078732826462915e-05,
"loss": 7.3332,
"step": 392
},
{
"epoch": 0.63,
"grad_norm": 7.71875,
"learning_rate": 8.019478535200806e-05,
"loss": 7.3288,
"step": 393
},
{
"epoch": 0.64,
"grad_norm": 3.703125,
"learning_rate": 7.960296532784515e-05,
"loss": 7.0595,
"step": 394
},
{
"epoch": 0.64,
"grad_norm": 108.0,
"learning_rate": 7.901188979351526e-05,
"loss": 7.1364,
"step": 395
},
{
"epoch": 0.64,
"grad_norm": 6.9375,
"learning_rate": 7.84215803232194e-05,
"loss": 7.3006,
"step": 396
},
{
"epoch": 0.64,
"grad_norm": 5.34375,
"learning_rate": 7.78320584631973e-05,
"loss": 7.1831,
"step": 397
},
{
"epoch": 0.64,
"grad_norm": 5.6875,
"learning_rate": 7.7243345730941e-05,
"loss": 7.1149,
"step": 398
},
{
"epoch": 0.64,
"grad_norm": 7.65625,
"learning_rate": 7.66554636144095e-05,
"loss": 7.1149,
"step": 399
},
{
"epoch": 0.65,
"grad_norm": 7.375,
"learning_rate": 7.606843357124426e-05,
"loss": 7.2536,
"step": 400
},
{
"epoch": 0.65,
"grad_norm": 8.5625,
"learning_rate": 7.548227702798624e-05,
"loss": 7.1493,
"step": 401
},
{
"epoch": 0.65,
"grad_norm": 5.5,
"learning_rate": 7.489701537929384e-05,
"loss": 7.1997,
"step": 402
},
{
"epoch": 0.65,
"grad_norm": 9.25,
"learning_rate": 7.431266998716171e-05,
"loss": 7.3595,
"step": 403
},
{
"epoch": 0.65,
"grad_norm": 9.0,
"learning_rate": 7.372926218014131e-05,
"loss": 7.2021,
"step": 404
},
{
"epoch": 0.65,
"grad_norm": 5.1875,
"learning_rate": 7.314681325256232e-05,
"loss": 7.068,
"step": 405
},
{
"epoch": 0.65,
"grad_norm": 5.0625,
"learning_rate": 7.256534446375542e-05,
"loss": 7.0883,
"step": 406
},
{
"epoch": 0.66,
"grad_norm": 6.8125,
"learning_rate": 7.198487703727632e-05,
"loss": 7.1549,
"step": 407
},
{
"epoch": 0.66,
"grad_norm": 8.8125,
"learning_rate": 7.14054321601311e-05,
"loss": 7.2792,
"step": 408
},
{
"epoch": 0.66,
"grad_norm": 9.625,
"learning_rate": 7.082703098200282e-05,
"loss": 6.8635,
"step": 409
},
{
"epoch": 0.66,
"grad_norm": 5.1875,
"learning_rate": 7.024969461447972e-05,
"loss": 7.2292,
"step": 410
},
{
"epoch": 0.66,
"grad_norm": 6.3125,
"learning_rate": 6.967344413028452e-05,
"loss": 7.1733,
"step": 411
},
{
"epoch": 0.66,
"grad_norm": 6.9375,
"learning_rate": 6.909830056250527e-05,
"loss": 7.4127,
"step": 412
},
{
"epoch": 0.67,
"grad_norm": 8.0,
"learning_rate": 6.852428490382773e-05,
"loss": 7.2303,
"step": 413
},
{
"epoch": 0.67,
"grad_norm": 4.1875,
"learning_rate": 6.795141810576906e-05,
"loss": 7.0902,
"step": 414
},
{
"epoch": 0.67,
"grad_norm": 9.5625,
"learning_rate": 6.73797210779131e-05,
"loss": 7.3092,
"step": 415
},
{
"epoch": 0.67,
"grad_norm": 10.3125,
"learning_rate": 6.680921468714719e-05,
"loss": 7.0385,
"step": 416
},
{
"epoch": 0.67,
"grad_norm": 6.15625,
"learning_rate": 6.623991975690051e-05,
"loss": 7.2035,
"step": 417
},
{
"epoch": 0.67,
"grad_norm": 7.8125,
"learning_rate": 6.567185706638417e-05,
"loss": 7.327,
"step": 418
},
{
"epoch": 0.68,
"grad_norm": 6.875,
"learning_rate": 6.510504734983249e-05,
"loss": 7.2225,
"step": 419
},
{
"epoch": 0.68,
"grad_norm": 8.3125,
"learning_rate": 6.453951129574644e-05,
"loss": 6.8114,
"step": 420
},
{
"epoch": 0.68,
"grad_norm": 5.84375,
"learning_rate": 6.397526954613839e-05,
"loss": 7.0789,
"step": 421
},
{
"epoch": 0.68,
"grad_norm": 7.40625,
"learning_rate": 6.341234269577879e-05,
"loss": 7.179,
"step": 422
},
{
"epoch": 0.68,
"grad_norm": 4.6875,
"learning_rate": 6.285075129144428e-05,
"loss": 7.2077,
"step": 423
},
{
"epoch": 0.68,
"grad_norm": 4.875,
"learning_rate": 6.229051583116796e-05,
"loss": 7.2929,
"step": 424
},
{
"epoch": 0.69,
"grad_norm": 7.3125,
"learning_rate": 6.173165676349103e-05,
"loss": 7.2718,
"step": 425
},
{
"epoch": 0.69,
"grad_norm": 3.625,
"learning_rate": 6.117419448671651e-05,
"loss": 7.2209,
"step": 426
},
{
"epoch": 0.69,
"grad_norm": 8.0625,
"learning_rate": 6.0618149348164696e-05,
"loss": 7.1679,
"step": 427
},
{
"epoch": 0.69,
"grad_norm": 4.0625,
"learning_rate": 6.006354164343046e-05,
"loss": 7.1983,
"step": 428
},
{
"epoch": 0.69,
"grad_norm": 3.6875,
"learning_rate": 5.9510391615642466e-05,
"loss": 7.1982,
"step": 429
},
{
"epoch": 0.69,
"grad_norm": 6.28125,
"learning_rate": 5.8958719454724346e-05,
"loss": 7.2756,
"step": 430
},
{
"epoch": 0.7,
"grad_norm": 6.53125,
"learning_rate": 5.840854529665767e-05,
"loss": 7.0365,
"step": 431
},
{
"epoch": 0.7,
"grad_norm": 7.625,
"learning_rate": 5.785988922274711e-05,
"loss": 7.34,
"step": 432
},
{
"epoch": 0.7,
"grad_norm": 7.53125,
"learning_rate": 5.7312771258887386e-05,
"loss": 7.1793,
"step": 433
},
{
"epoch": 0.7,
"grad_norm": 5.5,
"learning_rate": 5.676721137483225e-05,
"loss": 7.227,
"step": 434
},
{
"epoch": 0.7,
"grad_norm": 10.25,
"learning_rate": 5.622322948346594e-05,
"loss": 7.2284,
"step": 435
},
{
"epoch": 0.7,
"grad_norm": 5.21875,
"learning_rate": 5.568084544007588e-05,
"loss": 7.6541,
"step": 436
},
{
"epoch": 0.7,
"grad_norm": 7.625,
"learning_rate": 5.5140079041628214e-05,
"loss": 7.5761,
"step": 437
},
{
"epoch": 0.71,
"grad_norm": 4.3125,
"learning_rate": 5.4600950026045326e-05,
"loss": 7.1913,
"step": 438
},
{
"epoch": 0.71,
"grad_norm": 5.03125,
"learning_rate": 5.406347807148515e-05,
"loss": 7.2958,
"step": 439
},
{
"epoch": 0.71,
"grad_norm": 5.65625,
"learning_rate": 5.3527682795623146e-05,
"loss": 7.2452,
"step": 440
},
{
"epoch": 0.71,
"grad_norm": 6.15625,
"learning_rate": 5.2993583754936126e-05,
"loss": 7.1716,
"step": 441
},
{
"epoch": 0.71,
"grad_norm": 27.75,
"learning_rate": 5.246120044398839e-05,
"loss": 7.2082,
"step": 442
},
{
"epoch": 0.71,
"grad_norm": 6.8125,
"learning_rate": 5.193055229472045e-05,
"loss": 7.09,
"step": 443
},
{
"epoch": 0.72,
"grad_norm": 4.5,
"learning_rate": 5.14016586757394e-05,
"loss": 7.2108,
"step": 444
},
{
"epoch": 0.72,
"grad_norm": 4.25,
"learning_rate": 5.087453889161229e-05,
"loss": 7.3688,
"step": 445
},
{
"epoch": 0.72,
"grad_norm": 10.75,
"learning_rate": 5.0349212182161254e-05,
"loss": 7.3458,
"step": 446
},
{
"epoch": 0.72,
"grad_norm": 7.875,
"learning_rate": 4.98256977217614e-05,
"loss": 7.0711,
"step": 447
},
{
"epoch": 0.72,
"grad_norm": 7.5,
"learning_rate": 4.9304014618640995e-05,
"loss": 7.1625,
"step": 448
},
{
"epoch": 0.72,
"grad_norm": 7.15625,
"learning_rate": 4.87841819141838e-05,
"loss": 6.8236,
"step": 449
},
{
"epoch": 0.73,
"grad_norm": 53.75,
"learning_rate": 4.826621858223431e-05,
"loss": 7.1651,
"step": 450
},
{
"epoch": 0.73,
"grad_norm": 9.0,
"learning_rate": 4.7750143528405126e-05,
"loss": 7.2596,
"step": 451
},
{
"epoch": 0.73,
"grad_norm": 5.21875,
"learning_rate": 4.723597558938672e-05,
"loss": 7.1167,
"step": 452
},
{
"epoch": 0.73,
"grad_norm": 22.25,
"learning_rate": 4.672373353226023e-05,
"loss": 7.1937,
"step": 453
},
{
"epoch": 0.73,
"grad_norm": 6.25,
"learning_rate": 4.6213436053812144e-05,
"loss": 7.1933,
"step": 454
},
{
"epoch": 0.73,
"grad_norm": 5.59375,
"learning_rate": 4.5705101779852135e-05,
"loss": 7.4558,
"step": 455
},
{
"epoch": 0.74,
"grad_norm": 7.125,
"learning_rate": 4.519874926453302e-05,
"loss": 6.9894,
"step": 456
},
{
"epoch": 0.74,
"grad_norm": 7.40625,
"learning_rate": 4.469439698967359e-05,
"loss": 7.0223,
"step": 457
},
{
"epoch": 0.74,
"grad_norm": 7.5625,
"learning_rate": 4.419206336408418e-05,
"loss": 7.1284,
"step": 458
},
{
"epoch": 0.74,
"grad_norm": 6.0625,
"learning_rate": 4.3691766722894435e-05,
"loss": 7.3841,
"step": 459
},
{
"epoch": 0.74,
"grad_norm": 6.21875,
"learning_rate": 4.3193525326884435e-05,
"loss": 7.3895,
"step": 460
},
{
"epoch": 0.74,
"grad_norm": 7.5625,
"learning_rate": 4.26973573618179e-05,
"loss": 7.3107,
"step": 461
},
{
"epoch": 0.75,
"grad_norm": 10.75,
"learning_rate": 4.220328093777851e-05,
"loss": 7.2527,
"step": 462
},
{
"epoch": 0.75,
"grad_norm": 8.6875,
"learning_rate": 4.1711314088509e-05,
"loss": 7.1957,
"step": 463
},
{
"epoch": 0.75,
"grad_norm": 5.0625,
"learning_rate": 4.12214747707527e-05,
"loss": 7.1818,
"step": 464
},
{
"epoch": 0.75,
"grad_norm": 4.59375,
"learning_rate": 4.0733780863598335e-05,
"loss": 7.286,
"step": 465
},
{
"epoch": 0.75,
"eval_loss": 7.306603908538818,
"eval_runtime": 1.801,
"eval_samples_per_second": 18.878,
"eval_steps_per_second": 18.878,
"step": 465
},
{
"epoch": 0.75,
"grad_norm": 5.6875,
"learning_rate": 4.0248250167827275e-05,
"loss": 7.279,
"step": 466
},
{
"epoch": 0.75,
"grad_norm": 26.125,
"learning_rate": 3.976490040526394e-05,
"loss": 7.1727,
"step": 467
},
{
"epoch": 0.75,
"grad_norm": 7.5625,
"learning_rate": 3.9283749218128885e-05,
"loss": 7.3203,
"step": 468
},
{
"epoch": 0.76,
"grad_norm": 5.46875,
"learning_rate": 3.88048141683948e-05,
"loss": 7.1131,
"step": 469
},
{
"epoch": 0.76,
"grad_norm": 6.96875,
"learning_rate": 3.832811273714569e-05,
"loss": 7.4104,
"step": 470
},
{
"epoch": 0.76,
"grad_norm": 6.09375,
"learning_rate": 3.785366232393861e-05,
"loss": 7.0627,
"step": 471
},
{
"epoch": 0.76,
"grad_norm": 88.5,
"learning_rate": 3.738148024616863e-05,
"loss": 7.2023,
"step": 472
},
{
"epoch": 0.76,
"grad_norm": 76.0,
"learning_rate": 3.691158373843694e-05,
"loss": 7.2856,
"step": 473
},
{
"epoch": 0.76,
"grad_norm": 6.46875,
"learning_rate": 3.644398995192147e-05,
"loss": 7.5069,
"step": 474
},
{
"epoch": 0.77,
"grad_norm": 4.28125,
"learning_rate": 3.597871595375121e-05,
"loss": 7.2348,
"step": 475
},
{
"epoch": 0.77,
"grad_norm": 6.15625,
"learning_rate": 3.5515778726382966e-05,
"loss": 7.3896,
"step": 476
},
{
"epoch": 0.77,
"grad_norm": 7.9375,
"learning_rate": 3.5055195166981645e-05,
"loss": 7.3101,
"step": 477
},
{
"epoch": 0.77,
"grad_norm": 6.1875,
"learning_rate": 3.459698208680359e-05,
"loss": 7.4878,
"step": 478
},
{
"epoch": 0.77,
"grad_norm": 9.25,
"learning_rate": 3.4141156210582756e-05,
"loss": 7.7337,
"step": 479
},
{
"epoch": 0.77,
"grad_norm": 7.71875,
"learning_rate": 3.36877341759205e-05,
"loss": 7.576,
"step": 480
},
{
"epoch": 0.78,
"grad_norm": 7.28125,
"learning_rate": 3.3236732532678096e-05,
"loss": 7.3218,
"step": 481
},
{
"epoch": 0.78,
"grad_norm": 4.125,
"learning_rate": 3.2788167742372725e-05,
"loss": 7.2678,
"step": 482
},
{
"epoch": 0.78,
"grad_norm": 4.8125,
"learning_rate": 3.234205617757686e-05,
"loss": 7.0266,
"step": 483
},
{
"epoch": 0.78,
"grad_norm": 9.75,
"learning_rate": 3.1898414121320276e-05,
"loss": 6.8119,
"step": 484
},
{
"epoch": 0.78,
"grad_norm": 11.9375,
"learning_rate": 3.1457257766496015e-05,
"loss": 7.1597,
"step": 485
},
{
"epoch": 0.78,
"grad_norm": 7.71875,
"learning_rate": 3.101860321526924e-05,
"loss": 7.6388,
"step": 486
},
{
"epoch": 0.79,
"grad_norm": 4.125,
"learning_rate": 3.0582466478489455e-05,
"loss": 7.367,
"step": 487
},
{
"epoch": 0.79,
"grad_norm": 4.0625,
"learning_rate": 3.0148863475106314e-05,
"loss": 7.2989,
"step": 488
},
{
"epoch": 0.79,
"grad_norm": 4.8125,
"learning_rate": 2.9717810031588277e-05,
"loss": 7.2247,
"step": 489
},
{
"epoch": 0.79,
"grad_norm": 8.6875,
"learning_rate": 2.9289321881345254e-05,
"loss": 7.1624,
"step": 490
},
{
"epoch": 0.79,
"grad_norm": 4.375,
"learning_rate": 2.886341466415412e-05,
"loss": 7.1773,
"step": 491
},
{
"epoch": 0.79,
"grad_norm": 4.375,
"learning_rate": 2.84401039255879e-05,
"loss": 7.2401,
"step": 492
},
{
"epoch": 0.8,
"grad_norm": 10.125,
"learning_rate": 2.8019405116448516e-05,
"loss": 7.462,
"step": 493
},
{
"epoch": 0.8,
"grad_norm": 8.1875,
"learning_rate": 2.7601333592202583e-05,
"loss": 6.8809,
"step": 494
},
{
"epoch": 0.8,
"grad_norm": 7.1875,
"learning_rate": 2.7185904612421176e-05,
"loss": 7.1306,
"step": 495
},
{
"epoch": 0.8,
"grad_norm": 6144.0,
"learning_rate": 2.677313334022268e-05,
"loss": 7.896,
"step": 496
},
{
"epoch": 0.8,
"grad_norm": 6.40625,
"learning_rate": 2.6363034841719392e-05,
"loss": 7.2291,
"step": 497
},
{
"epoch": 0.8,
"grad_norm": 8.6875,
"learning_rate": 2.59556240854677e-05,
"loss": 7.538,
"step": 498
},
{
"epoch": 0.8,
"grad_norm": 8.1875,
"learning_rate": 2.5550915941921526e-05,
"loss": 7.2563,
"step": 499
},
{
"epoch": 0.81,
"grad_norm": 588.0,
"learning_rate": 2.514892518288988e-05,
"loss": 7.2489,
"step": 500
},
{
"epoch": 0.81,
"grad_norm": 8.1875,
"learning_rate": 2.4749666480997337e-05,
"loss": 6.9217,
"step": 501
},
{
"epoch": 0.81,
"grad_norm": 6.96875,
"learning_rate": 2.4353154409148637e-05,
"loss": 7.2923,
"step": 502
},
{
"epoch": 0.81,
"grad_norm": 6.625,
"learning_rate": 2.3959403439996907e-05,
"loss": 7.3927,
"step": 503
},
{
"epoch": 0.81,
"grad_norm": 5.53125,
"learning_rate": 2.356842794541516e-05,
"loss": 7.0782,
"step": 504
},
{
"epoch": 0.81,
"grad_norm": 4.78125,
"learning_rate": 2.318024219597196e-05,
"loss": 7.1464,
"step": 505
},
{
"epoch": 0.82,
"grad_norm": 55.75,
"learning_rate": 2.2794860360410342e-05,
"loss": 6.8995,
"step": 506
},
{
"epoch": 0.82,
"grad_norm": 10.4375,
"learning_rate": 2.241229650513077e-05,
"loss": 7.1063,
"step": 507
},
{
"epoch": 0.82,
"grad_norm": 6.375,
"learning_rate": 2.2032564593677774e-05,
"loss": 7.1596,
"step": 508
},
{
"epoch": 0.82,
"grad_norm": 11.6875,
"learning_rate": 2.165567848623009e-05,
"loss": 7.2437,
"step": 509
},
{
"epoch": 0.82,
"grad_norm": 4.53125,
"learning_rate": 2.1281651939094992e-05,
"loss": 7.1674,
"step": 510
},
{
"epoch": 0.82,
"grad_norm": 3.984375,
"learning_rate": 2.0910498604205986e-05,
"loss": 7.1779,
"step": 511
},
{
"epoch": 0.83,
"grad_norm": 8.3125,
"learning_rate": 2.0542232028624586e-05,
"loss": 7.0682,
"step": 512
},
{
"epoch": 0.83,
"grad_norm": 7.8125,
"learning_rate": 2.0176865654045974e-05,
"loss": 7.4579,
"step": 513
},
{
"epoch": 0.83,
"grad_norm": 6.53125,
"learning_rate": 1.981441281630816e-05,
"loss": 7.3214,
"step": 514
},
{
"epoch": 0.83,
"grad_norm": 11.6875,
"learning_rate": 1.94548867449054e-05,
"loss": 7.0165,
"step": 515
},
{
"epoch": 0.83,
"grad_norm": 28.75,
"learning_rate": 1.9098300562505266e-05,
"loss": 7.1816,
"step": 516
},
{
"epoch": 0.83,
"grad_norm": 5.78125,
"learning_rate": 1.8744667284469575e-05,
"loss": 7.3203,
"step": 517
},
{
"epoch": 0.84,
"grad_norm": 4.9375,
"learning_rate": 1.8393999818379525e-05,
"loss": 7.1352,
"step": 518
},
{
"epoch": 0.84,
"grad_norm": 7.4375,
"learning_rate": 1.804631096356435e-05,
"loss": 7.1253,
"step": 519
},
{
"epoch": 0.84,
"grad_norm": 7.65625,
"learning_rate": 1.7701613410634365e-05,
"loss": 6.9686,
"step": 520
},
{
"epoch": 0.84,
"grad_norm": 38.5,
"learning_rate": 1.735991974101756e-05,
"loss": 7.4466,
"step": 521
},
{
"epoch": 0.84,
"grad_norm": 5.125,
"learning_rate": 1.7021242426500493e-05,
"loss": 7.0422,
"step": 522
},
{
"epoch": 0.84,
"grad_norm": 8.0625,
"learning_rate": 1.6685593828773095e-05,
"loss": 7.2777,
"step": 523
},
{
"epoch": 0.85,
"grad_norm": 4.4375,
"learning_rate": 1.6352986198977325e-05,
"loss": 7.1547,
"step": 524
},
{
"epoch": 0.85,
"grad_norm": 6.3125,
"learning_rate": 1.6023431677260214e-05,
"loss": 6.975,
"step": 525
},
{
"epoch": 0.85,
"grad_norm": 7.5,
"learning_rate": 1.5696942292330576e-05,
"loss": 7.4417,
"step": 526
},
{
"epoch": 0.85,
"grad_norm": 5.625,
"learning_rate": 1.5373529961019974e-05,
"loss": 7.2602,
"step": 527
},
{
"epoch": 0.85,
"grad_norm": 4.78125,
"learning_rate": 1.5053206487847914e-05,
"loss": 7.2853,
"step": 528
},
{
"epoch": 0.85,
"grad_norm": 8.4375,
"learning_rate": 1.4735983564590783e-05,
"loss": 7.3027,
"step": 529
},
{
"epoch": 0.85,
"grad_norm": 183.0,
"learning_rate": 1.442187276985526e-05,
"loss": 7.3557,
"step": 530
},
{
"epoch": 0.86,
"grad_norm": 6.96875,
"learning_rate": 1.4110885568655564e-05,
"loss": 7.5958,
"step": 531
},
{
"epoch": 0.86,
"grad_norm": 7.65625,
"learning_rate": 1.3803033311995072e-05,
"loss": 6.8963,
"step": 532
},
{
"epoch": 0.86,
"grad_norm": 7.125,
"learning_rate": 1.3498327236452013e-05,
"loss": 7.6564,
"step": 533
},
{
"epoch": 0.86,
"grad_norm": 7.03125,
"learning_rate": 1.3196778463769255e-05,
"loss": 7.232,
"step": 534
},
{
"epoch": 0.86,
"grad_norm": 3.984375,
"learning_rate": 1.2898398000448443e-05,
"loss": 7.1699,
"step": 535
},
{
"epoch": 0.86,
"grad_norm": 6.75,
"learning_rate": 1.260319673734821e-05,
"loss": 7.1442,
"step": 536
},
{
"epoch": 0.87,
"grad_norm": 6.0625,
"learning_rate": 1.2311185449286677e-05,
"loss": 7.0898,
"step": 537
},
{
"epoch": 0.87,
"grad_norm": 9.75,
"learning_rate": 1.2022374794648228e-05,
"loss": 7.1733,
"step": 538
},
{
"epoch": 0.87,
"grad_norm": 9.3125,
"learning_rate": 1.1736775314994342e-05,
"loss": 6.8209,
"step": 539
},
{
"epoch": 0.87,
"grad_norm": 9.5625,
"learning_rate": 1.1454397434679021e-05,
"loss": 7.2807,
"step": 540
},
{
"epoch": 0.87,
"grad_norm": 7.8125,
"learning_rate": 1.1175251460468117e-05,
"loss": 7.2368,
"step": 541
},
{
"epoch": 0.87,
"grad_norm": 6.34375,
"learning_rate": 1.0899347581163221e-05,
"loss": 7.4665,
"step": 542
},
{
"epoch": 0.88,
"grad_norm": 5.9375,
"learning_rate": 1.062669586722983e-05,
"loss": 7.2071,
"step": 543
},
{
"epoch": 0.88,
"grad_norm": 6.03125,
"learning_rate": 1.0357306270429624e-05,
"loss": 7.148,
"step": 544
},
{
"epoch": 0.88,
"grad_norm": 4.5,
"learning_rate": 1.0091188623457415e-05,
"loss": 7.1838,
"step": 545
},
{
"epoch": 0.88,
"grad_norm": 6.6875,
"learning_rate": 9.828352639582072e-06,
"loss": 7.2099,
"step": 546
},
{
"epoch": 0.88,
"grad_norm": 11.875,
"learning_rate": 9.568807912292077e-06,
"loss": 7.2912,
"step": 547
},
{
"epoch": 0.88,
"grad_norm": 8.875,
"learning_rate": 9.31256391494546e-06,
"loss": 7.1883,
"step": 548
},
{
"epoch": 0.89,
"grad_norm": 7.4375,
"learning_rate": 9.05963000042378e-06,
"loss": 7.5636,
"step": 549
},
{
"epoch": 0.89,
"grad_norm": 9.6875,
"learning_rate": 8.810015400790994e-06,
"loss": 7.1998,
"step": 550
},
{
"epoch": 0.89,
"grad_norm": 9.1875,
"learning_rate": 8.563729226956319e-06,
"loss": 7.3323,
"step": 551
},
{
"epoch": 0.89,
"grad_norm": 9.375,
"learning_rate": 8.32078046834176e-06,
"loss": 7.272,
"step": 552
},
{
"epoch": 0.89,
"grad_norm": 7.5625,
"learning_rate": 8.081177992554013e-06,
"loss": 7.573,
"step": 553
},
{
"epoch": 0.89,
"grad_norm": 4.375,
"learning_rate": 7.844930545060703e-06,
"loss": 7.2687,
"step": 554
},
{
"epoch": 0.9,
"grad_norm": 7.15625,
"learning_rate": 7.612046748871327e-06,
"loss": 7.0265,
"step": 555
},
{
"epoch": 0.9,
"grad_norm": 9.875,
"learning_rate": 7.382535104222366e-06,
"loss": 7.5013,
"step": 556
},
{
"epoch": 0.9,
"grad_norm": 7.0625,
"learning_rate": 7.156403988267069e-06,
"loss": 7.6454,
"step": 557
},
{
"epoch": 0.9,
"grad_norm": 7.96875,
"learning_rate": 6.9336616547697965e-06,
"loss": 7.377,
"step": 558
},
{
"epoch": 0.9,
"grad_norm": 6.53125,
"learning_rate": 6.714316233804574e-06,
"loss": 7.2638,
"step": 559
},
{
"epoch": 0.9,
"grad_norm": 6.5625,
"learning_rate": 6.498375731458528e-06,
"loss": 7.3772,
"step": 560
},
{
"epoch": 0.9,
"grad_norm": 7.71875,
"learning_rate": 6.28584802953951e-06,
"loss": 7.4035,
"step": 561
},
{
"epoch": 0.91,
"grad_norm": 5.0,
"learning_rate": 6.076740885288479e-06,
"loss": 7.2213,
"step": 562
},
{
"epoch": 0.91,
"grad_norm": 5.5625,
"learning_rate": 5.8710619310964445e-06,
"loss": 7.3617,
"step": 563
},
{
"epoch": 0.91,
"grad_norm": 5.8125,
"learning_rate": 5.668818674225685e-06,
"loss": 7.4216,
"step": 564
},
{
"epoch": 0.91,
"grad_norm": 4.65625,
"learning_rate": 5.470018496535967e-06,
"loss": 7.2892,
"step": 565
},
{
"epoch": 0.91,
"grad_norm": 13.75,
"learning_rate": 5.274668654214932e-06,
"loss": 7.1227,
"step": 566
},
{
"epoch": 0.91,
"grad_norm": 6.5,
"learning_rate": 5.08277627751329e-06,
"loss": 7.4076,
"step": 567
},
{
"epoch": 0.92,
"grad_norm": 5.65625,
"learning_rate": 4.8943483704846475e-06,
"loss": 7.0772,
"step": 568
},
{
"epoch": 0.92,
"grad_norm": 7.1875,
"learning_rate": 4.709391810729713e-06,
"loss": 6.7843,
"step": 569
},
{
"epoch": 0.92,
"grad_norm": 4.40625,
"learning_rate": 4.527913349145441e-06,
"loss": 7.2041,
"step": 570
},
{
"epoch": 0.92,
"grad_norm": 6.25,
"learning_rate": 4.349919609678455e-06,
"loss": 7.3247,
"step": 571
},
{
"epoch": 0.92,
"grad_norm": 4.75,
"learning_rate": 4.175417089083378e-06,
"loss": 7.1626,
"step": 572
},
{
"epoch": 0.92,
"grad_norm": 5.375,
"learning_rate": 4.004412156685711e-06,
"loss": 7.3347,
"step": 573
},
{
"epoch": 0.93,
"grad_norm": 8.6875,
"learning_rate": 3.836911054149239e-06,
"loss": 6.8028,
"step": 574
},
{
"epoch": 0.93,
"grad_norm": 5.34375,
"learning_rate": 3.6729198952483724e-06,
"loss": 7.1392,
"step": 575
},
{
"epoch": 0.93,
"grad_norm": 6.3125,
"learning_rate": 3.512444665644865e-06,
"loss": 7.2341,
"step": 576
},
{
"epoch": 0.93,
"grad_norm": 7.6875,
"learning_rate": 3.355491222669371e-06,
"loss": 7.2214,
"step": 577
},
{
"epoch": 0.93,
"grad_norm": 6.3125,
"learning_rate": 3.202065295107726e-06,
"loss": 7.2928,
"step": 578
},
{
"epoch": 0.93,
"grad_norm": 9.5,
"learning_rate": 3.052172482991711e-06,
"loss": 7.2009,
"step": 579
},
{
"epoch": 0.94,
"grad_norm": 4.125,
"learning_rate": 2.905818257394799e-06,
"loss": 7.1589,
"step": 580
},
{
"epoch": 0.94,
"grad_norm": 4.09375,
"learning_rate": 2.7630079602323442e-06,
"loss": 7.1614,
"step": 581
},
{
"epoch": 0.94,
"grad_norm": 5.59375,
"learning_rate": 2.6237468040666512e-06,
"loss": 7.1077,
"step": 582
},
{
"epoch": 0.94,
"grad_norm": 5.875,
"learning_rate": 2.4880398719167586e-06,
"loss": 7.1149,
"step": 583
},
{
"epoch": 0.94,
"grad_norm": 5.0,
"learning_rate": 2.3558921170727888e-06,
"loss": 7.4501,
"step": 584
},
{
"epoch": 0.94,
"grad_norm": 6.75,
"learning_rate": 2.2273083629153147e-06,
"loss": 7.0099,
"step": 585
},
{
"epoch": 0.95,
"grad_norm": 5.6875,
"learning_rate": 2.1022933027391555e-06,
"loss": 7.5229,
"step": 586
},
{
"epoch": 0.95,
"grad_norm": 5.3125,
"learning_rate": 1.9808514995821593e-06,
"loss": 7.1471,
"step": 587
},
{
"epoch": 0.95,
"grad_norm": 7.5,
"learning_rate": 1.8629873860586566e-06,
"loss": 7.0446,
"step": 588
},
{
"epoch": 0.95,
"grad_norm": 8.375,
"learning_rate": 1.7487052641976032e-06,
"loss": 7.0627,
"step": 589
},
{
"epoch": 0.95,
"grad_norm": 4.9375,
"learning_rate": 1.6380093052856483e-06,
"loss": 7.0851,
"step": 590
},
{
"epoch": 0.95,
"grad_norm": 6.34375,
"learning_rate": 1.5309035497147684e-06,
"loss": 7.5415,
"step": 591
},
{
"epoch": 0.95,
"grad_norm": 6.0625,
"learning_rate": 1.4273919068349184e-06,
"loss": 7.3371,
"step": 592
},
{
"epoch": 0.96,
"grad_norm": 5.6875,
"learning_rate": 1.3274781548112458e-06,
"loss": 7.2418,
"step": 593
},
{
"epoch": 0.96,
"grad_norm": 5.71875,
"learning_rate": 1.231165940486234e-06,
"loss": 7.7067,
"step": 594
},
{
"epoch": 0.96,
"grad_norm": 7.40625,
"learning_rate": 1.1384587792465872e-06,
"loss": 7.2579,
"step": 595
},
{
"epoch": 0.96,
"grad_norm": 7.03125,
"learning_rate": 1.0493600548948878e-06,
"loss": 7.477,
"step": 596
},
{
"epoch": 0.96,
"grad_norm": 7.0625,
"learning_rate": 9.638730195261625e-07,
"loss": 7.0403,
"step": 597
},
{
"epoch": 0.96,
"grad_norm": 5.09375,
"learning_rate": 8.820007934090879e-07,
"loss": 7.3635,
"step": 598
},
{
"epoch": 0.97,
"grad_norm": 7.96875,
"learning_rate": 8.037463648721488e-07,
"loss": 7.5187,
"step": 599
},
{
"epoch": 0.97,
"grad_norm": 362.0,
"learning_rate": 7.291125901946027e-07,
"loss": 7.179,
"step": 600
},
{
"epoch": 0.97,
"grad_norm": 11.8125,
"learning_rate": 6.581021935021304e-07,
"loss": 7.0963,
"step": 601
},
{
"epoch": 0.97,
"grad_norm": 5.78125,
"learning_rate": 5.907177666674812e-07,
"loss": 7.2989,
"step": 602
},
{
"epoch": 0.97,
"grad_norm": 6.53125,
"learning_rate": 5.269617692158613e-07,
"loss": 7.1139,
"step": 603
},
{
"epoch": 0.97,
"grad_norm": 7.21875,
"learning_rate": 4.668365282351372e-07,
"loss": 7.4399,
"step": 604
},
{
"epoch": 0.98,
"grad_norm": 5.25,
"learning_rate": 4.103442382909051e-07,
"loss": 7.2847,
"step": 605
},
{
"epoch": 0.98,
"grad_norm": 9.625,
"learning_rate": 3.5748696134639825e-07,
"loss": 7.1542,
"step": 606
},
{
"epoch": 0.98,
"grad_norm": 5.90625,
"learning_rate": 3.0826662668720364e-07,
"loss": 7.3664,
"step": 607
},
{
"epoch": 0.98,
"grad_norm": 6.625,
"learning_rate": 2.6268503085089547e-07,
"loss": 7.3751,
"step": 608
},
{
"epoch": 0.98,
"grad_norm": 13.875,
"learning_rate": 2.2074383756137686e-07,
"loss": 7.2611,
"step": 609
},
{
"epoch": 0.98,
"grad_norm": 7.03125,
"learning_rate": 1.824445776682504e-07,
"loss": 7.6417,
"step": 610
},
{
"epoch": 0.99,
"grad_norm": 7.34375,
"learning_rate": 1.477886490908742e-07,
"loss": 7.269,
"step": 611
},
{
"epoch": 0.99,
"grad_norm": 4.625,
"learning_rate": 1.1677731676733584e-07,
"loss": 7.2,
"step": 612
},
{
"epoch": 0.99,
"grad_norm": 7.3125,
"learning_rate": 8.941171260835601e-08,
"loss": 7.2481,
"step": 613
},
{
"epoch": 0.99,
"grad_norm": 5.34375,
"learning_rate": 6.569283545587724e-08,
"loss": 7.1505,
"step": 614
},
{
"epoch": 0.99,
"grad_norm": 4.875,
"learning_rate": 4.562155104665955e-08,
"loss": 7.1732,
"step": 615
},
{
"epoch": 0.99,
"grad_norm": 4.28125,
"learning_rate": 2.9198591980705848e-08,
"loss": 7.1312,
"step": 616
},
{
"epoch": 1.0,
"grad_norm": 5.25,
"learning_rate": 1.642455769444995e-08,
"loss": 7.2225,
"step": 617
},
{
"epoch": 1.0,
"grad_norm": 4.875,
"learning_rate": 7.2999144389296335e-09,
"loss": 7.3122,
"step": 618
},
{
"epoch": 1.0,
"grad_norm": 6.34375,
"learning_rate": 1.8249952627669154e-09,
"loss": 7.2489,
"step": 619
},
{
"epoch": 1.0,
"grad_norm": 9.9375,
"learning_rate": 0.0,
"loss": 6.8919,
"step": 620
},
{
"epoch": 1.0,
"eval_loss": 7.301526069641113,
"eval_runtime": 1.7862,
"eval_samples_per_second": 19.035,
"eval_steps_per_second": 19.035,
"step": 620
}
],
"logging_steps": 1,
"max_steps": 620,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 7881494668247040.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}