{ "best_metric": 0.2655787765979767, "best_model_checkpoint": "/scratch/czm5kz/llama2-7b_8_100_0.0003_sg_finetuned_combined/checkpoint-1680", "epoch": 100.0, "eval_steps": 20, "global_step": 1700, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.29, "grad_norm": 2.5130302906036377, "learning_rate": 0.0002994705882352941, "loss": 5.7234, "step": 5 }, { "epoch": 0.59, "grad_norm": 2.1954381465911865, "learning_rate": 0.0002985882352941176, "loss": 4.7563, "step": 10 }, { "epoch": 0.88, "grad_norm": 2.519700050354004, "learning_rate": 0.0002978823529411764, "loss": 4.2519, "step": 15 }, { "epoch": 1.18, "grad_norm": 2.1008927822113037, "learning_rate": 0.00029699999999999996, "loss": 3.8694, "step": 20 }, { "epoch": 1.18, "eval_loss": 3.4059982299804688, "eval_runtime": 1.9717, "eval_samples_per_second": 67.962, "eval_steps_per_second": 8.622, "step": 20 }, { "epoch": 1.47, "grad_norm": 1.9939683675765991, "learning_rate": 0.0002961176470588235, "loss": 3.0042, "step": 25 }, { "epoch": 1.76, "grad_norm": 1.9956856966018677, "learning_rate": 0.00029523529411764704, "loss": 2.7303, "step": 30 }, { "epoch": 2.06, "grad_norm": 2.552236318588257, "learning_rate": 0.0002943529411764706, "loss": 2.7002, "step": 35 }, { "epoch": 2.35, "grad_norm": 2.5456089973449707, "learning_rate": 0.00029347058823529413, "loss": 2.3598, "step": 40 }, { "epoch": 2.35, "eval_loss": 2.0730819702148438, "eval_runtime": 1.9745, "eval_samples_per_second": 67.864, "eval_steps_per_second": 8.61, "step": 40 }, { "epoch": 2.65, "grad_norm": 2.949130058288574, "learning_rate": 0.0002925882352941176, "loss": 1.9988, "step": 45 }, { "epoch": 2.94, "grad_norm": 2.44789457321167, "learning_rate": 0.00029170588235294116, "loss": 1.9843, "step": 50 }, { "epoch": 3.24, "grad_norm": 5.215721130371094, "learning_rate": 0.00029082352941176465, "loss": 1.5394, "step": 55 }, { "epoch": 3.53, "grad_norm": 3.831580638885498, "learning_rate": 0.0002899411764705882, "loss": 1.5503, "step": 60 }, { "epoch": 3.53, "eval_loss": 1.2807282209396362, "eval_runtime": 1.9725, "eval_samples_per_second": 67.933, "eval_steps_per_second": 8.618, "step": 60 }, { "epoch": 3.82, "grad_norm": 3.5273172855377197, "learning_rate": 0.00028905882352941173, "loss": 1.4236, "step": 65 }, { "epoch": 4.12, "grad_norm": 4.674720287322998, "learning_rate": 0.0002881764705882353, "loss": 1.1729, "step": 70 }, { "epoch": 4.41, "grad_norm": 5.43109655380249, "learning_rate": 0.0002872941176470588, "loss": 0.9213, "step": 75 }, { "epoch": 4.71, "grad_norm": 6.3855814933776855, "learning_rate": 0.0002864117647058823, "loss": 0.9106, "step": 80 }, { "epoch": 4.71, "eval_loss": 0.7538219094276428, "eval_runtime": 1.9756, "eval_samples_per_second": 67.827, "eval_steps_per_second": 8.605, "step": 80 }, { "epoch": 5.0, "grad_norm": 5.1275529861450195, "learning_rate": 0.00028552941176470585, "loss": 1.0219, "step": 85 }, { "epoch": 5.29, "grad_norm": 3.2476627826690674, "learning_rate": 0.0002846470588235294, "loss": 0.6821, "step": 90 }, { "epoch": 5.59, "grad_norm": 3.561427116394043, "learning_rate": 0.00028376470588235294, "loss": 0.6022, "step": 95 }, { "epoch": 5.88, "grad_norm": 4.358293056488037, "learning_rate": 0.0002828823529411765, "loss": 0.6565, "step": 100 }, { "epoch": 5.88, "eval_loss": 0.5009134411811829, "eval_runtime": 1.9816, "eval_samples_per_second": 67.622, "eval_steps_per_second": 8.579, "step": 100 }, { "epoch": 6.18, "grad_norm": 3.2790002822875977, "learning_rate": 
0.00028199999999999997, "loss": 0.6397, "step": 105 }, { "epoch": 6.47, "grad_norm": 2.5752627849578857, "learning_rate": 0.0002811176470588235, "loss": 0.4518, "step": 110 }, { "epoch": 6.76, "grad_norm": 3.419771909713745, "learning_rate": 0.000280235294117647, "loss": 0.4741, "step": 115 }, { "epoch": 7.06, "grad_norm": 2.2088027000427246, "learning_rate": 0.00027935294117647054, "loss": 0.5214, "step": 120 }, { "epoch": 7.06, "eval_loss": 0.40768566727638245, "eval_runtime": 1.9831, "eval_samples_per_second": 67.57, "eval_steps_per_second": 8.572, "step": 120 }, { "epoch": 7.35, "grad_norm": 2.9267308712005615, "learning_rate": 0.0002784705882352941, "loss": 0.4434, "step": 125 }, { "epoch": 7.65, "grad_norm": 3.3790996074676514, "learning_rate": 0.00027758823529411763, "loss": 0.4631, "step": 130 }, { "epoch": 7.94, "grad_norm": 2.3158915042877197, "learning_rate": 0.0002767058823529412, "loss": 0.4464, "step": 135 }, { "epoch": 8.24, "grad_norm": 2.231661081314087, "learning_rate": 0.00027582352941176466, "loss": 0.4275, "step": 140 }, { "epoch": 8.24, "eval_loss": 0.3691316843032837, "eval_runtime": 1.9948, "eval_samples_per_second": 67.174, "eval_steps_per_second": 8.522, "step": 140 }, { "epoch": 8.53, "grad_norm": 2.341855525970459, "learning_rate": 0.0002749411764705882, "loss": 0.3938, "step": 145 }, { "epoch": 8.82, "grad_norm": 1.6353213787078857, "learning_rate": 0.00027405882352941175, "loss": 0.4794, "step": 150 }, { "epoch": 9.12, "grad_norm": 1.7900513410568237, "learning_rate": 0.0002731764705882353, "loss": 0.4073, "step": 155 }, { "epoch": 9.41, "grad_norm": 2.082562208175659, "learning_rate": 0.0002722941176470588, "loss": 0.3793, "step": 160 }, { "epoch": 9.41, "eval_loss": 0.3403892517089844, "eval_runtime": 1.988, "eval_samples_per_second": 67.405, "eval_steps_per_second": 8.551, "step": 160 }, { "epoch": 9.71, "grad_norm": 1.9803985357284546, "learning_rate": 0.0002714117647058823, "loss": 0.3943, "step": 165 }, { "epoch": 10.0, "grad_norm": 1.6986087560653687, "learning_rate": 0.00027052941176470587, "loss": 0.4254, "step": 170 }, { "epoch": 10.29, "grad_norm": 1.120859146118164, "learning_rate": 0.0002696470588235294, "loss": 0.3333, "step": 175 }, { "epoch": 10.59, "grad_norm": 2.3081905841827393, "learning_rate": 0.0002687647058823529, "loss": 0.3671, "step": 180 }, { "epoch": 10.59, "eval_loss": 0.32923150062561035, "eval_runtime": 1.9871, "eval_samples_per_second": 67.435, "eval_steps_per_second": 8.555, "step": 180 }, { "epoch": 10.88, "grad_norm": 3.328174352645874, "learning_rate": 0.00026788235294117644, "loss": 0.3888, "step": 185 }, { "epoch": 11.18, "grad_norm": 2.408024549484253, "learning_rate": 0.000267, "loss": 0.3964, "step": 190 }, { "epoch": 11.47, "grad_norm": 1.6264761686325073, "learning_rate": 0.00026611764705882353, "loss": 0.3579, "step": 195 }, { "epoch": 11.76, "grad_norm": 1.857405662536621, "learning_rate": 0.00026523529411764707, "loss": 0.375, "step": 200 }, { "epoch": 11.76, "eval_loss": 0.3141331374645233, "eval_runtime": 1.9899, "eval_samples_per_second": 67.341, "eval_steps_per_second": 8.543, "step": 200 }, { "epoch": 12.06, "grad_norm": 1.0782090425491333, "learning_rate": 0.00026435294117647056, "loss": 0.3808, "step": 205 }, { "epoch": 12.35, "grad_norm": 1.4318054914474487, "learning_rate": 0.0002634705882352941, "loss": 0.3148, "step": 210 }, { "epoch": 12.65, "grad_norm": 1.6774518489837646, "learning_rate": 0.0002625882352941176, "loss": 0.3729, "step": 215 }, { "epoch": 12.94, "grad_norm": 1.2252236604690552, 
"learning_rate": 0.00026170588235294113, "loss": 0.379, "step": 220 }, { "epoch": 12.94, "eval_loss": 0.3084084689617157, "eval_runtime": 1.9898, "eval_samples_per_second": 67.342, "eval_steps_per_second": 8.543, "step": 220 }, { "epoch": 13.24, "grad_norm": 0.9840493202209473, "learning_rate": 0.0002608235294117647, "loss": 0.3292, "step": 225 }, { "epoch": 13.53, "grad_norm": 2.3673970699310303, "learning_rate": 0.0002599411764705882, "loss": 0.3803, "step": 230 }, { "epoch": 13.82, "grad_norm": 1.2279037237167358, "learning_rate": 0.00025905882352941176, "loss": 0.3444, "step": 235 }, { "epoch": 14.12, "grad_norm": 0.928492546081543, "learning_rate": 0.00025817647058823525, "loss": 0.334, "step": 240 }, { "epoch": 14.12, "eval_loss": 0.30614903569221497, "eval_runtime": 1.9906, "eval_samples_per_second": 67.317, "eval_steps_per_second": 8.54, "step": 240 }, { "epoch": 14.41, "grad_norm": 1.6385815143585205, "learning_rate": 0.0002572941176470588, "loss": 0.309, "step": 245 }, { "epoch": 14.71, "grad_norm": 0.9644414782524109, "learning_rate": 0.00025641176470588234, "loss": 0.3318, "step": 250 }, { "epoch": 15.0, "grad_norm": 1.3966537714004517, "learning_rate": 0.0002555294117647059, "loss": 0.3974, "step": 255 }, { "epoch": 15.29, "grad_norm": 1.0618793964385986, "learning_rate": 0.0002546470588235294, "loss": 0.3027, "step": 260 }, { "epoch": 15.29, "eval_loss": 0.29958730936050415, "eval_runtime": 1.9894, "eval_samples_per_second": 67.357, "eval_steps_per_second": 8.545, "step": 260 }, { "epoch": 15.59, "grad_norm": 1.31105637550354, "learning_rate": 0.0002537647058823529, "loss": 0.3196, "step": 265 }, { "epoch": 15.88, "grad_norm": 1.4833605289459229, "learning_rate": 0.00025288235294117646, "loss": 0.3485, "step": 270 }, { "epoch": 16.18, "grad_norm": 0.8568434119224548, "learning_rate": 0.00025199999999999995, "loss": 0.3391, "step": 275 }, { "epoch": 16.47, "grad_norm": 1.2687785625457764, "learning_rate": 0.0002511176470588235, "loss": 0.3179, "step": 280 }, { "epoch": 16.47, "eval_loss": 0.29810982942581177, "eval_runtime": 1.9921, "eval_samples_per_second": 67.267, "eval_steps_per_second": 8.534, "step": 280 }, { "epoch": 16.76, "grad_norm": 1.125921368598938, "learning_rate": 0.00025023529411764703, "loss": 0.3231, "step": 285 }, { "epoch": 17.06, "grad_norm": 0.751727819442749, "learning_rate": 0.0002493529411764706, "loss": 0.349, "step": 290 }, { "epoch": 17.35, "grad_norm": 1.0114847421646118, "learning_rate": 0.0002484705882352941, "loss": 0.3027, "step": 295 }, { "epoch": 17.65, "grad_norm": 2.5540404319763184, "learning_rate": 0.0002475882352941176, "loss": 0.3021, "step": 300 }, { "epoch": 17.65, "eval_loss": 0.29670611023902893, "eval_runtime": 2.0093, "eval_samples_per_second": 66.689, "eval_steps_per_second": 8.461, "step": 300 }, { "epoch": 17.94, "grad_norm": 0.9592204689979553, "learning_rate": 0.00024670588235294115, "loss": 0.3553, "step": 305 }, { "epoch": 18.24, "grad_norm": 0.6442326307296753, "learning_rate": 0.0002458235294117647, "loss": 0.3101, "step": 310 }, { "epoch": 18.53, "grad_norm": 1.0425697565078735, "learning_rate": 0.00024494117647058824, "loss": 0.3155, "step": 315 }, { "epoch": 18.82, "grad_norm": 3.3176205158233643, "learning_rate": 0.00024405882352941172, "loss": 0.3634, "step": 320 }, { "epoch": 18.82, "eval_loss": 0.2934125065803528, "eval_runtime": 1.9921, "eval_samples_per_second": 67.267, "eval_steps_per_second": 8.534, "step": 320 }, { "epoch": 19.12, "grad_norm": 0.5256841778755188, "learning_rate": 0.00024317647058823527, 
"loss": 0.3197, "step": 325 }, { "epoch": 19.41, "grad_norm": 0.7443175315856934, "learning_rate": 0.00024229411764705878, "loss": 0.3117, "step": 330 }, { "epoch": 19.71, "grad_norm": 0.8534731864929199, "learning_rate": 0.00024141176470588233, "loss": 0.322, "step": 335 }, { "epoch": 20.0, "grad_norm": 1.1688783168792725, "learning_rate": 0.00024052941176470587, "loss": 0.3293, "step": 340 }, { "epoch": 20.0, "eval_loss": 0.2844869792461395, "eval_runtime": 1.9933, "eval_samples_per_second": 67.224, "eval_steps_per_second": 8.528, "step": 340 }, { "epoch": 20.29, "grad_norm": 0.8509626388549805, "learning_rate": 0.00023964705882352939, "loss": 0.2899, "step": 345 }, { "epoch": 20.59, "grad_norm": 1.0683010816574097, "learning_rate": 0.00023876470588235293, "loss": 0.3368, "step": 350 }, { "epoch": 20.88, "grad_norm": 0.8359882235527039, "learning_rate": 0.00023788235294117644, "loss": 0.3189, "step": 355 }, { "epoch": 21.18, "grad_norm": 0.746294379234314, "learning_rate": 0.000237, "loss": 0.2907, "step": 360 }, { "epoch": 21.18, "eval_loss": 0.28352469205856323, "eval_runtime": 1.9943, "eval_samples_per_second": 67.191, "eval_steps_per_second": 8.524, "step": 360 }, { "epoch": 21.47, "grad_norm": 0.6525380611419678, "learning_rate": 0.00023611764705882353, "loss": 0.3088, "step": 365 }, { "epoch": 21.76, "grad_norm": 0.7996423840522766, "learning_rate": 0.00023523529411764702, "loss": 0.3241, "step": 370 }, { "epoch": 22.06, "grad_norm": 0.6835091710090637, "learning_rate": 0.00023435294117647056, "loss": 0.3078, "step": 375 }, { "epoch": 22.35, "grad_norm": 0.7048420310020447, "learning_rate": 0.00023347058823529408, "loss": 0.3098, "step": 380 }, { "epoch": 22.35, "eval_loss": 0.28362563252449036, "eval_runtime": 1.9954, "eval_samples_per_second": 67.156, "eval_steps_per_second": 8.52, "step": 380 }, { "epoch": 22.65, "grad_norm": 0.7149125337600708, "learning_rate": 0.00023258823529411762, "loss": 0.2859, "step": 385 }, { "epoch": 22.94, "grad_norm": 0.8284432291984558, "learning_rate": 0.00023170588235294116, "loss": 0.3193, "step": 390 }, { "epoch": 23.24, "grad_norm": 0.7711451053619385, "learning_rate": 0.00023082352941176468, "loss": 0.2894, "step": 395 }, { "epoch": 23.53, "grad_norm": 0.7395182847976685, "learning_rate": 0.00022994117647058822, "loss": 0.2911, "step": 400 }, { "epoch": 23.53, "eval_loss": 0.28144964575767517, "eval_runtime": 1.9967, "eval_samples_per_second": 67.11, "eval_steps_per_second": 8.514, "step": 400 }, { "epoch": 23.82, "grad_norm": 0.694525957107544, "learning_rate": 0.00022905882352941174, "loss": 0.3048, "step": 405 }, { "epoch": 24.12, "grad_norm": 0.6153783798217773, "learning_rate": 0.00022817647058823528, "loss": 0.2975, "step": 410 }, { "epoch": 24.41, "grad_norm": 0.7303423285484314, "learning_rate": 0.00022729411764705883, "loss": 0.2838, "step": 415 }, { "epoch": 24.71, "grad_norm": 0.6618799567222595, "learning_rate": 0.00022641176470588234, "loss": 0.326, "step": 420 }, { "epoch": 24.71, "eval_loss": 0.28115326166152954, "eval_runtime": 1.9929, "eval_samples_per_second": 67.238, "eval_steps_per_second": 8.53, "step": 420 }, { "epoch": 25.0, "grad_norm": 0.8111241459846497, "learning_rate": 0.00022552941176470586, "loss": 0.3084, "step": 425 }, { "epoch": 25.29, "grad_norm": 0.6205019354820251, "learning_rate": 0.00022464705882352937, "loss": 0.2873, "step": 430 }, { "epoch": 25.59, "grad_norm": 0.8169652223587036, "learning_rate": 0.00022376470588235292, "loss": 0.2965, "step": 435 }, { "epoch": 25.88, "grad_norm": 0.9366277456283569, 
"learning_rate": 0.00022288235294117643, "loss": 0.3002, "step": 440 }, { "epoch": 25.88, "eval_loss": 0.27707499265670776, "eval_runtime": 1.9949, "eval_samples_per_second": 67.171, "eval_steps_per_second": 8.522, "step": 440 }, { "epoch": 26.18, "grad_norm": 0.7006655931472778, "learning_rate": 0.00022199999999999998, "loss": 0.3185, "step": 445 }, { "epoch": 26.47, "grad_norm": 0.6538874506950378, "learning_rate": 0.00022111764705882352, "loss": 0.2813, "step": 450 }, { "epoch": 26.76, "grad_norm": 0.8294863104820251, "learning_rate": 0.00022023529411764703, "loss": 0.2989, "step": 455 }, { "epoch": 27.06, "grad_norm": 0.6035735011100769, "learning_rate": 0.00021935294117647058, "loss": 0.2996, "step": 460 }, { "epoch": 27.06, "eval_loss": 0.27492547035217285, "eval_runtime": 1.9946, "eval_samples_per_second": 67.183, "eval_steps_per_second": 8.523, "step": 460 }, { "epoch": 27.35, "grad_norm": 0.44762280583381653, "learning_rate": 0.00021847058823529412, "loss": 0.2766, "step": 465 }, { "epoch": 27.65, "grad_norm": 0.6129966974258423, "learning_rate": 0.00021758823529411764, "loss": 0.2849, "step": 470 }, { "epoch": 27.94, "grad_norm": 1.0713130235671997, "learning_rate": 0.00021670588235294118, "loss": 0.3428, "step": 475 }, { "epoch": 28.24, "grad_norm": 0.5866985321044922, "learning_rate": 0.00021582352941176467, "loss": 0.2759, "step": 480 }, { "epoch": 28.24, "eval_loss": 0.27579402923583984, "eval_runtime": 1.9947, "eval_samples_per_second": 67.176, "eval_steps_per_second": 8.522, "step": 480 }, { "epoch": 28.53, "grad_norm": 0.5810605883598328, "learning_rate": 0.0002149411764705882, "loss": 0.2954, "step": 485 }, { "epoch": 28.82, "grad_norm": 0.6972721815109253, "learning_rate": 0.00021405882352941173, "loss": 0.3118, "step": 490 }, { "epoch": 29.12, "grad_norm": 0.5709840059280396, "learning_rate": 0.00021317647058823527, "loss": 0.2901, "step": 495 }, { "epoch": 29.41, "grad_norm": 0.5941194295883179, "learning_rate": 0.0002122941176470588, "loss": 0.3009, "step": 500 }, { "epoch": 29.41, "eval_loss": 0.27494505047798157, "eval_runtime": 1.9954, "eval_samples_per_second": 67.154, "eval_steps_per_second": 8.52, "step": 500 }, { "epoch": 29.71, "grad_norm": 0.5286316275596619, "learning_rate": 0.00021141176470588233, "loss": 0.2753, "step": 505 }, { "epoch": 30.0, "grad_norm": 0.6919939517974854, "learning_rate": 0.00021052941176470587, "loss": 0.3041, "step": 510 }, { "epoch": 30.29, "grad_norm": 0.7751806974411011, "learning_rate": 0.0002096470588235294, "loss": 0.2994, "step": 515 }, { "epoch": 30.59, "grad_norm": 0.6728924512863159, "learning_rate": 0.00020876470588235293, "loss": 0.2758, "step": 520 }, { "epoch": 30.59, "eval_loss": 0.2755880057811737, "eval_runtime": 1.9941, "eval_samples_per_second": 67.197, "eval_steps_per_second": 8.525, "step": 520 }, { "epoch": 30.88, "grad_norm": 0.6528185606002808, "learning_rate": 0.00020788235294117647, "loss": 0.3003, "step": 525 }, { "epoch": 31.18, "grad_norm": 0.6421308517456055, "learning_rate": 0.00020699999999999996, "loss": 0.2887, "step": 530 }, { "epoch": 31.47, "grad_norm": 0.8102923631668091, "learning_rate": 0.0002061176470588235, "loss": 0.2871, "step": 535 }, { "epoch": 31.76, "grad_norm": 0.6277897357940674, "learning_rate": 0.00020523529411764702, "loss": 0.2909, "step": 540 }, { "epoch": 31.76, "eval_loss": 0.27382102608680725, "eval_runtime": 1.9939, "eval_samples_per_second": 67.207, "eval_steps_per_second": 8.526, "step": 540 }, { "epoch": 32.06, "grad_norm": 0.6897189021110535, "learning_rate": 
0.00020435294117647056, "loss": 0.3024, "step": 545 }, { "epoch": 32.35, "grad_norm": 0.4933074712753296, "learning_rate": 0.0002034705882352941, "loss": 0.2617, "step": 550 }, { "epoch": 32.65, "grad_norm": 0.6854521632194519, "learning_rate": 0.00020258823529411762, "loss": 0.2882, "step": 555 }, { "epoch": 32.94, "grad_norm": 0.6881256103515625, "learning_rate": 0.00020170588235294117, "loss": 0.3115, "step": 560 }, { "epoch": 32.94, "eval_loss": 0.274933785200119, "eval_runtime": 1.9951, "eval_samples_per_second": 67.166, "eval_steps_per_second": 8.521, "step": 560 }, { "epoch": 33.24, "grad_norm": 0.6826930642127991, "learning_rate": 0.00020082352941176468, "loss": 0.3037, "step": 565 }, { "epoch": 33.53, "grad_norm": 0.624535322189331, "learning_rate": 0.00019994117647058823, "loss": 0.2821, "step": 570 }, { "epoch": 33.82, "grad_norm": 0.6433701515197754, "learning_rate": 0.00019905882352941177, "loss": 0.2946, "step": 575 }, { "epoch": 34.12, "grad_norm": 0.5257152318954468, "learning_rate": 0.00019817647058823528, "loss": 0.2963, "step": 580 }, { "epoch": 34.12, "eval_loss": 0.2734356224536896, "eval_runtime": 1.9971, "eval_samples_per_second": 67.096, "eval_steps_per_second": 8.512, "step": 580 }, { "epoch": 34.41, "grad_norm": 0.5631464719772339, "learning_rate": 0.0001972941176470588, "loss": 0.2992, "step": 585 }, { "epoch": 34.71, "grad_norm": 0.6825907230377197, "learning_rate": 0.00019641176470588232, "loss": 0.2819, "step": 590 }, { "epoch": 35.0, "grad_norm": 0.7912545204162598, "learning_rate": 0.00019552941176470586, "loss": 0.3009, "step": 595 }, { "epoch": 35.29, "grad_norm": 0.6068854928016663, "learning_rate": 0.0001946470588235294, "loss": 0.2938, "step": 600 }, { "epoch": 35.29, "eval_loss": 0.2715306282043457, "eval_runtime": 1.997, "eval_samples_per_second": 67.101, "eval_steps_per_second": 8.513, "step": 600 }, { "epoch": 35.59, "grad_norm": 0.6175413131713867, "learning_rate": 0.00019376470588235292, "loss": 0.2853, "step": 605 }, { "epoch": 35.88, "grad_norm": 0.6325289011001587, "learning_rate": 0.00019288235294117646, "loss": 0.2825, "step": 610 }, { "epoch": 36.18, "grad_norm": 0.5814202427864075, "learning_rate": 0.00019199999999999998, "loss": 0.2785, "step": 615 }, { "epoch": 36.47, "grad_norm": 0.6043010950088501, "learning_rate": 0.00019111764705882352, "loss": 0.2707, "step": 620 }, { "epoch": 36.47, "eval_loss": 0.27390608191490173, "eval_runtime": 1.9949, "eval_samples_per_second": 67.173, "eval_steps_per_second": 8.522, "step": 620 }, { "epoch": 36.76, "grad_norm": 0.7618833780288696, "learning_rate": 0.00019023529411764706, "loss": 0.3051, "step": 625 }, { "epoch": 37.06, "grad_norm": 0.49574750661849976, "learning_rate": 0.00018935294117647058, "loss": 0.3009, "step": 630 }, { "epoch": 37.35, "grad_norm": 0.6526892185211182, "learning_rate": 0.00018847058823529412, "loss": 0.2764, "step": 635 }, { "epoch": 37.65, "grad_norm": 0.674574613571167, "learning_rate": 0.0001875882352941176, "loss": 0.2903, "step": 640 }, { "epoch": 37.65, "eval_loss": 0.27166232466697693, "eval_runtime": 1.9963, "eval_samples_per_second": 67.125, "eval_steps_per_second": 8.516, "step": 640 }, { "epoch": 37.94, "grad_norm": 0.7739295363426208, "learning_rate": 0.00018670588235294115, "loss": 0.295, "step": 645 }, { "epoch": 38.24, "grad_norm": 0.5252580642700195, "learning_rate": 0.00018582352941176467, "loss": 0.2616, "step": 650 }, { "epoch": 38.53, "grad_norm": 0.8815963268280029, "learning_rate": 0.00018494117647058821, "loss": 0.3151, "step": 655 }, { "epoch": 
38.82, "grad_norm": 0.6178475618362427, "learning_rate": 0.00018405882352941176, "loss": 0.2902, "step": 660 }, { "epoch": 38.82, "eval_loss": 0.27082693576812744, "eval_runtime": 1.995, "eval_samples_per_second": 67.167, "eval_steps_per_second": 8.521, "step": 660 }, { "epoch": 39.12, "grad_norm": 0.6863218545913696, "learning_rate": 0.00018317647058823527, "loss": 0.2846, "step": 665 }, { "epoch": 39.41, "grad_norm": 0.5531587600708008, "learning_rate": 0.00018229411764705882, "loss": 0.2857, "step": 670 }, { "epoch": 39.71, "grad_norm": 0.580801248550415, "learning_rate": 0.00018141176470588236, "loss": 0.2876, "step": 675 }, { "epoch": 40.0, "grad_norm": 0.559680163860321, "learning_rate": 0.00018052941176470587, "loss": 0.2825, "step": 680 }, { "epoch": 40.0, "eval_loss": 0.27086344361305237, "eval_runtime": 2.0106, "eval_samples_per_second": 66.647, "eval_steps_per_second": 8.455, "step": 680 }, { "epoch": 40.29, "grad_norm": 0.4894808232784271, "learning_rate": 0.00017964705882352942, "loss": 0.2762, "step": 685 }, { "epoch": 40.59, "grad_norm": 0.4080250859260559, "learning_rate": 0.0001787647058823529, "loss": 0.2552, "step": 690 }, { "epoch": 40.88, "grad_norm": 0.7825730443000793, "learning_rate": 0.00017788235294117645, "loss": 0.3105, "step": 695 }, { "epoch": 41.18, "grad_norm": 0.6776964068412781, "learning_rate": 0.00017699999999999997, "loss": 0.2815, "step": 700 }, { "epoch": 41.18, "eval_loss": 0.2690856158733368, "eval_runtime": 1.9978, "eval_samples_per_second": 67.073, "eval_steps_per_second": 8.509, "step": 700 }, { "epoch": 41.47, "grad_norm": 0.7837787866592407, "learning_rate": 0.0001761176470588235, "loss": 0.2957, "step": 705 }, { "epoch": 41.76, "grad_norm": 0.63068687915802, "learning_rate": 0.00017523529411764705, "loss": 0.2904, "step": 710 }, { "epoch": 42.06, "grad_norm": 0.6573776602745056, "learning_rate": 0.00017435294117647057, "loss": 0.2979, "step": 715 }, { "epoch": 42.35, "grad_norm": 0.6143296360969543, "learning_rate": 0.0001734705882352941, "loss": 0.2618, "step": 720 }, { "epoch": 42.35, "eval_loss": 0.2710973918437958, "eval_runtime": 1.995, "eval_samples_per_second": 67.167, "eval_steps_per_second": 8.521, "step": 720 }, { "epoch": 42.65, "grad_norm": 0.5681264400482178, "learning_rate": 0.00017258823529411763, "loss": 0.2918, "step": 725 }, { "epoch": 42.94, "grad_norm": 0.8199780583381653, "learning_rate": 0.00017170588235294117, "loss": 0.291, "step": 730 }, { "epoch": 43.24, "grad_norm": 0.5809566378593445, "learning_rate": 0.0001708235294117647, "loss": 0.287, "step": 735 }, { "epoch": 43.53, "grad_norm": 0.7246254682540894, "learning_rate": 0.00016994117647058823, "loss": 0.2792, "step": 740 }, { "epoch": 43.53, "eval_loss": 0.2726564109325409, "eval_runtime": 1.9952, "eval_samples_per_second": 67.162, "eval_steps_per_second": 8.521, "step": 740 }, { "epoch": 43.82, "grad_norm": 0.6743197441101074, "learning_rate": 0.00016905882352941174, "loss": 0.2871, "step": 745 }, { "epoch": 44.12, "grad_norm": 0.6329538822174072, "learning_rate": 0.00016817647058823526, "loss": 0.269, "step": 750 }, { "epoch": 44.41, "grad_norm": 0.6352816820144653, "learning_rate": 0.0001672941176470588, "loss": 0.2671, "step": 755 }, { "epoch": 44.71, "grad_norm": 0.6283583045005798, "learning_rate": 0.00016641176470588235, "loss": 0.2907, "step": 760 }, { "epoch": 44.71, "eval_loss": 0.27082738280296326, "eval_runtime": 1.9938, "eval_samples_per_second": 67.208, "eval_steps_per_second": 8.526, "step": 760 }, { "epoch": 45.0, "grad_norm": 0.658597469329834, 
"learning_rate": 0.00016552941176470586, "loss": 0.3013, "step": 765 }, { "epoch": 45.29, "grad_norm": 0.4039109945297241, "learning_rate": 0.0001646470588235294, "loss": 0.2666, "step": 770 }, { "epoch": 45.59, "grad_norm": 0.5540700554847717, "learning_rate": 0.00016376470588235292, "loss": 0.3015, "step": 775 }, { "epoch": 45.88, "grad_norm": 0.7095409035682678, "learning_rate": 0.00016288235294117646, "loss": 0.2822, "step": 780 }, { "epoch": 45.88, "eval_loss": 0.26980897784233093, "eval_runtime": 1.9956, "eval_samples_per_second": 67.148, "eval_steps_per_second": 8.519, "step": 780 }, { "epoch": 46.18, "grad_norm": 0.5550056099891663, "learning_rate": 0.000162, "loss": 0.2906, "step": 785 }, { "epoch": 46.47, "grad_norm": 0.5746665000915527, "learning_rate": 0.00016111764705882352, "loss": 0.2841, "step": 790 }, { "epoch": 46.76, "grad_norm": 0.6271633505821228, "learning_rate": 0.00016023529411764704, "loss": 0.2783, "step": 795 }, { "epoch": 47.06, "grad_norm": 0.6846187710762024, "learning_rate": 0.00015935294117647056, "loss": 0.2969, "step": 800 }, { "epoch": 47.06, "eval_loss": 0.268819659948349, "eval_runtime": 1.9933, "eval_samples_per_second": 67.226, "eval_steps_per_second": 8.529, "step": 800 }, { "epoch": 47.35, "grad_norm": 0.4498227834701538, "learning_rate": 0.0001584705882352941, "loss": 0.2682, "step": 805 }, { "epoch": 47.65, "grad_norm": 0.4453158676624298, "learning_rate": 0.00015758823529411761, "loss": 0.2835, "step": 810 }, { "epoch": 47.94, "grad_norm": 0.5814605355262756, "learning_rate": 0.00015670588235294116, "loss": 0.2936, "step": 815 }, { "epoch": 48.24, "grad_norm": 0.5212646722793579, "learning_rate": 0.0001558235294117647, "loss": 0.2578, "step": 820 }, { "epoch": 48.24, "eval_loss": 0.26919475197792053, "eval_runtime": 1.9948, "eval_samples_per_second": 67.174, "eval_steps_per_second": 8.522, "step": 820 }, { "epoch": 48.53, "grad_norm": 0.5765751004219055, "learning_rate": 0.00015494117647058822, "loss": 0.2831, "step": 825 }, { "epoch": 48.82, "grad_norm": 0.719187319278717, "learning_rate": 0.00015405882352941176, "loss": 0.3037, "step": 830 }, { "epoch": 49.12, "grad_norm": 0.5635544061660767, "learning_rate": 0.0001531764705882353, "loss": 0.2855, "step": 835 }, { "epoch": 49.41, "grad_norm": 0.6977357864379883, "learning_rate": 0.00015229411764705882, "loss": 0.2715, "step": 840 }, { "epoch": 49.41, "eval_loss": 0.2697232961654663, "eval_runtime": 1.9956, "eval_samples_per_second": 67.147, "eval_steps_per_second": 8.519, "step": 840 }, { "epoch": 49.71, "grad_norm": 0.5384779572486877, "learning_rate": 0.00015141176470588236, "loss": 0.2872, "step": 845 }, { "epoch": 50.0, "grad_norm": 0.820724606513977, "learning_rate": 0.00015052941176470585, "loss": 0.2855, "step": 850 }, { "epoch": 50.29, "grad_norm": 0.6426348686218262, "learning_rate": 0.0001496470588235294, "loss": 0.2732, "step": 855 }, { "epoch": 50.59, "grad_norm": 0.49072563648223877, "learning_rate": 0.00014876470588235294, "loss": 0.2791, "step": 860 }, { "epoch": 50.59, "eval_loss": 0.27140939235687256, "eval_runtime": 1.9964, "eval_samples_per_second": 67.122, "eval_steps_per_second": 8.516, "step": 860 }, { "epoch": 50.88, "grad_norm": 0.6572851538658142, "learning_rate": 0.00014788235294117645, "loss": 0.2788, "step": 865 }, { "epoch": 51.18, "grad_norm": 0.5061244368553162, "learning_rate": 0.000147, "loss": 0.2984, "step": 870 }, { "epoch": 51.47, "grad_norm": 0.6135215163230896, "learning_rate": 0.0001461176470588235, "loss": 0.2914, "step": 875 }, { "epoch": 51.76, 
"grad_norm": 0.556154191493988, "learning_rate": 0.00014523529411764705, "loss": 0.2746, "step": 880 }, { "epoch": 51.76, "eval_loss": 0.2693510055541992, "eval_runtime": 1.9947, "eval_samples_per_second": 67.177, "eval_steps_per_second": 8.522, "step": 880 }, { "epoch": 52.06, "grad_norm": 0.4937208592891693, "learning_rate": 0.00014435294117647057, "loss": 0.2796, "step": 885 }, { "epoch": 52.35, "grad_norm": 0.5362540483474731, "learning_rate": 0.00014347058823529409, "loss": 0.2741, "step": 890 }, { "epoch": 52.65, "grad_norm": 0.4936700761318207, "learning_rate": 0.00014258823529411763, "loss": 0.2876, "step": 895 }, { "epoch": 52.94, "grad_norm": 0.6930960416793823, "learning_rate": 0.00014170588235294117, "loss": 0.2916, "step": 900 }, { "epoch": 52.94, "eval_loss": 0.2691669166088104, "eval_runtime": 1.9969, "eval_samples_per_second": 67.104, "eval_steps_per_second": 8.513, "step": 900 }, { "epoch": 53.24, "grad_norm": 0.5722822546958923, "learning_rate": 0.0001408235294117647, "loss": 0.2753, "step": 905 }, { "epoch": 53.53, "grad_norm": 0.4354608952999115, "learning_rate": 0.00013994117647058823, "loss": 0.2508, "step": 910 }, { "epoch": 53.82, "grad_norm": 0.7151679396629333, "learning_rate": 0.00013905882352941175, "loss": 0.3153, "step": 915 }, { "epoch": 54.12, "grad_norm": 0.5432928204536438, "learning_rate": 0.0001381764705882353, "loss": 0.295, "step": 920 }, { "epoch": 54.12, "eval_loss": 0.26740530133247375, "eval_runtime": 1.9964, "eval_samples_per_second": 67.121, "eval_steps_per_second": 8.515, "step": 920 }, { "epoch": 54.41, "grad_norm": 0.50511234998703, "learning_rate": 0.0001372941176470588, "loss": 0.2678, "step": 925 }, { "epoch": 54.71, "grad_norm": 0.7931966781616211, "learning_rate": 0.00013641176470588235, "loss": 0.2921, "step": 930 }, { "epoch": 55.0, "grad_norm": 1.1295318603515625, "learning_rate": 0.00013552941176470587, "loss": 0.2874, "step": 935 }, { "epoch": 55.29, "grad_norm": 0.5579237341880798, "learning_rate": 0.0001346470588235294, "loss": 0.2781, "step": 940 }, { "epoch": 55.29, "eval_loss": 0.2684433162212372, "eval_runtime": 1.9968, "eval_samples_per_second": 67.108, "eval_steps_per_second": 8.514, "step": 940 }, { "epoch": 55.59, "grad_norm": 0.516738772392273, "learning_rate": 0.00013376470588235292, "loss": 0.2622, "step": 945 }, { "epoch": 55.88, "grad_norm": 0.6151651740074158, "learning_rate": 0.00013288235294117647, "loss": 0.2957, "step": 950 }, { "epoch": 56.18, "grad_norm": 0.7047178149223328, "learning_rate": 0.00013199999999999998, "loss": 0.2893, "step": 955 }, { "epoch": 56.47, "grad_norm": 0.5808413624763489, "learning_rate": 0.00013111764705882353, "loss": 0.2727, "step": 960 }, { "epoch": 56.47, "eval_loss": 0.2685576379299164, "eval_runtime": 1.994, "eval_samples_per_second": 67.203, "eval_steps_per_second": 8.526, "step": 960 }, { "epoch": 56.76, "grad_norm": 0.6171568632125854, "learning_rate": 0.00013023529411764704, "loss": 0.2781, "step": 965 }, { "epoch": 57.06, "grad_norm": 0.5795710682868958, "learning_rate": 0.00012935294117647056, "loss": 0.2989, "step": 970 }, { "epoch": 57.35, "grad_norm": 0.4831099510192871, "learning_rate": 0.0001284705882352941, "loss": 0.2719, "step": 975 }, { "epoch": 57.65, "grad_norm": 0.6539974808692932, "learning_rate": 0.00012758823529411764, "loss": 0.2783, "step": 980 }, { "epoch": 57.65, "eval_loss": 0.2687915861606598, "eval_runtime": 1.9961, "eval_samples_per_second": 67.13, "eval_steps_per_second": 8.516, "step": 980 }, { "epoch": 57.94, "grad_norm": 0.6757897734642029, 
"learning_rate": 0.00012670588235294116, "loss": 0.2885, "step": 985 }, { "epoch": 58.24, "grad_norm": 0.5477202534675598, "learning_rate": 0.0001258235294117647, "loss": 0.2931, "step": 990 }, { "epoch": 58.53, "grad_norm": 0.6801700592041016, "learning_rate": 0.00012494117647058822, "loss": 0.2692, "step": 995 }, { "epoch": 58.82, "grad_norm": 0.5633025765419006, "learning_rate": 0.00012405882352941176, "loss": 0.2747, "step": 1000 }, { "epoch": 58.82, "eval_loss": 0.269478976726532, "eval_runtime": 1.9946, "eval_samples_per_second": 67.181, "eval_steps_per_second": 8.523, "step": 1000 }, { "epoch": 59.12, "grad_norm": 0.6582775115966797, "learning_rate": 0.00012317647058823528, "loss": 0.2863, "step": 1005 }, { "epoch": 59.41, "grad_norm": 0.6495881676673889, "learning_rate": 0.00012229411764705882, "loss": 0.2797, "step": 1010 }, { "epoch": 59.71, "grad_norm": 0.5010665655136108, "learning_rate": 0.00012141176470588235, "loss": 0.2832, "step": 1015 }, { "epoch": 60.0, "grad_norm": 0.5570430755615234, "learning_rate": 0.00012052941176470588, "loss": 0.2755, "step": 1020 }, { "epoch": 60.0, "eval_loss": 0.26759082078933716, "eval_runtime": 1.9963, "eval_samples_per_second": 67.125, "eval_steps_per_second": 8.516, "step": 1020 }, { "epoch": 60.29, "grad_norm": 0.5243686437606812, "learning_rate": 0.0001196470588235294, "loss": 0.2639, "step": 1025 }, { "epoch": 60.59, "grad_norm": 0.598508358001709, "learning_rate": 0.00011876470588235293, "loss": 0.2876, "step": 1030 }, { "epoch": 60.88, "grad_norm": 0.595809280872345, "learning_rate": 0.00011788235294117645, "loss": 0.2703, "step": 1035 }, { "epoch": 61.18, "grad_norm": 0.6807280778884888, "learning_rate": 0.000117, "loss": 0.2889, "step": 1040 }, { "epoch": 61.18, "eval_loss": 0.267448753118515, "eval_runtime": 1.997, "eval_samples_per_second": 67.102, "eval_steps_per_second": 8.513, "step": 1040 }, { "epoch": 61.47, "grad_norm": 0.4826294183731079, "learning_rate": 0.00011611764705882353, "loss": 0.2689, "step": 1045 }, { "epoch": 61.76, "grad_norm": 0.5328644514083862, "learning_rate": 0.00011523529411764704, "loss": 0.2928, "step": 1050 }, { "epoch": 62.06, "grad_norm": 0.6303868889808655, "learning_rate": 0.00011435294117647057, "loss": 0.2778, "step": 1055 }, { "epoch": 62.35, "grad_norm": 0.6396406292915344, "learning_rate": 0.0001134705882352941, "loss": 0.2898, "step": 1060 }, { "epoch": 62.35, "eval_loss": 0.26878321170806885, "eval_runtime": 2.0027, "eval_samples_per_second": 66.908, "eval_steps_per_second": 8.488, "step": 1060 }, { "epoch": 62.65, "grad_norm": 0.6628149151802063, "learning_rate": 0.00011258823529411765, "loss": 0.2777, "step": 1065 }, { "epoch": 62.94, "grad_norm": 0.5486224293708801, "learning_rate": 0.00011170588235294117, "loss": 0.2738, "step": 1070 }, { "epoch": 63.24, "grad_norm": 0.5262264609336853, "learning_rate": 0.00011082352941176469, "loss": 0.2782, "step": 1075 }, { "epoch": 63.53, "grad_norm": 0.5544824600219727, "learning_rate": 0.00010994117647058822, "loss": 0.2603, "step": 1080 }, { "epoch": 63.53, "eval_loss": 0.268815815448761, "eval_runtime": 1.9965, "eval_samples_per_second": 67.116, "eval_steps_per_second": 8.515, "step": 1080 }, { "epoch": 63.82, "grad_norm": 0.5544533729553223, "learning_rate": 0.00010905882352941175, "loss": 0.2955, "step": 1085 }, { "epoch": 64.12, "grad_norm": 0.5809686183929443, "learning_rate": 0.00010817647058823529, "loss": 0.2811, "step": 1090 }, { "epoch": 64.41, "grad_norm": 0.4934030771255493, "learning_rate": 0.00010729411764705882, "loss": 0.2832, 
"step": 1095 }, { "epoch": 64.71, "grad_norm": 0.6920121312141418, "learning_rate": 0.00010641176470588235, "loss": 0.2655, "step": 1100 }, { "epoch": 64.71, "eval_loss": 0.2684324383735657, "eval_runtime": 1.996, "eval_samples_per_second": 67.134, "eval_steps_per_second": 8.517, "step": 1100 }, { "epoch": 65.0, "grad_norm": 0.8359130620956421, "learning_rate": 0.00010552941176470587, "loss": 0.2858, "step": 1105 }, { "epoch": 65.29, "grad_norm": 0.47680842876434326, "learning_rate": 0.0001046470588235294, "loss": 0.256, "step": 1110 }, { "epoch": 65.59, "grad_norm": 0.5028588771820068, "learning_rate": 0.00010376470588235293, "loss": 0.2834, "step": 1115 }, { "epoch": 65.88, "grad_norm": 0.7377525568008423, "learning_rate": 0.00010288235294117647, "loss": 0.3062, "step": 1120 }, { "epoch": 65.88, "eval_loss": 0.2679174244403839, "eval_runtime": 1.996, "eval_samples_per_second": 67.134, "eval_steps_per_second": 8.517, "step": 1120 }, { "epoch": 66.18, "grad_norm": 0.5834652185440063, "learning_rate": 0.000102, "loss": 0.2754, "step": 1125 }, { "epoch": 66.47, "grad_norm": 0.5451943874359131, "learning_rate": 0.00010111764705882352, "loss": 0.2775, "step": 1130 }, { "epoch": 66.76, "grad_norm": 0.7877181768417358, "learning_rate": 0.00010023529411764704, "loss": 0.2968, "step": 1135 }, { "epoch": 67.06, "grad_norm": 0.706478476524353, "learning_rate": 9.935294117647057e-05, "loss": 0.2679, "step": 1140 }, { "epoch": 67.06, "eval_loss": 0.26736557483673096, "eval_runtime": 1.9963, "eval_samples_per_second": 67.124, "eval_steps_per_second": 8.516, "step": 1140 }, { "epoch": 67.35, "grad_norm": 0.5112646818161011, "learning_rate": 9.847058823529412e-05, "loss": 0.2893, "step": 1145 }, { "epoch": 67.65, "grad_norm": 0.5195066332817078, "learning_rate": 9.758823529411765e-05, "loss": 0.26, "step": 1150 }, { "epoch": 67.94, "grad_norm": 0.7363670468330383, "learning_rate": 9.670588235294116e-05, "loss": 0.2879, "step": 1155 }, { "epoch": 68.24, "grad_norm": 0.5193796753883362, "learning_rate": 9.582352941176469e-05, "loss": 0.2792, "step": 1160 }, { "epoch": 68.24, "eval_loss": 0.26693105697631836, "eval_runtime": 1.9965, "eval_samples_per_second": 67.117, "eval_steps_per_second": 8.515, "step": 1160 }, { "epoch": 68.53, "grad_norm": 0.5236190557479858, "learning_rate": 9.494117647058822e-05, "loss": 0.2693, "step": 1165 }, { "epoch": 68.82, "grad_norm": 0.5888519287109375, "learning_rate": 9.405882352941176e-05, "loss": 0.3004, "step": 1170 }, { "epoch": 69.12, "grad_norm": 0.45090892910957336, "learning_rate": 9.31764705882353e-05, "loss": 0.2667, "step": 1175 }, { "epoch": 69.41, "grad_norm": 0.553806483745575, "learning_rate": 9.229411764705881e-05, "loss": 0.2606, "step": 1180 }, { "epoch": 69.41, "eval_loss": 0.26794832944869995, "eval_runtime": 1.9956, "eval_samples_per_second": 67.149, "eval_steps_per_second": 8.519, "step": 1180 }, { "epoch": 69.71, "grad_norm": 0.5653892755508423, "learning_rate": 9.141176470588234e-05, "loss": 0.2884, "step": 1185 }, { "epoch": 70.0, "grad_norm": 0.6985815167427063, "learning_rate": 9.052941176470587e-05, "loss": 0.2787, "step": 1190 }, { "epoch": 70.29, "grad_norm": 0.6280454397201538, "learning_rate": 8.96470588235294e-05, "loss": 0.2744, "step": 1195 }, { "epoch": 70.59, "grad_norm": 0.669928252696991, "learning_rate": 8.876470588235294e-05, "loss": 0.2762, "step": 1200 }, { "epoch": 70.59, "eval_loss": 0.26804956793785095, "eval_runtime": 1.9958, "eval_samples_per_second": 67.141, "eval_steps_per_second": 8.518, "step": 1200 }, { "epoch": 70.88, 
"grad_norm": 0.5315139293670654, "learning_rate": 8.788235294117647e-05, "loss": 0.2856, "step": 1205 }, { "epoch": 71.18, "grad_norm": 0.5895591378211975, "learning_rate": 8.699999999999999e-05, "loss": 0.2916, "step": 1210 }, { "epoch": 71.47, "grad_norm": 0.484454870223999, "learning_rate": 8.611764705882352e-05, "loss": 0.255, "step": 1215 }, { "epoch": 71.76, "grad_norm": 0.624661922454834, "learning_rate": 8.523529411764705e-05, "loss": 0.3034, "step": 1220 }, { "epoch": 71.76, "eval_loss": 0.2677695155143738, "eval_runtime": 1.9953, "eval_samples_per_second": 67.159, "eval_steps_per_second": 8.52, "step": 1220 }, { "epoch": 72.06, "grad_norm": 0.46470680832862854, "learning_rate": 8.435294117647059e-05, "loss": 0.2518, "step": 1225 }, { "epoch": 72.35, "grad_norm": 0.5925688147544861, "learning_rate": 8.347058823529412e-05, "loss": 0.2665, "step": 1230 }, { "epoch": 72.65, "grad_norm": 0.5845850706100464, "learning_rate": 8.258823529411763e-05, "loss": 0.2842, "step": 1235 }, { "epoch": 72.94, "grad_norm": 0.7143939137458801, "learning_rate": 8.170588235294116e-05, "loss": 0.2874, "step": 1240 }, { "epoch": 72.94, "eval_loss": 0.26679080724716187, "eval_runtime": 1.996, "eval_samples_per_second": 67.134, "eval_steps_per_second": 8.517, "step": 1240 }, { "epoch": 73.24, "grad_norm": 0.7805367708206177, "learning_rate": 8.08235294117647e-05, "loss": 0.2572, "step": 1245 }, { "epoch": 73.53, "grad_norm": 0.6749709844589233, "learning_rate": 7.994117647058824e-05, "loss": 0.2832, "step": 1250 }, { "epoch": 73.82, "grad_norm": 0.618565559387207, "learning_rate": 7.905882352941177e-05, "loss": 0.2604, "step": 1255 }, { "epoch": 74.12, "grad_norm": 0.5196065306663513, "learning_rate": 7.817647058823528e-05, "loss": 0.3091, "step": 1260 }, { "epoch": 74.12, "eval_loss": 0.2668515741825104, "eval_runtime": 1.996, "eval_samples_per_second": 67.133, "eval_steps_per_second": 8.517, "step": 1260 }, { "epoch": 74.41, "grad_norm": 0.6182024478912354, "learning_rate": 7.729411764705881e-05, "loss": 0.2786, "step": 1265 }, { "epoch": 74.71, "grad_norm": 0.7030547857284546, "learning_rate": 7.641176470588234e-05, "loss": 0.2864, "step": 1270 }, { "epoch": 75.0, "grad_norm": 0.48524388670921326, "learning_rate": 7.552941176470588e-05, "loss": 0.2613, "step": 1275 }, { "epoch": 75.29, "grad_norm": 0.5933300256729126, "learning_rate": 7.46470588235294e-05, "loss": 0.2954, "step": 1280 }, { "epoch": 75.29, "eval_loss": 0.2671739161014557, "eval_runtime": 1.9942, "eval_samples_per_second": 67.196, "eval_steps_per_second": 8.525, "step": 1280 }, { "epoch": 75.59, "grad_norm": 0.6050323843955994, "learning_rate": 7.376470588235293e-05, "loss": 0.2572, "step": 1285 }, { "epoch": 75.88, "grad_norm": 0.5846253633499146, "learning_rate": 7.288235294117647e-05, "loss": 0.268, "step": 1290 }, { "epoch": 76.18, "grad_norm": 0.5491711497306824, "learning_rate": 7.199999999999999e-05, "loss": 0.2706, "step": 1295 }, { "epoch": 76.47, "grad_norm": 0.5089874863624573, "learning_rate": 7.111764705882352e-05, "loss": 0.2591, "step": 1300 }, { "epoch": 76.47, "eval_loss": 0.2666688561439514, "eval_runtime": 1.9966, "eval_samples_per_second": 67.114, "eval_steps_per_second": 8.514, "step": 1300 }, { "epoch": 76.76, "grad_norm": 0.7518558502197266, "learning_rate": 7.023529411764706e-05, "loss": 0.282, "step": 1305 }, { "epoch": 77.06, "grad_norm": 0.624338686466217, "learning_rate": 6.935294117647058e-05, "loss": 0.2991, "step": 1310 }, { "epoch": 77.35, "grad_norm": 0.5179735422134399, "learning_rate": 
6.847058823529412e-05, "loss": 0.2667, "step": 1315 }, { "epoch": 77.65, "grad_norm": 0.6224756240844727, "learning_rate": 6.758823529411764e-05, "loss": 0.28, "step": 1320 }, { "epoch": 77.65, "eval_loss": 0.26699915528297424, "eval_runtime": 1.9958, "eval_samples_per_second": 67.139, "eval_steps_per_second": 8.518, "step": 1320 }, { "epoch": 77.94, "grad_norm": 0.6646649837493896, "learning_rate": 6.670588235294117e-05, "loss": 0.2726, "step": 1325 }, { "epoch": 78.24, "grad_norm": 0.4936932325363159, "learning_rate": 6.582352941176471e-05, "loss": 0.2663, "step": 1330 }, { "epoch": 78.53, "grad_norm": 0.5740850567817688, "learning_rate": 6.494117647058822e-05, "loss": 0.2642, "step": 1335 }, { "epoch": 78.82, "grad_norm": 0.5269001722335815, "learning_rate": 6.405882352941175e-05, "loss": 0.2818, "step": 1340 }, { "epoch": 78.82, "eval_loss": 0.2666696310043335, "eval_runtime": 1.9974, "eval_samples_per_second": 67.088, "eval_steps_per_second": 8.511, "step": 1340 }, { "epoch": 79.12, "grad_norm": 0.6438319087028503, "learning_rate": 6.31764705882353e-05, "loss": 0.3015, "step": 1345 }, { "epoch": 79.41, "grad_norm": 0.5811217427253723, "learning_rate": 6.229411764705881e-05, "loss": 0.2438, "step": 1350 }, { "epoch": 79.71, "grad_norm": 0.5257366895675659, "learning_rate": 6.141176470588236e-05, "loss": 0.2775, "step": 1355 }, { "epoch": 80.0, "grad_norm": 0.67855304479599, "learning_rate": 6.052941176470587e-05, "loss": 0.2922, "step": 1360 }, { "epoch": 80.0, "eval_loss": 0.2664375901222229, "eval_runtime": 1.9959, "eval_samples_per_second": 67.136, "eval_steps_per_second": 8.517, "step": 1360 }, { "epoch": 80.29, "grad_norm": 0.6666358113288879, "learning_rate": 5.964705882352941e-05, "loss": 0.2827, "step": 1365 }, { "epoch": 80.59, "grad_norm": 0.5569036602973938, "learning_rate": 5.876470588235294e-05, "loss": 0.2741, "step": 1370 }, { "epoch": 80.88, "grad_norm": 0.5340925455093384, "learning_rate": 5.788235294117646e-05, "loss": 0.2587, "step": 1375 }, { "epoch": 81.18, "grad_norm": 0.6019856929779053, "learning_rate": 5.6999999999999996e-05, "loss": 0.2883, "step": 1380 }, { "epoch": 81.18, "eval_loss": 0.266279011964798, "eval_runtime": 1.9934, "eval_samples_per_second": 67.222, "eval_steps_per_second": 8.528, "step": 1380 }, { "epoch": 81.47, "grad_norm": 0.4570218026638031, "learning_rate": 5.6117647058823526e-05, "loss": 0.2521, "step": 1385 }, { "epoch": 81.76, "grad_norm": 0.6152669191360474, "learning_rate": 5.5235294117647055e-05, "loss": 0.2826, "step": 1390 }, { "epoch": 82.06, "grad_norm": 0.537754237651825, "learning_rate": 5.4352941176470585e-05, "loss": 0.2782, "step": 1395 }, { "epoch": 82.35, "grad_norm": 0.7443511486053467, "learning_rate": 5.347058823529411e-05, "loss": 0.2717, "step": 1400 }, { "epoch": 82.35, "eval_loss": 0.2660791873931885, "eval_runtime": 1.9952, "eval_samples_per_second": 67.162, "eval_steps_per_second": 8.521, "step": 1400 }, { "epoch": 82.65, "grad_norm": 0.45767509937286377, "learning_rate": 5.2588235294117644e-05, "loss": 0.2595, "step": 1405 }, { "epoch": 82.94, "grad_norm": 0.7017338871955872, "learning_rate": 5.170588235294117e-05, "loss": 0.2865, "step": 1410 }, { "epoch": 83.24, "grad_norm": 0.4680289030075073, "learning_rate": 5.0823529411764696e-05, "loss": 0.2761, "step": 1415 }, { "epoch": 83.53, "grad_norm": 0.5954050421714783, "learning_rate": 4.994117647058823e-05, "loss": 0.2874, "step": 1420 }, { "epoch": 83.53, "eval_loss": 0.2670120894908905, "eval_runtime": 1.9957, "eval_samples_per_second": 67.146, 
"eval_steps_per_second": 8.519, "step": 1420 }, { "epoch": 83.82, "grad_norm": 0.5585493445396423, "learning_rate": 4.905882352941176e-05, "loss": 0.274, "step": 1425 }, { "epoch": 84.12, "grad_norm": 0.6001076698303223, "learning_rate": 4.817647058823529e-05, "loss": 0.2579, "step": 1430 }, { "epoch": 84.41, "grad_norm": 0.5703680515289307, "learning_rate": 4.729411764705882e-05, "loss": 0.2602, "step": 1435 }, { "epoch": 84.71, "grad_norm": 0.5915176868438721, "learning_rate": 4.6411764705882343e-05, "loss": 0.2676, "step": 1440 }, { "epoch": 84.71, "eval_loss": 0.2662909924983978, "eval_runtime": 2.007, "eval_samples_per_second": 66.766, "eval_steps_per_second": 8.47, "step": 1440 }, { "epoch": 85.0, "grad_norm": 0.659074068069458, "learning_rate": 4.552941176470588e-05, "loss": 0.2864, "step": 1445 }, { "epoch": 85.29, "grad_norm": 0.6976066827774048, "learning_rate": 4.464705882352941e-05, "loss": 0.277, "step": 1450 }, { "epoch": 85.59, "grad_norm": 0.6575342416763306, "learning_rate": 4.376470588235293e-05, "loss": 0.2886, "step": 1455 }, { "epoch": 85.88, "grad_norm": 0.4784369170665741, "learning_rate": 4.288235294117647e-05, "loss": 0.2637, "step": 1460 }, { "epoch": 85.88, "eval_loss": 0.2663717269897461, "eval_runtime": 1.9942, "eval_samples_per_second": 67.195, "eval_steps_per_second": 8.525, "step": 1460 }, { "epoch": 86.18, "grad_norm": 0.5686805844306946, "learning_rate": 4.2e-05, "loss": 0.2677, "step": 1465 }, { "epoch": 86.47, "grad_norm": 0.6093593239784241, "learning_rate": 4.111764705882353e-05, "loss": 0.2594, "step": 1470 }, { "epoch": 86.76, "grad_norm": 0.7324638366699219, "learning_rate": 4.023529411764706e-05, "loss": 0.2916, "step": 1475 }, { "epoch": 87.06, "grad_norm": 0.6090161204338074, "learning_rate": 3.935294117647058e-05, "loss": 0.2539, "step": 1480 }, { "epoch": 87.06, "eval_loss": 0.26603055000305176, "eval_runtime": 1.9935, "eval_samples_per_second": 67.218, "eval_steps_per_second": 8.528, "step": 1480 }, { "epoch": 87.35, "grad_norm": 0.7536705136299133, "learning_rate": 3.8470588235294116e-05, "loss": 0.2886, "step": 1485 }, { "epoch": 87.65, "grad_norm": 0.5662975311279297, "learning_rate": 3.7588235294117645e-05, "loss": 0.2741, "step": 1490 }, { "epoch": 87.94, "grad_norm": 0.5700023174285889, "learning_rate": 3.6705882352941175e-05, "loss": 0.2767, "step": 1495 }, { "epoch": 88.24, "grad_norm": 0.5497795343399048, "learning_rate": 3.5823529411764704e-05, "loss": 0.2648, "step": 1500 }, { "epoch": 88.24, "eval_loss": 0.2659338712692261, "eval_runtime": 1.9949, "eval_samples_per_second": 67.171, "eval_steps_per_second": 8.522, "step": 1500 }, { "epoch": 88.53, "grad_norm": 0.5747610926628113, "learning_rate": 3.4941176470588234e-05, "loss": 0.2627, "step": 1505 }, { "epoch": 88.82, "grad_norm": 0.4665067195892334, "learning_rate": 3.405882352941176e-05, "loss": 0.2847, "step": 1510 }, { "epoch": 89.12, "grad_norm": 0.7121643424034119, "learning_rate": 3.317647058823529e-05, "loss": 0.2676, "step": 1515 }, { "epoch": 89.41, "grad_norm": 0.5986043214797974, "learning_rate": 3.229411764705882e-05, "loss": 0.2782, "step": 1520 }, { "epoch": 89.41, "eval_loss": 0.2659454941749573, "eval_runtime": 1.9957, "eval_samples_per_second": 67.143, "eval_steps_per_second": 8.518, "step": 1520 }, { "epoch": 89.71, "grad_norm": 0.5361090898513794, "learning_rate": 3.141176470588235e-05, "loss": 0.2644, "step": 1525 }, { "epoch": 90.0, "grad_norm": 0.7882270812988281, "learning_rate": 3.052941176470588e-05, "loss": 0.2778, "step": 1530 }, { "epoch": 90.29, 
"grad_norm": 0.6407003402709961, "learning_rate": 2.9647058823529407e-05, "loss": 0.2635, "step": 1535 }, { "epoch": 90.59, "grad_norm": 0.6531354188919067, "learning_rate": 2.876470588235294e-05, "loss": 0.275, "step": 1540 }, { "epoch": 90.59, "eval_loss": 0.2659270465373993, "eval_runtime": 1.9941, "eval_samples_per_second": 67.198, "eval_steps_per_second": 8.525, "step": 1540 }, { "epoch": 90.88, "grad_norm": 0.6199626326560974, "learning_rate": 2.788235294117647e-05, "loss": 0.2681, "step": 1545 }, { "epoch": 91.18, "grad_norm": 0.6273800134658813, "learning_rate": 2.6999999999999996e-05, "loss": 0.287, "step": 1550 }, { "epoch": 91.47, "grad_norm": 0.6072767972946167, "learning_rate": 2.6117647058823525e-05, "loss": 0.2583, "step": 1555 }, { "epoch": 91.76, "grad_norm": 0.5658175945281982, "learning_rate": 2.5235294117647058e-05, "loss": 0.2717, "step": 1560 }, { "epoch": 91.76, "eval_loss": 0.26581788063049316, "eval_runtime": 1.994, "eval_samples_per_second": 67.201, "eval_steps_per_second": 8.525, "step": 1560 }, { "epoch": 92.06, "grad_norm": 0.6367130279541016, "learning_rate": 2.4352941176470587e-05, "loss": 0.2819, "step": 1565 }, { "epoch": 92.35, "grad_norm": 0.6430366039276123, "learning_rate": 2.3470588235294114e-05, "loss": 0.2759, "step": 1570 }, { "epoch": 92.65, "grad_norm": 0.6165040731430054, "learning_rate": 2.2588235294117643e-05, "loss": 0.2715, "step": 1575 }, { "epoch": 92.94, "grad_norm": 0.5176894664764404, "learning_rate": 2.1705882352941176e-05, "loss": 0.2646, "step": 1580 }, { "epoch": 92.94, "eval_loss": 0.2657198905944824, "eval_runtime": 1.9962, "eval_samples_per_second": 67.126, "eval_steps_per_second": 8.516, "step": 1580 }, { "epoch": 93.24, "grad_norm": 0.7515372037887573, "learning_rate": 2.0823529411764705e-05, "loss": 0.2631, "step": 1585 }, { "epoch": 93.53, "grad_norm": 0.4853091835975647, "learning_rate": 1.994117647058823e-05, "loss": 0.2573, "step": 1590 }, { "epoch": 93.82, "grad_norm": 0.4851018190383911, "learning_rate": 1.905882352941176e-05, "loss": 0.271, "step": 1595 }, { "epoch": 94.12, "grad_norm": 0.503040075302124, "learning_rate": 1.817647058823529e-05, "loss": 0.2863, "step": 1600 }, { "epoch": 94.12, "eval_loss": 0.26566752791404724, "eval_runtime": 1.9956, "eval_samples_per_second": 67.147, "eval_steps_per_second": 8.519, "step": 1600 }, { "epoch": 94.41, "grad_norm": 0.560333788394928, "learning_rate": 1.7294117647058823e-05, "loss": 0.2546, "step": 1605 }, { "epoch": 94.71, "grad_norm": 0.5879210233688354, "learning_rate": 1.641176470588235e-05, "loss": 0.2733, "step": 1610 }, { "epoch": 95.0, "grad_norm": 0.6487263441085815, "learning_rate": 1.5529411764705882e-05, "loss": 0.2894, "step": 1615 }, { "epoch": 95.29, "grad_norm": 0.6337783336639404, "learning_rate": 1.464705882352941e-05, "loss": 0.2731, "step": 1620 }, { "epoch": 95.29, "eval_loss": 0.2656712234020233, "eval_runtime": 1.9955, "eval_samples_per_second": 67.153, "eval_steps_per_second": 8.519, "step": 1620 }, { "epoch": 95.59, "grad_norm": 0.5507830381393433, "learning_rate": 1.376470588235294e-05, "loss": 0.2801, "step": 1625 }, { "epoch": 95.88, "grad_norm": 0.7097357511520386, "learning_rate": 1.2882352941176469e-05, "loss": 0.274, "step": 1630 }, { "epoch": 96.18, "grad_norm": 0.5612406730651855, "learning_rate": 1.1999999999999999e-05, "loss": 0.2571, "step": 1635 }, { "epoch": 96.47, "grad_norm": 0.7559302449226379, "learning_rate": 1.1117647058823528e-05, "loss": 0.2795, "step": 1640 }, { "epoch": 96.47, "eval_loss": 0.2656753361225128, "eval_runtime": 
1.9935, "eval_samples_per_second": 67.217, "eval_steps_per_second": 8.528, "step": 1640 }, { "epoch": 96.76, "grad_norm": 0.542195737361908, "learning_rate": 1.0235294117647058e-05, "loss": 0.2549, "step": 1645 }, { "epoch": 97.06, "grad_norm": 0.6926820874214172, "learning_rate": 9.352941176470587e-06, "loss": 0.2598, "step": 1650 }, { "epoch": 97.35, "grad_norm": 0.5095303654670715, "learning_rate": 8.470588235294117e-06, "loss": 0.2753, "step": 1655 }, { "epoch": 97.65, "grad_norm": 0.5196524262428284, "learning_rate": 7.588235294117647e-06, "loss": 0.2501, "step": 1660 }, { "epoch": 97.65, "eval_loss": 0.26558738946914673, "eval_runtime": 1.9934, "eval_samples_per_second": 67.221, "eval_steps_per_second": 8.528, "step": 1660 }, { "epoch": 97.94, "grad_norm": 0.6086786985397339, "learning_rate": 6.705882352941176e-06, "loss": 0.2955, "step": 1665 }, { "epoch": 98.24, "grad_norm": 0.41819947957992554, "learning_rate": 5.823529411764706e-06, "loss": 0.2543, "step": 1670 }, { "epoch": 98.53, "grad_norm": 0.6898114681243896, "learning_rate": 4.941176470588235e-06, "loss": 0.288, "step": 1675 }, { "epoch": 98.82, "grad_norm": 0.5956689119338989, "learning_rate": 4.058823529411764e-06, "loss": 0.2623, "step": 1680 }, { "epoch": 98.82, "eval_loss": 0.2655787765979767, "eval_runtime": 1.9955, "eval_samples_per_second": 67.152, "eval_steps_per_second": 8.519, "step": 1680 }, { "epoch": 99.12, "grad_norm": 0.5556439757347107, "learning_rate": 3.1764705882352935e-06, "loss": 0.2614, "step": 1685 }, { "epoch": 99.41, "grad_norm": 0.46832630038261414, "learning_rate": 2.2941176470588234e-06, "loss": 0.2647, "step": 1690 }, { "epoch": 99.71, "grad_norm": 0.49263256788253784, "learning_rate": 1.4117647058823527e-06, "loss": 0.273, "step": 1695 }, { "epoch": 100.0, "grad_norm": 0.6977939605712891, "learning_rate": 5.294117647058823e-07, "loss": 0.2655, "step": 1700 }, { "epoch": 100.0, "eval_loss": 0.26560500264167786, "eval_runtime": 1.9947, "eval_samples_per_second": 67.178, "eval_steps_per_second": 8.523, "step": 1700 } ], "logging_steps": 5, "max_steps": 1700, "num_input_tokens_seen": 0, "num_train_epochs": 100, "save_steps": 20, "total_flos": 1.523488749060096e+16, "train_batch_size": 4, "trial_name": null, "trial_params": null }