{
"best_metric": 0.2525734305381775,
"best_model_checkpoint": "./convnext-tiny-upgrade-384-batch-16/checkpoint-10990",
"epoch": 10.0,
"eval_steps": 500,
"global_step": 10990,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09,
"grad_norm": 39.919612884521484,
"learning_rate": 2.999387175598269e-05,
"loss": 2.9864,
"step": 100
},
{
"epoch": 0.18,
"grad_norm": 24.833358764648438,
"learning_rate": 2.9975492031314045e-05,
"loss": 2.0393,
"step": 200
},
{
"epoch": 0.27,
"grad_norm": 26.59770965576172,
"learning_rate": 2.994487584405244e-05,
"loss": 1.5895,
"step": 300
},
{
"epoch": 0.36,
"grad_norm": 49.16465759277344,
"learning_rate": 2.990204821066006e-05,
"loss": 1.232,
"step": 400
},
{
"epoch": 0.45,
"grad_norm": 30.683462142944336,
"learning_rate": 2.984704412556199e-05,
"loss": 1.1329,
"step": 500
},
{
"epoch": 0.55,
"grad_norm": 31.109006881713867,
"learning_rate": 2.977990853255228e-05,
"loss": 1.003,
"step": 600
},
{
"epoch": 0.64,
"grad_norm": 30.210790634155273,
"learning_rate": 2.970069628807043e-05,
"loss": 0.9696,
"step": 700
},
{
"epoch": 0.73,
"grad_norm": 29.36758041381836,
"learning_rate": 2.9609472116378222e-05,
"loss": 0.9048,
"step": 800
},
{
"epoch": 0.82,
"grad_norm": 39.95523452758789,
"learning_rate": 2.9506310556673573e-05,
"loss": 0.8202,
"step": 900
},
{
"epoch": 0.91,
"grad_norm": 25.76544761657715,
"learning_rate": 2.9391295902184625e-05,
"loss": 0.855,
"step": 1000
},
{
"epoch": 1.0,
"eval_accuracy": 0.8576540755467197,
"eval_loss": 0.49257978796958923,
"eval_runtime": 68.5947,
"eval_samples_per_second": 36.665,
"eval_steps_per_second": 2.303,
"step": 1099
},
{
"epoch": 1.0,
"grad_norm": 39.65526580810547,
"learning_rate": 2.9264522131293818e-05,
"loss": 0.8411,
"step": 1100
},
{
"epoch": 1.09,
"grad_norm": 33.58937454223633,
"learning_rate": 2.9126092830748217e-05,
"loss": 0.7447,
"step": 1200
},
{
"epoch": 1.18,
"grad_norm": 32.78460693359375,
"learning_rate": 2.897612111101888e-05,
"loss": 0.7118,
"step": 1300
},
{
"epoch": 1.27,
"grad_norm": 23.4544734954834,
"learning_rate": 2.8814729513878365e-05,
"loss": 0.6763,
"step": 1400
},
{
"epoch": 1.36,
"grad_norm": 22.840782165527344,
"learning_rate": 2.864204991227195e-05,
"loss": 0.6592,
"step": 1500
},
{
"epoch": 1.46,
"grad_norm": 30.469961166381836,
"learning_rate": 2.8458223402564366e-05,
"loss": 0.6426,
"step": 1600
},
{
"epoch": 1.55,
"grad_norm": 22.303668975830078,
"learning_rate": 2.826340018925006e-05,
"loss": 0.6948,
"step": 1700
},
{
"epoch": 1.64,
"grad_norm": 31.617197036743164,
"learning_rate": 2.8057739462221215e-05,
"loss": 0.6894,
"step": 1800
},
{
"epoch": 1.73,
"grad_norm": 29.403879165649414,
"learning_rate": 2.7841409266693838e-05,
"loss": 0.584,
"step": 1900
},
{
"epoch": 1.82,
"grad_norm": 20.05999183654785,
"learning_rate": 2.761458636589813e-05,
"loss": 0.5967,
"step": 2000
},
{
"epoch": 1.91,
"grad_norm": 30.453641891479492,
"learning_rate": 2.7377456096645395e-05,
"loss": 0.6243,
"step": 2100
},
{
"epoch": 2.0,
"eval_accuracy": 0.8910536779324055,
"eval_loss": 0.3875384032726288,
"eval_runtime": 68.6682,
"eval_samples_per_second": 36.625,
"eval_steps_per_second": 2.301,
"step": 2198
},
{
"epoch": 2.0,
"grad_norm": 22.794034957885742,
"learning_rate": 2.7130212217889484e-05,
"loss": 0.5952,
"step": 2200
},
{
"epoch": 2.09,
"grad_norm": 21.936601638793945,
"learning_rate": 2.6873056752406504e-05,
"loss": 0.53,
"step": 2300
},
{
"epoch": 2.18,
"grad_norm": 26.926753997802734,
"learning_rate": 2.6606199821722166e-05,
"loss": 0.5591,
"step": 2400
},
{
"epoch": 2.27,
"grad_norm": 23.042003631591797,
"learning_rate": 2.632985947442167e-05,
"loss": 0.5301,
"step": 2500
},
{
"epoch": 2.37,
"grad_norm": 19.099658966064453,
"learning_rate": 2.6044261507982356e-05,
"loss": 0.5852,
"step": 2600
},
{
"epoch": 2.46,
"grad_norm": 33.43634796142578,
"learning_rate": 2.5749639284274782e-05,
"loss": 0.4954,
"step": 2700
},
{
"epoch": 2.55,
"grad_norm": 20.47861099243164,
"learning_rate": 2.5446233538882924e-05,
"loss": 0.5428,
"step": 2800
},
{
"epoch": 2.64,
"grad_norm": 45.73374938964844,
"learning_rate": 2.513429218439932e-05,
"loss": 0.5164,
"step": 2900
},
{
"epoch": 2.73,
"grad_norm": 17.044771194458008,
"learning_rate": 2.4814070107855878e-05,
"loss": 0.5257,
"step": 3000
},
{
"epoch": 2.82,
"grad_norm": 23.656314849853516,
"learning_rate": 2.448582896245591e-05,
"loss": 0.5295,
"step": 3100
},
{
"epoch": 2.91,
"grad_norm": 59.598411560058594,
"learning_rate": 2.4149836953777488e-05,
"loss": 0.4776,
"step": 3200
},
{
"epoch": 3.0,
"eval_accuracy": 0.9125248508946322,
"eval_loss": 0.3229917287826538,
"eval_runtime": 69.7961,
"eval_samples_per_second": 36.034,
"eval_steps_per_second": 2.264,
"step": 3297
},
{
"epoch": 3.0,
"grad_norm": 14.091382026672363,
"learning_rate": 2.3806368620622876e-05,
"loss": 0.5338,
"step": 3300
},
{
"epoch": 3.09,
"grad_norm": 34.719451904296875,
"learning_rate": 2.345570461069312e-05,
"loss": 0.4399,
"step": 3400
},
{
"epoch": 3.18,
"grad_norm": 29.300067901611328,
"learning_rate": 2.3098131451271016e-05,
"loss": 0.4671,
"step": 3500
},
{
"epoch": 3.28,
"grad_norm": 23.41453742980957,
"learning_rate": 2.2733941315099883e-05,
"loss": 0.4643,
"step": 3600
},
{
"epoch": 3.37,
"grad_norm": 18.566978454589844,
"learning_rate": 2.2363431781649483e-05,
"loss": 0.4193,
"step": 3700
},
{
"epoch": 3.46,
"grad_norm": 25.832847595214844,
"learning_rate": 2.1986905593964048e-05,
"loss": 0.4921,
"step": 3800
},
{
"epoch": 3.55,
"grad_norm": 25.94115447998047,
"learning_rate": 2.1604670411291174e-05,
"loss": 0.4778,
"step": 3900
},
{
"epoch": 3.64,
"grad_norm": 20.953384399414062,
"learning_rate": 2.121703855769373e-05,
"loss": 0.4823,
"step": 4000
},
{
"epoch": 3.73,
"grad_norm": 20.484058380126953,
"learning_rate": 2.0824326766850072e-05,
"loss": 0.4804,
"step": 4100
},
{
"epoch": 3.82,
"grad_norm": 6.4666290283203125,
"learning_rate": 2.042685592325123e-05,
"loss": 0.4492,
"step": 4200
},
{
"epoch": 3.91,
"grad_norm": 36.683509826660156,
"learning_rate": 2.0024950800006463e-05,
"loss": 0.4535,
"step": 4300
},
{
"epoch": 4.0,
"eval_accuracy": 0.9204771371769384,
"eval_loss": 0.2854064702987671,
"eval_runtime": 68.5965,
"eval_samples_per_second": 36.664,
"eval_steps_per_second": 2.303,
"step": 4396
},
{
"epoch": 4.0,
"grad_norm": 8.934366226196289,
"learning_rate": 1.961893979347137e-05,
"loss": 0.3936,
"step": 4400
},
{
"epoch": 4.09,
"grad_norm": 19.066776275634766,
"learning_rate": 1.9209154654915524e-05,
"loss": 0.4025,
"step": 4500
},
{
"epoch": 4.19,
"grad_norm": 35.145851135253906,
"learning_rate": 1.879593021944875e-05,
"loss": 0.4269,
"step": 4600
},
{
"epoch": 4.28,
"grad_norm": 53.010009765625,
"learning_rate": 1.837960413242765e-05,
"loss": 0.4041,
"step": 4700
},
{
"epoch": 4.37,
"grad_norm": 20.637130737304688,
"learning_rate": 1.796051657356582e-05,
"loss": 0.3865,
"step": 4800
},
{
"epoch": 4.46,
"grad_norm": 44.37509536743164,
"learning_rate": 1.7539009978973312e-05,
"loss": 0.3781,
"step": 4900
},
{
"epoch": 4.55,
"grad_norm": 26.981489181518555,
"learning_rate": 1.711542876135233e-05,
"loss": 0.3876,
"step": 5000
},
{
"epoch": 4.64,
"grad_norm": 36.75742721557617,
"learning_rate": 1.669011902857791e-05,
"loss": 0.3919,
"step": 5100
},
{
"epoch": 4.73,
"grad_norm": 15.431539535522461,
"learning_rate": 1.6263428300893422e-05,
"loss": 0.4433,
"step": 5200
},
{
"epoch": 4.82,
"grad_norm": 26.744518280029297,
"learning_rate": 1.5835705226952112e-05,
"loss": 0.4334,
"step": 5300
},
{
"epoch": 4.91,
"grad_norm": 7.719830513000488,
"learning_rate": 1.540729929893649e-05,
"loss": 0.4204,
"step": 5400
},
{
"epoch": 5.0,
"eval_accuracy": 0.9168986083499006,
"eval_loss": 0.29153481125831604,
"eval_runtime": 68.6941,
"eval_samples_per_second": 36.612,
"eval_steps_per_second": 2.3,
"step": 5495
},
{
"epoch": 5.0,
"grad_norm": 18.945358276367188,
"learning_rate": 1.4978560566988603e-05,
"loss": 0.4077,
"step": 5500
},
{
"epoch": 5.1,
"grad_norm": 32.922447204589844,
"learning_rate": 1.454983935318433e-05,
"loss": 0.356,
"step": 5600
},
{
"epoch": 5.19,
"grad_norm": 23.755434036254883,
"learning_rate": 1.4121485965285485e-05,
"loss": 0.3569,
"step": 5700
},
{
"epoch": 5.28,
"grad_norm": 23.00724983215332,
"learning_rate": 1.3693850410503614e-05,
"loss": 0.379,
"step": 5800
},
{
"epoch": 5.37,
"grad_norm": 15.597938537597656,
"learning_rate": 1.326728210950942e-05,
"loss": 0.3608,
"step": 5900
},
{
"epoch": 5.46,
"grad_norm": 16.457727432250977,
"learning_rate": 1.2842129610921378e-05,
"loss": 0.3613,
"step": 6000
},
{
"epoch": 5.55,
"grad_norm": 20.524658203125,
"learning_rate": 1.2418740306506923e-05,
"loss": 0.3631,
"step": 6100
},
{
"epoch": 5.64,
"grad_norm": 31.883596420288086,
"learning_rate": 1.1997460147328984e-05,
"loss": 0.343,
"step": 6200
},
{
"epoch": 5.73,
"grad_norm": 18.492212295532227,
"learning_rate": 1.1578633361069559e-05,
"loss": 0.3839,
"step": 6300
},
{
"epoch": 5.82,
"grad_norm": 18.56053924560547,
"learning_rate": 1.1162602170761611e-05,
"loss": 0.3661,
"step": 6400
},
{
"epoch": 5.91,
"grad_norm": 34.4693489074707,
"learning_rate": 1.0749706515158863e-05,
"loss": 0.3756,
"step": 6500
},
{
"epoch": 6.0,
"eval_accuracy": 0.9192842942345925,
"eval_loss": 0.29144659638404846,
"eval_runtime": 68.5475,
"eval_samples_per_second": 36.69,
"eval_steps_per_second": 2.305,
"step": 6594
},
{
"epoch": 6.01,
"grad_norm": 18.55854034423828,
"learning_rate": 1.0340283770972167e-05,
"loss": 0.3684,
"step": 6600
},
{
"epoch": 6.1,
"grad_norm": 23.47572135925293,
"learning_rate": 9.93466847719919e-06,
"loss": 0.3282,
"step": 6700
},
{
"epoch": 6.19,
"grad_norm": 26.671112060546875,
"learning_rate": 9.533192061772919e-06,
"loss": 0.3611,
"step": 6800
},
{
"epoch": 6.28,
"grad_norm": 7.420229434967041,
"learning_rate": 9.136182570752153e-06,
"loss": 0.3547,
"step": 6900
},
{
"epoch": 6.37,
"grad_norm": 13.92644214630127,
"learning_rate": 8.743964400275304e-06,
"loss": 0.3029,
"step": 7000
},
{
"epoch": 6.46,
"grad_norm": 12.337591171264648,
"learning_rate": 8.356858031496596e-06,
"loss": 0.3657,
"step": 7100
},
{
"epoch": 6.55,
"grad_norm": 10.994024276733398,
"learning_rate": 7.975179768721187e-06,
"loss": 0.3179,
"step": 7200
},
{
"epoch": 6.64,
"grad_norm": 26.956287384033203,
"learning_rate": 7.599241480953112e-06,
"loss": 0.3009,
"step": 7300
},
{
"epoch": 6.73,
"grad_norm": 29.974740982055664,
"learning_rate": 7.229350347067426e-06,
"loss": 0.3297,
"step": 7400
},
{
"epoch": 6.82,
"grad_norm": 11.850204467773438,
"learning_rate": 6.865808604814564e-06,
"loss": 0.3175,
"step": 7500
},
{
"epoch": 6.92,
"grad_norm": 4.603498935699463,
"learning_rate": 6.508913303862144e-06,
"loss": 0.3603,
"step": 7600
},
{
"epoch": 7.0,
"eval_accuracy": 0.9272365805168986,
"eval_loss": 0.26446533203125,
"eval_runtime": 68.6296,
"eval_samples_per_second": 36.646,
"eval_steps_per_second": 2.302,
"step": 7693
},
{
"epoch": 7.01,
"grad_norm": 24.59402084350586,
"learning_rate": 6.1589560630758656e-06,
"loss": 0.2715,
"step": 7700
},
{
"epoch": 7.1,
"grad_norm": 37.25328063964844,
"learning_rate": 5.8162228322380155e-06,
"loss": 0.3105,
"step": 7800
},
{
"epoch": 7.19,
"grad_norm": 17.36973762512207,
"learning_rate": 5.480993658398129e-06,
"loss": 0.3269,
"step": 7900
},
{
"epoch": 7.28,
"grad_norm": 12.080437660217285,
"learning_rate": 5.153542457046737e-06,
"loss": 0.3391,
"step": 8000
},
{
"epoch": 7.37,
"grad_norm": 32.013824462890625,
"learning_rate": 4.834136788299248e-06,
"loss": 0.3345,
"step": 8100
},
{
"epoch": 7.46,
"grad_norm": 36.49650573730469,
"learning_rate": 4.523037638272822e-06,
"loss": 0.311,
"step": 8200
},
{
"epoch": 7.55,
"grad_norm": 60.167484283447266,
"learning_rate": 4.220499205834783e-06,
"loss": 0.2984,
"step": 8300
},
{
"epoch": 7.64,
"grad_norm": 22.172080993652344,
"learning_rate": 3.926768694896931e-06,
"loss": 0.3295,
"step": 8400
},
{
"epoch": 7.73,
"grad_norm": 10.280725479125977,
"learning_rate": 3.6420861124254607e-06,
"loss": 0.2991,
"step": 8500
},
{
"epoch": 7.83,
"grad_norm": 31.965059280395508,
"learning_rate": 3.3666840723314145e-06,
"loss": 0.3375,
"step": 8600
},
{
"epoch": 7.92,
"grad_norm": 15.358006477355957,
"learning_rate": 3.1007876054020724e-06,
"loss": 0.2885,
"step": 8700
},
{
"epoch": 8.0,
"eval_accuracy": 0.9280318091451292,
"eval_loss": 0.2599093019962311,
"eval_runtime": 70.1401,
"eval_samples_per_second": 35.857,
"eval_steps_per_second": 2.253,
"step": 8792
},
{
"epoch": 8.01,
"grad_norm": 18.399208068847656,
"learning_rate": 2.8446139754284486e-06,
"loss": 0.2717,
"step": 8800
},
{
"epoch": 8.1,
"grad_norm": 16.944734573364258,
"learning_rate": 2.5983725016792574e-06,
"loss": 0.2986,
"step": 8900
},
{
"epoch": 8.19,
"grad_norm": 18.830663681030273,
"learning_rate": 2.36226438786627e-06,
"loss": 0.2738,
"step": 9000
},
{
"epoch": 8.28,
"grad_norm": 19.215288162231445,
"learning_rate": 2.1364825577409424e-06,
"loss": 0.2496,
"step": 9100
},
{
"epoch": 8.37,
"grad_norm": 3.411406993865967,
"learning_rate": 1.9212114974565664e-06,
"loss": 0.2954,
"step": 9200
},
{
"epoch": 8.46,
"grad_norm": 24.457778930664062,
"learning_rate": 1.7166271048247796e-06,
"loss": 0.2678,
"step": 9300
},
{
"epoch": 8.55,
"grad_norm": 7.872967720031738,
"learning_rate": 1.5228965455896054e-06,
"loss": 0.2968,
"step": 9400
},
{
"epoch": 8.64,
"grad_norm": 27.78696060180664,
"learning_rate": 1.3401781168364591e-06,
"loss": 0.3052,
"step": 9500
},
{
"epoch": 8.74,
"grad_norm": 35.54745101928711,
"learning_rate": 1.1686211176477208e-06,
"loss": 0.2943,
"step": 9600
},
{
"epoch": 8.83,
"grad_norm": 19.328489303588867,
"learning_rate": 1.00836572711058e-06,
"loss": 0.3274,
"step": 9700
},
{
"epoch": 8.92,
"grad_norm": 35.09791564941406,
"learning_rate": 8.595428897768071e-07,
"loss": 0.2753,
"step": 9800
},
{
"epoch": 9.0,
"eval_accuracy": 0.9292246520874752,
"eval_loss": 0.25652942061424255,
"eval_runtime": 69.0906,
"eval_samples_per_second": 36.401,
"eval_steps_per_second": 2.287,
"step": 9891
},
{
"epoch": 9.01,
"grad_norm": 34.884788513183594,
"learning_rate": 7.222742086680756e-07,
"loss": 0.2635,
"step": 9900
},
{
"epoch": 9.1,
"grad_norm": 4.236999988555908,
"learning_rate": 5.966718459142196e-07,
"loss": 0.2781,
"step": 10000
},
{
"epoch": 9.19,
"grad_norm": 22.74588966369629,
"learning_rate": 4.82838431105655e-07,
"loss": 0.2801,
"step": 10100
},
{
"epoch": 9.28,
"grad_norm": 31.477947235107422,
"learning_rate": 3.808669774348167e-07,
"loss": 0.2562,
"step": 10200
},
{
"epoch": 9.37,
"grad_norm": 26.103023529052734,
"learning_rate": 2.908408056951578e-07,
"loss": 0.3041,
"step": 10300
},
{
"epoch": 9.46,
"grad_norm": 20.699371337890625,
"learning_rate": 2.1283347619979243e-07,
"loss": 0.2755,
"step": 10400
},
{
"epoch": 9.55,
"grad_norm": 30.313541412353516,
"learning_rate": 1.4690872867542892e-07,
"loss": 0.3044,
"step": 10500
},
{
"epoch": 9.65,
"grad_norm": 31.09404754638672,
"learning_rate": 9.312043018067762e-08,
"loss": 0.3142,
"step": 10600
},
{
"epoch": 9.74,
"grad_norm": 23.177568435668945,
"learning_rate": 5.1512531091333914e-08,
"loss": 0.2853,
"step": 10700
},
{
"epoch": 9.83,
"grad_norm": 45.65806198120117,
"learning_rate": 2.211902918855313e-08,
"loss": 0.3018,
"step": 10800
},
{
"epoch": 9.92,
"grad_norm": 26.684669494628906,
"learning_rate": 4.963941879295164e-09,
"loss": 0.2902,
"step": 10900
},
{
"epoch": 10.0,
"eval_accuracy": 0.9292246520874752,
"eval_loss": 0.2525734305381775,
"eval_runtime": 68.818,
"eval_samples_per_second": 36.546,
"eval_steps_per_second": 2.296,
"step": 10990
},
{
"epoch": 10.0,
"step": 10990,
"total_flos": 1.301428412334932e+19,
"train_loss": 0.49425973189321837,
"train_runtime": 10500.3736,
"train_samples_per_second": 16.743,
"train_steps_per_second": 1.047
}
],
"logging_steps": 100,
"max_steps": 10990,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 1.301428412334932e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}