{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 15.600624024960998,
"eval_steps": 1000,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.078003120124805,
"grad_norm": 6.3628339767456055,
"learning_rate": 5.000000000000001e-07,
"loss": 1.5177,
"step": 25
},
{
"epoch": 0.15600624024961,
"grad_norm": 5.831232070922852,
"learning_rate": 9.800000000000001e-07,
"loss": 1.1971,
"step": 50
},
{
"epoch": 0.23400936037441497,
"grad_norm": 4.637149333953857,
"learning_rate": 1.48e-06,
"loss": 0.8901,
"step": 75
},
{
"epoch": 0.31201248049922,
"grad_norm": 4.270649433135986,
"learning_rate": 1.98e-06,
"loss": 0.7938,
"step": 100
},
{
"epoch": 0.39001560062402496,
"grad_norm": 5.095465183258057,
"learning_rate": 2.4800000000000004e-06,
"loss": 0.721,
"step": 125
},
{
"epoch": 0.46801872074882994,
"grad_norm": 5.3378520011901855,
"learning_rate": 2.9800000000000003e-06,
"loss": 0.7105,
"step": 150
},
{
"epoch": 0.5460218408736349,
"grad_norm": 4.5053558349609375,
"learning_rate": 3.46e-06,
"loss": 0.6863,
"step": 175
},
{
"epoch": 0.62402496099844,
"grad_norm": 4.717932224273682,
"learning_rate": 3.96e-06,
"loss": 0.6458,
"step": 200
},
{
"epoch": 0.7020280811232449,
"grad_norm": 4.187346458435059,
"learning_rate": 4.4600000000000005e-06,
"loss": 0.6414,
"step": 225
},
{
"epoch": 0.7800312012480499,
"grad_norm": 3.789942979812622,
"learning_rate": 4.960000000000001e-06,
"loss": 0.6233,
"step": 250
},
{
"epoch": 0.858034321372855,
"grad_norm": 5.197734832763672,
"learning_rate": 5.460000000000001e-06,
"loss": 0.601,
"step": 275
},
{
"epoch": 0.9360374414976599,
"grad_norm": 4.216986656188965,
"learning_rate": 5.9600000000000005e-06,
"loss": 0.626,
"step": 300
},
{
"epoch": 1.0140405616224648,
"grad_norm": 3.443754196166992,
"learning_rate": 6.460000000000001e-06,
"loss": 0.5809,
"step": 325
},
{
"epoch": 1.0920436817472698,
"grad_norm": 4.23879861831665,
"learning_rate": 6.96e-06,
"loss": 0.4931,
"step": 350
},
{
"epoch": 1.1700468018720749,
"grad_norm": 4.616464614868164,
"learning_rate": 7.440000000000001e-06,
"loss": 0.4833,
"step": 375
},
{
"epoch": 1.24804992199688,
"grad_norm": 3.7008609771728516,
"learning_rate": 7.94e-06,
"loss": 0.4792,
"step": 400
},
{
"epoch": 1.3260530421216847,
"grad_norm": 4.368692874908447,
"learning_rate": 8.44e-06,
"loss": 0.4924,
"step": 425
},
{
"epoch": 1.4040561622464898,
"grad_norm": 3.1923470497131348,
"learning_rate": 8.94e-06,
"loss": 0.4968,
"step": 450
},
{
"epoch": 1.4820592823712948,
"grad_norm": 3.799287796020508,
"learning_rate": 9.440000000000001e-06,
"loss": 0.4759,
"step": 475
},
{
"epoch": 1.5600624024960998,
"grad_norm": 3.8596694469451904,
"learning_rate": 9.940000000000001e-06,
"loss": 0.4884,
"step": 500
},
{
"epoch": 1.6380655226209049,
"grad_norm": 3.965977907180786,
"learning_rate": 9.951111111111111e-06,
"loss": 0.5001,
"step": 525
},
{
"epoch": 1.71606864274571,
"grad_norm": 11.547295570373535,
"learning_rate": 9.895555555555557e-06,
"loss": 0.4972,
"step": 550
},
{
"epoch": 1.794071762870515,
"grad_norm": 3.6599745750427246,
"learning_rate": 9.84e-06,
"loss": 0.4955,
"step": 575
},
{
"epoch": 1.8720748829953198,
"grad_norm": 3.5883848667144775,
"learning_rate": 9.784444444444445e-06,
"loss": 0.477,
"step": 600
},
{
"epoch": 1.9500780031201248,
"grad_norm": 3.8101539611816406,
"learning_rate": 9.72888888888889e-06,
"loss": 0.4649,
"step": 625
},
{
"epoch": 2.0280811232449296,
"grad_norm": 3.238903284072876,
"learning_rate": 9.673333333333334e-06,
"loss": 0.4188,
"step": 650
},
{
"epoch": 2.1060842433697347,
"grad_norm": 13.860888481140137,
"learning_rate": 9.617777777777778e-06,
"loss": 0.3054,
"step": 675
},
{
"epoch": 2.1840873634945397,
"grad_norm": 3.1065433025360107,
"learning_rate": 9.562222222222223e-06,
"loss": 0.3189,
"step": 700
},
{
"epoch": 2.2620904836193447,
"grad_norm": 3.5035383701324463,
"learning_rate": 9.506666666666667e-06,
"loss": 0.3336,
"step": 725
},
{
"epoch": 2.3400936037441498,
"grad_norm": 3.15653395652771,
"learning_rate": 9.451111111111112e-06,
"loss": 0.3049,
"step": 750
},
{
"epoch": 2.418096723868955,
"grad_norm": 3.5808379650115967,
"learning_rate": 9.395555555555556e-06,
"loss": 0.3039,
"step": 775
},
{
"epoch": 2.49609984399376,
"grad_norm": 3.342785358428955,
"learning_rate": 9.340000000000002e-06,
"loss": 0.3245,
"step": 800
},
{
"epoch": 2.574102964118565,
"grad_norm": 3.4457006454467773,
"learning_rate": 9.284444444444444e-06,
"loss": 0.3191,
"step": 825
},
{
"epoch": 2.6521060842433695,
"grad_norm": 2.653717517852783,
"learning_rate": 9.22888888888889e-06,
"loss": 0.3094,
"step": 850
},
{
"epoch": 2.7301092043681745,
"grad_norm": 3.676502227783203,
"learning_rate": 9.173333333333334e-06,
"loss": 0.3132,
"step": 875
},
{
"epoch": 2.8081123244929795,
"grad_norm": 2.7381463050842285,
"learning_rate": 9.117777777777778e-06,
"loss": 0.325,
"step": 900
},
{
"epoch": 2.8861154446177846,
"grad_norm": 3.100311040878296,
"learning_rate": 9.062222222222224e-06,
"loss": 0.3068,
"step": 925
},
{
"epoch": 2.9641185647425896,
"grad_norm": 3.143117904663086,
"learning_rate": 9.006666666666666e-06,
"loss": 0.3131,
"step": 950
},
{
"epoch": 3.0421216848673946,
"grad_norm": 2.2605462074279785,
"learning_rate": 8.951111111111112e-06,
"loss": 0.2532,
"step": 975
},
{
"epoch": 3.1201248049921997,
"grad_norm": 2.582007884979248,
"learning_rate": 8.895555555555556e-06,
"loss": 0.18,
"step": 1000
},
{
"epoch": 3.1201248049921997,
"eval_loss": 0.5584835410118103,
"eval_runtime": 521.3754,
"eval_samples_per_second": 2.457,
"eval_steps_per_second": 0.155,
"eval_wer": 0.37714912815510304,
"step": 1000
},
{
"epoch": 3.1981279251170047,
"grad_norm": 2.988361120223999,
"learning_rate": 8.84e-06,
"loss": 0.1874,
"step": 1025
},
{
"epoch": 3.2761310452418098,
"grad_norm": 4.793211936950684,
"learning_rate": 8.784444444444446e-06,
"loss": 0.1892,
"step": 1050
},
{
"epoch": 3.354134165366615,
"grad_norm": 2.9947147369384766,
"learning_rate": 8.72888888888889e-06,
"loss": 0.1888,
"step": 1075
},
{
"epoch": 3.43213728549142,
"grad_norm": 3.2537567615509033,
"learning_rate": 8.673333333333334e-06,
"loss": 0.1949,
"step": 1100
},
{
"epoch": 3.510140405616225,
"grad_norm": 2.6582891941070557,
"learning_rate": 8.617777777777778e-06,
"loss": 0.1933,
"step": 1125
},
{
"epoch": 3.58814352574103,
"grad_norm": 2.752394914627075,
"learning_rate": 8.562222222222224e-06,
"loss": 0.1967,
"step": 1150
},
{
"epoch": 3.6661466458658345,
"grad_norm": 2.853532552719116,
"learning_rate": 8.506666666666668e-06,
"loss": 0.1978,
"step": 1175
},
{
"epoch": 3.7441497659906395,
"grad_norm": 2.4331047534942627,
"learning_rate": 8.451111111111112e-06,
"loss": 0.2051,
"step": 1200
},
{
"epoch": 3.8221528861154446,
"grad_norm": 2.7613861560821533,
"learning_rate": 8.395555555555557e-06,
"loss": 0.1948,
"step": 1225
},
{
"epoch": 3.9001560062402496,
"grad_norm": 2.684624433517456,
"learning_rate": 8.34e-06,
"loss": 0.2048,
"step": 1250
},
{
"epoch": 3.9781591263650546,
"grad_norm": 4.769908428192139,
"learning_rate": 8.284444444444446e-06,
"loss": 0.2045,
"step": 1275
},
{
"epoch": 4.056162246489859,
"grad_norm": 2.2372353076934814,
"learning_rate": 8.22888888888889e-06,
"loss": 0.1319,
"step": 1300
},
{
"epoch": 4.134165366614664,
"grad_norm": 7.478884220123291,
"learning_rate": 8.173333333333334e-06,
"loss": 0.1106,
"step": 1325
},
{
"epoch": 4.212168486739469,
"grad_norm": 3.3369452953338623,
"learning_rate": 8.11777777777778e-06,
"loss": 0.1115,
"step": 1350
},
{
"epoch": 4.290171606864274,
"grad_norm": 2.1033334732055664,
"learning_rate": 8.062222222222222e-06,
"loss": 0.1148,
"step": 1375
},
{
"epoch": 4.368174726989079,
"grad_norm": 1.9185432195663452,
"learning_rate": 8.006666666666667e-06,
"loss": 0.1082,
"step": 1400
},
{
"epoch": 4.446177847113884,
"grad_norm": 2.428751230239868,
"learning_rate": 7.951111111111111e-06,
"loss": 0.1129,
"step": 1425
},
{
"epoch": 4.5241809672386895,
"grad_norm": 2.757439374923706,
"learning_rate": 7.895555555555557e-06,
"loss": 0.1184,
"step": 1450
},
{
"epoch": 4.6021840873634945,
"grad_norm": 2.134047508239746,
"learning_rate": 7.840000000000001e-06,
"loss": 0.1109,
"step": 1475
},
{
"epoch": 4.6801872074882995,
"grad_norm": 3.3976402282714844,
"learning_rate": 7.784444444444445e-06,
"loss": 0.1209,
"step": 1500
},
{
"epoch": 4.758190327613105,
"grad_norm": 2.2624588012695312,
"learning_rate": 7.72888888888889e-06,
"loss": 0.116,
"step": 1525
},
{
"epoch": 4.83619344773791,
"grad_norm": 2.8306870460510254,
"learning_rate": 7.673333333333333e-06,
"loss": 0.1174,
"step": 1550
},
{
"epoch": 4.914196567862715,
"grad_norm": 2.3666274547576904,
"learning_rate": 7.617777777777778e-06,
"loss": 0.1127,
"step": 1575
},
{
"epoch": 4.99219968798752,
"grad_norm": 2.4004135131835938,
"learning_rate": 7.562222222222223e-06,
"loss": 0.1143,
"step": 1600
},
{
"epoch": 5.070202808112325,
"grad_norm": 1.8675142526626587,
"learning_rate": 7.506666666666668e-06,
"loss": 0.0694,
"step": 1625
},
{
"epoch": 5.14820592823713,
"grad_norm": 1.7827036380767822,
"learning_rate": 7.451111111111111e-06,
"loss": 0.0613,
"step": 1650
},
{
"epoch": 5.226209048361935,
"grad_norm": 1.6281778812408447,
"learning_rate": 7.395555555555556e-06,
"loss": 0.062,
"step": 1675
},
{
"epoch": 5.30421216848674,
"grad_norm": 3.8592145442962646,
"learning_rate": 7.340000000000001e-06,
"loss": 0.0672,
"step": 1700
},
{
"epoch": 5.382215288611545,
"grad_norm": 2.766690731048584,
"learning_rate": 7.284444444444445e-06,
"loss": 0.0594,
"step": 1725
},
{
"epoch": 5.460218408736349,
"grad_norm": 3.115182399749756,
"learning_rate": 7.22888888888889e-06,
"loss": 0.0625,
"step": 1750
},
{
"epoch": 5.538221528861154,
"grad_norm": 1.6196472644805908,
"learning_rate": 7.173333333333335e-06,
"loss": 0.06,
"step": 1775
},
{
"epoch": 5.616224648985959,
"grad_norm": 2.140605926513672,
"learning_rate": 7.117777777777778e-06,
"loss": 0.0633,
"step": 1800
},
{
"epoch": 5.694227769110764,
"grad_norm": 2.190873622894287,
"learning_rate": 7.062222222222223e-06,
"loss": 0.0678,
"step": 1825
},
{
"epoch": 5.772230889235569,
"grad_norm": 2.369245767593384,
"learning_rate": 7.006666666666667e-06,
"loss": 0.0657,
"step": 1850
},
{
"epoch": 5.850234009360374,
"grad_norm": 1.8441182374954224,
"learning_rate": 6.951111111111112e-06,
"loss": 0.0652,
"step": 1875
},
{
"epoch": 5.928237129485179,
"grad_norm": 1.8645758628845215,
"learning_rate": 6.8955555555555565e-06,
"loss": 0.0643,
"step": 1900
},
{
"epoch": 6.006240249609984,
"grad_norm": 2.1204230785369873,
"learning_rate": 6.8400000000000014e-06,
"loss": 0.0614,
"step": 1925
},
{
"epoch": 6.084243369734789,
"grad_norm": 1.5469348430633545,
"learning_rate": 6.784444444444445e-06,
"loss": 0.0364,
"step": 1950
},
{
"epoch": 6.162246489859594,
"grad_norm": 2.485136032104492,
"learning_rate": 6.7288888888888895e-06,
"loss": 0.0356,
"step": 1975
},
{
"epoch": 6.240249609984399,
"grad_norm": 1.3276453018188477,
"learning_rate": 6.6733333333333335e-06,
"loss": 0.0328,
"step": 2000
},
{
"epoch": 6.240249609984399,
"eval_loss": 0.6914765238761902,
"eval_runtime": 519.148,
"eval_samples_per_second": 2.468,
"eval_steps_per_second": 0.156,
"eval_wer": 0.3699548835507865,
"step": 2000
},
{
"epoch": 6.318252730109204,
"grad_norm": 2.1222565174102783,
"learning_rate": 6.617777777777778e-06,
"loss": 0.0363,
"step": 2025
},
{
"epoch": 6.396255850234009,
"grad_norm": 1.7250409126281738,
"learning_rate": 6.562222222222223e-06,
"loss": 0.036,
"step": 2050
},
{
"epoch": 6.4742589703588145,
"grad_norm": 1.5123999118804932,
"learning_rate": 6.5066666666666665e-06,
"loss": 0.0382,
"step": 2075
},
{
"epoch": 6.5522620904836195,
"grad_norm": 1.8385040760040283,
"learning_rate": 6.451111111111111e-06,
"loss": 0.0362,
"step": 2100
},
{
"epoch": 6.6302652106084246,
"grad_norm": 1.9721713066101074,
"learning_rate": 6.395555555555556e-06,
"loss": 0.0374,
"step": 2125
},
{
"epoch": 6.70826833073323,
"grad_norm": 2.1653904914855957,
"learning_rate": 6.34e-06,
"loss": 0.0372,
"step": 2150
},
{
"epoch": 6.786271450858035,
"grad_norm": 1.9096741676330566,
"learning_rate": 6.284444444444445e-06,
"loss": 0.0362,
"step": 2175
},
{
"epoch": 6.86427457098284,
"grad_norm": 1.5271517038345337,
"learning_rate": 6.22888888888889e-06,
"loss": 0.0375,
"step": 2200
},
{
"epoch": 6.942277691107645,
"grad_norm": 1.6990188360214233,
"learning_rate": 6.173333333333333e-06,
"loss": 0.0377,
"step": 2225
},
{
"epoch": 7.02028081123245,
"grad_norm": 1.4633820056915283,
"learning_rate": 6.117777777777778e-06,
"loss": 0.0362,
"step": 2250
},
{
"epoch": 7.098283931357254,
"grad_norm": 0.841683566570282,
"learning_rate": 6.062222222222223e-06,
"loss": 0.0197,
"step": 2275
},
{
"epoch": 7.176287051482059,
"grad_norm": 1.274117112159729,
"learning_rate": 6.006666666666667e-06,
"loss": 0.0203,
"step": 2300
},
{
"epoch": 7.254290171606864,
"grad_norm": 2.547511100769043,
"learning_rate": 5.951111111111112e-06,
"loss": 0.0232,
"step": 2325
},
{
"epoch": 7.332293291731669,
"grad_norm": 1.2308272123336792,
"learning_rate": 5.895555555555557e-06,
"loss": 0.0248,
"step": 2350
},
{
"epoch": 7.410296411856474,
"grad_norm": 1.271571159362793,
"learning_rate": 5.84e-06,
"loss": 0.0204,
"step": 2375
},
{
"epoch": 7.488299531981279,
"grad_norm": 2.4256279468536377,
"learning_rate": 5.784444444444445e-06,
"loss": 0.0236,
"step": 2400
},
{
"epoch": 7.566302652106084,
"grad_norm": 1.8029208183288574,
"learning_rate": 5.72888888888889e-06,
"loss": 0.0225,
"step": 2425
},
{
"epoch": 7.644305772230889,
"grad_norm": 1.7934774160385132,
"learning_rate": 5.673333333333334e-06,
"loss": 0.0222,
"step": 2450
},
{
"epoch": 7.722308892355694,
"grad_norm": 0.9680442214012146,
"learning_rate": 5.617777777777779e-06,
"loss": 0.0219,
"step": 2475
},
{
"epoch": 7.800312012480499,
"grad_norm": 1.29971182346344,
"learning_rate": 5.562222222222222e-06,
"loss": 0.0226,
"step": 2500
},
{
"epoch": 7.878315132605304,
"grad_norm": 1.8278181552886963,
"learning_rate": 5.506666666666667e-06,
"loss": 0.0236,
"step": 2525
},
{
"epoch": 7.956318252730109,
"grad_norm": 1.8502779006958008,
"learning_rate": 5.451111111111112e-06,
"loss": 0.0229,
"step": 2550
},
{
"epoch": 8.034321372854913,
"grad_norm": 2.3189539909362793,
"learning_rate": 5.3955555555555565e-06,
"loss": 0.0193,
"step": 2575
},
{
"epoch": 8.112324492979718,
"grad_norm": 0.7720842361450195,
"learning_rate": 5.3400000000000005e-06,
"loss": 0.0121,
"step": 2600
},
{
"epoch": 8.190327613104524,
"grad_norm": 3.5574843883514404,
"learning_rate": 5.2844444444444454e-06,
"loss": 0.0155,
"step": 2625
},
{
"epoch": 8.268330733229329,
"grad_norm": 1.5664520263671875,
"learning_rate": 5.228888888888889e-06,
"loss": 0.0147,
"step": 2650
},
{
"epoch": 8.346333853354134,
"grad_norm": 1.2214146852493286,
"learning_rate": 5.1733333333333335e-06,
"loss": 0.0149,
"step": 2675
},
{
"epoch": 8.424336973478939,
"grad_norm": 1.0644782781600952,
"learning_rate": 5.117777777777778e-06,
"loss": 0.0131,
"step": 2700
},
{
"epoch": 8.502340093603744,
"grad_norm": 0.8796694278717041,
"learning_rate": 5.062222222222222e-06,
"loss": 0.014,
"step": 2725
},
{
"epoch": 8.580343213728549,
"grad_norm": 0.8544178009033203,
"learning_rate": 5.006666666666667e-06,
"loss": 0.0146,
"step": 2750
},
{
"epoch": 8.658346333853354,
"grad_norm": 0.7999880313873291,
"learning_rate": 4.951111111111111e-06,
"loss": 0.0147,
"step": 2775
},
{
"epoch": 8.736349453978159,
"grad_norm": 1.7287328243255615,
"learning_rate": 4.895555555555556e-06,
"loss": 0.0148,
"step": 2800
},
{
"epoch": 8.814352574102964,
"grad_norm": 1.2320371866226196,
"learning_rate": 4.84e-06,
"loss": 0.0117,
"step": 2825
},
{
"epoch": 8.892355694227769,
"grad_norm": 0.6565980315208435,
"learning_rate": 4.784444444444445e-06,
"loss": 0.0171,
"step": 2850
},
{
"epoch": 8.970358814352574,
"grad_norm": 0.6645917296409607,
"learning_rate": 4.728888888888889e-06,
"loss": 0.0146,
"step": 2875
},
{
"epoch": 9.048361934477379,
"grad_norm": 1.5899229049682617,
"learning_rate": 4.673333333333333e-06,
"loss": 0.0095,
"step": 2900
},
{
"epoch": 9.126365054602184,
"grad_norm": 1.3482928276062012,
"learning_rate": 4.617777777777778e-06,
"loss": 0.0102,
"step": 2925
},
{
"epoch": 9.204368174726989,
"grad_norm": 1.0298078060150146,
"learning_rate": 4.562222222222222e-06,
"loss": 0.01,
"step": 2950
},
{
"epoch": 9.282371294851794,
"grad_norm": 0.7524386048316956,
"learning_rate": 4.506666666666667e-06,
"loss": 0.0087,
"step": 2975
},
{
"epoch": 9.360374414976599,
"grad_norm": 1.3448346853256226,
"learning_rate": 4.451111111111112e-06,
"loss": 0.0086,
"step": 3000
},
{
"epoch": 9.360374414976599,
"eval_loss": 0.8110594749450684,
"eval_runtime": 514.7694,
"eval_samples_per_second": 2.488,
"eval_steps_per_second": 0.157,
"eval_wer": 0.3620899890257286,
"step": 3000
},
{
"epoch": 9.438377535101404,
"grad_norm": 1.4639930725097656,
"learning_rate": 4.395555555555556e-06,
"loss": 0.0089,
"step": 3025
},
{
"epoch": 9.51638065522621,
"grad_norm": 0.7041357755661011,
"learning_rate": 4.34e-06,
"loss": 0.01,
"step": 3050
},
{
"epoch": 9.594383775351014,
"grad_norm": 0.8498127460479736,
"learning_rate": 4.284444444444445e-06,
"loss": 0.0085,
"step": 3075
},
{
"epoch": 9.67238689547582,
"grad_norm": 0.8553516864776611,
"learning_rate": 4.228888888888889e-06,
"loss": 0.0101,
"step": 3100
},
{
"epoch": 9.750390015600624,
"grad_norm": 2.619405746459961,
"learning_rate": 4.173333333333334e-06,
"loss": 0.0099,
"step": 3125
},
{
"epoch": 9.82839313572543,
"grad_norm": 1.0941424369812012,
"learning_rate": 4.117777777777779e-06,
"loss": 0.0089,
"step": 3150
},
{
"epoch": 9.906396255850234,
"grad_norm": 1.0034147500991821,
"learning_rate": 4.062222222222223e-06,
"loss": 0.0087,
"step": 3175
},
{
"epoch": 9.98439937597504,
"grad_norm": 0.6879417896270752,
"learning_rate": 4.006666666666667e-06,
"loss": 0.0086,
"step": 3200
},
{
"epoch": 10.062402496099844,
"grad_norm": 0.5009737014770508,
"learning_rate": 3.951111111111112e-06,
"loss": 0.0051,
"step": 3225
},
{
"epoch": 10.14040561622465,
"grad_norm": 0.3143404424190521,
"learning_rate": 3.895555555555556e-06,
"loss": 0.0048,
"step": 3250
},
{
"epoch": 10.218408736349454,
"grad_norm": 0.44424840807914734,
"learning_rate": 3.8400000000000005e-06,
"loss": 0.0046,
"step": 3275
},
{
"epoch": 10.29641185647426,
"grad_norm": 0.18599098920822144,
"learning_rate": 3.784444444444445e-06,
"loss": 0.0039,
"step": 3300
},
{
"epoch": 10.374414976599065,
"grad_norm": 0.2974778115749359,
"learning_rate": 3.728888888888889e-06,
"loss": 0.0046,
"step": 3325
},
{
"epoch": 10.45241809672387,
"grad_norm": 0.990300178527832,
"learning_rate": 3.673333333333334e-06,
"loss": 0.0041,
"step": 3350
},
{
"epoch": 10.530421216848675,
"grad_norm": 1.2518631219863892,
"learning_rate": 3.617777777777778e-06,
"loss": 0.0048,
"step": 3375
},
{
"epoch": 10.60842433697348,
"grad_norm": 0.9894776940345764,
"learning_rate": 3.5622222222222224e-06,
"loss": 0.0053,
"step": 3400
},
{
"epoch": 10.686427457098285,
"grad_norm": 0.192794531583786,
"learning_rate": 3.5066666666666673e-06,
"loss": 0.0059,
"step": 3425
},
{
"epoch": 10.76443057722309,
"grad_norm": 0.30634793639183044,
"learning_rate": 3.4511111111111113e-06,
"loss": 0.0055,
"step": 3450
},
{
"epoch": 10.842433697347893,
"grad_norm": 0.2689841687679291,
"learning_rate": 3.3955555555555558e-06,
"loss": 0.0051,
"step": 3475
},
{
"epoch": 10.920436817472698,
"grad_norm": 2.5046868324279785,
"learning_rate": 3.3400000000000006e-06,
"loss": 0.0038,
"step": 3500
},
{
"epoch": 10.998439937597503,
"grad_norm": 0.7331063151359558,
"learning_rate": 3.2844444444444447e-06,
"loss": 0.0046,
"step": 3525
},
{
"epoch": 11.076443057722308,
"grad_norm": 0.3739769458770752,
"learning_rate": 3.228888888888889e-06,
"loss": 0.0033,
"step": 3550
},
{
"epoch": 11.154446177847113,
"grad_norm": 0.4729616343975067,
"learning_rate": 3.173333333333334e-06,
"loss": 0.0036,
"step": 3575
},
{
"epoch": 11.232449297971918,
"grad_norm": 0.15952661633491516,
"learning_rate": 3.117777777777778e-06,
"loss": 0.0029,
"step": 3600
},
{
"epoch": 11.310452418096723,
"grad_norm": 0.20027977228164673,
"learning_rate": 3.0622222222222225e-06,
"loss": 0.0036,
"step": 3625
},
{
"epoch": 11.388455538221528,
"grad_norm": 0.19324319064617157,
"learning_rate": 3.0066666666666674e-06,
"loss": 0.0033,
"step": 3650
},
{
"epoch": 11.466458658346333,
"grad_norm": 0.20121298730373383,
"learning_rate": 2.9511111111111114e-06,
"loss": 0.0022,
"step": 3675
},
{
"epoch": 11.544461778471138,
"grad_norm": 0.7968800663948059,
"learning_rate": 2.895555555555556e-06,
"loss": 0.0025,
"step": 3700
},
{
"epoch": 11.622464898595943,
"grad_norm": 0.1721133291721344,
"learning_rate": 2.84e-06,
"loss": 0.0026,
"step": 3725
},
{
"epoch": 11.700468018720748,
"grad_norm": 0.37284353375434875,
"learning_rate": 2.784444444444445e-06,
"loss": 0.0028,
"step": 3750
},
{
"epoch": 11.778471138845553,
"grad_norm": 0.1511831283569336,
"learning_rate": 2.7288888888888893e-06,
"loss": 0.0023,
"step": 3775
},
{
"epoch": 11.856474258970358,
"grad_norm": 0.10786991566419601,
"learning_rate": 2.6733333333333333e-06,
"loss": 0.0029,
"step": 3800
},
{
"epoch": 11.934477379095163,
"grad_norm": 1.2048157453536987,
"learning_rate": 2.617777777777778e-06,
"loss": 0.0032,
"step": 3825
},
{
"epoch": 12.012480499219969,
"grad_norm": 0.1635945737361908,
"learning_rate": 2.5622222222222226e-06,
"loss": 0.0026,
"step": 3850
},
{
"epoch": 12.090483619344774,
"grad_norm": 0.06904599815607071,
"learning_rate": 2.5066666666666667e-06,
"loss": 0.0026,
"step": 3875
},
{
"epoch": 12.168486739469579,
"grad_norm": 0.06420668214559555,
"learning_rate": 2.451111111111111e-06,
"loss": 0.002,
"step": 3900
},
{
"epoch": 12.246489859594384,
"grad_norm": 0.26706597208976746,
"learning_rate": 2.3955555555555556e-06,
"loss": 0.0025,
"step": 3925
},
{
"epoch": 12.324492979719189,
"grad_norm": 0.05851108580827713,
"learning_rate": 2.3400000000000005e-06,
"loss": 0.0018,
"step": 3950
},
{
"epoch": 12.402496099843994,
"grad_norm": 0.06494307518005371,
"learning_rate": 2.2844444444444445e-06,
"loss": 0.0015,
"step": 3975
},
{
"epoch": 12.480499219968799,
"grad_norm": 0.07140354067087173,
"learning_rate": 2.228888888888889e-06,
"loss": 0.0017,
"step": 4000
},
{
"epoch": 12.480499219968799,
"eval_loss": 0.8715334534645081,
"eval_runtime": 513.4704,
"eval_samples_per_second": 2.495,
"eval_steps_per_second": 0.158,
"eval_wer": 0.360260943787343,
"step": 4000
},
{
"epoch": 12.558502340093604,
"grad_norm": 0.04837729409337044,
"learning_rate": 2.1733333333333334e-06,
"loss": 0.0014,
"step": 4025
},
{
"epoch": 12.636505460218409,
"grad_norm": 0.050302207469940186,
"learning_rate": 2.117777777777778e-06,
"loss": 0.0015,
"step": 4050
},
{
"epoch": 12.714508580343214,
"grad_norm": 1.5353797674179077,
"learning_rate": 2.0622222222222223e-06,
"loss": 0.0019,
"step": 4075
},
{
"epoch": 12.792511700468019,
"grad_norm": 0.0588994063436985,
"learning_rate": 2.006666666666667e-06,
"loss": 0.0017,
"step": 4100
},
{
"epoch": 12.870514820592824,
"grad_norm": 0.05936579406261444,
"learning_rate": 1.9511111111111113e-06,
"loss": 0.002,
"step": 4125
},
{
"epoch": 12.948517940717629,
"grad_norm": 0.1404724270105362,
"learning_rate": 1.8955555555555557e-06,
"loss": 0.0023,
"step": 4150
},
{
"epoch": 13.026521060842434,
"grad_norm": 0.07803475856781006,
"learning_rate": 1.8400000000000002e-06,
"loss": 0.0013,
"step": 4175
},
{
"epoch": 13.104524180967239,
"grad_norm": 0.045549679547548294,
"learning_rate": 1.7844444444444444e-06,
"loss": 0.0013,
"step": 4200
},
{
"epoch": 13.182527301092044,
"grad_norm": 1.5323783159255981,
"learning_rate": 1.728888888888889e-06,
"loss": 0.0018,
"step": 4225
},
{
"epoch": 13.260530421216849,
"grad_norm": 0.0453680157661438,
"learning_rate": 1.6733333333333335e-06,
"loss": 0.0011,
"step": 4250
},
{
"epoch": 13.338533541341654,
"grad_norm": 0.16936659812927246,
"learning_rate": 1.6177777777777778e-06,
"loss": 0.0012,
"step": 4275
},
{
"epoch": 13.41653666146646,
"grad_norm": 0.03871113434433937,
"learning_rate": 1.5622222222222225e-06,
"loss": 0.0013,
"step": 4300
},
{
"epoch": 13.494539781591264,
"grad_norm": 0.10738777369260788,
"learning_rate": 1.506666666666667e-06,
"loss": 0.0011,
"step": 4325
},
{
"epoch": 13.57254290171607,
"grad_norm": 0.047905419021844864,
"learning_rate": 1.4511111111111112e-06,
"loss": 0.001,
"step": 4350
},
{
"epoch": 13.650546021840874,
"grad_norm": 0.04242739453911781,
"learning_rate": 1.3955555555555556e-06,
"loss": 0.0013,
"step": 4375
},
{
"epoch": 13.72854914196568,
"grad_norm": 0.04559892416000366,
"learning_rate": 1.34e-06,
"loss": 0.0012,
"step": 4400
},
{
"epoch": 13.806552262090484,
"grad_norm": 0.030529584735631943,
"learning_rate": 1.2844444444444445e-06,
"loss": 0.0011,
"step": 4425
},
{
"epoch": 13.88455538221529,
"grad_norm": 0.03567594662308693,
"learning_rate": 1.228888888888889e-06,
"loss": 0.001,
"step": 4450
},
{
"epoch": 13.962558502340094,
"grad_norm": 0.04149959608912468,
"learning_rate": 1.1733333333333335e-06,
"loss": 0.0011,
"step": 4475
},
{
"epoch": 14.0405616224649,
"grad_norm": 0.0304497629404068,
"learning_rate": 1.117777777777778e-06,
"loss": 0.0009,
"step": 4500
},
{
"epoch": 14.118564742589703,
"grad_norm": 0.028642022982239723,
"learning_rate": 1.0622222222222222e-06,
"loss": 0.0009,
"step": 4525
},
{
"epoch": 14.196567862714508,
"grad_norm": 0.02880062349140644,
"learning_rate": 1.0066666666666668e-06,
"loss": 0.0009,
"step": 4550
},
{
"epoch": 14.274570982839313,
"grad_norm": 0.03172925487160683,
"learning_rate": 9.511111111111111e-07,
"loss": 0.0014,
"step": 4575
},
{
"epoch": 14.352574102964118,
"grad_norm": 0.02901492826640606,
"learning_rate": 8.955555555555557e-07,
"loss": 0.0009,
"step": 4600
},
{
"epoch": 14.430577223088923,
"grad_norm": 0.027313807979226112,
"learning_rate": 8.400000000000001e-07,
"loss": 0.0009,
"step": 4625
},
{
"epoch": 14.508580343213728,
"grad_norm": 0.03275463357567787,
"learning_rate": 7.844444444444445e-07,
"loss": 0.0011,
"step": 4650
},
{
"epoch": 14.586583463338533,
"grad_norm": 0.02867468073964119,
"learning_rate": 7.28888888888889e-07,
"loss": 0.0009,
"step": 4675
},
{
"epoch": 14.664586583463338,
"grad_norm": 0.02795287035405636,
"learning_rate": 6.733333333333334e-07,
"loss": 0.0009,
"step": 4700
},
{
"epoch": 14.742589703588143,
"grad_norm": 0.033373042941093445,
"learning_rate": 6.177777777777778e-07,
"loss": 0.0009,
"step": 4725
},
{
"epoch": 14.820592823712948,
"grad_norm": 0.10227759182453156,
"learning_rate": 5.622222222222223e-07,
"loss": 0.001,
"step": 4750
},
{
"epoch": 14.898595943837753,
"grad_norm": 0.03362324461340904,
"learning_rate": 5.066666666666667e-07,
"loss": 0.0009,
"step": 4775
},
{
"epoch": 14.976599063962558,
"grad_norm": 0.02761083096265793,
"learning_rate": 4.511111111111111e-07,
"loss": 0.0009,
"step": 4800
},
{
"epoch": 15.054602184087363,
"grad_norm": 0.030326733365654945,
"learning_rate": 3.9555555555555557e-07,
"loss": 0.0009,
"step": 4825
},
{
"epoch": 15.132605304212168,
"grad_norm": 0.02800353430211544,
"learning_rate": 3.4000000000000003e-07,
"loss": 0.0008,
"step": 4850
},
{
"epoch": 15.210608424336973,
"grad_norm": 0.033761948347091675,
"learning_rate": 2.844444444444445e-07,
"loss": 0.0008,
"step": 4875
},
{
"epoch": 15.288611544461778,
"grad_norm": 0.030224645510315895,
"learning_rate": 2.2888888888888892e-07,
"loss": 0.0008,
"step": 4900
},
{
"epoch": 15.366614664586583,
"grad_norm": 0.023724382743239403,
"learning_rate": 1.7333333333333335e-07,
"loss": 0.0008,
"step": 4925
},
{
"epoch": 15.444617784711388,
"grad_norm": 0.03140026330947876,
"learning_rate": 1.1777777777777778e-07,
"loss": 0.0009,
"step": 4950
},
{
"epoch": 15.522620904836193,
"grad_norm": 0.03177293762564659,
"learning_rate": 6.222222222222223e-08,
"loss": 0.0011,
"step": 4975
},
{
"epoch": 15.600624024960998,
"grad_norm": 0.023135744035243988,
"learning_rate": 6.666666666666667e-09,
"loss": 0.0008,
"step": 5000
},
{
"epoch": 15.600624024960998,
"eval_loss": 0.9218041896820068,
"eval_runtime": 514.1223,
"eval_samples_per_second": 2.492,
"eval_steps_per_second": 0.158,
"eval_wer": 0.35574929886599194,
"step": 5000
},
{
"epoch": 15.600624024960998,
"step": 5000,
"total_flos": 3.39293344978944e+20,
"train_loss": 0.13203169204816223,
"train_runtime": 41579.1778,
"train_samples_per_second": 3.848,
"train_steps_per_second": 0.12
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 16,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.39293344978944e+20,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}