whisper-base-zh / checkpoint-4000 / trainer_state.json
{
"best_metric": 86.22448979591837,
"best_model_checkpoint": "./whisper-large-lt/checkpoint-2000",
"epoch": 8.281573498964804,
"eval_steps": 1000,
"global_step": 4000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.051759834368530024,
"grad_norm": 31.699411392211914,
"learning_rate": 4.6000000000000004e-07,
"loss": 2.2847,
"step": 25
},
{
"epoch": 0.10351966873706005,
"grad_norm": 21.853273391723633,
"learning_rate": 9.400000000000001e-07,
"loss": 2.0241,
"step": 50
},
{
"epoch": 0.15527950310559005,
"grad_norm": 20.82790756225586,
"learning_rate": 1.44e-06,
"loss": 1.5189,
"step": 75
},
{
"epoch": 0.2070393374741201,
"grad_norm": 10.164386749267578,
"learning_rate": 1.94e-06,
"loss": 0.9866,
"step": 100
},
{
"epoch": 0.2587991718426501,
"grad_norm": 8.86418342590332,
"learning_rate": 2.4400000000000004e-06,
"loss": 0.7653,
"step": 125
},
{
"epoch": 0.3105590062111801,
"grad_norm": 8.09189224243164,
"learning_rate": 2.9400000000000002e-06,
"loss": 0.6515,
"step": 150
},
{
"epoch": 0.36231884057971014,
"grad_norm": 8.163768768310547,
"learning_rate": 3.44e-06,
"loss": 0.553,
"step": 175
},
{
"epoch": 0.4140786749482402,
"grad_norm": 7.93491792678833,
"learning_rate": 3.94e-06,
"loss": 0.5089,
"step": 200
},
{
"epoch": 0.4658385093167702,
"grad_norm": 8.422746658325195,
"learning_rate": 4.440000000000001e-06,
"loss": 0.4928,
"step": 225
},
{
"epoch": 0.5175983436853002,
"grad_norm": 7.056028366088867,
"learning_rate": 4.94e-06,
"loss": 0.4955,
"step": 250
},
{
"epoch": 0.5693581780538303,
"grad_norm": 7.153671741485596,
"learning_rate": 5.4400000000000004e-06,
"loss": 0.4869,
"step": 275
},
{
"epoch": 0.6211180124223602,
"grad_norm": 8.896004676818848,
"learning_rate": 5.94e-06,
"loss": 0.4647,
"step": 300
},
{
"epoch": 0.6728778467908902,
"grad_norm": 7.931967735290527,
"learning_rate": 6.440000000000001e-06,
"loss": 0.4588,
"step": 325
},
{
"epoch": 0.7246376811594203,
"grad_norm": 9.128633499145508,
"learning_rate": 6.9400000000000005e-06,
"loss": 0.4467,
"step": 350
},
{
"epoch": 0.7763975155279503,
"grad_norm": 9.321606636047363,
"learning_rate": 7.440000000000001e-06,
"loss": 0.4442,
"step": 375
},
{
"epoch": 0.8281573498964804,
"grad_norm": 6.722208023071289,
"learning_rate": 7.94e-06,
"loss": 0.414,
"step": 400
},
{
"epoch": 0.8799171842650103,
"grad_norm": 6.496078968048096,
"learning_rate": 8.44e-06,
"loss": 0.4067,
"step": 425
},
{
"epoch": 0.9316770186335404,
"grad_norm": 8.029313087463379,
"learning_rate": 8.94e-06,
"loss": 0.4352,
"step": 450
},
{
"epoch": 0.9834368530020704,
"grad_norm": 7.314033508300781,
"learning_rate": 9.440000000000001e-06,
"loss": 0.425,
"step": 475
},
{
"epoch": 1.0351966873706004,
"grad_norm": 7.7378740310668945,
"learning_rate": 9.940000000000001e-06,
"loss": 0.3573,
"step": 500
},
{
"epoch": 1.0869565217391304,
"grad_norm": 6.325808525085449,
"learning_rate": 9.937142857142858e-06,
"loss": 0.3305,
"step": 525
},
{
"epoch": 1.1387163561076605,
"grad_norm": 7.014776706695557,
"learning_rate": 9.865714285714285e-06,
"loss": 0.3432,
"step": 550
},
{
"epoch": 1.1904761904761905,
"grad_norm": 7.104799747467041,
"learning_rate": 9.794285714285714e-06,
"loss": 0.3428,
"step": 575
},
{
"epoch": 1.2422360248447206,
"grad_norm": 8.842447280883789,
"learning_rate": 9.722857142857143e-06,
"loss": 0.3332,
"step": 600
},
{
"epoch": 1.2939958592132506,
"grad_norm": 4.9119672775268555,
"learning_rate": 9.651428571428572e-06,
"loss": 0.3595,
"step": 625
},
{
"epoch": 1.3457556935817805,
"grad_norm": 6.631478309631348,
"learning_rate": 9.58e-06,
"loss": 0.3273,
"step": 650
},
{
"epoch": 1.3975155279503104,
"grad_norm": 6.498466491699219,
"learning_rate": 9.508571428571429e-06,
"loss": 0.3274,
"step": 675
},
{
"epoch": 1.4492753623188406,
"grad_norm": 6.38285493850708,
"learning_rate": 9.437142857142858e-06,
"loss": 0.3156,
"step": 700
},
{
"epoch": 1.5010351966873707,
"grad_norm": 6.786648273468018,
"learning_rate": 9.365714285714287e-06,
"loss": 0.3161,
"step": 725
},
{
"epoch": 1.5527950310559007,
"grad_norm": 6.803364276885986,
"learning_rate": 9.294285714285714e-06,
"loss": 0.3287,
"step": 750
},
{
"epoch": 1.6045548654244306,
"grad_norm": 7.055851936340332,
"learning_rate": 9.222857142857143e-06,
"loss": 0.334,
"step": 775
},
{
"epoch": 1.6563146997929605,
"grad_norm": 5.978209495544434,
"learning_rate": 9.151428571428572e-06,
"loss": 0.3276,
"step": 800
},
{
"epoch": 1.7080745341614907,
"grad_norm": 8.85584831237793,
"learning_rate": 9.080000000000001e-06,
"loss": 0.3138,
"step": 825
},
{
"epoch": 1.7598343685300208,
"grad_norm": 6.784140110015869,
"learning_rate": 9.00857142857143e-06,
"loss": 0.3167,
"step": 850
},
{
"epoch": 1.8115942028985508,
"grad_norm": 8.701028823852539,
"learning_rate": 8.937142857142857e-06,
"loss": 0.3061,
"step": 875
},
{
"epoch": 1.8633540372670807,
"grad_norm": 6.928929328918457,
"learning_rate": 8.865714285714287e-06,
"loss": 0.3172,
"step": 900
},
{
"epoch": 1.9151138716356106,
"grad_norm": 8.34626579284668,
"learning_rate": 8.794285714285716e-06,
"loss": 0.3105,
"step": 925
},
{
"epoch": 1.9668737060041408,
"grad_norm": 7.861119270324707,
"learning_rate": 8.722857142857145e-06,
"loss": 0.3212,
"step": 950
},
{
"epoch": 2.018633540372671,
"grad_norm": 5.226954936981201,
"learning_rate": 8.651428571428572e-06,
"loss": 0.2669,
"step": 975
},
{
"epoch": 2.070393374741201,
"grad_norm": 6.24221134185791,
"learning_rate": 8.580000000000001e-06,
"loss": 0.2036,
"step": 1000
},
{
"epoch": 2.070393374741201,
"eval_loss": 0.37555259466171265,
"eval_runtime": 551.1751,
"eval_samples_per_second": 3.503,
"eval_steps_per_second": 0.439,
"eval_wer": 86.58163265306122,
"step": 1000
},
{
"epoch": 2.122153209109731,
"grad_norm": 3.854379415512085,
"learning_rate": 8.50857142857143e-06,
"loss": 0.203,
"step": 1025
},
{
"epoch": 2.1739130434782608,
"grad_norm": 4.169343948364258,
"learning_rate": 8.437142857142859e-06,
"loss": 0.23,
"step": 1050
},
{
"epoch": 2.2256728778467907,
"grad_norm": 5.8532633781433105,
"learning_rate": 8.365714285714286e-06,
"loss": 0.1985,
"step": 1075
},
{
"epoch": 2.277432712215321,
"grad_norm": 4.382779598236084,
"learning_rate": 8.294285714285715e-06,
"loss": 0.1997,
"step": 1100
},
{
"epoch": 2.329192546583851,
"grad_norm": 6.208737850189209,
"learning_rate": 8.222857142857144e-06,
"loss": 0.2066,
"step": 1125
},
{
"epoch": 2.380952380952381,
"grad_norm": 5.715628623962402,
"learning_rate": 8.151428571428572e-06,
"loss": 0.2165,
"step": 1150
},
{
"epoch": 2.432712215320911,
"grad_norm": 5.7561445236206055,
"learning_rate": 8.08e-06,
"loss": 0.2176,
"step": 1175
},
{
"epoch": 2.4844720496894412,
"grad_norm": 6.207643508911133,
"learning_rate": 8.00857142857143e-06,
"loss": 0.2075,
"step": 1200
},
{
"epoch": 2.536231884057971,
"grad_norm": 5.213014125823975,
"learning_rate": 7.937142857142857e-06,
"loss": 0.2041,
"step": 1225
},
{
"epoch": 2.587991718426501,
"grad_norm": 5.595193862915039,
"learning_rate": 7.865714285714286e-06,
"loss": 0.205,
"step": 1250
},
{
"epoch": 2.639751552795031,
"grad_norm": 5.534831523895264,
"learning_rate": 7.794285714285715e-06,
"loss": 0.2097,
"step": 1275
},
{
"epoch": 2.691511387163561,
"grad_norm": 5.9356231689453125,
"learning_rate": 7.722857142857142e-06,
"loss": 0.2209,
"step": 1300
},
{
"epoch": 2.7432712215320914,
"grad_norm": 4.567516326904297,
"learning_rate": 7.651428571428571e-06,
"loss": 0.1915,
"step": 1325
},
{
"epoch": 2.795031055900621,
"grad_norm": 5.693762302398682,
"learning_rate": 7.58e-06,
"loss": 0.1972,
"step": 1350
},
{
"epoch": 2.846790890269151,
"grad_norm": 5.581110000610352,
"learning_rate": 7.508571428571429e-06,
"loss": 0.2186,
"step": 1375
},
{
"epoch": 2.898550724637681,
"grad_norm": 5.270753860473633,
"learning_rate": 7.4371428571428575e-06,
"loss": 0.21,
"step": 1400
},
{
"epoch": 2.950310559006211,
"grad_norm": 6.520173072814941,
"learning_rate": 7.365714285714286e-06,
"loss": 0.2235,
"step": 1425
},
{
"epoch": 3.002070393374741,
"grad_norm": 3.4095804691314697,
"learning_rate": 7.294285714285715e-06,
"loss": 0.1991,
"step": 1450
},
{
"epoch": 3.0538302277432714,
"grad_norm": 4.170091152191162,
"learning_rate": 7.222857142857144e-06,
"loss": 0.1364,
"step": 1475
},
{
"epoch": 3.1055900621118013,
"grad_norm": 5.475827217102051,
"learning_rate": 7.151428571428573e-06,
"loss": 0.1373,
"step": 1500
},
{
"epoch": 3.1573498964803313,
"grad_norm": 5.360046863555908,
"learning_rate": 7.08e-06,
"loss": 0.1431,
"step": 1525
},
{
"epoch": 3.209109730848861,
"grad_norm": 3.8683834075927734,
"learning_rate": 7.008571428571429e-06,
"loss": 0.1315,
"step": 1550
},
{
"epoch": 3.260869565217391,
"grad_norm": 4.890464782714844,
"learning_rate": 6.937142857142858e-06,
"loss": 0.1394,
"step": 1575
},
{
"epoch": 3.3126293995859215,
"grad_norm": 4.547149181365967,
"learning_rate": 6.865714285714287e-06,
"loss": 0.1339,
"step": 1600
},
{
"epoch": 3.3643892339544514,
"grad_norm": 3.74664044380188,
"learning_rate": 6.794285714285714e-06,
"loss": 0.1369,
"step": 1625
},
{
"epoch": 3.4161490683229814,
"grad_norm": 5.266635894775391,
"learning_rate": 6.722857142857143e-06,
"loss": 0.1419,
"step": 1650
},
{
"epoch": 3.4679089026915113,
"grad_norm": 3.839707374572754,
"learning_rate": 6.651428571428572e-06,
"loss": 0.1296,
"step": 1675
},
{
"epoch": 3.5196687370600412,
"grad_norm": 4.542924404144287,
"learning_rate": 6.5800000000000005e-06,
"loss": 0.1379,
"step": 1700
},
{
"epoch": 3.571428571428571,
"grad_norm": 3.216632127761841,
"learning_rate": 6.5085714285714295e-06,
"loss": 0.1312,
"step": 1725
},
{
"epoch": 3.6231884057971016,
"grad_norm": 4.014632701873779,
"learning_rate": 6.437142857142858e-06,
"loss": 0.1365,
"step": 1750
},
{
"epoch": 3.6749482401656315,
"grad_norm": 4.811014175415039,
"learning_rate": 6.365714285714286e-06,
"loss": 0.1304,
"step": 1775
},
{
"epoch": 3.7267080745341614,
"grad_norm": 3.7714288234710693,
"learning_rate": 6.294285714285715e-06,
"loss": 0.1525,
"step": 1800
},
{
"epoch": 3.7784679089026914,
"grad_norm": 3.39595103263855,
"learning_rate": 6.222857142857144e-06,
"loss": 0.1428,
"step": 1825
},
{
"epoch": 3.8302277432712213,
"grad_norm": 5.35399866104126,
"learning_rate": 6.151428571428571e-06,
"loss": 0.1338,
"step": 1850
},
{
"epoch": 3.8819875776397517,
"grad_norm": 4.488447666168213,
"learning_rate": 6.08e-06,
"loss": 0.1371,
"step": 1875
},
{
"epoch": 3.9337474120082816,
"grad_norm": 4.74454402923584,
"learning_rate": 6.008571428571429e-06,
"loss": 0.1382,
"step": 1900
},
{
"epoch": 3.9855072463768115,
"grad_norm": 5.4989190101623535,
"learning_rate": 5.937142857142858e-06,
"loss": 0.1443,
"step": 1925
},
{
"epoch": 4.037267080745342,
"grad_norm": 4.14027214050293,
"learning_rate": 5.865714285714286e-06,
"loss": 0.1094,
"step": 1950
},
{
"epoch": 4.089026915113871,
"grad_norm": 2.9087343215942383,
"learning_rate": 5.794285714285715e-06,
"loss": 0.0856,
"step": 1975
},
{
"epoch": 4.140786749482402,
"grad_norm": 3.901829242706299,
"learning_rate": 5.722857142857144e-06,
"loss": 0.0885,
"step": 2000
},
{
"epoch": 4.140786749482402,
"eval_loss": 0.3902941644191742,
"eval_runtime": 550.9002,
"eval_samples_per_second": 3.505,
"eval_steps_per_second": 0.439,
"eval_wer": 86.22448979591837,
"step": 2000
},
{
"epoch": 4.192546583850931,
"grad_norm": 3.7913148403167725,
"learning_rate": 5.651428571428572e-06,
"loss": 0.0895,
"step": 2025
},
{
"epoch": 4.244306418219462,
"grad_norm": 3.6157302856445312,
"learning_rate": 5.580000000000001e-06,
"loss": 0.0863,
"step": 2050
},
{
"epoch": 4.296066252587992,
"grad_norm": 3.6557390689849854,
"learning_rate": 5.508571428571429e-06,
"loss": 0.0929,
"step": 2075
},
{
"epoch": 4.3478260869565215,
"grad_norm": 3.9819066524505615,
"learning_rate": 5.437142857142857e-06,
"loss": 0.0935,
"step": 2100
},
{
"epoch": 4.399585921325052,
"grad_norm": 3.2281110286712646,
"learning_rate": 5.365714285714286e-06,
"loss": 0.0909,
"step": 2125
},
{
"epoch": 4.451345755693581,
"grad_norm": 4.9514546394348145,
"learning_rate": 5.294285714285715e-06,
"loss": 0.0897,
"step": 2150
},
{
"epoch": 4.503105590062112,
"grad_norm": 3.684457302093506,
"learning_rate": 5.2228571428571425e-06,
"loss": 0.1066,
"step": 2175
},
{
"epoch": 4.554865424430642,
"grad_norm": 4.545319557189941,
"learning_rate": 5.1514285714285715e-06,
"loss": 0.0976,
"step": 2200
},
{
"epoch": 4.606625258799172,
"grad_norm": 4.822242736816406,
"learning_rate": 5.0800000000000005e-06,
"loss": 0.0859,
"step": 2225
},
{
"epoch": 4.658385093167702,
"grad_norm": 4.011146068572998,
"learning_rate": 5.0085714285714295e-06,
"loss": 0.0931,
"step": 2250
},
{
"epoch": 4.710144927536232,
"grad_norm": 3.5186495780944824,
"learning_rate": 4.937142857142858e-06,
"loss": 0.0928,
"step": 2275
},
{
"epoch": 4.761904761904762,
"grad_norm": 4.684374809265137,
"learning_rate": 4.865714285714287e-06,
"loss": 0.0815,
"step": 2300
},
{
"epoch": 4.813664596273292,
"grad_norm": 2.7669050693511963,
"learning_rate": 4.794285714285715e-06,
"loss": 0.086,
"step": 2325
},
{
"epoch": 4.865424430641822,
"grad_norm": 3.66933012008667,
"learning_rate": 4.722857142857144e-06,
"loss": 0.0932,
"step": 2350
},
{
"epoch": 4.917184265010352,
"grad_norm": 4.243403911590576,
"learning_rate": 4.651428571428572e-06,
"loss": 0.0955,
"step": 2375
},
{
"epoch": 4.9689440993788825,
"grad_norm": 3.056316375732422,
"learning_rate": 4.58e-06,
"loss": 0.0845,
"step": 2400
},
{
"epoch": 5.020703933747412,
"grad_norm": 4.136480331420898,
"learning_rate": 4.508571428571429e-06,
"loss": 0.0847,
"step": 2425
},
{
"epoch": 5.072463768115942,
"grad_norm": 2.3009705543518066,
"learning_rate": 4.437142857142857e-06,
"loss": 0.0623,
"step": 2450
},
{
"epoch": 5.124223602484472,
"grad_norm": 2.838564395904541,
"learning_rate": 4.3657142857142855e-06,
"loss": 0.0602,
"step": 2475
},
{
"epoch": 5.175983436853002,
"grad_norm": 4.828927040100098,
"learning_rate": 4.2942857142857146e-06,
"loss": 0.0536,
"step": 2500
},
{
"epoch": 5.227743271221532,
"grad_norm": 2.5556607246398926,
"learning_rate": 4.222857142857143e-06,
"loss": 0.0538,
"step": 2525
},
{
"epoch": 5.279503105590062,
"grad_norm": 3.7536535263061523,
"learning_rate": 4.151428571428572e-06,
"loss": 0.0601,
"step": 2550
},
{
"epoch": 5.3312629399585925,
"grad_norm": 3.1169402599334717,
"learning_rate": 4.08e-06,
"loss": 0.0599,
"step": 2575
},
{
"epoch": 5.383022774327122,
"grad_norm": 4.849262237548828,
"learning_rate": 4.008571428571429e-06,
"loss": 0.064,
"step": 2600
},
{
"epoch": 5.434782608695652,
"grad_norm": 2.763498306274414,
"learning_rate": 3.937142857142858e-06,
"loss": 0.0615,
"step": 2625
},
{
"epoch": 5.486542443064182,
"grad_norm": 4.436384201049805,
"learning_rate": 3.865714285714286e-06,
"loss": 0.0654,
"step": 2650
},
{
"epoch": 5.538302277432712,
"grad_norm": 4.356779098510742,
"learning_rate": 3.7942857142857147e-06,
"loss": 0.0613,
"step": 2675
},
{
"epoch": 5.590062111801243,
"grad_norm": 2.862515449523926,
"learning_rate": 3.722857142857143e-06,
"loss": 0.0632,
"step": 2700
},
{
"epoch": 5.641821946169772,
"grad_norm": 3.0462210178375244,
"learning_rate": 3.651428571428572e-06,
"loss": 0.0629,
"step": 2725
},
{
"epoch": 5.693581780538302,
"grad_norm": 2.4727702140808105,
"learning_rate": 3.58e-06,
"loss": 0.0588,
"step": 2750
},
{
"epoch": 5.745341614906832,
"grad_norm": 3.399616241455078,
"learning_rate": 3.508571428571429e-06,
"loss": 0.0576,
"step": 2775
},
{
"epoch": 5.797101449275362,
"grad_norm": 4.195678234100342,
"learning_rate": 3.437142857142857e-06,
"loss": 0.0574,
"step": 2800
},
{
"epoch": 5.848861283643893,
"grad_norm": 2.242182970046997,
"learning_rate": 3.3657142857142862e-06,
"loss": 0.065,
"step": 2825
},
{
"epoch": 5.900621118012422,
"grad_norm": 4.623630046844482,
"learning_rate": 3.2942857142857144e-06,
"loss": 0.0586,
"step": 2850
},
{
"epoch": 5.9523809523809526,
"grad_norm": 2.815309762954712,
"learning_rate": 3.222857142857143e-06,
"loss": 0.0618,
"step": 2875
},
{
"epoch": 6.004140786749482,
"grad_norm": 3.130713701248169,
"learning_rate": 3.151428571428572e-06,
"loss": 0.0559,
"step": 2900
},
{
"epoch": 6.055900621118012,
"grad_norm": 2.457791566848755,
"learning_rate": 3.08e-06,
"loss": 0.04,
"step": 2925
},
{
"epoch": 6.107660455486543,
"grad_norm": 2.183882236480713,
"learning_rate": 3.008571428571429e-06,
"loss": 0.0395,
"step": 2950
},
{
"epoch": 6.159420289855072,
"grad_norm": 2.6089022159576416,
"learning_rate": 2.9371428571428573e-06,
"loss": 0.0352,
"step": 2975
},
{
"epoch": 6.211180124223603,
"grad_norm": 2.3659653663635254,
"learning_rate": 2.865714285714286e-06,
"loss": 0.0367,
"step": 3000
},
{
"epoch": 6.211180124223603,
"eval_loss": 0.4294891357421875,
"eval_runtime": 550.0141,
"eval_samples_per_second": 3.511,
"eval_steps_per_second": 0.44,
"eval_wer": 86.88775510204081,
"step": 3000
},
{
"epoch": 6.262939958592132,
"grad_norm": 3.4095540046691895,
"learning_rate": 2.7942857142857145e-06,
"loss": 0.0434,
"step": 3025
},
{
"epoch": 6.3146997929606625,
"grad_norm": 1.9496163129806519,
"learning_rate": 2.722857142857143e-06,
"loss": 0.0414,
"step": 3050
},
{
"epoch": 6.366459627329193,
"grad_norm": 2.4933230876922607,
"learning_rate": 2.6514285714285713e-06,
"loss": 0.0402,
"step": 3075
},
{
"epoch": 6.418219461697722,
"grad_norm": 2.609180212020874,
"learning_rate": 2.5800000000000003e-06,
"loss": 0.0437,
"step": 3100
},
{
"epoch": 6.469979296066253,
"grad_norm": 2.446335792541504,
"learning_rate": 2.5085714285714285e-06,
"loss": 0.0379,
"step": 3125
},
{
"epoch": 6.521739130434782,
"grad_norm": 2.933326482772827,
"learning_rate": 2.4371428571428575e-06,
"loss": 0.0411,
"step": 3150
},
{
"epoch": 6.573498964803313,
"grad_norm": 2.2033092975616455,
"learning_rate": 2.365714285714286e-06,
"loss": 0.0401,
"step": 3175
},
{
"epoch": 6.625258799171843,
"grad_norm": 2.4697012901306152,
"learning_rate": 2.2942857142857146e-06,
"loss": 0.0403,
"step": 3200
},
{
"epoch": 6.6770186335403725,
"grad_norm": 3.1077818870544434,
"learning_rate": 2.222857142857143e-06,
"loss": 0.0434,
"step": 3225
},
{
"epoch": 6.728778467908903,
"grad_norm": 2.0225911140441895,
"learning_rate": 2.1514285714285714e-06,
"loss": 0.0417,
"step": 3250
},
{
"epoch": 6.780538302277432,
"grad_norm": 2.710073471069336,
"learning_rate": 2.08e-06,
"loss": 0.0374,
"step": 3275
},
{
"epoch": 6.832298136645963,
"grad_norm": 1.8564422130584717,
"learning_rate": 2.0085714285714286e-06,
"loss": 0.0419,
"step": 3300
},
{
"epoch": 6.884057971014493,
"grad_norm": 1.7741810083389282,
"learning_rate": 1.9371428571428576e-06,
"loss": 0.043,
"step": 3325
},
{
"epoch": 6.935817805383023,
"grad_norm": 2.3681976795196533,
"learning_rate": 1.865714285714286e-06,
"loss": 0.0408,
"step": 3350
},
{
"epoch": 6.987577639751553,
"grad_norm": 3.068981885910034,
"learning_rate": 1.7942857142857146e-06,
"loss": 0.0401,
"step": 3375
},
{
"epoch": 7.0393374741200825,
"grad_norm": 1.8063828945159912,
"learning_rate": 1.7228571428571432e-06,
"loss": 0.0287,
"step": 3400
},
{
"epoch": 7.091097308488613,
"grad_norm": 2.1350810527801514,
"learning_rate": 1.6514285714285715e-06,
"loss": 0.0308,
"step": 3425
},
{
"epoch": 7.142857142857143,
"grad_norm": 1.8076541423797607,
"learning_rate": 1.5800000000000001e-06,
"loss": 0.0279,
"step": 3450
},
{
"epoch": 7.194616977225673,
"grad_norm": 1.7828601598739624,
"learning_rate": 1.5085714285714287e-06,
"loss": 0.0295,
"step": 3475
},
{
"epoch": 7.246376811594203,
"grad_norm": 1.9913222789764404,
"learning_rate": 1.4371428571428573e-06,
"loss": 0.032,
"step": 3500
},
{
"epoch": 7.298136645962733,
"grad_norm": 3.143350839614868,
"learning_rate": 1.3657142857142857e-06,
"loss": 0.0307,
"step": 3525
},
{
"epoch": 7.349896480331263,
"grad_norm": 1.6706522703170776,
"learning_rate": 1.2942857142857143e-06,
"loss": 0.0261,
"step": 3550
},
{
"epoch": 7.401656314699793,
"grad_norm": 0.9704394936561584,
"learning_rate": 1.222857142857143e-06,
"loss": 0.0261,
"step": 3575
},
{
"epoch": 7.453416149068323,
"grad_norm": 1.450588345527649,
"learning_rate": 1.1514285714285714e-06,
"loss": 0.0295,
"step": 3600
},
{
"epoch": 7.505175983436853,
"grad_norm": 2.030329942703247,
"learning_rate": 1.08e-06,
"loss": 0.0255,
"step": 3625
},
{
"epoch": 7.556935817805383,
"grad_norm": 1.2898095846176147,
"learning_rate": 1.0085714285714286e-06,
"loss": 0.0291,
"step": 3650
},
{
"epoch": 7.608695652173913,
"grad_norm": 1.5555272102355957,
"learning_rate": 9.371428571428571e-07,
"loss": 0.0259,
"step": 3675
},
{
"epoch": 7.660455486542443,
"grad_norm": 2.2604877948760986,
"learning_rate": 8.657142857142858e-07,
"loss": 0.0308,
"step": 3700
},
{
"epoch": 7.712215320910973,
"grad_norm": 2.4159529209136963,
"learning_rate": 7.942857142857144e-07,
"loss": 0.0324,
"step": 3725
},
{
"epoch": 7.763975155279503,
"grad_norm": 2.122925043106079,
"learning_rate": 7.228571428571429e-07,
"loss": 0.0331,
"step": 3750
},
{
"epoch": 7.815734989648033,
"grad_norm": 2.790031909942627,
"learning_rate": 6.514285714285715e-07,
"loss": 0.0271,
"step": 3775
},
{
"epoch": 7.867494824016563,
"grad_norm": 2.1916027069091797,
"learning_rate": 5.800000000000001e-07,
"loss": 0.0286,
"step": 3800
},
{
"epoch": 7.919254658385093,
"grad_norm": 1.8285205364227295,
"learning_rate": 5.085714285714286e-07,
"loss": 0.0258,
"step": 3825
},
{
"epoch": 7.971014492753623,
"grad_norm": 2.5326058864593506,
"learning_rate": 4.371428571428572e-07,
"loss": 0.0288,
"step": 3850
},
{
"epoch": 8.022774327122153,
"grad_norm": 1.6372758150100708,
"learning_rate": 3.657142857142858e-07,
"loss": 0.0244,
"step": 3875
},
{
"epoch": 8.074534161490684,
"grad_norm": 2.088735342025757,
"learning_rate": 2.942857142857143e-07,
"loss": 0.0229,
"step": 3900
},
{
"epoch": 8.126293995859212,
"grad_norm": 1.248395562171936,
"learning_rate": 2.228571428571429e-07,
"loss": 0.0269,
"step": 3925
},
{
"epoch": 8.178053830227743,
"grad_norm": 1.8508952856063843,
"learning_rate": 1.5142857142857144e-07,
"loss": 0.0205,
"step": 3950
},
{
"epoch": 8.229813664596273,
"grad_norm": 1.0671072006225586,
"learning_rate": 8e-08,
"loss": 0.0241,
"step": 3975
},
{
"epoch": 8.281573498964804,
"grad_norm": 1.9898015260696411,
"learning_rate": 8.571428571428572e-09,
"loss": 0.0242,
"step": 4000
},
{
"epoch": 8.281573498964804,
"eval_loss": 0.45003581047058105,
"eval_runtime": 550.1722,
"eval_samples_per_second": 3.51,
"eval_steps_per_second": 0.44,
"eval_wer": 87.24489795918367,
"step": 4000
}
],
"logging_steps": 25,
"max_steps": 4000,
"num_input_tokens_seen": 0,
"num_train_epochs": 9,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.14688871448576e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
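
A minimal sketch for inspecting this file (an assumption, not part of the checkpoint itself): assuming Python 3 with only the standard library and that the file is saved locally as trainer_state.json, the per-checkpoint evaluation records can be pulled out of log_history like this. Entries containing "eval_loss" are the evaluation rows logged every eval_steps (1000); the remaining rows are the per-25-step training logs. In this file, best_metric (86.22) matches the lowest eval_wer, recorded at checkpoint-2000.

import json

# Load the trainer state written by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Separate evaluation records from ordinary training-loss logs.
evals = [e for e in state["log_history"] if "eval_loss" in e]

for e in evals:
    print(f'step {e["step"]:>5}: eval_loss={e["eval_loss"]:.4f}  eval_wer={e["eval_wer"]:.2f}')

# best_metric / best_model_checkpoint track the best (lowest) eval_wer seen so far.
print("best WER:", state["best_metric"], "at", state["best_model_checkpoint"])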