{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 7.072135785007072,
"eval_steps": 1000,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03536067892503536,
"grad_norm": 5.942479610443115,
"learning_rate": 5.000000000000001e-07,
"loss": 1.024,
"step": 25
},
{
"epoch": 0.07072135785007072,
"grad_norm": 4.6511549949646,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.7269,
"step": 50
},
{
"epoch": 0.10608203677510608,
"grad_norm": 3.9613099098205566,
"learning_rate": 1.5e-06,
"loss": 0.5419,
"step": 75
},
{
"epoch": 0.14144271570014144,
"grad_norm": 4.0183186531066895,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.4772,
"step": 100
},
{
"epoch": 0.1768033946251768,
"grad_norm": 4.522441864013672,
"learning_rate": 2.5e-06,
"loss": 0.449,
"step": 125
},
{
"epoch": 0.21216407355021216,
"grad_norm": 3.861356258392334,
"learning_rate": 3e-06,
"loss": 0.4327,
"step": 150
},
{
"epoch": 0.24752475247524752,
"grad_norm": 4.831862449645996,
"learning_rate": 3.5e-06,
"loss": 0.4107,
"step": 175
},
{
"epoch": 0.2828854314002829,
"grad_norm": 3.336674690246582,
"learning_rate": 4.000000000000001e-06,
"loss": 0.4043,
"step": 200
},
{
"epoch": 0.31824611032531824,
"grad_norm": 4.305329322814941,
"learning_rate": 4.5e-06,
"loss": 0.3914,
"step": 225
},
{
"epoch": 0.3536067892503536,
"grad_norm": 3.83046817779541,
"learning_rate": 5e-06,
"loss": 0.3685,
"step": 250
},
{
"epoch": 0.38896746817538896,
"grad_norm": 3.484257698059082,
"learning_rate": 5.500000000000001e-06,
"loss": 0.351,
"step": 275
},
{
"epoch": 0.4243281471004243,
"grad_norm": 3.844430923461914,
"learning_rate": 6e-06,
"loss": 0.3629,
"step": 300
},
{
"epoch": 0.4596888260254597,
"grad_norm": 4.026344299316406,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.3662,
"step": 325
},
{
"epoch": 0.49504950495049505,
"grad_norm": 3.9913763999938965,
"learning_rate": 7e-06,
"loss": 0.3473,
"step": 350
},
{
"epoch": 0.5304101838755304,
"grad_norm": 4.128275394439697,
"learning_rate": 7.500000000000001e-06,
"loss": 0.3476,
"step": 375
},
{
"epoch": 0.5657708628005658,
"grad_norm": 3.474440813064575,
"learning_rate": 8.000000000000001e-06,
"loss": 0.3357,
"step": 400
},
{
"epoch": 0.6011315417256011,
"grad_norm": 2.85972261428833,
"learning_rate": 8.5e-06,
"loss": 0.3081,
"step": 425
},
{
"epoch": 0.6364922206506365,
"grad_norm": 3.8829457759857178,
"learning_rate": 9e-06,
"loss": 0.3129,
"step": 450
},
{
"epoch": 0.6718528995756718,
"grad_norm": 3.661864757537842,
"learning_rate": 9.5e-06,
"loss": 0.315,
"step": 475
},
{
"epoch": 0.7072135785007072,
"grad_norm": 3.2057082653045654,
"learning_rate": 1e-05,
"loss": 0.324,
"step": 500
},
{
"epoch": 0.7425742574257426,
"grad_norm": 3.3009073734283447,
"learning_rate": 9.944444444444445e-06,
"loss": 0.3201,
"step": 525
},
{
"epoch": 0.7779349363507779,
"grad_norm": 3.073880910873413,
"learning_rate": 9.88888888888889e-06,
"loss": 0.2754,
"step": 550
},
{
"epoch": 0.8132956152758133,
"grad_norm": 2.913012742996216,
"learning_rate": 9.833333333333333e-06,
"loss": 0.2737,
"step": 575
},
{
"epoch": 0.8486562942008486,
"grad_norm": 2.8291831016540527,
"learning_rate": 9.777777777777779e-06,
"loss": 0.2823,
"step": 600
},
{
"epoch": 0.884016973125884,
"grad_norm": 3.002702474594116,
"learning_rate": 9.722222222222223e-06,
"loss": 0.2639,
"step": 625
},
{
"epoch": 0.9193776520509194,
"grad_norm": 3.184739112854004,
"learning_rate": 9.666666666666667e-06,
"loss": 0.2689,
"step": 650
},
{
"epoch": 0.9547383309759547,
"grad_norm": 3.179779291152954,
"learning_rate": 9.611111111111112e-06,
"loss": 0.268,
"step": 675
},
{
"epoch": 0.9900990099009901,
"grad_norm": 3.430415391921997,
"learning_rate": 9.555555555555556e-06,
"loss": 0.2657,
"step": 700
},
{
"epoch": 1.0254596888260255,
"grad_norm": 2.4761476516723633,
"learning_rate": 9.5e-06,
"loss": 0.2076,
"step": 725
},
{
"epoch": 1.0608203677510608,
"grad_norm": 2.250551700592041,
"learning_rate": 9.444444444444445e-06,
"loss": 0.177,
"step": 750
},
{
"epoch": 1.0961810466760962,
"grad_norm": 2.776700258255005,
"learning_rate": 9.38888888888889e-06,
"loss": 0.1664,
"step": 775
},
{
"epoch": 1.1315417256011315,
"grad_norm": 2.360950469970703,
"learning_rate": 9.333333333333334e-06,
"loss": 0.1576,
"step": 800
},
{
"epoch": 1.166902404526167,
"grad_norm": 2.858429193496704,
"learning_rate": 9.277777777777778e-06,
"loss": 0.1706,
"step": 825
},
{
"epoch": 1.2022630834512023,
"grad_norm": 2.6928622722625732,
"learning_rate": 9.222222222222224e-06,
"loss": 0.1733,
"step": 850
},
{
"epoch": 1.2376237623762376,
"grad_norm": 2.4788143634796143,
"learning_rate": 9.166666666666666e-06,
"loss": 0.1653,
"step": 875
},
{
"epoch": 1.272984441301273,
"grad_norm": 2.5625805854797363,
"learning_rate": 9.111111111111112e-06,
"loss": 0.1669,
"step": 900
},
{
"epoch": 1.3083451202263083,
"grad_norm": 2.179570198059082,
"learning_rate": 9.055555555555556e-06,
"loss": 0.1594,
"step": 925
},
{
"epoch": 1.3437057991513437,
"grad_norm": 2.751049280166626,
"learning_rate": 9e-06,
"loss": 0.161,
"step": 950
},
{
"epoch": 1.379066478076379,
"grad_norm": 2.8607306480407715,
"learning_rate": 8.944444444444446e-06,
"loss": 0.1563,
"step": 975
},
{
"epoch": 1.4144271570014144,
"grad_norm": 2.380326271057129,
"learning_rate": 8.888888888888888e-06,
"loss": 0.1583,
"step": 1000
},
{
"epoch": 1.4144271570014144,
"eval_loss": 0.25619471073150635,
"eval_runtime": 2242.3857,
"eval_samples_per_second": 2.4,
"eval_steps_per_second": 0.15,
"eval_wer": 0.20623086405026045,
"step": 1000
},
{
"epoch": 1.4497878359264498,
"grad_norm": 2.163681983947754,
"learning_rate": 8.833333333333334e-06,
"loss": 0.1759,
"step": 1025
},
{
"epoch": 1.4851485148514851,
"grad_norm": 2.3342068195343018,
"learning_rate": 8.777777777777778e-06,
"loss": 0.1553,
"step": 1050
},
{
"epoch": 1.5205091937765205,
"grad_norm": 1.9928505420684814,
"learning_rate": 8.722222222222224e-06,
"loss": 0.1647,
"step": 1075
},
{
"epoch": 1.5558698727015559,
"grad_norm": 2.837106227874756,
"learning_rate": 8.666666666666668e-06,
"loss": 0.153,
"step": 1100
},
{
"epoch": 1.5912305516265912,
"grad_norm": 2.0526952743530273,
"learning_rate": 8.611111111111112e-06,
"loss": 0.1621,
"step": 1125
},
{
"epoch": 1.6265912305516266,
"grad_norm": 2.482564926147461,
"learning_rate": 8.555555555555556e-06,
"loss": 0.1556,
"step": 1150
},
{
"epoch": 1.661951909476662,
"grad_norm": 1.8798569440841675,
"learning_rate": 8.5e-06,
"loss": 0.1387,
"step": 1175
},
{
"epoch": 1.6973125884016973,
"grad_norm": 2.249302864074707,
"learning_rate": 8.444444444444446e-06,
"loss": 0.141,
"step": 1200
},
{
"epoch": 1.7326732673267327,
"grad_norm": 2.683685064315796,
"learning_rate": 8.38888888888889e-06,
"loss": 0.1454,
"step": 1225
},
{
"epoch": 1.768033946251768,
"grad_norm": 2.46101450920105,
"learning_rate": 8.333333333333334e-06,
"loss": 0.1522,
"step": 1250
},
{
"epoch": 1.8033946251768034,
"grad_norm": 2.1939165592193604,
"learning_rate": 8.277777777777778e-06,
"loss": 0.1384,
"step": 1275
},
{
"epoch": 1.8387553041018387,
"grad_norm": 1.9950499534606934,
"learning_rate": 8.222222222222222e-06,
"loss": 0.1487,
"step": 1300
},
{
"epoch": 1.874115983026874,
"grad_norm": 2.307433605194092,
"learning_rate": 8.166666666666668e-06,
"loss": 0.1429,
"step": 1325
},
{
"epoch": 1.9094766619519095,
"grad_norm": 2.7583506107330322,
"learning_rate": 8.111111111111112e-06,
"loss": 0.1372,
"step": 1350
},
{
"epoch": 1.9448373408769448,
"grad_norm": 2.037691593170166,
"learning_rate": 8.055555555555557e-06,
"loss": 0.1438,
"step": 1375
},
{
"epoch": 1.9801980198019802,
"grad_norm": 3.130493640899658,
"learning_rate": 8.000000000000001e-06,
"loss": 0.1399,
"step": 1400
},
{
"epoch": 2.0155586987270158,
"grad_norm": 1.4508907794952393,
"learning_rate": 7.944444444444445e-06,
"loss": 0.0945,
"step": 1425
},
{
"epoch": 2.050919377652051,
"grad_norm": 1.6655480861663818,
"learning_rate": 7.88888888888889e-06,
"loss": 0.0695,
"step": 1450
},
{
"epoch": 2.0862800565770865,
"grad_norm": 1.3497799634933472,
"learning_rate": 7.833333333333333e-06,
"loss": 0.0653,
"step": 1475
},
{
"epoch": 2.1216407355021216,
"grad_norm": 1.9043290615081787,
"learning_rate": 7.77777777777778e-06,
"loss": 0.0685,
"step": 1500
},
{
"epoch": 2.157001414427157,
"grad_norm": 1.7058606147766113,
"learning_rate": 7.722222222222223e-06,
"loss": 0.0686,
"step": 1525
},
{
"epoch": 2.1923620933521923,
"grad_norm": 1.5799323320388794,
"learning_rate": 7.666666666666667e-06,
"loss": 0.0713,
"step": 1550
},
{
"epoch": 2.227722772277228,
"grad_norm": 2.1463682651519775,
"learning_rate": 7.611111111111111e-06,
"loss": 0.0742,
"step": 1575
},
{
"epoch": 2.263083451202263,
"grad_norm": 1.647290587425232,
"learning_rate": 7.555555555555556e-06,
"loss": 0.0658,
"step": 1600
},
{
"epoch": 2.298444130127298,
"grad_norm": 2.096013307571411,
"learning_rate": 7.500000000000001e-06,
"loss": 0.0647,
"step": 1625
},
{
"epoch": 2.333804809052334,
"grad_norm": 1.7993731498718262,
"learning_rate": 7.444444444444445e-06,
"loss": 0.064,
"step": 1650
},
{
"epoch": 2.3691654879773694,
"grad_norm": 1.6189881563186646,
"learning_rate": 7.38888888888889e-06,
"loss": 0.071,
"step": 1675
},
{
"epoch": 2.4045261669024045,
"grad_norm": 1.9548283815383911,
"learning_rate": 7.333333333333333e-06,
"loss": 0.065,
"step": 1700
},
{
"epoch": 2.4398868458274396,
"grad_norm": 1.7484291791915894,
"learning_rate": 7.277777777777778e-06,
"loss": 0.0621,
"step": 1725
},
{
"epoch": 2.4752475247524752,
"grad_norm": 1.8069815635681152,
"learning_rate": 7.222222222222223e-06,
"loss": 0.0752,
"step": 1750
},
{
"epoch": 2.510608203677511,
"grad_norm": 1.3003634214401245,
"learning_rate": 7.166666666666667e-06,
"loss": 0.0719,
"step": 1775
},
{
"epoch": 2.545968882602546,
"grad_norm": 1.7700819969177246,
"learning_rate": 7.111111111111112e-06,
"loss": 0.0723,
"step": 1800
},
{
"epoch": 2.581329561527581,
"grad_norm": 2.041518211364746,
"learning_rate": 7.055555555555557e-06,
"loss": 0.0659,
"step": 1825
},
{
"epoch": 2.6166902404526167,
"grad_norm": 1.4527554512023926,
"learning_rate": 7e-06,
"loss": 0.0669,
"step": 1850
},
{
"epoch": 2.6520509193776522,
"grad_norm": 1.6484756469726562,
"learning_rate": 6.944444444444445e-06,
"loss": 0.0624,
"step": 1875
},
{
"epoch": 2.6874115983026874,
"grad_norm": 1.9906888008117676,
"learning_rate": 6.88888888888889e-06,
"loss": 0.0614,
"step": 1900
},
{
"epoch": 2.7227722772277225,
"grad_norm": 1.4477077722549438,
"learning_rate": 6.833333333333334e-06,
"loss": 0.0651,
"step": 1925
},
{
"epoch": 2.758132956152758,
"grad_norm": 1.5185719728469849,
"learning_rate": 6.777777777777779e-06,
"loss": 0.0626,
"step": 1950
},
{
"epoch": 2.7934936350777937,
"grad_norm": 2.1326749324798584,
"learning_rate": 6.7222222222222235e-06,
"loss": 0.0582,
"step": 1975
},
{
"epoch": 2.828854314002829,
"grad_norm": 1.7811297178268433,
"learning_rate": 6.666666666666667e-06,
"loss": 0.0675,
"step": 2000
},
{
"epoch": 2.828854314002829,
"eval_loss": 0.23944300413131714,
"eval_runtime": 2146.1705,
"eval_samples_per_second": 2.507,
"eval_steps_per_second": 0.157,
"eval_wer": 0.1849377708855223,
"step": 2000
},
{
"epoch": 2.864214992927864,
"grad_norm": 2.299253463745117,
"learning_rate": 6.6111111111111115e-06,
"loss": 0.0632,
"step": 2025
},
{
"epoch": 2.8995756718528995,
"grad_norm": 1.6922551393508911,
"learning_rate": 6.555555555555556e-06,
"loss": 0.0624,
"step": 2050
},
{
"epoch": 2.934936350777935,
"grad_norm": 1.5919361114501953,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.0621,
"step": 2075
},
{
"epoch": 2.9702970297029703,
"grad_norm": 1.9371802806854248,
"learning_rate": 6.444444444444445e-06,
"loss": 0.0665,
"step": 2100
},
{
"epoch": 3.005657708628006,
"grad_norm": 1.0974698066711426,
"learning_rate": 6.3888888888888885e-06,
"loss": 0.0577,
"step": 2125
},
{
"epoch": 3.041018387553041,
"grad_norm": 0.9451077580451965,
"learning_rate": 6.333333333333333e-06,
"loss": 0.026,
"step": 2150
},
{
"epoch": 3.0763790664780766,
"grad_norm": 1.0205219984054565,
"learning_rate": 6.277777777777778e-06,
"loss": 0.0271,
"step": 2175
},
{
"epoch": 3.1117397454031117,
"grad_norm": 1.12224543094635,
"learning_rate": 6.222222222222223e-06,
"loss": 0.0277,
"step": 2200
},
{
"epoch": 3.1471004243281473,
"grad_norm": 1.326258897781372,
"learning_rate": 6.166666666666667e-06,
"loss": 0.0245,
"step": 2225
},
{
"epoch": 3.1824611032531824,
"grad_norm": 1.4001460075378418,
"learning_rate": 6.111111111111112e-06,
"loss": 0.0268,
"step": 2250
},
{
"epoch": 3.217821782178218,
"grad_norm": 1.4342401027679443,
"learning_rate": 6.055555555555555e-06,
"loss": 0.0348,
"step": 2275
},
{
"epoch": 3.253182461103253,
"grad_norm": 0.9824921488761902,
"learning_rate": 6e-06,
"loss": 0.0287,
"step": 2300
},
{
"epoch": 3.2885431400282887,
"grad_norm": 1.2897576093673706,
"learning_rate": 5.944444444444445e-06,
"loss": 0.0278,
"step": 2325
},
{
"epoch": 3.323903818953324,
"grad_norm": 0.8747806549072266,
"learning_rate": 5.88888888888889e-06,
"loss": 0.0284,
"step": 2350
},
{
"epoch": 3.3592644978783595,
"grad_norm": 1.4630858898162842,
"learning_rate": 5.833333333333334e-06,
"loss": 0.0248,
"step": 2375
},
{
"epoch": 3.3946251768033946,
"grad_norm": 1.220078945159912,
"learning_rate": 5.777777777777778e-06,
"loss": 0.0278,
"step": 2400
},
{
"epoch": 3.42998585572843,
"grad_norm": 1.1768743991851807,
"learning_rate": 5.722222222222222e-06,
"loss": 0.0249,
"step": 2425
},
{
"epoch": 3.4653465346534653,
"grad_norm": 1.5660589933395386,
"learning_rate": 5.666666666666667e-06,
"loss": 0.0307,
"step": 2450
},
{
"epoch": 3.500707213578501,
"grad_norm": 0.8128176331520081,
"learning_rate": 5.611111111111112e-06,
"loss": 0.0321,
"step": 2475
},
{
"epoch": 3.536067892503536,
"grad_norm": 1.3139251470565796,
"learning_rate": 5.555555555555557e-06,
"loss": 0.0295,
"step": 2500
},
{
"epoch": 3.571428571428571,
"grad_norm": 0.9214044809341431,
"learning_rate": 5.500000000000001e-06,
"loss": 0.0279,
"step": 2525
},
{
"epoch": 3.6067892503536068,
"grad_norm": 1.11368989944458,
"learning_rate": 5.444444444444445e-06,
"loss": 0.031,
"step": 2550
},
{
"epoch": 3.6421499292786423,
"grad_norm": 1.238482117652893,
"learning_rate": 5.388888888888889e-06,
"loss": 0.0283,
"step": 2575
},
{
"epoch": 3.6775106082036775,
"grad_norm": 1.528247594833374,
"learning_rate": 5.333333333333334e-06,
"loss": 0.0285,
"step": 2600
},
{
"epoch": 3.7128712871287126,
"grad_norm": 1.4220536947250366,
"learning_rate": 5.2777777777777785e-06,
"loss": 0.0254,
"step": 2625
},
{
"epoch": 3.748231966053748,
"grad_norm": 0.7737128734588623,
"learning_rate": 5.2222222222222226e-06,
"loss": 0.0275,
"step": 2650
},
{
"epoch": 3.783592644978784,
"grad_norm": 1.0860861539840698,
"learning_rate": 5.1666666666666675e-06,
"loss": 0.0332,
"step": 2675
},
{
"epoch": 3.818953323903819,
"grad_norm": 1.240889072418213,
"learning_rate": 5.1111111111111115e-06,
"loss": 0.0238,
"step": 2700
},
{
"epoch": 3.854314002828854,
"grad_norm": 1.815717101097107,
"learning_rate": 5.0555555555555555e-06,
"loss": 0.0288,
"step": 2725
},
{
"epoch": 3.8896746817538896,
"grad_norm": 0.997189998626709,
"learning_rate": 5e-06,
"loss": 0.0291,
"step": 2750
},
{
"epoch": 3.9250353606789252,
"grad_norm": 0.7937121987342834,
"learning_rate": 4.944444444444445e-06,
"loss": 0.0289,
"step": 2775
},
{
"epoch": 3.9603960396039604,
"grad_norm": 0.74869704246521,
"learning_rate": 4.888888888888889e-06,
"loss": 0.0294,
"step": 2800
},
{
"epoch": 3.9957567185289955,
"grad_norm": 0.791512131690979,
"learning_rate": 4.833333333333333e-06,
"loss": 0.0278,
"step": 2825
},
{
"epoch": 4.0311173974540315,
"grad_norm": 0.7676866054534912,
"learning_rate": 4.777777777777778e-06,
"loss": 0.0128,
"step": 2850
},
{
"epoch": 4.066478076379067,
"grad_norm": 0.8113923668861389,
"learning_rate": 4.722222222222222e-06,
"loss": 0.012,
"step": 2875
},
{
"epoch": 4.101838755304102,
"grad_norm": 0.9902219176292419,
"learning_rate": 4.666666666666667e-06,
"loss": 0.0104,
"step": 2900
},
{
"epoch": 4.137199434229137,
"grad_norm": 0.6298081874847412,
"learning_rate": 4.611111111111112e-06,
"loss": 0.0101,
"step": 2925
},
{
"epoch": 4.172560113154173,
"grad_norm": 0.4004449248313904,
"learning_rate": 4.555555555555556e-06,
"loss": 0.0124,
"step": 2950
},
{
"epoch": 4.207920792079208,
"grad_norm": 1.081607699394226,
"learning_rate": 4.5e-06,
"loss": 0.0131,
"step": 2975
},
{
"epoch": 4.243281471004243,
"grad_norm": 1.244435429573059,
"learning_rate": 4.444444444444444e-06,
"loss": 0.0113,
"step": 3000
},
{
"epoch": 4.243281471004243,
"eval_loss": 0.2729232609272003,
"eval_runtime": 2135.6924,
"eval_samples_per_second": 2.52,
"eval_steps_per_second": 0.158,
"eval_wer": 0.17223348840908187,
"step": 3000
},
{
"epoch": 4.278642149929278,
"grad_norm": 0.7269119620323181,
"learning_rate": 4.388888888888889e-06,
"loss": 0.0141,
"step": 3025
},
{
"epoch": 4.314002828854314,
"grad_norm": 0.7338502407073975,
"learning_rate": 4.333333333333334e-06,
"loss": 0.0094,
"step": 3050
},
{
"epoch": 4.3493635077793495,
"grad_norm": 1.575302004814148,
"learning_rate": 4.277777777777778e-06,
"loss": 0.0117,
"step": 3075
},
{
"epoch": 4.384724186704385,
"grad_norm": 0.8755286931991577,
"learning_rate": 4.222222222222223e-06,
"loss": 0.0096,
"step": 3100
},
{
"epoch": 4.42008486562942,
"grad_norm": 0.606636106967926,
"learning_rate": 4.166666666666667e-06,
"loss": 0.0108,
"step": 3125
},
{
"epoch": 4.455445544554456,
"grad_norm": 0.4599132835865021,
"learning_rate": 4.111111111111111e-06,
"loss": 0.0109,
"step": 3150
},
{
"epoch": 4.490806223479491,
"grad_norm": 0.9773505330085754,
"learning_rate": 4.055555555555556e-06,
"loss": 0.0095,
"step": 3175
},
{
"epoch": 4.526166902404526,
"grad_norm": 1.153731107711792,
"learning_rate": 4.000000000000001e-06,
"loss": 0.0102,
"step": 3200
},
{
"epoch": 4.561527581329561,
"grad_norm": 0.38161006569862366,
"learning_rate": 3.944444444444445e-06,
"loss": 0.01,
"step": 3225
},
{
"epoch": 4.596888260254596,
"grad_norm": 0.4255661368370056,
"learning_rate": 3.88888888888889e-06,
"loss": 0.0098,
"step": 3250
},
{
"epoch": 4.632248939179632,
"grad_norm": 0.47027960419654846,
"learning_rate": 3.833333333333334e-06,
"loss": 0.0082,
"step": 3275
},
{
"epoch": 4.667609618104668,
"grad_norm": 0.7094388604164124,
"learning_rate": 3.777777777777778e-06,
"loss": 0.013,
"step": 3300
},
{
"epoch": 4.702970297029703,
"grad_norm": 1.0208382606506348,
"learning_rate": 3.7222222222222225e-06,
"loss": 0.01,
"step": 3325
},
{
"epoch": 4.738330975954739,
"grad_norm": 0.41139042377471924,
"learning_rate": 3.6666666666666666e-06,
"loss": 0.0115,
"step": 3350
},
{
"epoch": 4.773691654879774,
"grad_norm": 0.2961082458496094,
"learning_rate": 3.6111111111111115e-06,
"loss": 0.0102,
"step": 3375
},
{
"epoch": 4.809052333804809,
"grad_norm": 0.7990303039550781,
"learning_rate": 3.555555555555556e-06,
"loss": 0.0085,
"step": 3400
},
{
"epoch": 4.844413012729844,
"grad_norm": 0.7530162930488586,
"learning_rate": 3.5e-06,
"loss": 0.0098,
"step": 3425
},
{
"epoch": 4.879773691654879,
"grad_norm": 0.44302016496658325,
"learning_rate": 3.444444444444445e-06,
"loss": 0.0078,
"step": 3450
},
{
"epoch": 4.915134370579915,
"grad_norm": 0.43590739369392395,
"learning_rate": 3.3888888888888893e-06,
"loss": 0.008,
"step": 3475
},
{
"epoch": 4.9504950495049505,
"grad_norm": 1.0824764966964722,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.0133,
"step": 3500
},
{
"epoch": 4.985855728429986,
"grad_norm": 1.0049275159835815,
"learning_rate": 3.277777777777778e-06,
"loss": 0.0098,
"step": 3525
},
{
"epoch": 5.021216407355022,
"grad_norm": 0.13272249698638916,
"learning_rate": 3.2222222222222227e-06,
"loss": 0.0093,
"step": 3550
},
{
"epoch": 5.056577086280057,
"grad_norm": 0.42071208357810974,
"learning_rate": 3.1666666666666667e-06,
"loss": 0.0045,
"step": 3575
},
{
"epoch": 5.091937765205092,
"grad_norm": 1.430225133895874,
"learning_rate": 3.1111111111111116e-06,
"loss": 0.0044,
"step": 3600
},
{
"epoch": 5.127298444130127,
"grad_norm": 0.8059938549995422,
"learning_rate": 3.055555555555556e-06,
"loss": 0.0042,
"step": 3625
},
{
"epoch": 5.162659123055163,
"grad_norm": 0.2698410153388977,
"learning_rate": 3e-06,
"loss": 0.0031,
"step": 3650
},
{
"epoch": 5.198019801980198,
"grad_norm": 0.4564538598060608,
"learning_rate": 2.944444444444445e-06,
"loss": 0.0038,
"step": 3675
},
{
"epoch": 5.233380480905233,
"grad_norm": 2.555163621902466,
"learning_rate": 2.888888888888889e-06,
"loss": 0.0035,
"step": 3700
},
{
"epoch": 5.2687411598302685,
"grad_norm": 0.6030502915382385,
"learning_rate": 2.8333333333333335e-06,
"loss": 0.0041,
"step": 3725
},
{
"epoch": 5.3041018387553045,
"grad_norm": 0.2795655131340027,
"learning_rate": 2.7777777777777783e-06,
"loss": 0.0039,
"step": 3750
},
{
"epoch": 5.33946251768034,
"grad_norm": 0.23795121908187866,
"learning_rate": 2.7222222222222224e-06,
"loss": 0.0038,
"step": 3775
},
{
"epoch": 5.374823196605375,
"grad_norm": 0.08186432719230652,
"learning_rate": 2.666666666666667e-06,
"loss": 0.0023,
"step": 3800
},
{
"epoch": 5.41018387553041,
"grad_norm": 0.7226074934005737,
"learning_rate": 2.6111111111111113e-06,
"loss": 0.0034,
"step": 3825
},
{
"epoch": 5.445544554455446,
"grad_norm": 0.4257412850856781,
"learning_rate": 2.5555555555555557e-06,
"loss": 0.004,
"step": 3850
},
{
"epoch": 5.480905233380481,
"grad_norm": 0.48502105474472046,
"learning_rate": 2.5e-06,
"loss": 0.003,
"step": 3875
},
{
"epoch": 5.516265912305516,
"grad_norm": 0.2505144774913788,
"learning_rate": 2.4444444444444447e-06,
"loss": 0.0047,
"step": 3900
},
{
"epoch": 5.551626591230551,
"grad_norm": 0.24553366005420685,
"learning_rate": 2.388888888888889e-06,
"loss": 0.0027,
"step": 3925
},
{
"epoch": 5.586987270155587,
"grad_norm": 0.1790177971124649,
"learning_rate": 2.3333333333333336e-06,
"loss": 0.0031,
"step": 3950
},
{
"epoch": 5.6223479490806225,
"grad_norm": 0.625106155872345,
"learning_rate": 2.277777777777778e-06,
"loss": 0.0045,
"step": 3975
},
{
"epoch": 5.657708628005658,
"grad_norm": 0.3036724925041199,
"learning_rate": 2.222222222222222e-06,
"loss": 0.0036,
"step": 4000
},
{
"epoch": 5.657708628005658,
"eval_loss": 0.3003575801849365,
"eval_runtime": 2115.084,
"eval_samples_per_second": 2.544,
"eval_steps_per_second": 0.159,
"eval_wer": 0.1705435603801344,
"step": 4000
},
{
"epoch": 5.693069306930693,
"grad_norm": 0.5264594554901123,
"learning_rate": 2.166666666666667e-06,
"loss": 0.0038,
"step": 4025
},
{
"epoch": 5.728429985855728,
"grad_norm": 0.43084102869033813,
"learning_rate": 2.1111111111111114e-06,
"loss": 0.003,
"step": 4050
},
{
"epoch": 5.763790664780764,
"grad_norm": 0.3291011452674866,
"learning_rate": 2.0555555555555555e-06,
"loss": 0.0032,
"step": 4075
},
{
"epoch": 5.799151343705799,
"grad_norm": 0.11041796952486038,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.0041,
"step": 4100
},
{
"epoch": 5.834512022630834,
"grad_norm": 0.40376558899879456,
"learning_rate": 1.944444444444445e-06,
"loss": 0.0029,
"step": 4125
},
{
"epoch": 5.86987270155587,
"grad_norm": 1.008151650428772,
"learning_rate": 1.888888888888889e-06,
"loss": 0.0023,
"step": 4150
},
{
"epoch": 5.905233380480905,
"grad_norm": 0.29702553153038025,
"learning_rate": 1.8333333333333333e-06,
"loss": 0.0039,
"step": 4175
},
{
"epoch": 5.9405940594059405,
"grad_norm": 0.0774470716714859,
"learning_rate": 1.777777777777778e-06,
"loss": 0.0025,
"step": 4200
},
{
"epoch": 5.975954738330976,
"grad_norm": 0.2981080114841461,
"learning_rate": 1.7222222222222224e-06,
"loss": 0.0025,
"step": 4225
},
{
"epoch": 6.011315417256012,
"grad_norm": 0.08830191940069199,
"learning_rate": 1.6666666666666667e-06,
"loss": 0.002,
"step": 4250
},
{
"epoch": 6.046676096181047,
"grad_norm": 0.06720926612615585,
"learning_rate": 1.6111111111111113e-06,
"loss": 0.0014,
"step": 4275
},
{
"epoch": 6.082036775106082,
"grad_norm": 0.07184426486492157,
"learning_rate": 1.5555555555555558e-06,
"loss": 0.0017,
"step": 4300
},
{
"epoch": 6.117397454031117,
"grad_norm": 0.06138487532734871,
"learning_rate": 1.5e-06,
"loss": 0.0012,
"step": 4325
},
{
"epoch": 6.152758132956153,
"grad_norm": 0.11943061649799347,
"learning_rate": 1.4444444444444445e-06,
"loss": 0.0014,
"step": 4350
},
{
"epoch": 6.188118811881188,
"grad_norm": 0.05326369032263756,
"learning_rate": 1.3888888888888892e-06,
"loss": 0.0017,
"step": 4375
},
{
"epoch": 6.223479490806223,
"grad_norm": 0.05557446554303169,
"learning_rate": 1.3333333333333334e-06,
"loss": 0.0011,
"step": 4400
},
{
"epoch": 6.258840169731259,
"grad_norm": 0.05990071967244148,
"learning_rate": 1.2777777777777779e-06,
"loss": 0.0015,
"step": 4425
},
{
"epoch": 6.294200848656295,
"grad_norm": 0.06366278976202011,
"learning_rate": 1.2222222222222223e-06,
"loss": 0.0012,
"step": 4450
},
{
"epoch": 6.32956152758133,
"grad_norm": 0.0781664326786995,
"learning_rate": 1.1666666666666668e-06,
"loss": 0.0016,
"step": 4475
},
{
"epoch": 6.364922206506365,
"grad_norm": 0.19605182111263275,
"learning_rate": 1.111111111111111e-06,
"loss": 0.0016,
"step": 4500
},
{
"epoch": 6.4002828854314,
"grad_norm": 0.07928458601236343,
"learning_rate": 1.0555555555555557e-06,
"loss": 0.0015,
"step": 4525
},
{
"epoch": 6.435643564356436,
"grad_norm": 0.2883352041244507,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.0018,
"step": 4550
},
{
"epoch": 6.471004243281471,
"grad_norm": 0.037699662148952484,
"learning_rate": 9.444444444444445e-07,
"loss": 0.0015,
"step": 4575
},
{
"epoch": 6.506364922206506,
"grad_norm": 0.2606990933418274,
"learning_rate": 8.88888888888889e-07,
"loss": 0.0012,
"step": 4600
},
{
"epoch": 6.5417256011315414,
"grad_norm": 0.04962063580751419,
"learning_rate": 8.333333333333333e-07,
"loss": 0.0016,
"step": 4625
},
{
"epoch": 6.5770862800565775,
"grad_norm": 0.11313042789697647,
"learning_rate": 7.777777777777779e-07,
"loss": 0.0014,
"step": 4650
},
{
"epoch": 6.612446958981613,
"grad_norm": 0.10243412852287292,
"learning_rate": 7.222222222222222e-07,
"loss": 0.0011,
"step": 4675
},
{
"epoch": 6.647807637906648,
"grad_norm": 0.2408544421195984,
"learning_rate": 6.666666666666667e-07,
"loss": 0.0015,
"step": 4700
},
{
"epoch": 6.683168316831683,
"grad_norm": 0.08557415008544922,
"learning_rate": 6.111111111111112e-07,
"loss": 0.0013,
"step": 4725
},
{
"epoch": 6.718528995756719,
"grad_norm": 0.05530049279332161,
"learning_rate": 5.555555555555555e-07,
"loss": 0.0012,
"step": 4750
},
{
"epoch": 6.753889674681754,
"grad_norm": 0.1299530267715454,
"learning_rate": 5.000000000000001e-07,
"loss": 0.0018,
"step": 4775
},
{
"epoch": 6.789250353606789,
"grad_norm": 0.03794874995946884,
"learning_rate": 4.444444444444445e-07,
"loss": 0.0011,
"step": 4800
},
{
"epoch": 6.824611032531824,
"grad_norm": 0.051171157509088516,
"learning_rate": 3.8888888888888895e-07,
"loss": 0.0016,
"step": 4825
},
{
"epoch": 6.85997171145686,
"grad_norm": 0.05626167729496956,
"learning_rate": 3.3333333333333335e-07,
"loss": 0.0014,
"step": 4850
},
{
"epoch": 6.8953323903818955,
"grad_norm": 0.05049213021993637,
"learning_rate": 2.7777777777777776e-07,
"loss": 0.0017,
"step": 4875
},
{
"epoch": 6.930693069306931,
"grad_norm": 0.2871930003166199,
"learning_rate": 2.2222222222222224e-07,
"loss": 0.0014,
"step": 4900
},
{
"epoch": 6.966053748231966,
"grad_norm": 0.05043856427073479,
"learning_rate": 1.6666666666666668e-07,
"loss": 0.0014,
"step": 4925
},
{
"epoch": 7.001414427157002,
"grad_norm": 0.052644409239292145,
"learning_rate": 1.1111111111111112e-07,
"loss": 0.0018,
"step": 4950
},
{
"epoch": 7.036775106082037,
"grad_norm": 0.04488613083958626,
"learning_rate": 5.555555555555556e-08,
"loss": 0.0013,
"step": 4975
},
{
"epoch": 7.072135785007072,
"grad_norm": 0.05924491211771965,
"learning_rate": 0.0,
"loss": 0.0012,
"step": 5000
},
{
"epoch": 7.072135785007072,
"eval_loss": 0.3279673457145691,
"eval_runtime": 2105.881,
"eval_samples_per_second": 2.555,
"eval_steps_per_second": 0.16,
"eval_wer": 0.1676010974591435,
"step": 5000
},
{
"epoch": 7.072135785007072,
"step": 5000,
"total_flos": 5.4329055670370304e+20,
"train_loss": 0.09207180016152561,
"train_runtime": 58226.4708,
"train_samples_per_second": 2.748,
"train_steps_per_second": 0.086
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.4329055670370304e+20,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}