{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.29000429000429,
"eval_steps": 1000,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02145002145002145,
"grad_norm": 9.066595077514648,
"learning_rate": 5.000000000000001e-07,
"loss": 1.715,
"step": 25
},
{
"epoch": 0.0429000429000429,
"grad_norm": 6.495127201080322,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.3703,
"step": 50
},
{
"epoch": 0.06435006435006435,
"grad_norm": 5.474078178405762,
"learning_rate": 1.5e-06,
"loss": 1.0103,
"step": 75
},
{
"epoch": 0.0858000858000858,
"grad_norm": 5.429323196411133,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.9033,
"step": 100
},
{
"epoch": 0.10725010725010725,
"grad_norm": 5.380603790283203,
"learning_rate": 2.5e-06,
"loss": 0.7779,
"step": 125
},
{
"epoch": 0.1287001287001287,
"grad_norm": 4.108312606811523,
"learning_rate": 3e-06,
"loss": 0.7064,
"step": 150
},
{
"epoch": 0.15015015015015015,
"grad_norm": 4.261275291442871,
"learning_rate": 3.5e-06,
"loss": 0.6859,
"step": 175
},
{
"epoch": 0.1716001716001716,
"grad_norm": 4.792464256286621,
"learning_rate": 4.000000000000001e-06,
"loss": 0.6422,
"step": 200
},
{
"epoch": 0.19305019305019305,
"grad_norm": 4.474643707275391,
"learning_rate": 4.5e-06,
"loss": 0.6719,
"step": 225
},
{
"epoch": 0.2145002145002145,
"grad_norm": 5.082165718078613,
"learning_rate": 5e-06,
"loss": 0.6536,
"step": 250
},
{
"epoch": 0.23595023595023595,
"grad_norm": 5.100755214691162,
"learning_rate": 5.500000000000001e-06,
"loss": 0.6508,
"step": 275
},
{
"epoch": 0.2574002574002574,
"grad_norm": 4.228156566619873,
"learning_rate": 6e-06,
"loss": 0.633,
"step": 300
},
{
"epoch": 0.27885027885027885,
"grad_norm": 4.423853874206543,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.5827,
"step": 325
},
{
"epoch": 0.3003003003003003,
"grad_norm": 4.693906784057617,
"learning_rate": 7e-06,
"loss": 0.5881,
"step": 350
},
{
"epoch": 0.32175032175032175,
"grad_norm": 5.522333145141602,
"learning_rate": 7.500000000000001e-06,
"loss": 0.5953,
"step": 375
},
{
"epoch": 0.3432003432003432,
"grad_norm": 3.7150588035583496,
"learning_rate": 8.000000000000001e-06,
"loss": 0.5733,
"step": 400
},
{
"epoch": 0.36465036465036466,
"grad_norm": 4.65098762512207,
"learning_rate": 8.5e-06,
"loss": 0.5741,
"step": 425
},
{
"epoch": 0.3861003861003861,
"grad_norm": 4.152739524841309,
"learning_rate": 9e-06,
"loss": 0.5721,
"step": 450
},
{
"epoch": 0.40755040755040756,
"grad_norm": 4.5913615226745605,
"learning_rate": 9.5e-06,
"loss": 0.5837,
"step": 475
},
{
"epoch": 0.429000429000429,
"grad_norm": 3.7452423572540283,
"learning_rate": 1e-05,
"loss": 0.5763,
"step": 500
},
{
"epoch": 0.45045045045045046,
"grad_norm": 4.0456953048706055,
"learning_rate": 9.944444444444445e-06,
"loss": 0.5743,
"step": 525
},
{
"epoch": 0.4719004719004719,
"grad_norm": 4.183647155761719,
"learning_rate": 9.88888888888889e-06,
"loss": 0.5851,
"step": 550
},
{
"epoch": 0.49335049335049336,
"grad_norm": 3.6905064582824707,
"learning_rate": 9.833333333333333e-06,
"loss": 0.5423,
"step": 575
},
{
"epoch": 0.5148005148005148,
"grad_norm": 4.111310005187988,
"learning_rate": 9.777777777777779e-06,
"loss": 0.5392,
"step": 600
},
{
"epoch": 0.5362505362505362,
"grad_norm": 3.7859528064727783,
"learning_rate": 9.722222222222223e-06,
"loss": 0.5257,
"step": 625
},
{
"epoch": 0.5577005577005577,
"grad_norm": 3.810257911682129,
"learning_rate": 9.666666666666667e-06,
"loss": 0.4958,
"step": 650
},
{
"epoch": 0.5791505791505791,
"grad_norm": 3.279156446456909,
"learning_rate": 9.611111111111112e-06,
"loss": 0.5103,
"step": 675
},
{
"epoch": 0.6006006006006006,
"grad_norm": 3.16200852394104,
"learning_rate": 9.555555555555556e-06,
"loss": 0.5262,
"step": 700
},
{
"epoch": 0.622050622050622,
"grad_norm": 3.9035494327545166,
"learning_rate": 9.5e-06,
"loss": 0.5131,
"step": 725
},
{
"epoch": 0.6435006435006435,
"grad_norm": 3.071242570877075,
"learning_rate": 9.444444444444445e-06,
"loss": 0.478,
"step": 750
},
{
"epoch": 0.6649506649506649,
"grad_norm": 4.14898157119751,
"learning_rate": 9.38888888888889e-06,
"loss": 0.5035,
"step": 775
},
{
"epoch": 0.6864006864006864,
"grad_norm": 3.8921945095062256,
"learning_rate": 9.333333333333334e-06,
"loss": 0.5,
"step": 800
},
{
"epoch": 0.7078507078507078,
"grad_norm": 3.4418222904205322,
"learning_rate": 9.277777777777778e-06,
"loss": 0.4676,
"step": 825
},
{
"epoch": 0.7293007293007293,
"grad_norm": 3.1406946182250977,
"learning_rate": 9.222222222222224e-06,
"loss": 0.4668,
"step": 850
},
{
"epoch": 0.7507507507507507,
"grad_norm": 3.900789499282837,
"learning_rate": 9.166666666666666e-06,
"loss": 0.4834,
"step": 875
},
{
"epoch": 0.7722007722007722,
"grad_norm": 3.2185850143432617,
"learning_rate": 9.111111111111112e-06,
"loss": 0.4716,
"step": 900
},
{
"epoch": 0.7936507936507936,
"grad_norm": 3.683173179626465,
"learning_rate": 9.055555555555556e-06,
"loss": 0.4882,
"step": 925
},
{
"epoch": 0.8151008151008151,
"grad_norm": 3.3974883556365967,
"learning_rate": 9e-06,
"loss": 0.4781,
"step": 950
},
{
"epoch": 0.8365508365508365,
"grad_norm": 3.78350830078125,
"learning_rate": 8.944444444444446e-06,
"loss": 0.4872,
"step": 975
},
{
"epoch": 0.858000858000858,
"grad_norm": 3.633676528930664,
"learning_rate": 8.888888888888888e-06,
"loss": 0.4429,
"step": 1000
},
{
"epoch": 0.858000858000858,
"eval_loss": 0.46728599071502686,
"eval_runtime": 1674.249,
"eval_samples_per_second": 2.33,
"eval_steps_per_second": 0.146,
"eval_wer": 0.3495363662706462,
"step": 1000
},
{
"epoch": 0.8794508794508794,
"grad_norm": 3.6749162673950195,
"learning_rate": 8.833333333333334e-06,
"loss": 0.4712,
"step": 1025
},
{
"epoch": 0.9009009009009009,
"grad_norm": 3.0788767337799072,
"learning_rate": 8.777777777777778e-06,
"loss": 0.4857,
"step": 1050
},
{
"epoch": 0.9223509223509223,
"grad_norm": 3.3249752521514893,
"learning_rate": 8.722222222222224e-06,
"loss": 0.4406,
"step": 1075
},
{
"epoch": 0.9438009438009438,
"grad_norm": 3.189654588699341,
"learning_rate": 8.666666666666668e-06,
"loss": 0.4313,
"step": 1100
},
{
"epoch": 0.9652509652509652,
"grad_norm": 3.3606882095336914,
"learning_rate": 8.611111111111112e-06,
"loss": 0.4517,
"step": 1125
},
{
"epoch": 0.9867009867009867,
"grad_norm": 3.4091646671295166,
"learning_rate": 8.555555555555556e-06,
"loss": 0.4378,
"step": 1150
},
{
"epoch": 1.008151008151008,
"grad_norm": 2.8111917972564697,
"learning_rate": 8.5e-06,
"loss": 0.3944,
"step": 1175
},
{
"epoch": 1.0296010296010296,
"grad_norm": 2.7863500118255615,
"learning_rate": 8.444444444444446e-06,
"loss": 0.3242,
"step": 1200
},
{
"epoch": 1.0510510510510511,
"grad_norm": 2.681203603744507,
"learning_rate": 8.38888888888889e-06,
"loss": 0.3294,
"step": 1225
},
{
"epoch": 1.0725010725010724,
"grad_norm": 3.21625018119812,
"learning_rate": 8.333333333333334e-06,
"loss": 0.3232,
"step": 1250
},
{
"epoch": 1.093951093951094,
"grad_norm": 2.1063191890716553,
"learning_rate": 8.277777777777778e-06,
"loss": 0.3162,
"step": 1275
},
{
"epoch": 1.1154011154011154,
"grad_norm": 2.7239444255828857,
"learning_rate": 8.222222222222222e-06,
"loss": 0.3372,
"step": 1300
},
{
"epoch": 1.136851136851137,
"grad_norm": 2.6881861686706543,
"learning_rate": 8.166666666666668e-06,
"loss": 0.3269,
"step": 1325
},
{
"epoch": 1.1583011583011582,
"grad_norm": 3.041693925857544,
"learning_rate": 8.111111111111112e-06,
"loss": 0.3299,
"step": 1350
},
{
"epoch": 1.1797511797511797,
"grad_norm": 3.6576833724975586,
"learning_rate": 8.055555555555557e-06,
"loss": 0.3786,
"step": 1375
},
{
"epoch": 1.2012012012012012,
"grad_norm": 3.035702705383301,
"learning_rate": 8.000000000000001e-06,
"loss": 0.346,
"step": 1400
},
{
"epoch": 1.2226512226512227,
"grad_norm": 2.5944619178771973,
"learning_rate": 7.944444444444445e-06,
"loss": 0.3309,
"step": 1425
},
{
"epoch": 1.244101244101244,
"grad_norm": 2.5746264457702637,
"learning_rate": 7.88888888888889e-06,
"loss": 0.3235,
"step": 1450
},
{
"epoch": 1.2655512655512655,
"grad_norm": 2.7788243293762207,
"learning_rate": 7.833333333333333e-06,
"loss": 0.3212,
"step": 1475
},
{
"epoch": 1.287001287001287,
"grad_norm": 3.6462416648864746,
"learning_rate": 7.77777777777778e-06,
"loss": 0.332,
"step": 1500
},
{
"epoch": 1.3084513084513085,
"grad_norm": 2.2463419437408447,
"learning_rate": 7.722222222222223e-06,
"loss": 0.3366,
"step": 1525
},
{
"epoch": 1.3299013299013298,
"grad_norm": 3.5368576049804688,
"learning_rate": 7.666666666666667e-06,
"loss": 0.3138,
"step": 1550
},
{
"epoch": 1.3513513513513513,
"grad_norm": 2.547539472579956,
"learning_rate": 7.611111111111111e-06,
"loss": 0.3564,
"step": 1575
},
{
"epoch": 1.3728013728013728,
"grad_norm": 3.0548551082611084,
"learning_rate": 7.555555555555556e-06,
"loss": 0.3393,
"step": 1600
},
{
"epoch": 1.3942513942513943,
"grad_norm": 2.7717063426971436,
"learning_rate": 7.500000000000001e-06,
"loss": 0.3266,
"step": 1625
},
{
"epoch": 1.4157014157014158,
"grad_norm": 2.6384313106536865,
"learning_rate": 7.444444444444445e-06,
"loss": 0.3355,
"step": 1650
},
{
"epoch": 1.4371514371514371,
"grad_norm": 2.7044169902801514,
"learning_rate": 7.38888888888889e-06,
"loss": 0.3375,
"step": 1675
},
{
"epoch": 1.4586014586014586,
"grad_norm": 2.4753165245056152,
"learning_rate": 7.333333333333333e-06,
"loss": 0.3347,
"step": 1700
},
{
"epoch": 1.4800514800514801,
"grad_norm": 3.0648443698883057,
"learning_rate": 7.277777777777778e-06,
"loss": 0.3202,
"step": 1725
},
{
"epoch": 1.5015015015015014,
"grad_norm": 3.1359264850616455,
"learning_rate": 7.222222222222223e-06,
"loss": 0.3303,
"step": 1750
},
{
"epoch": 1.522951522951523,
"grad_norm": 2.995232105255127,
"learning_rate": 7.166666666666667e-06,
"loss": 0.3197,
"step": 1775
},
{
"epoch": 1.5444015444015444,
"grad_norm": 2.7742152214050293,
"learning_rate": 7.111111111111112e-06,
"loss": 0.3255,
"step": 1800
},
{
"epoch": 1.565851565851566,
"grad_norm": 2.7029542922973633,
"learning_rate": 7.055555555555557e-06,
"loss": 0.3147,
"step": 1825
},
{
"epoch": 1.5873015873015874,
"grad_norm": 2.604520082473755,
"learning_rate": 7e-06,
"loss": 0.3235,
"step": 1850
},
{
"epoch": 1.6087516087516087,
"grad_norm": 3.152074098587036,
"learning_rate": 6.944444444444445e-06,
"loss": 0.319,
"step": 1875
},
{
"epoch": 1.6302016302016302,
"grad_norm": 3.121864080429077,
"learning_rate": 6.88888888888889e-06,
"loss": 0.2972,
"step": 1900
},
{
"epoch": 1.6516516516516515,
"grad_norm": 2.733600616455078,
"learning_rate": 6.833333333333334e-06,
"loss": 0.3177,
"step": 1925
},
{
"epoch": 1.673101673101673,
"grad_norm": 2.8984882831573486,
"learning_rate": 6.777777777777779e-06,
"loss": 0.3142,
"step": 1950
},
{
"epoch": 1.6945516945516945,
"grad_norm": 2.585207939147949,
"learning_rate": 6.7222222222222235e-06,
"loss": 0.3372,
"step": 1975
},
{
"epoch": 1.716001716001716,
"grad_norm": 3.007678985595703,
"learning_rate": 6.666666666666667e-06,
"loss": 0.3192,
"step": 2000
},
{
"epoch": 1.716001716001716,
"eval_loss": 0.41164642572402954,
"eval_runtime": 1704.3317,
"eval_samples_per_second": 2.289,
"eval_steps_per_second": 0.143,
"eval_wer": 0.2986090988119386,
"step": 2000
},
{
"epoch": 1.7374517374517375,
"grad_norm": 2.6128337383270264,
"learning_rate": 6.6111111111111115e-06,
"loss": 0.3215,
"step": 2025
},
{
"epoch": 1.758901758901759,
"grad_norm": 2.6473169326782227,
"learning_rate": 6.555555555555556e-06,
"loss": 0.324,
"step": 2050
},
{
"epoch": 1.7803517803517803,
"grad_norm": 2.654451608657837,
"learning_rate": 6.5000000000000004e-06,
"loss": 0.3029,
"step": 2075
},
{
"epoch": 1.8018018018018018,
"grad_norm": 2.335432767868042,
"learning_rate": 6.444444444444445e-06,
"loss": 0.3197,
"step": 2100
},
{
"epoch": 1.8232518232518231,
"grad_norm": 2.4517362117767334,
"learning_rate": 6.3888888888888885e-06,
"loss": 0.3136,
"step": 2125
},
{
"epoch": 1.8447018447018446,
"grad_norm": 2.7718915939331055,
"learning_rate": 6.333333333333333e-06,
"loss": 0.3059,
"step": 2150
},
{
"epoch": 1.8661518661518661,
"grad_norm": 2.876389265060425,
"learning_rate": 6.277777777777778e-06,
"loss": 0.3193,
"step": 2175
},
{
"epoch": 1.8876018876018876,
"grad_norm": 2.892172336578369,
"learning_rate": 6.222222222222223e-06,
"loss": 0.3097,
"step": 2200
},
{
"epoch": 1.9090519090519091,
"grad_norm": 3.030829429626465,
"learning_rate": 6.166666666666667e-06,
"loss": 0.3232,
"step": 2225
},
{
"epoch": 1.9305019305019306,
"grad_norm": 2.721433162689209,
"learning_rate": 6.111111111111112e-06,
"loss": 0.2944,
"step": 2250
},
{
"epoch": 1.951951951951952,
"grad_norm": 3.1279942989349365,
"learning_rate": 6.055555555555555e-06,
"loss": 0.3063,
"step": 2275
},
{
"epoch": 1.9734019734019734,
"grad_norm": 2.706367254257202,
"learning_rate": 6e-06,
"loss": 0.2998,
"step": 2300
},
{
"epoch": 1.9948519948519947,
"grad_norm": 3.0728838443756104,
"learning_rate": 5.944444444444445e-06,
"loss": 0.3036,
"step": 2325
},
{
"epoch": 2.016302016302016,
"grad_norm": 2.0992302894592285,
"learning_rate": 5.88888888888889e-06,
"loss": 0.2477,
"step": 2350
},
{
"epoch": 2.0377520377520377,
"grad_norm": 1.931810975074768,
"learning_rate": 5.833333333333334e-06,
"loss": 0.2085,
"step": 2375
},
{
"epoch": 2.0592020592020592,
"grad_norm": 2.4714324474334717,
"learning_rate": 5.777777777777778e-06,
"loss": 0.201,
"step": 2400
},
{
"epoch": 2.0806520806520807,
"grad_norm": 2.1515088081359863,
"learning_rate": 5.722222222222222e-06,
"loss": 0.1978,
"step": 2425
},
{
"epoch": 2.1021021021021022,
"grad_norm": 2.203587770462036,
"learning_rate": 5.666666666666667e-06,
"loss": 0.2082,
"step": 2450
},
{
"epoch": 2.1235521235521237,
"grad_norm": 2.3945024013519287,
"learning_rate": 5.611111111111112e-06,
"loss": 0.2012,
"step": 2475
},
{
"epoch": 2.145002145002145,
"grad_norm": 2.5214550495147705,
"learning_rate": 5.555555555555557e-06,
"loss": 0.1983,
"step": 2500
},
{
"epoch": 2.1664521664521663,
"grad_norm": 2.8749985694885254,
"learning_rate": 5.500000000000001e-06,
"loss": 0.202,
"step": 2525
},
{
"epoch": 2.187902187902188,
"grad_norm": 2.302109956741333,
"learning_rate": 5.444444444444445e-06,
"loss": 0.187,
"step": 2550
},
{
"epoch": 2.2093522093522093,
"grad_norm": 2.303842782974243,
"learning_rate": 5.388888888888889e-06,
"loss": 0.2007,
"step": 2575
},
{
"epoch": 2.230802230802231,
"grad_norm": 2.1499674320220947,
"learning_rate": 5.333333333333334e-06,
"loss": 0.2025,
"step": 2600
},
{
"epoch": 2.2522522522522523,
"grad_norm": 2.735565185546875,
"learning_rate": 5.2777777777777785e-06,
"loss": 0.1934,
"step": 2625
},
{
"epoch": 2.273702273702274,
"grad_norm": 2.781230926513672,
"learning_rate": 5.2222222222222226e-06,
"loss": 0.1959,
"step": 2650
},
{
"epoch": 2.2951522951522954,
"grad_norm": 2.934748649597168,
"learning_rate": 5.1666666666666675e-06,
"loss": 0.1923,
"step": 2675
},
{
"epoch": 2.3166023166023164,
"grad_norm": 2.5688586235046387,
"learning_rate": 5.1111111111111115e-06,
"loss": 0.2096,
"step": 2700
},
{
"epoch": 2.338052338052338,
"grad_norm": 2.15342378616333,
"learning_rate": 5.0555555555555555e-06,
"loss": 0.198,
"step": 2725
},
{
"epoch": 2.3595023595023594,
"grad_norm": 2.287332773208618,
"learning_rate": 5e-06,
"loss": 0.2079,
"step": 2750
},
{
"epoch": 2.380952380952381,
"grad_norm": 2.2606818675994873,
"learning_rate": 4.944444444444445e-06,
"loss": 0.2014,
"step": 2775
},
{
"epoch": 2.4024024024024024,
"grad_norm": 2.0348024368286133,
"learning_rate": 4.888888888888889e-06,
"loss": 0.2046,
"step": 2800
},
{
"epoch": 2.423852423852424,
"grad_norm": 2.326032876968384,
"learning_rate": 4.833333333333333e-06,
"loss": 0.1905,
"step": 2825
},
{
"epoch": 2.4453024453024454,
"grad_norm": 2.2641820907592773,
"learning_rate": 4.777777777777778e-06,
"loss": 0.1959,
"step": 2850
},
{
"epoch": 2.4667524667524665,
"grad_norm": 1.923122763633728,
"learning_rate": 4.722222222222222e-06,
"loss": 0.199,
"step": 2875
},
{
"epoch": 2.488202488202488,
"grad_norm": 2.273163080215454,
"learning_rate": 4.666666666666667e-06,
"loss": 0.1983,
"step": 2900
},
{
"epoch": 2.5096525096525095,
"grad_norm": 2.816136598587036,
"learning_rate": 4.611111111111112e-06,
"loss": 0.1901,
"step": 2925
},
{
"epoch": 2.531102531102531,
"grad_norm": 2.453509569168091,
"learning_rate": 4.555555555555556e-06,
"loss": 0.1819,
"step": 2950
},
{
"epoch": 2.5525525525525525,
"grad_norm": 1.754265546798706,
"learning_rate": 4.5e-06,
"loss": 0.1808,
"step": 2975
},
{
"epoch": 2.574002574002574,
"grad_norm": 2.362611770629883,
"learning_rate": 4.444444444444444e-06,
"loss": 0.1917,
"step": 3000
},
{
"epoch": 2.574002574002574,
"eval_loss": 0.4086485207080841,
"eval_runtime": 1612.1909,
"eval_samples_per_second": 2.42,
"eval_steps_per_second": 0.151,
"eval_wer": 0.2937071380276248,
"step": 3000
},
{
"epoch": 2.5954525954525955,
"grad_norm": 2.633920669555664,
"learning_rate": 4.388888888888889e-06,
"loss": 0.188,
"step": 3025
},
{
"epoch": 2.616902616902617,
"grad_norm": 2.290724515914917,
"learning_rate": 4.333333333333334e-06,
"loss": 0.2027,
"step": 3050
},
{
"epoch": 2.6383526383526386,
"grad_norm": 2.451239585876465,
"learning_rate": 4.277777777777778e-06,
"loss": 0.1982,
"step": 3075
},
{
"epoch": 2.6598026598026596,
"grad_norm": 2.550907611846924,
"learning_rate": 4.222222222222223e-06,
"loss": 0.1994,
"step": 3100
},
{
"epoch": 2.681252681252681,
"grad_norm": 2.633718729019165,
"learning_rate": 4.166666666666667e-06,
"loss": 0.229,
"step": 3125
},
{
"epoch": 2.7027027027027026,
"grad_norm": 2.618665933609009,
"learning_rate": 4.111111111111111e-06,
"loss": 0.2017,
"step": 3150
},
{
"epoch": 2.724152724152724,
"grad_norm": 2.954728364944458,
"learning_rate": 4.055555555555556e-06,
"loss": 0.1873,
"step": 3175
},
{
"epoch": 2.7456027456027456,
"grad_norm": 2.27900767326355,
"learning_rate": 4.000000000000001e-06,
"loss": 0.2034,
"step": 3200
},
{
"epoch": 2.767052767052767,
"grad_norm": 2.202578544616699,
"learning_rate": 3.944444444444445e-06,
"loss": 0.1955,
"step": 3225
},
{
"epoch": 2.7885027885027887,
"grad_norm": 2.391146659851074,
"learning_rate": 3.88888888888889e-06,
"loss": 0.1934,
"step": 3250
},
{
"epoch": 2.8099528099528097,
"grad_norm": 2.5382649898529053,
"learning_rate": 3.833333333333334e-06,
"loss": 0.1907,
"step": 3275
},
{
"epoch": 2.8314028314028317,
"grad_norm": 2.9581174850463867,
"learning_rate": 3.777777777777778e-06,
"loss": 0.2083,
"step": 3300
},
{
"epoch": 2.8528528528528527,
"grad_norm": 2.907230854034424,
"learning_rate": 3.7222222222222225e-06,
"loss": 0.2074,
"step": 3325
},
{
"epoch": 2.8743028743028742,
"grad_norm": 2.437242031097412,
"learning_rate": 3.6666666666666666e-06,
"loss": 0.193,
"step": 3350
},
{
"epoch": 2.8957528957528957,
"grad_norm": 2.3380300998687744,
"learning_rate": 3.6111111111111115e-06,
"loss": 0.211,
"step": 3375
},
{
"epoch": 2.9172029172029172,
"grad_norm": 3.0222008228302,
"learning_rate": 3.555555555555556e-06,
"loss": 0.1924,
"step": 3400
},
{
"epoch": 2.9386529386529388,
"grad_norm": 2.763075351715088,
"learning_rate": 3.5e-06,
"loss": 0.1989,
"step": 3425
},
{
"epoch": 2.9601029601029603,
"grad_norm": 2.859872817993164,
"learning_rate": 3.444444444444445e-06,
"loss": 0.1845,
"step": 3450
},
{
"epoch": 2.9815529815529818,
"grad_norm": 2.663336992263794,
"learning_rate": 3.3888888888888893e-06,
"loss": 0.1994,
"step": 3475
},
{
"epoch": 3.003003003003003,
"grad_norm": 1.762622594833374,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.1894,
"step": 3500
},
{
"epoch": 3.0244530244530243,
"grad_norm": 1.7877063751220703,
"learning_rate": 3.277777777777778e-06,
"loss": 0.1154,
"step": 3525
},
{
"epoch": 3.045903045903046,
"grad_norm": 2.131319761276245,
"learning_rate": 3.2222222222222227e-06,
"loss": 0.1122,
"step": 3550
},
{
"epoch": 3.0673530673530673,
"grad_norm": 1.935207486152649,
"learning_rate": 3.1666666666666667e-06,
"loss": 0.1157,
"step": 3575
},
{
"epoch": 3.088803088803089,
"grad_norm": 2.1667850017547607,
"learning_rate": 3.1111111111111116e-06,
"loss": 0.1165,
"step": 3600
},
{
"epoch": 3.1102531102531104,
"grad_norm": 2.0937955379486084,
"learning_rate": 3.055555555555556e-06,
"loss": 0.1066,
"step": 3625
},
{
"epoch": 3.131703131703132,
"grad_norm": 2.4619686603546143,
"learning_rate": 3e-06,
"loss": 0.1047,
"step": 3650
},
{
"epoch": 3.153153153153153,
"grad_norm": 2.3083388805389404,
"learning_rate": 2.944444444444445e-06,
"loss": 0.1483,
"step": 3675
},
{
"epoch": 3.1746031746031744,
"grad_norm": 2.0462327003479004,
"learning_rate": 2.888888888888889e-06,
"loss": 0.1183,
"step": 3700
},
{
"epoch": 3.196053196053196,
"grad_norm": 2.2849583625793457,
"learning_rate": 2.8333333333333335e-06,
"loss": 0.1155,
"step": 3725
},
{
"epoch": 3.2175032175032174,
"grad_norm": 2.364773750305176,
"learning_rate": 2.7777777777777783e-06,
"loss": 0.1139,
"step": 3750
},
{
"epoch": 3.238953238953239,
"grad_norm": 2.6987013816833496,
"learning_rate": 2.7222222222222224e-06,
"loss": 0.1117,
"step": 3775
},
{
"epoch": 3.2604032604032605,
"grad_norm": 2.8335797786712646,
"learning_rate": 2.666666666666667e-06,
"loss": 0.1135,
"step": 3800
},
{
"epoch": 3.281853281853282,
"grad_norm": 2.0935230255126953,
"learning_rate": 2.6111111111111113e-06,
"loss": 0.1048,
"step": 3825
},
{
"epoch": 3.3033033033033035,
"grad_norm": 2.1639814376831055,
"learning_rate": 2.5555555555555557e-06,
"loss": 0.1088,
"step": 3850
},
{
"epoch": 3.324753324753325,
"grad_norm": 2.022599458694458,
"learning_rate": 2.5e-06,
"loss": 0.1093,
"step": 3875
},
{
"epoch": 3.346203346203346,
"grad_norm": 2.6962931156158447,
"learning_rate": 2.4444444444444447e-06,
"loss": 0.1139,
"step": 3900
},
{
"epoch": 3.3676533676533675,
"grad_norm": 1.9098910093307495,
"learning_rate": 2.388888888888889e-06,
"loss": 0.1082,
"step": 3925
},
{
"epoch": 3.389103389103389,
"grad_norm": 2.3071112632751465,
"learning_rate": 2.3333333333333336e-06,
"loss": 0.129,
"step": 3950
},
{
"epoch": 3.4105534105534105,
"grad_norm": 1.9279221296310425,
"learning_rate": 2.277777777777778e-06,
"loss": 0.1153,
"step": 3975
},
{
"epoch": 3.432003432003432,
"grad_norm": 2.1972861289978027,
"learning_rate": 2.222222222222222e-06,
"loss": 0.1113,
"step": 4000
},
{
"epoch": 3.432003432003432,
"eval_loss": 0.43410712480545044,
"eval_runtime": 2229.8693,
"eval_samples_per_second": 1.749,
"eval_steps_per_second": 0.109,
"eval_wer": 0.2852313339128755,
"step": 4000
},
{
"epoch": 3.4534534534534536,
"grad_norm": 1.9216057062149048,
"learning_rate": 2.166666666666667e-06,
"loss": 0.1126,
"step": 4025
},
{
"epoch": 3.474903474903475,
"grad_norm": 2.277034282684326,
"learning_rate": 2.1111111111111114e-06,
"loss": 0.1088,
"step": 4050
},
{
"epoch": 3.496353496353496,
"grad_norm": 2.0428617000579834,
"learning_rate": 2.0555555555555555e-06,
"loss": 0.1083,
"step": 4075
},
{
"epoch": 3.517803517803518,
"grad_norm": 2.5672316551208496,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.1059,
"step": 4100
},
{
"epoch": 3.539253539253539,
"grad_norm": 2.100874185562134,
"learning_rate": 1.944444444444445e-06,
"loss": 0.1118,
"step": 4125
},
{
"epoch": 3.5607035607035606,
"grad_norm": 2.066019296646118,
"learning_rate": 1.888888888888889e-06,
"loss": 0.1043,
"step": 4150
},
{
"epoch": 3.582153582153582,
"grad_norm": 2.2962706089019775,
"learning_rate": 1.8333333333333333e-06,
"loss": 0.1158,
"step": 4175
},
{
"epoch": 3.6036036036036037,
"grad_norm": 2.4337549209594727,
"learning_rate": 1.777777777777778e-06,
"loss": 0.1074,
"step": 4200
},
{
"epoch": 3.625053625053625,
"grad_norm": 2.21247935295105,
"learning_rate": 1.7222222222222224e-06,
"loss": 0.1081,
"step": 4225
},
{
"epoch": 3.6465036465036467,
"grad_norm": 1.9160076379776,
"learning_rate": 1.6666666666666667e-06,
"loss": 0.1139,
"step": 4250
},
{
"epoch": 3.667953667953668,
"grad_norm": 2.366440773010254,
"learning_rate": 1.6111111111111113e-06,
"loss": 0.1072,
"step": 4275
},
{
"epoch": 3.6894036894036892,
"grad_norm": 2.299656867980957,
"learning_rate": 1.5555555555555558e-06,
"loss": 0.1108,
"step": 4300
},
{
"epoch": 3.7108537108537107,
"grad_norm": 2.0302138328552246,
"learning_rate": 1.5e-06,
"loss": 0.1013,
"step": 4325
},
{
"epoch": 3.7323037323037322,
"grad_norm": 2.416654586791992,
"learning_rate": 1.4444444444444445e-06,
"loss": 0.1134,
"step": 4350
},
{
"epoch": 3.7537537537537538,
"grad_norm": 1.7967655658721924,
"learning_rate": 1.3888888888888892e-06,
"loss": 0.1001,
"step": 4375
},
{
"epoch": 3.7752037752037753,
"grad_norm": 2.470515251159668,
"learning_rate": 1.3333333333333334e-06,
"loss": 0.1033,
"step": 4400
},
{
"epoch": 3.7966537966537968,
"grad_norm": 2.558213949203491,
"learning_rate": 1.2777777777777779e-06,
"loss": 0.1081,
"step": 4425
},
{
"epoch": 3.8181038181038183,
"grad_norm": 2.0769007205963135,
"learning_rate": 1.2222222222222223e-06,
"loss": 0.1027,
"step": 4450
},
{
"epoch": 3.8395538395538393,
"grad_norm": 1.9774547815322876,
"learning_rate": 1.1666666666666668e-06,
"loss": 0.1136,
"step": 4475
},
{
"epoch": 3.861003861003861,
"grad_norm": 2.82588791847229,
"learning_rate": 1.111111111111111e-06,
"loss": 0.1129,
"step": 4500
},
{
"epoch": 3.8824538824538823,
"grad_norm": 2.3755438327789307,
"learning_rate": 1.0555555555555557e-06,
"loss": 0.1095,
"step": 4525
},
{
"epoch": 3.903903903903904,
"grad_norm": 2.441087007522583,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.1007,
"step": 4550
},
{
"epoch": 3.9253539253539254,
"grad_norm": 2.604708433151245,
"learning_rate": 9.444444444444445e-07,
"loss": 0.1055,
"step": 4575
},
{
"epoch": 3.946803946803947,
"grad_norm": 2.5220184326171875,
"learning_rate": 8.88888888888889e-07,
"loss": 0.112,
"step": 4600
},
{
"epoch": 3.9682539682539684,
"grad_norm": 2.3847906589508057,
"learning_rate": 8.333333333333333e-07,
"loss": 0.1082,
"step": 4625
},
{
"epoch": 3.9897039897039894,
"grad_norm": 1.749206304550171,
"learning_rate": 7.8e-07,
"loss": 0.1096,
"step": 4650
},
{
"epoch": 4.011154011154011,
"grad_norm": 1.657009482383728,
"learning_rate": 7.244444444444446e-07,
"loss": 0.084,
"step": 4675
},
{
"epoch": 4.032604032604032,
"grad_norm": 1.4502904415130615,
"learning_rate": 6.68888888888889e-07,
"loss": 0.0624,
"step": 4700
},
{
"epoch": 4.054054054054054,
"grad_norm": 1.753985047340393,
"learning_rate": 6.133333333333333e-07,
"loss": 0.064,
"step": 4725
},
{
"epoch": 4.0755040755040755,
"grad_norm": 1.8382900953292847,
"learning_rate": 5.577777777777779e-07,
"loss": 0.0607,
"step": 4750
},
{
"epoch": 4.0969540969540965,
"grad_norm": 2.292609930038452,
"learning_rate": 5.022222222222222e-07,
"loss": 0.0617,
"step": 4775
},
{
"epoch": 4.1184041184041185,
"grad_norm": 1.5938409566879272,
"learning_rate": 4.466666666666667e-07,
"loss": 0.0643,
"step": 4800
},
{
"epoch": 4.1398541398541395,
"grad_norm": 1.7310892343521118,
"learning_rate": 3.9111111111111115e-07,
"loss": 0.0595,
"step": 4825
},
{
"epoch": 4.1613041613041615,
"grad_norm": 1.9981136322021484,
"learning_rate": 3.3555555555555556e-07,
"loss": 0.0553,
"step": 4850
},
{
"epoch": 4.1827541827541825,
"grad_norm": 1.4661279916763306,
"learning_rate": 2.8e-07,
"loss": 0.0599,
"step": 4875
},
{
"epoch": 4.2042042042042045,
"grad_norm": 2.204902172088623,
"learning_rate": 2.2444444444444445e-07,
"loss": 0.0627,
"step": 4900
},
{
"epoch": 4.2256542256542255,
"grad_norm": 1.7172112464904785,
"learning_rate": 1.6888888888888888e-07,
"loss": 0.0643,
"step": 4925
},
{
"epoch": 4.2471042471042475,
"grad_norm": 1.4979685544967651,
"learning_rate": 1.1333333333333336e-07,
"loss": 0.0602,
"step": 4950
},
{
"epoch": 4.268554268554269,
"grad_norm": 1.880300760269165,
"learning_rate": 5.777777777777778e-08,
"loss": 0.0591,
"step": 4975
},
{
"epoch": 4.29000429000429,
"grad_norm": 1.7681268453598022,
"learning_rate": 2.2222222222222225e-09,
"loss": 0.0665,
"step": 5000
},
{
"epoch": 4.29000429000429,
"eval_loss": 0.46873393654823303,
"eval_runtime": 2420.0826,
"eval_samples_per_second": 1.612,
"eval_steps_per_second": 0.101,
"eval_wer": 0.2887085868830291,
"step": 5000
},
{
"epoch": 4.29000429000429,
"step": 5000,
"total_flos": 5.435317790834688e+20,
"train_loss": 0.2924629046201706,
"train_runtime": 60618.8801,
"train_samples_per_second": 2.639,
"train_steps_per_second": 0.082
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.435317790834688e+20,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}