whisper-base-zh/checkpoint-2000/trainer_state.json
{
"best_metric": 86.22448979591837,
"best_model_checkpoint": "./whisper-large-lt/checkpoint-2000",
"epoch": 4.140786749482402,
"eval_steps": 1000,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.051759834368530024,
"grad_norm": 31.699411392211914,
"learning_rate": 4.6000000000000004e-07,
"loss": 2.2847,
"step": 25
},
{
"epoch": 0.10351966873706005,
"grad_norm": 21.853273391723633,
"learning_rate": 9.400000000000001e-07,
"loss": 2.0241,
"step": 50
},
{
"epoch": 0.15527950310559005,
"grad_norm": 20.82790756225586,
"learning_rate": 1.44e-06,
"loss": 1.5189,
"step": 75
},
{
"epoch": 0.2070393374741201,
"grad_norm": 10.164386749267578,
"learning_rate": 1.94e-06,
"loss": 0.9866,
"step": 100
},
{
"epoch": 0.2587991718426501,
"grad_norm": 8.86418342590332,
"learning_rate": 2.4400000000000004e-06,
"loss": 0.7653,
"step": 125
},
{
"epoch": 0.3105590062111801,
"grad_norm": 8.09189224243164,
"learning_rate": 2.9400000000000002e-06,
"loss": 0.6515,
"step": 150
},
{
"epoch": 0.36231884057971014,
"grad_norm": 8.163768768310547,
"learning_rate": 3.44e-06,
"loss": 0.553,
"step": 175
},
{
"epoch": 0.4140786749482402,
"grad_norm": 7.93491792678833,
"learning_rate": 3.94e-06,
"loss": 0.5089,
"step": 200
},
{
"epoch": 0.4658385093167702,
"grad_norm": 8.422746658325195,
"learning_rate": 4.440000000000001e-06,
"loss": 0.4928,
"step": 225
},
{
"epoch": 0.5175983436853002,
"grad_norm": 7.056028366088867,
"learning_rate": 4.94e-06,
"loss": 0.4955,
"step": 250
},
{
"epoch": 0.5693581780538303,
"grad_norm": 7.153671741485596,
"learning_rate": 5.4400000000000004e-06,
"loss": 0.4869,
"step": 275
},
{
"epoch": 0.6211180124223602,
"grad_norm": 8.896004676818848,
"learning_rate": 5.94e-06,
"loss": 0.4647,
"step": 300
},
{
"epoch": 0.6728778467908902,
"grad_norm": 7.931967735290527,
"learning_rate": 6.440000000000001e-06,
"loss": 0.4588,
"step": 325
},
{
"epoch": 0.7246376811594203,
"grad_norm": 9.128633499145508,
"learning_rate": 6.9400000000000005e-06,
"loss": 0.4467,
"step": 350
},
{
"epoch": 0.7763975155279503,
"grad_norm": 9.321606636047363,
"learning_rate": 7.440000000000001e-06,
"loss": 0.4442,
"step": 375
},
{
"epoch": 0.8281573498964804,
"grad_norm": 6.722208023071289,
"learning_rate": 7.94e-06,
"loss": 0.414,
"step": 400
},
{
"epoch": 0.8799171842650103,
"grad_norm": 6.496078968048096,
"learning_rate": 8.44e-06,
"loss": 0.4067,
"step": 425
},
{
"epoch": 0.9316770186335404,
"grad_norm": 8.029313087463379,
"learning_rate": 8.94e-06,
"loss": 0.4352,
"step": 450
},
{
"epoch": 0.9834368530020704,
"grad_norm": 7.314033508300781,
"learning_rate": 9.440000000000001e-06,
"loss": 0.425,
"step": 475
},
{
"epoch": 1.0351966873706004,
"grad_norm": 7.7378740310668945,
"learning_rate": 9.940000000000001e-06,
"loss": 0.3573,
"step": 500
},
{
"epoch": 1.0869565217391304,
"grad_norm": 6.325808525085449,
"learning_rate": 9.937142857142858e-06,
"loss": 0.3305,
"step": 525
},
{
"epoch": 1.1387163561076605,
"grad_norm": 7.014776706695557,
"learning_rate": 9.865714285714285e-06,
"loss": 0.3432,
"step": 550
},
{
"epoch": 1.1904761904761905,
"grad_norm": 7.104799747467041,
"learning_rate": 9.794285714285714e-06,
"loss": 0.3428,
"step": 575
},
{
"epoch": 1.2422360248447206,
"grad_norm": 8.842447280883789,
"learning_rate": 9.722857142857143e-06,
"loss": 0.3332,
"step": 600
},
{
"epoch": 1.2939958592132506,
"grad_norm": 4.9119672775268555,
"learning_rate": 9.651428571428572e-06,
"loss": 0.3595,
"step": 625
},
{
"epoch": 1.3457556935817805,
"grad_norm": 6.631478309631348,
"learning_rate": 9.58e-06,
"loss": 0.3273,
"step": 650
},
{
"epoch": 1.3975155279503104,
"grad_norm": 6.498466491699219,
"learning_rate": 9.508571428571429e-06,
"loss": 0.3274,
"step": 675
},
{
"epoch": 1.4492753623188406,
"grad_norm": 6.38285493850708,
"learning_rate": 9.437142857142858e-06,
"loss": 0.3156,
"step": 700
},
{
"epoch": 1.5010351966873707,
"grad_norm": 6.786648273468018,
"learning_rate": 9.365714285714287e-06,
"loss": 0.3161,
"step": 725
},
{
"epoch": 1.5527950310559007,
"grad_norm": 6.803364276885986,
"learning_rate": 9.294285714285714e-06,
"loss": 0.3287,
"step": 750
},
{
"epoch": 1.6045548654244306,
"grad_norm": 7.055851936340332,
"learning_rate": 9.222857142857143e-06,
"loss": 0.334,
"step": 775
},
{
"epoch": 1.6563146997929605,
"grad_norm": 5.978209495544434,
"learning_rate": 9.151428571428572e-06,
"loss": 0.3276,
"step": 800
},
{
"epoch": 1.7080745341614907,
"grad_norm": 8.85584831237793,
"learning_rate": 9.080000000000001e-06,
"loss": 0.3138,
"step": 825
},
{
"epoch": 1.7598343685300208,
"grad_norm": 6.784140110015869,
"learning_rate": 9.00857142857143e-06,
"loss": 0.3167,
"step": 850
},
{
"epoch": 1.8115942028985508,
"grad_norm": 8.701028823852539,
"learning_rate": 8.937142857142857e-06,
"loss": 0.3061,
"step": 875
},
{
"epoch": 1.8633540372670807,
"grad_norm": 6.928929328918457,
"learning_rate": 8.865714285714287e-06,
"loss": 0.3172,
"step": 900
},
{
"epoch": 1.9151138716356106,
"grad_norm": 8.34626579284668,
"learning_rate": 8.794285714285716e-06,
"loss": 0.3105,
"step": 925
},
{
"epoch": 1.9668737060041408,
"grad_norm": 7.861119270324707,
"learning_rate": 8.722857142857145e-06,
"loss": 0.3212,
"step": 950
},
{
"epoch": 2.018633540372671,
"grad_norm": 5.226954936981201,
"learning_rate": 8.651428571428572e-06,
"loss": 0.2669,
"step": 975
},
{
"epoch": 2.070393374741201,
"grad_norm": 6.24221134185791,
"learning_rate": 8.580000000000001e-06,
"loss": 0.2036,
"step": 1000
},
{
"epoch": 2.070393374741201,
"eval_loss": 0.37555259466171265,
"eval_runtime": 551.1751,
"eval_samples_per_second": 3.503,
"eval_steps_per_second": 0.439,
"eval_wer": 86.58163265306122,
"step": 1000
},
{
"epoch": 2.122153209109731,
"grad_norm": 3.854379415512085,
"learning_rate": 8.50857142857143e-06,
"loss": 0.203,
"step": 1025
},
{
"epoch": 2.1739130434782608,
"grad_norm": 4.169343948364258,
"learning_rate": 8.437142857142859e-06,
"loss": 0.23,
"step": 1050
},
{
"epoch": 2.2256728778467907,
"grad_norm": 5.8532633781433105,
"learning_rate": 8.365714285714286e-06,
"loss": 0.1985,
"step": 1075
},
{
"epoch": 2.277432712215321,
"grad_norm": 4.382779598236084,
"learning_rate": 8.294285714285715e-06,
"loss": 0.1997,
"step": 1100
},
{
"epoch": 2.329192546583851,
"grad_norm": 6.208737850189209,
"learning_rate": 8.222857142857144e-06,
"loss": 0.2066,
"step": 1125
},
{
"epoch": 2.380952380952381,
"grad_norm": 5.715628623962402,
"learning_rate": 8.151428571428572e-06,
"loss": 0.2165,
"step": 1150
},
{
"epoch": 2.432712215320911,
"grad_norm": 5.7561445236206055,
"learning_rate": 8.08e-06,
"loss": 0.2176,
"step": 1175
},
{
"epoch": 2.4844720496894412,
"grad_norm": 6.207643508911133,
"learning_rate": 8.00857142857143e-06,
"loss": 0.2075,
"step": 1200
},
{
"epoch": 2.536231884057971,
"grad_norm": 5.213014125823975,
"learning_rate": 7.937142857142857e-06,
"loss": 0.2041,
"step": 1225
},
{
"epoch": 2.587991718426501,
"grad_norm": 5.595193862915039,
"learning_rate": 7.865714285714286e-06,
"loss": 0.205,
"step": 1250
},
{
"epoch": 2.639751552795031,
"grad_norm": 5.534831523895264,
"learning_rate": 7.794285714285715e-06,
"loss": 0.2097,
"step": 1275
},
{
"epoch": 2.691511387163561,
"grad_norm": 5.9356231689453125,
"learning_rate": 7.722857142857142e-06,
"loss": 0.2209,
"step": 1300
},
{
"epoch": 2.7432712215320914,
"grad_norm": 4.567516326904297,
"learning_rate": 7.651428571428571e-06,
"loss": 0.1915,
"step": 1325
},
{
"epoch": 2.795031055900621,
"grad_norm": 5.693762302398682,
"learning_rate": 7.58e-06,
"loss": 0.1972,
"step": 1350
},
{
"epoch": 2.846790890269151,
"grad_norm": 5.581110000610352,
"learning_rate": 7.508571428571429e-06,
"loss": 0.2186,
"step": 1375
},
{
"epoch": 2.898550724637681,
"grad_norm": 5.270753860473633,
"learning_rate": 7.4371428571428575e-06,
"loss": 0.21,
"step": 1400
},
{
"epoch": 2.950310559006211,
"grad_norm": 6.520173072814941,
"learning_rate": 7.365714285714286e-06,
"loss": 0.2235,
"step": 1425
},
{
"epoch": 3.002070393374741,
"grad_norm": 3.4095804691314697,
"learning_rate": 7.294285714285715e-06,
"loss": 0.1991,
"step": 1450
},
{
"epoch": 3.0538302277432714,
"grad_norm": 4.170091152191162,
"learning_rate": 7.222857142857144e-06,
"loss": 0.1364,
"step": 1475
},
{
"epoch": 3.1055900621118013,
"grad_norm": 5.475827217102051,
"learning_rate": 7.151428571428573e-06,
"loss": 0.1373,
"step": 1500
},
{
"epoch": 3.1573498964803313,
"grad_norm": 5.360046863555908,
"learning_rate": 7.08e-06,
"loss": 0.1431,
"step": 1525
},
{
"epoch": 3.209109730848861,
"grad_norm": 3.8683834075927734,
"learning_rate": 7.008571428571429e-06,
"loss": 0.1315,
"step": 1550
},
{
"epoch": 3.260869565217391,
"grad_norm": 4.890464782714844,
"learning_rate": 6.937142857142858e-06,
"loss": 0.1394,
"step": 1575
},
{
"epoch": 3.3126293995859215,
"grad_norm": 4.547149181365967,
"learning_rate": 6.865714285714287e-06,
"loss": 0.1339,
"step": 1600
},
{
"epoch": 3.3643892339544514,
"grad_norm": 3.74664044380188,
"learning_rate": 6.794285714285714e-06,
"loss": 0.1369,
"step": 1625
},
{
"epoch": 3.4161490683229814,
"grad_norm": 5.266635894775391,
"learning_rate": 6.722857142857143e-06,
"loss": 0.1419,
"step": 1650
},
{
"epoch": 3.4679089026915113,
"grad_norm": 3.839707374572754,
"learning_rate": 6.651428571428572e-06,
"loss": 0.1296,
"step": 1675
},
{
"epoch": 3.5196687370600412,
"grad_norm": 4.542924404144287,
"learning_rate": 6.5800000000000005e-06,
"loss": 0.1379,
"step": 1700
},
{
"epoch": 3.571428571428571,
"grad_norm": 3.216632127761841,
"learning_rate": 6.5085714285714295e-06,
"loss": 0.1312,
"step": 1725
},
{
"epoch": 3.6231884057971016,
"grad_norm": 4.014632701873779,
"learning_rate": 6.437142857142858e-06,
"loss": 0.1365,
"step": 1750
},
{
"epoch": 3.6749482401656315,
"grad_norm": 4.811014175415039,
"learning_rate": 6.365714285714286e-06,
"loss": 0.1304,
"step": 1775
},
{
"epoch": 3.7267080745341614,
"grad_norm": 3.7714288234710693,
"learning_rate": 6.294285714285715e-06,
"loss": 0.1525,
"step": 1800
},
{
"epoch": 3.7784679089026914,
"grad_norm": 3.39595103263855,
"learning_rate": 6.222857142857144e-06,
"loss": 0.1428,
"step": 1825
},
{
"epoch": 3.8302277432712213,
"grad_norm": 5.35399866104126,
"learning_rate": 6.151428571428571e-06,
"loss": 0.1338,
"step": 1850
},
{
"epoch": 3.8819875776397517,
"grad_norm": 4.488447666168213,
"learning_rate": 6.08e-06,
"loss": 0.1371,
"step": 1875
},
{
"epoch": 3.9337474120082816,
"grad_norm": 4.74454402923584,
"learning_rate": 6.008571428571429e-06,
"loss": 0.1382,
"step": 1900
},
{
"epoch": 3.9855072463768115,
"grad_norm": 5.4989190101623535,
"learning_rate": 5.937142857142858e-06,
"loss": 0.1443,
"step": 1925
},
{
"epoch": 4.037267080745342,
"grad_norm": 4.14027214050293,
"learning_rate": 5.865714285714286e-06,
"loss": 0.1094,
"step": 1950
},
{
"epoch": 4.089026915113871,
"grad_norm": 2.9087343215942383,
"learning_rate": 5.794285714285715e-06,
"loss": 0.0856,
"step": 1975
},
{
"epoch": 4.140786749482402,
"grad_norm": 3.901829242706299,
"learning_rate": 5.722857142857144e-06,
"loss": 0.0885,
"step": 2000
},
{
"epoch": 4.140786749482402,
"eval_loss": 0.3902941644191742,
"eval_runtime": 550.9002,
"eval_samples_per_second": 3.505,
"eval_steps_per_second": 0.439,
"eval_wer": 86.22448979591837,
"step": 2000
}
],
"logging_steps": 25,
"max_steps": 4000,
"num_input_tokens_seen": 0,
"num_train_epochs": 9,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.07344435724288e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}