checkpoint-1000 / trainer_state.json
{
"best_metric": 19.615089840756195,
"best_model_checkpoint": "./whisper-medium-tr/checkpoint-1000",
"epoch": 0.17238407171177383,
"eval_steps": 1000,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0034476814342354768,
"grad_norm": 17.02838897705078,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.0558,
"step": 20
},
{
"epoch": 0.0068953628684709535,
"grad_norm": 8.815332412719727,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.5997,
"step": 40
},
{
"epoch": 0.01034304430270643,
"grad_norm": 9.678966522216797,
"learning_rate": 3e-06,
"loss": 0.2506,
"step": 60
},
{
"epoch": 0.013790725736941907,
"grad_norm": 7.050973892211914,
"learning_rate": 4.000000000000001e-06,
"loss": 0.1829,
"step": 80
},
{
"epoch": 0.017238407171177382,
"grad_norm": 12.933165550231934,
"learning_rate": 5e-06,
"loss": 0.1818,
"step": 100
},
{
"epoch": 0.02068608860541286,
"grad_norm": 8.803665161132812,
"learning_rate": 6e-06,
"loss": 0.1803,
"step": 120
},
{
"epoch": 0.024133770039648336,
"grad_norm": 7.420976161956787,
"learning_rate": 7e-06,
"loss": 0.1767,
"step": 140
},
{
"epoch": 0.027581451473883814,
"grad_norm": 8.617654800415039,
"learning_rate": 8.000000000000001e-06,
"loss": 0.2164,
"step": 160
},
{
"epoch": 0.03102913290811929,
"grad_norm": 13.863837242126465,
"learning_rate": 9e-06,
"loss": 0.2168,
"step": 180
},
{
"epoch": 0.034476814342354764,
"grad_norm": 9.399210929870605,
"learning_rate": 1e-05,
"loss": 0.1798,
"step": 200
},
{
"epoch": 0.037924495776590246,
"grad_norm": 7.0017266273498535,
"learning_rate": 9.92857142857143e-06,
"loss": 0.1962,
"step": 220
},
{
"epoch": 0.04137217721082572,
"grad_norm": 13.431073188781738,
"learning_rate": 9.857142857142859e-06,
"loss": 0.2146,
"step": 240
},
{
"epoch": 0.044819858645061196,
"grad_norm": 6.425413608551025,
"learning_rate": 9.785714285714286e-06,
"loss": 0.1956,
"step": 260
},
{
"epoch": 0.04826754007929667,
"grad_norm": 8.163415908813477,
"learning_rate": 9.714285714285715e-06,
"loss": 0.193,
"step": 280
},
{
"epoch": 0.05171522151353215,
"grad_norm": 12.302197456359863,
"learning_rate": 9.642857142857144e-06,
"loss": 0.2102,
"step": 300
},
{
"epoch": 0.05516290294776763,
"grad_norm": 10.398489952087402,
"learning_rate": 9.571428571428573e-06,
"loss": 0.221,
"step": 320
},
{
"epoch": 0.0586105843820031,
"grad_norm": 10.396890640258789,
"learning_rate": 9.5e-06,
"loss": 0.1642,
"step": 340
},
{
"epoch": 0.06205826581623858,
"grad_norm": 10.882383346557617,
"learning_rate": 9.42857142857143e-06,
"loss": 0.2076,
"step": 360
},
{
"epoch": 0.06550594725047405,
"grad_norm": 9.351667404174805,
"learning_rate": 9.357142857142859e-06,
"loss": 0.1888,
"step": 380
},
{
"epoch": 0.06895362868470953,
"grad_norm": 8.273520469665527,
"learning_rate": 9.285714285714288e-06,
"loss": 0.2302,
"step": 400
},
{
"epoch": 0.072401310118945,
"grad_norm": 9.05259895324707,
"learning_rate": 9.214285714285715e-06,
"loss": 0.2045,
"step": 420
},
{
"epoch": 0.07584899155318049,
"grad_norm": 10.202899932861328,
"learning_rate": 9.142857142857144e-06,
"loss": 0.2022,
"step": 440
},
{
"epoch": 0.07929667298741597,
"grad_norm": 7.107177734375,
"learning_rate": 9.071428571428573e-06,
"loss": 0.172,
"step": 460
},
{
"epoch": 0.08274435442165144,
"grad_norm": 10.326031684875488,
"learning_rate": 9e-06,
"loss": 0.2856,
"step": 480
},
{
"epoch": 0.08619203585588692,
"grad_norm": 5.693482875823975,
"learning_rate": 8.92857142857143e-06,
"loss": 0.2283,
"step": 500
},
{
"epoch": 0.08963971729012239,
"grad_norm": 14.126120567321777,
"learning_rate": 8.857142857142858e-06,
"loss": 0.2095,
"step": 520
},
{
"epoch": 0.09308739872435787,
"grad_norm": 7.242938041687012,
"learning_rate": 8.785714285714286e-06,
"loss": 0.2102,
"step": 540
},
{
"epoch": 0.09653508015859334,
"grad_norm": 8.650644302368164,
"learning_rate": 8.714285714285715e-06,
"loss": 0.2419,
"step": 560
},
{
"epoch": 0.09998276159282882,
"grad_norm": 8.036280632019043,
"learning_rate": 8.642857142857144e-06,
"loss": 0.2031,
"step": 580
},
{
"epoch": 0.1034304430270643,
"grad_norm": 8.81808090209961,
"learning_rate": 8.571428571428571e-06,
"loss": 0.1972,
"step": 600
},
{
"epoch": 0.10687812446129978,
"grad_norm": 13.098625183105469,
"learning_rate": 8.5e-06,
"loss": 0.2224,
"step": 620
},
{
"epoch": 0.11032580589553526,
"grad_norm": 6.834613800048828,
"learning_rate": 8.428571428571429e-06,
"loss": 0.1894,
"step": 640
},
{
"epoch": 0.11377348732977073,
"grad_norm": 11.371912956237793,
"learning_rate": 8.357142857142858e-06,
"loss": 0.2268,
"step": 660
},
{
"epoch": 0.1172211687640062,
"grad_norm": 11.41600513458252,
"learning_rate": 8.285714285714287e-06,
"loss": 0.1934,
"step": 680
},
{
"epoch": 0.12066885019824168,
"grad_norm": 9.144827842712402,
"learning_rate": 8.214285714285714e-06,
"loss": 0.1878,
"step": 700
},
{
"epoch": 0.12411653163247716,
"grad_norm": 3.3032901287078857,
"learning_rate": 8.142857142857143e-06,
"loss": 0.1956,
"step": 720
},
{
"epoch": 0.12756421306671265,
"grad_norm": 8.833490371704102,
"learning_rate": 8.071428571428572e-06,
"loss": 0.2416,
"step": 740
},
{
"epoch": 0.1310118945009481,
"grad_norm": 9.268789291381836,
"learning_rate": 8.000000000000001e-06,
"loss": 0.2008,
"step": 760
},
{
"epoch": 0.1344595759351836,
"grad_norm": 7.276330471038818,
"learning_rate": 7.928571428571429e-06,
"loss": 0.1955,
"step": 780
},
{
"epoch": 0.13790725736941906,
"grad_norm": 9.219639778137207,
"learning_rate": 7.857142857142858e-06,
"loss": 0.1779,
"step": 800
},
{
"epoch": 0.14135493880365455,
"grad_norm": 8.820664405822754,
"learning_rate": 7.785714285714287e-06,
"loss": 0.154,
"step": 820
},
{
"epoch": 0.14480262023789,
"grad_norm": 10.620595932006836,
"learning_rate": 7.714285714285716e-06,
"loss": 0.1912,
"step": 840
},
{
"epoch": 0.1482503016721255,
"grad_norm": 7.289504051208496,
"learning_rate": 7.642857142857143e-06,
"loss": 0.2045,
"step": 860
},
{
"epoch": 0.15169798310636098,
"grad_norm": 6.108071804046631,
"learning_rate": 7.571428571428572e-06,
"loss": 0.1861,
"step": 880
},
{
"epoch": 0.15514566454059645,
"grad_norm": 5.369969367980957,
"learning_rate": 7.500000000000001e-06,
"loss": 0.17,
"step": 900
},
{
"epoch": 0.15859334597483193,
"grad_norm": 10.743534088134766,
"learning_rate": 7.428571428571429e-06,
"loss": 0.1768,
"step": 920
},
{
"epoch": 0.1620410274090674,
"grad_norm": 5.289422988891602,
"learning_rate": 7.357142857142858e-06,
"loss": 0.2147,
"step": 940
},
{
"epoch": 0.16548870884330288,
"grad_norm": 8.861351013183594,
"learning_rate": 7.285714285714286e-06,
"loss": 0.1999,
"step": 960
},
{
"epoch": 0.16893639027753835,
"grad_norm": 8.204620361328125,
"learning_rate": 7.2142857142857145e-06,
"loss": 0.1898,
"step": 980
},
{
"epoch": 0.17238407171177383,
"grad_norm": 8.618937492370605,
"learning_rate": 7.1428571428571436e-06,
"loss": 0.1504,
"step": 1000
},
{
"epoch": 0.17238407171177383,
"eval_loss": 0.24223625659942627,
"eval_runtime": 13074.9332,
"eval_samples_per_second": 0.863,
"eval_steps_per_second": 0.108,
"eval_wer": 19.615089840756195,
"step": 1000
}
],
"logging_steps": 20,
"max_steps": 3000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 8.16483926016e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
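
This file is the trainer state that Hugging Face Transformers writes alongside each saved checkpoint. As a quick illustration of how the JSON above can be read programmatically, the following Python sketch (assuming the file has been downloaded locally; the path is a placeholder, not part of this repository) prints the best WER, the best checkpoint, and the loss trajectory. It is a reading aid, not code taken from this repo.

import json

# Minimal sketch (not part of the repository): load the Trainer state file and
# summarize it. The path below is a placeholder; adjust it to wherever the
# checkpoint lives locally.
with open("whisper-medium-tr/checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)

print(f"best WER:        {state['best_metric']:.2f}")
print(f"best checkpoint: {state['best_model_checkpoint']}")
print(f"progress:        step {state['global_step']} of {state['max_steps']}")

# log_history mixes training entries (keyed by "loss") with evaluation entries
# (keyed by "eval_loss", "eval_wer", ...); split them before summarizing.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_wer" in e]

first, last = train_logs[0], train_logs[-1]
print(f"training loss:   {first['loss']:.3f} (step {first['step']}) -> "
      f"{last['loss']:.3f} (step {last['step']})")

for e in eval_logs:
    print(f"eval @ step {e['step']}: loss={e['eval_loss']:.3f}, WER={e['eval_wer']:.2f}")

Run against this checkpoint, it would report a best WER of about 19.62 at ./whisper-medium-tr/checkpoint-1000, a training loss falling from 1.056 at step 20 to 0.150 at step 1000, and an eval_loss of 0.242 at the single evaluation logged so far. The logged learning rates also follow the usual warmup-then-linear-decay shape, rising to 1e-05 by step 200 and decreasing towards zero at max_steps 3000, though the exact scheduler settings are not recorded in this file.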