{
"best_metric": 43.19571383548692,
"best_model_checkpoint": "whisper-small-br/checkpoint-3500",
"epoch": 4.069767441860465,
"eval_steps": 500,
"global_step": 3500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.12,
"grad_norm": 23.873233795166016,
"learning_rate": 4.800000000000001e-06,
"loss": 4.4977,
"step": 100
},
{
"epoch": 0.23,
"grad_norm": 18.380311965942383,
"learning_rate": 9.75e-06,
"loss": 2.0428,
"step": 200
},
{
"epoch": 0.35,
"grad_norm": 15.277495384216309,
"learning_rate": 9.75e-06,
"loss": 1.282,
"step": 300
},
{
"epoch": 0.47,
"grad_norm": 16.010156631469727,
"learning_rate": 9.486842105263158e-06,
"loss": 0.9714,
"step": 400
},
{
"epoch": 0.58,
"grad_norm": 12.95919418334961,
"learning_rate": 9.223684210526316e-06,
"loss": 0.8721,
"step": 500
},
{
"epoch": 0.58,
"eval_cer": 23.931937364773187,
"eval_loss": 0.847813606262207,
"eval_runtime": 707.7025,
"eval_samples_per_second": 3.613,
"eval_steps_per_second": 0.452,
"eval_wer": 62.74188465174914,
"step": 500
},
{
"epoch": 0.7,
"grad_norm": 15.317465782165527,
"learning_rate": 8.960526315789474e-06,
"loss": 0.8022,
"step": 600
},
{
"epoch": 0.81,
"grad_norm": 14.4519624710083,
"learning_rate": 8.697368421052633e-06,
"loss": 0.7294,
"step": 700
},
{
"epoch": 0.93,
"grad_norm": 13.281527519226074,
"learning_rate": 8.43421052631579e-06,
"loss": 0.7037,
"step": 800
},
{
"epoch": 1.05,
"grad_norm": 11.638937950134277,
"learning_rate": 8.171052631578949e-06,
"loss": 0.5775,
"step": 900
},
{
"epoch": 1.16,
"grad_norm": 10.528143882751465,
"learning_rate": 7.907894736842105e-06,
"loss": 0.413,
"step": 1000
},
{
"epoch": 1.16,
"eval_cer": 20.239099343576957,
"eval_loss": 0.6627262830734253,
"eval_runtime": 694.5097,
"eval_samples_per_second": 3.682,
"eval_steps_per_second": 0.461,
"eval_wer": 51.78695241096754,
"step": 1000
},
{
"epoch": 1.28,
"grad_norm": 11.562024116516113,
"learning_rate": 7.644736842105264e-06,
"loss": 0.4373,
"step": 1100
},
{
"epoch": 1.4,
"grad_norm": 11.719500541687012,
"learning_rate": 7.381578947368422e-06,
"loss": 0.4028,
"step": 1200
},
{
"epoch": 1.51,
"grad_norm": 11.709799766540527,
"learning_rate": 7.11842105263158e-06,
"loss": 0.4085,
"step": 1300
},
{
"epoch": 1.63,
"grad_norm": 10.174458503723145,
"learning_rate": 6.855263157894737e-06,
"loss": 0.394,
"step": 1400
},
{
"epoch": 1.74,
"grad_norm": 10.892149925231934,
"learning_rate": 6.5921052631578955e-06,
"loss": 0.3822,
"step": 1500
},
{
"epoch": 1.74,
"eval_cer": 17.72097742246996,
"eval_loss": 0.6052409410476685,
"eval_runtime": 688.7859,
"eval_samples_per_second": 3.712,
"eval_steps_per_second": 0.465,
"eval_wer": 46.448156318941066,
"step": 1500
},
{
"epoch": 1.86,
"grad_norm": 12.372304916381836,
"learning_rate": 6.328947368421054e-06,
"loss": 0.3767,
"step": 1600
},
{
"epoch": 1.98,
"grad_norm": 13.72421646118164,
"learning_rate": 6.065789473684212e-06,
"loss": 0.3938,
"step": 1700
},
{
"epoch": 2.09,
"grad_norm": 10.771753311157227,
"learning_rate": 5.802631578947368e-06,
"loss": 0.2385,
"step": 1800
},
{
"epoch": 2.21,
"grad_norm": 5.737514495849609,
"learning_rate": 5.5394736842105266e-06,
"loss": 0.1885,
"step": 1900
},
{
"epoch": 2.33,
"grad_norm": 6.4449920654296875,
"learning_rate": 5.276315789473685e-06,
"loss": 0.1868,
"step": 2000
},
{
"epoch": 2.33,
"eval_cer": 17.650078844108695,
"eval_loss": 0.5885388255119324,
"eval_runtime": 693.9092,
"eval_samples_per_second": 3.685,
"eval_steps_per_second": 0.461,
"eval_wer": 45.30728017648913,
"step": 2000
},
{
"epoch": 2.44,
"grad_norm": 7.215250015258789,
"learning_rate": 5.013157894736842e-06,
"loss": 0.1873,
"step": 2100
},
{
"epoch": 2.56,
"grad_norm": 6.823380947113037,
"learning_rate": 4.75e-06,
"loss": 0.1898,
"step": 2200
},
{
"epoch": 2.67,
"grad_norm": 7.433701992034912,
"learning_rate": 4.4868421052631584e-06,
"loss": 0.1801,
"step": 2300
},
{
"epoch": 2.79,
"grad_norm": 8.947402000427246,
"learning_rate": 4.223684210526316e-06,
"loss": 0.1911,
"step": 2400
},
{
"epoch": 2.91,
"grad_norm": 4.656549453735352,
"learning_rate": 3.960526315789474e-06,
"loss": 0.1956,
"step": 2500
},
{
"epoch": 2.91,
"eval_cer": 16.69661520407789,
"eval_loss": 0.5721948146820068,
"eval_runtime": 688.2161,
"eval_samples_per_second": 3.715,
"eval_steps_per_second": 0.465,
"eval_wer": 43.98361172392058,
"step": 2500
},
{
"epoch": 3.02,
"grad_norm": 5.275825023651123,
"learning_rate": 3.6973684210526317e-06,
"loss": 0.1568,
"step": 2600
},
{
"epoch": 3.14,
"grad_norm": 3.9346418380737305,
"learning_rate": 3.43421052631579e-06,
"loss": 0.0884,
"step": 2700
},
{
"epoch": 3.26,
"grad_norm": 8.16520881652832,
"learning_rate": 3.1710526315789477e-06,
"loss": 0.0893,
"step": 2800
},
{
"epoch": 3.37,
"grad_norm": 9.721732139587402,
"learning_rate": 2.907894736842106e-06,
"loss": 0.0893,
"step": 2900
},
{
"epoch": 3.49,
"grad_norm": 4.916592597961426,
"learning_rate": 2.644736842105263e-06,
"loss": 0.0814,
"step": 3000
},
{
"epoch": 3.49,
"eval_cer": 17.04132898162749,
"eval_loss": 0.5933119058609009,
"eval_runtime": 688.3586,
"eval_samples_per_second": 3.715,
"eval_steps_per_second": 0.465,
"eval_wer": 44.28616451307911,
"step": 3000
},
{
"epoch": 3.6,
"grad_norm": 5.402080535888672,
"learning_rate": 2.381578947368421e-06,
"loss": 0.0847,
"step": 3100
},
{
"epoch": 3.72,
"grad_norm": 4.390013694763184,
"learning_rate": 2.118421052631579e-06,
"loss": 0.0902,
"step": 3200
},
{
"epoch": 3.84,
"grad_norm": 3.571608543395996,
"learning_rate": 1.855263157894737e-06,
"loss": 0.0757,
"step": 3300
},
{
"epoch": 3.95,
"grad_norm": 6.823545455932617,
"learning_rate": 1.5921052631578949e-06,
"loss": 0.0811,
"step": 3400
},
{
"epoch": 4.07,
"grad_norm": 2.958061933517456,
"learning_rate": 1.3289473684210526e-06,
"loss": 0.0554,
"step": 3500
},
{
"epoch": 4.07,
"eval_cer": 16.441135844120918,
"eval_loss": 0.6083739399909973,
"eval_runtime": 690.4893,
"eval_samples_per_second": 3.703,
"eval_steps_per_second": 0.463,
"eval_wer": 43.19571383548692,
"step": 3500
}
],
"logging_steps": 100,
"max_steps": 4000,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 8.0746195009536e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}