{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 100.0,
  "global_step": 4400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 2.27,
      "learning_rate": 1.3579999999999999e-05,
      "loss": 11.686,
      "step": 100
    },
    {
      "epoch": 4.55,
      "learning_rate": 2.758e-05,
      "loss": 4.1183,
      "step": 200
    },
    {
      "epoch": 6.82,
      "learning_rate": 4.157999999999999e-05,
      "loss": 3.0783,
      "step": 300
    },
    {
      "epoch": 9.09,
      "learning_rate": 5.558e-05,
      "loss": 2.7958,
      "step": 400
    },
    {
      "epoch": 11.36,
      "learning_rate": 6.958e-05,
      "loss": 2.7064,
      "step": 500
    },
    {
      "epoch": 11.36,
      "eval_loss": 2.711225986480713,
      "eval_runtime": 6.87,
      "eval_samples_per_second": 21.689,
      "eval_steps_per_second": 0.728,
      "eval_wer": 1.0,
      "step": 500
    },
    {
      "epoch": 13.64,
      "learning_rate": 6.825897435897435e-05,
      "loss": 2.6552,
      "step": 600
    },
    {
      "epoch": 15.91,
      "learning_rate": 6.646410256410256e-05,
      "loss": 2.5236,
      "step": 700
    },
    {
      "epoch": 18.18,
      "learning_rate": 6.466923076923076e-05,
      "loss": 1.8116,
      "step": 800
    },
    {
      "epoch": 20.45,
      "learning_rate": 6.287435897435897e-05,
      "loss": 1.4245,
      "step": 900
    },
    {
      "epoch": 22.73,
      "learning_rate": 6.107948717948717e-05,
      "loss": 1.3079,
      "step": 1000
    },
    {
      "epoch": 22.73,
      "eval_loss": 0.7336559891700745,
      "eval_runtime": 6.7043,
      "eval_samples_per_second": 22.225,
      "eval_steps_per_second": 0.746,
      "eval_wer": 0.7775876817792986,
      "step": 1000
    },
    {
      "epoch": 25.0,
      "learning_rate": 5.928461538461538e-05,
      "loss": 1.2335,
      "step": 1100
    },
    {
      "epoch": 27.27,
      "learning_rate": 5.748974358974358e-05,
      "loss": 1.1818,
      "step": 1200
    },
    {
      "epoch": 29.55,
      "learning_rate": 5.569487179487179e-05,
      "loss": 1.1441,
      "step": 1300
    },
    {
      "epoch": 31.82,
      "learning_rate": 5.3899999999999996e-05,
      "loss": 1.1166,
      "step": 1400
    },
    {
      "epoch": 34.09,
      "learning_rate": 5.21051282051282e-05,
      "loss": 1.0919,
      "step": 1500
    },
    {
      "epoch": 34.09,
      "eval_loss": 0.5937860608100891,
      "eval_runtime": 6.763,
      "eval_samples_per_second": 22.032,
      "eval_steps_per_second": 0.739,
      "eval_wer": 0.7023096663815227,
      "step": 1500
    },
    {
      "epoch": 36.36,
      "learning_rate": 5.03102564102564e-05,
      "loss": 1.0684,
      "step": 1600
    },
    {
      "epoch": 38.64,
      "learning_rate": 4.853333333333333e-05,
      "loss": 1.0238,
      "step": 1700
    },
    {
      "epoch": 40.91,
      "learning_rate": 4.6738461538461535e-05,
      "loss": 1.0182,
      "step": 1800
    },
    {
      "epoch": 43.18,
      "learning_rate": 4.494358974358974e-05,
      "loss": 0.9795,
      "step": 1900
    },
    {
      "epoch": 45.45,
      "learning_rate": 4.314871794871794e-05,
      "loss": 0.9546,
      "step": 2000
    },
    {
      "epoch": 45.45,
      "eval_loss": 0.5698409080505371,
      "eval_runtime": 6.6992,
      "eval_samples_per_second": 22.241,
      "eval_steps_per_second": 0.746,
      "eval_wer": 0.613344739093242,
      "step": 2000
    },
    {
      "epoch": 47.73,
      "learning_rate": 4.135384615384616e-05,
      "loss": 0.9478,
      "step": 2100
    },
    {
      "epoch": 50.0,
      "learning_rate": 3.955897435897436e-05,
      "loss": 0.9329,
      "step": 2200
    },
    {
      "epoch": 52.27,
      "learning_rate": 3.776410256410256e-05,
      "loss": 0.9001,
      "step": 2300
    },
    {
      "epoch": 54.55,
      "learning_rate": 3.5969230769230765e-05,
      "loss": 0.8957,
      "step": 2400
    },
    {
      "epoch": 56.82,
      "learning_rate": 3.417435897435897e-05,
      "loss": 0.8895,
      "step": 2500
    },
    {
      "epoch": 56.82,
      "eval_loss": 0.5738843083381653,
      "eval_runtime": 6.762,
      "eval_samples_per_second": 22.035,
      "eval_steps_per_second": 0.739,
      "eval_wer": 0.6142001710863987,
      "step": 2500
    },
    {
      "epoch": 59.09,
      "learning_rate": 3.237948717948718e-05,
      "loss": 0.8718,
      "step": 2600
    },
    {
      "epoch": 61.36,
      "learning_rate": 3.058461538461538e-05,
      "loss": 0.8642,
      "step": 2700
    },
    {
      "epoch": 63.64,
      "learning_rate": 2.878974358974359e-05,
      "loss": 0.8443,
      "step": 2800
    },
    {
      "epoch": 65.91,
      "learning_rate": 2.6994871794871793e-05,
      "loss": 0.8272,
      "step": 2900
    },
    {
      "epoch": 68.18,
      "learning_rate": 2.5199999999999996e-05,
      "loss": 0.8152,
      "step": 3000
    },
    {
      "epoch": 68.18,
      "eval_loss": 0.5578901171684265,
      "eval_runtime": 6.7178,
      "eval_samples_per_second": 22.18,
      "eval_steps_per_second": 0.744,
      "eval_wer": 0.6090675791274593,
      "step": 3000
    },
    {
      "epoch": 70.45,
      "learning_rate": 2.3405128205128206e-05,
      "loss": 0.8141,
      "step": 3100
    },
    {
      "epoch": 72.73,
      "learning_rate": 2.161025641025641e-05,
      "loss": 0.8087,
      "step": 3200
    },
    {
      "epoch": 75.0,
      "learning_rate": 1.9833333333333332e-05,
      "loss": 0.7897,
      "step": 3300
    },
    {
      "epoch": 77.27,
      "learning_rate": 1.8038461538461535e-05,
      "loss": 0.7837,
      "step": 3400
    },
    {
      "epoch": 79.55,
      "learning_rate": 1.624358974358974e-05,
      "loss": 0.7703,
      "step": 3500
    },
    {
      "epoch": 79.55,
      "eval_loss": 0.5813038349151611,
      "eval_runtime": 6.7449,
      "eval_samples_per_second": 22.091,
      "eval_steps_per_second": 0.741,
      "eval_wer": 0.621043627031651,
      "step": 3500
    },
    {
      "epoch": 81.82,
      "learning_rate": 1.4448717948717947e-05,
      "loss": 0.7612,
      "step": 3600
    },
    {
      "epoch": 84.09,
      "learning_rate": 1.2653846153846152e-05,
      "loss": 0.7575,
      "step": 3700
    },
    {
      "epoch": 86.36,
      "learning_rate": 1.0858974358974358e-05,
      "loss": 0.7483,
      "step": 3800
    },
    {
      "epoch": 88.64,
      "learning_rate": 9.064102564102564e-06,
      "loss": 0.7496,
      "step": 3900
    },
    {
      "epoch": 90.91,
      "learning_rate": 7.269230769230769e-06,
      "loss": 0.732,
      "step": 4000
    },
    {
      "epoch": 90.91,
      "eval_loss": 0.5756056308746338,
      "eval_runtime": 6.9674,
      "eval_samples_per_second": 21.385,
      "eval_steps_per_second": 0.718,
      "eval_wer": 0.5859709153122327,
      "step": 4000
    },
    {
      "epoch": 93.18,
      "learning_rate": 5.4743589743589735e-06,
      "loss": 0.7262,
      "step": 4100
    },
    {
      "epoch": 95.45,
      "learning_rate": 3.6794871794871793e-06,
      "loss": 0.7307,
      "step": 4200
    },
    {
      "epoch": 97.73,
      "learning_rate": 1.8846153846153845e-06,
      "loss": 0.7265,
      "step": 4300
    },
    {
      "epoch": 100.0,
      "learning_rate": 1.0769230769230769e-07,
      "loss": 0.7226,
      "step": 4400
    },
    {
      "epoch": 100.0,
      "step": 4400,
      "total_flos": 1.9662171121467834e+19,
      "train_loss": 1.4620501448891379,
      "train_runtime": 7639.7148,
      "train_samples_per_second": 18.142,
      "train_steps_per_second": 0.576
    }
  ],
  "max_steps": 4400,
  "num_train_epochs": 100,
  "total_flos": 1.9662171121467834e+19,
  "trial_name": null,
  "trial_params": null
}