{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 29.998738965952082,
  "global_step": 11880,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.25,
      "learning_rate": 5.9999999999999995e-05,
      "loss": NaN,
      "step": 100
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00011999999999999999,
      "loss": NaN,
      "step": 200
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.00017999999999999998,
      "loss": NaN,
      "step": 300
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.00023999999999999998,
      "loss": NaN,
      "step": 400
    },
    {
      "epoch": 1.01,
      "eval_loss": 2.7731735706329346,
      "eval_runtime": 603.0564,
      "eval_samples_per_second": 8.576,
      "eval_wer": 1.0,
      "step": 400
    },
    {
      "epoch": 1.26,
      "learning_rate": 0.0003,
      "loss": NaN,
      "step": 500
    },
    {
      "epoch": 1.51,
      "learning_rate": 0.00029736379613356763,
      "loss": NaN,
      "step": 600
    },
    {
      "epoch": 1.77,
      "learning_rate": 0.0002947275922671353,
      "loss": NaN,
      "step": 700
    },
    {
      "epoch": 2.02,
      "learning_rate": 0.00029209138840070295,
      "loss": NaN,
      "step": 800
    },
    {
      "epoch": 2.02,
      "eval_loss": 0.2804177403450012,
      "eval_runtime": 605.6208,
      "eval_samples_per_second": 8.54,
      "eval_wer": 0.516512825876631,
      "step": 800
    },
    {
      "epoch": 2.27,
      "learning_rate": 0.0002894551845342706,
      "loss": NaN,
      "step": 900
    },
    {
      "epoch": 2.52,
      "learning_rate": 0.00028681898066783827,
      "loss": NaN,
      "step": 1000
    },
    {
      "epoch": 2.78,
      "learning_rate": 0.000284182776801406,
      "loss": NaN,
      "step": 1100
    },
    {
      "epoch": 3.03,
      "learning_rate": 0.00028154657293497364,
      "loss": NaN,
      "step": 1200
    },
    {
      "epoch": 3.03,
      "eval_loss": 0.19836615025997162,
      "eval_runtime": 613.5068,
      "eval_samples_per_second": 8.43,
      "eval_wer": 0.41406270428575165,
      "step": 1200
    },
    {
      "epoch": 3.28,
      "learning_rate": 0.00027891036906854124,
      "loss": NaN,
      "step": 1300
    },
    {
      "epoch": 3.53,
      "learning_rate": 0.00027627416520210896,
      "loss": NaN,
      "step": 1400
    },
    {
      "epoch": 3.79,
      "learning_rate": 0.0002736379613356766,
      "loss": NaN,
      "step": 1500
    },
    {
      "epoch": 4.04,
      "learning_rate": 0.00027100175746924427,
      "loss": NaN,
      "step": 1600
    },
    {
      "epoch": 4.04,
      "eval_loss": 0.18502908945083618,
      "eval_runtime": 614.3947,
      "eval_samples_per_second": 8.418,
      "eval_wer": 0.3799911094840886,
      "step": 1600
    },
    {
      "epoch": 4.29,
      "learning_rate": 0.00026836555360281193,
      "loss": NaN,
      "step": 1700
    },
    {
      "epoch": 4.54,
      "learning_rate": 0.0002657293497363796,
      "loss": NaN,
      "step": 1800
    },
    {
      "epoch": 4.8,
      "learning_rate": 0.00026309314586994725,
      "loss": NaN,
      "step": 1900
    },
    {
      "epoch": 5.05,
      "learning_rate": 0.0002604569420035149,
      "loss": NaN,
      "step": 2000
    },
    {
      "epoch": 5.05,
      "eval_loss": 0.16938243806362152,
      "eval_runtime": 609.4739,
      "eval_samples_per_second": 8.486,
      "eval_wer": 0.36521716392542425,
      "step": 2000
    },
    {
      "epoch": 5.3,
      "learning_rate": 0.00025782073813708257,
      "loss": NaN,
      "step": 2100
    },
    {
      "epoch": 5.55,
      "learning_rate": 0.0002551845342706502,
      "loss": NaN,
      "step": 2200
    },
    {
      "epoch": 5.81,
      "learning_rate": 0.00025254833040421794,
      "loss": NaN,
      "step": 2300
    },
    {
      "epoch": 6.06,
      "learning_rate": 0.00024991212653778554,
      "loss": NaN,
      "step": 2400
    },
    {
      "epoch": 6.06,
      "eval_loss": 0.17296069860458374,
      "eval_runtime": 608.666,
      "eval_samples_per_second": 8.497,
      "eval_wer": 0.35112308134822057,
      "step": 2400
    },
    {
      "epoch": 6.31,
      "learning_rate": 0.00024727592267135325,
      "loss": NaN,
      "step": 2500
    },
    {
      "epoch": 6.56,
      "learning_rate": 0.0002446397188049209,
      "loss": NaN,
      "step": 2600
    },
    {
      "epoch": 6.82,
      "learning_rate": 0.00024200351493848854,
      "loss": NaN,
      "step": 2700
    },
    {
      "epoch": 7.07,
      "learning_rate": 0.00023936731107205623,
      "loss": NaN,
      "step": 2800
    },
    {
      "epoch": 7.07,
      "eval_loss": 0.17802052199840546,
      "eval_runtime": 612.551,
      "eval_samples_per_second": 8.443,
      "eval_wer": 0.3466778233925163,
      "step": 2800
    },
    {
      "epoch": 7.32,
      "learning_rate": 0.00023673110720562386,
      "loss": NaN,
      "step": 2900
    },
    {
      "epoch": 7.58,
      "learning_rate": 0.00023409490333919155,
      "loss": NaN,
      "step": 3000
    },
    {
      "epoch": 7.83,
      "learning_rate": 0.0002314586994727592,
      "loss": NaN,
      "step": 3100
    },
    {
      "epoch": 8.08,
      "learning_rate": 0.0002288224956063269,
      "loss": NaN,
      "step": 3200
    },
    {
      "epoch": 8.08,
      "eval_loss": 0.17137496173381805,
      "eval_runtime": 621.5301,
      "eval_samples_per_second": 8.321,
      "eval_wer": 0.343304657061423,
      "step": 3200
    },
    {
      "epoch": 8.33,
      "learning_rate": 0.00022618629173989452,
      "loss": NaN,
      "step": 3300
    },
    {
      "epoch": 8.59,
      "learning_rate": 0.00022355008787346218,
      "loss": NaN,
      "step": 3400
    },
    {
      "epoch": 8.84,
      "learning_rate": 0.00022091388400702987,
      "loss": NaN,
      "step": 3500
    },
    {
      "epoch": 9.09,
      "learning_rate": 0.0002182776801405975,
      "loss": NaN,
      "step": 3600
    },
    {
      "epoch": 9.09,
      "eval_loss": 0.16793519258499146,
      "eval_runtime": 613.7107,
      "eval_samples_per_second": 8.427,
      "eval_wer": 0.3368721073137568,
      "step": 3600
    },
    {
      "epoch": 9.34,
      "learning_rate": 0.00021564147627416518,
      "loss": NaN,
      "step": 3700
    },
    {
      "epoch": 9.6,
      "learning_rate": 0.00021300527240773284,
      "loss": NaN,
      "step": 3800
    },
    {
      "epoch": 9.85,
      "learning_rate": 0.00021036906854130053,
      "loss": NaN,
      "step": 3900
    },
    {
      "epoch": 10.1,
      "learning_rate": 0.00020773286467486816,
      "loss": NaN,
      "step": 4000
    },
    {
      "epoch": 10.1,
      "eval_loss": 0.165544793009758,
      "eval_runtime": 616.3457,
      "eval_samples_per_second": 8.391,
      "eval_wer": 0.3317731349528018,
      "step": 4000
    },
    {
      "epoch": 10.35,
      "learning_rate": 0.00020509666080843582,
      "loss": NaN,
      "step": 4100
    },
    {
      "epoch": 10.61,
      "learning_rate": 0.0002024604569420035,
      "loss": NaN,
      "step": 4200
    },
    {
      "epoch": 10.86,
      "learning_rate": 0.00019982425307557116,
      "loss": NaN,
      "step": 4300
    },
    {
      "epoch": 11.11,
      "learning_rate": 0.00019718804920913882,
      "loss": NaN,
      "step": 4400
    },
    {
      "epoch": 11.11,
      "eval_loss": 0.17516674101352692,
      "eval_runtime": 616.3789,
      "eval_samples_per_second": 8.391,
      "eval_wer": 0.32222890463614257,
      "step": 4400
    },
    {
      "epoch": 11.36,
      "learning_rate": 0.00019455184534270648,
      "loss": NaN,
      "step": 4500
    },
    {
      "epoch": 11.62,
      "learning_rate": 0.00019191564147627417,
      "loss": NaN,
      "step": 4600
    },
    {
      "epoch": 11.87,
      "learning_rate": 0.0001892794376098418,
      "loss": NaN,
      "step": 4700
    },
    {
      "epoch": 12.12,
      "learning_rate": 0.00018664323374340946,
      "loss": NaN,
      "step": 4800
    },
    {
      "epoch": 12.12,
      "eval_loss": 0.18037526309490204,
      "eval_runtime": 612.0676,
      "eval_samples_per_second": 8.45,
      "eval_wer": 0.3176790523755981,
      "step": 4800
    },
    {
      "epoch": 12.37,
      "learning_rate": 0.00018400702987697714,
      "loss": NaN,
      "step": 4900
    },
    {
      "epoch": 12.63,
      "learning_rate": 0.0001813708260105448,
      "loss": NaN,
      "step": 5000
    },
    {
      "epoch": 12.88,
      "learning_rate": 0.00017873462214411246,
      "loss": NaN,
      "step": 5100
    },
    {
      "epoch": 13.13,
      "learning_rate": 0.00017609841827768012,
      "loss": NaN,
      "step": 5200
    },
    {
      "epoch": 13.13,
      "eval_loss": 0.1690378040075302,
      "eval_runtime": 616.3617,
      "eval_samples_per_second": 8.391,
      "eval_wer": 0.31728682373244776,
      "step": 5200
    },
    {
      "epoch": 13.38,
      "learning_rate": 0.0001734622144112478,
      "loss": NaN,
      "step": 5300
    },
    {
      "epoch": 13.64,
      "learning_rate": 0.00017082601054481546,
      "loss": NaN,
      "step": 5400
    },
    {
      "epoch": 13.89,
      "learning_rate": 0.0001681898066783831,
      "loss": NaN,
      "step": 5500
    },
    {
      "epoch": 14.14,
      "learning_rate": 0.00016555360281195078,
      "loss": NaN,
      "step": 5600
    },
    {
      "epoch": 14.14,
      "eval_loss": 0.17649142444133759,
      "eval_runtime": 619.1476,
      "eval_samples_per_second": 8.353,
      "eval_wer": 0.31932641267682976,
      "step": 5600
    },
    {
      "epoch": 14.39,
      "learning_rate": 0.00016291739894551844,
      "loss": NaN,
      "step": 5700
    },
    {
      "epoch": 14.65,
      "learning_rate": 0.00016028119507908612,
      "loss": NaN,
      "step": 5800
    },
    {
      "epoch": 14.9,
      "learning_rate": 0.00015764499121265376,
      "loss": NaN,
      "step": 5900
    },
    {
      "epoch": 15.15,
      "learning_rate": 0.00015500878734622144,
      "loss": NaN,
      "step": 6000
    },
    {
      "epoch": 15.15,
      "eval_loss": 0.17734038829803467,
      "eval_runtime": 622.5524,
      "eval_samples_per_second": 8.308,
      "eval_wer": 0.3148288575687054,
      "step": 6000
    },
    {
      "epoch": 15.4,
      "learning_rate": 0.0001523725834797891,
      "loss": NaN,
      "step": 6100
    },
    {
      "epoch": 15.66,
      "learning_rate": 0.00014973637961335676,
      "loss": NaN,
      "step": 6200
    },
    {
      "epoch": 15.91,
      "learning_rate": 0.00014710017574692442,
      "loss": NaN,
      "step": 6300
    },
    {
      "epoch": 16.16,
      "learning_rate": 0.00014446397188049208,
      "loss": NaN,
      "step": 6400
    },
    {
      "epoch": 16.16,
      "eval_loss": 0.187224343419075,
      "eval_runtime": 615.2372,
      "eval_samples_per_second": 8.407,
      "eval_wer": 0.3136260230630442,
      "step": 6400
    },
    {
      "epoch": 16.41,
      "learning_rate": 0.00014182776801405973,
      "loss": NaN,
      "step": 6500
    },
    {
      "epoch": 16.67,
      "learning_rate": 0.0001391915641476274,
      "loss": NaN,
      "step": 6600
    },
    {
      "epoch": 16.92,
      "learning_rate": 0.00013655536028119505,
      "loss": NaN,
      "step": 6700
    },
    {
      "epoch": 17.17,
      "learning_rate": 0.00013391915641476274,
      "loss": NaN,
      "step": 6800
    },
    {
      "epoch": 17.17,
      "eval_loss": 0.1843162626028061,
      "eval_runtime": 620.0086,
      "eval_samples_per_second": 8.342,
      "eval_wer": 0.30912846795491983,
      "step": 6800
    },
    {
      "epoch": 17.42,
      "learning_rate": 0.0001312829525483304,
      "loss": NaN,
      "step": 6900
    },
    {
      "epoch": 17.68,
      "learning_rate": 0.00012864674868189805,
      "loss": NaN,
      "step": 7000
    },
    {
      "epoch": 17.93,
      "learning_rate": 0.0001260105448154657,
      "loss": NaN,
      "step": 7100
    },
    {
      "epoch": 18.18,
      "learning_rate": 0.00012337434094903337,
      "loss": NaN,
      "step": 7200
    },
    {
      "epoch": 18.18,
      "eval_loss": 0.1754978895187378,
      "eval_runtime": 616.3908,
      "eval_samples_per_second": 8.391,
      "eval_wer": 0.3102005595795309,
      "step": 7200
    },
    {
      "epoch": 18.43,
      "learning_rate": 0.00012073813708260104,
      "loss": NaN,
      "step": 7300
    },
    {
      "epoch": 18.69,
      "learning_rate": 0.0001181019332161687,
      "loss": NaN,
      "step": 7400
    },
    {
      "epoch": 18.94,
      "learning_rate": 0.00011546572934973637,
      "loss": NaN,
      "step": 7500
    },
    {
      "epoch": 19.19,
      "learning_rate": 0.00011282952548330403,
      "loss": NaN,
      "step": 7600
    },
    {
      "epoch": 19.19,
      "eval_loss": 0.18895429372787476,
      "eval_runtime": 623.3336,
      "eval_samples_per_second": 8.297,
      "eval_wer": 0.30520618152341605,
      "step": 7600
    },
    {
      "epoch": 19.44,
      "learning_rate": 0.0001101933216168717,
      "loss": NaN,
      "step": 7700
    },
    {
      "epoch": 19.7,
      "learning_rate": 0.00010755711775043936,
      "loss": NaN,
      "step": 7800
    },
    {
      "epoch": 19.95,
      "learning_rate": 0.00010492091388400701,
      "loss": NaN,
      "step": 7900
    },
    {
      "epoch": 20.2,
      "learning_rate": 0.00010228471001757468,
      "loss": NaN,
      "step": 8000
    },
    {
      "epoch": 20.2,
      "eval_loss": 0.19008345901966095,
      "eval_runtime": 614.7127,
      "eval_samples_per_second": 8.414,
      "eval_wer": 0.30334963261250425,
      "step": 8000
    },
    {
      "epoch": 20.45,
      "learning_rate": 9.964850615114234e-05,
      "loss": NaN,
      "step": 8100
    },
    {
      "epoch": 20.71,
      "learning_rate": 9.701230228471001e-05,
      "loss": NaN,
      "step": 8200
    },
    {
      "epoch": 20.96,
      "learning_rate": 9.437609841827767e-05,
      "loss": NaN,
      "step": 8300
    },
    {
      "epoch": 21.21,
      "learning_rate": 9.173989455184534e-05,
      "loss": NaN,
      "step": 8400
    },
    {
      "epoch": 21.21,
      "eval_loss": 0.1935020387172699,
      "eval_runtime": 619.5234,
      "eval_samples_per_second": 8.348,
      "eval_wer": 0.307193473315378,
      "step": 8400
    },
    {
      "epoch": 21.46,
      "learning_rate": 8.9103690685413e-05,
      "loss": NaN,
      "step": 8500
    },
    {
      "epoch": 21.72,
      "learning_rate": 8.646748681898065e-05,
      "loss": NaN,
      "step": 8600
    },
    {
      "epoch": 21.97,
      "learning_rate": 8.383128295254832e-05,
      "loss": NaN,
      "step": 8700
    },
    {
      "epoch": 22.22,
      "learning_rate": 8.119507908611598e-05,
      "loss": NaN,
      "step": 8800
    },
    {
      "epoch": 22.22,
      "eval_loss": 0.19641530513763428,
      "eval_runtime": 611.6485,
      "eval_samples_per_second": 8.456,
      "eval_wer": 0.30502314148994586,
      "step": 8800
    },
    {
      "epoch": 22.47,
      "learning_rate": 7.855887521968365e-05,
      "loss": NaN,
      "step": 8900
    },
    {
      "epoch": 22.73,
      "learning_rate": 7.592267135325131e-05,
      "loss": NaN,
      "step": 9000
    },
    {
      "epoch": 22.98,
      "learning_rate": 7.328646748681898e-05,
      "loss": NaN,
      "step": 9100
    },
    {
      "epoch": 23.23,
      "learning_rate": 7.065026362038664e-05,
      "loss": NaN,
      "step": 9200
    },
    {
      "epoch": 23.23,
      "eval_loss": 0.19613772630691528,
      "eval_runtime": 613.3141,
      "eval_samples_per_second": 8.433,
      "eval_wer": 0.3008132207201318,
      "step": 9200
    },
    {
      "epoch": 23.48,
      "learning_rate": 6.80140597539543e-05,
      "loss": NaN,
      "step": 9300
    },
    {
      "epoch": 23.74,
      "learning_rate": 6.537785588752197e-05,
      "loss": NaN,
      "step": 9400
    },
    {
      "epoch": 23.99,
      "learning_rate": 6.274165202108963e-05,
      "loss": NaN,
      "step": 9500
    },
    {
      "epoch": 24.24,
      "learning_rate": 6.0105448154657286e-05,
      "loss": NaN,
      "step": 9600
    },
    {
      "epoch": 24.24,
      "eval_loss": 0.18839870393276215,
      "eval_runtime": 615.6693,
      "eval_samples_per_second": 8.401,
      "eval_wer": 0.29623721988337737,
      "step": 9600
    },
    {
      "epoch": 24.49,
      "learning_rate": 5.746924428822495e-05,
      "loss": NaN,
      "step": 9700
    },
    {
      "epoch": 24.75,
      "learning_rate": 5.483304042179262e-05,
      "loss": NaN,
      "step": 9800
    },
    {
      "epoch": 25.0,
      "learning_rate": 5.2196836555360276e-05,
      "loss": NaN,
      "step": 9900
    },
    {
      "epoch": 25.25,
      "learning_rate": 4.956063268892794e-05,
      "loss": NaN,
      "step": 10000
    },
    {
      "epoch": 25.25,
      "eval_loss": 0.19206073880195618,
      "eval_runtime": 610.6418,
      "eval_samples_per_second": 8.47,
      "eval_wer": 0.2963418141882175,
      "step": 10000
    },
    {
      "epoch": 25.5,
      "learning_rate": 4.6924428822495606e-05,
      "loss": NaN,
      "step": 10100
    },
    {
      "epoch": 25.76,
      "learning_rate": 4.4288224956063265e-05,
      "loss": NaN,
      "step": 10200
    },
    {
      "epoch": 26.01,
      "learning_rate": 4.165202108963093e-05,
      "loss": NaN,
      "step": 10300
    },
    {
      "epoch": 26.26,
      "learning_rate": 3.901581722319859e-05,
      "loss": NaN,
      "step": 10400
    },
    {
      "epoch": 26.26,
      "eval_loss": 0.19253745675086975,
      "eval_runtime": 622.5244,
      "eval_samples_per_second": 8.308,
      "eval_wer": 0.2921841905708234,
      "step": 10400
    },
    {
      "epoch": 26.51,
      "learning_rate": 3.6379613356766254e-05,
      "loss": NaN,
      "step": 10500
    },
    {
      "epoch": 26.77,
      "learning_rate": 3.374340949033391e-05,
      "loss": NaN,
      "step": 10600
    },
    {
      "epoch": 27.02,
      "learning_rate": 3.110720562390158e-05,
      "loss": NaN,
      "step": 10700
    },
    {
      "epoch": 27.27,
      "learning_rate": 2.847100175746924e-05,
      "loss": NaN,
      "step": 10800
    },
    {
      "epoch": 27.27,
      "eval_loss": 0.19466331601142883,
      "eval_runtime": 617.6765,
      "eval_samples_per_second": 8.373,
      "eval_wer": 0.292785607823654,
      "step": 10800
    },
    {
      "epoch": 27.52,
      "learning_rate": 2.5834797891036906e-05,
      "loss": NaN,
      "step": 10900
    },
    {
      "epoch": 27.78,
      "learning_rate": 2.3198594024604568e-05,
      "loss": NaN,
      "step": 11000
    },
    {
      "epoch": 28.03,
      "learning_rate": 2.0562390158172233e-05,
      "loss": NaN,
      "step": 11100
    },
    {
      "epoch": 28.28,
      "learning_rate": 1.792618629173989e-05,
      "loss": NaN,
      "step": 11200
    },
    {
      "epoch": 28.28,
      "eval_loss": 0.1931692361831665,
      "eval_runtime": 617.2527,
      "eval_samples_per_second": 8.379,
      "eval_wer": 0.29189655623251315,
      "step": 11200
    },
    {
      "epoch": 28.53,
      "learning_rate": 1.5289982425307557e-05,
      "loss": NaN,
      "step": 11300
    },
    {
      "epoch": 28.79,
      "learning_rate": 1.2653778558875219e-05,
      "loss": NaN,
      "step": 11400
    },
    {
      "epoch": 29.04,
      "learning_rate": 1.0017574692442883e-05,
      "loss": NaN,
      "step": 11500
    },
    {
      "epoch": 29.29,
      "learning_rate": 7.381370826010544e-06,
      "loss": NaN,
      "step": 11600
    },
    {
      "epoch": 29.29,
      "eval_loss": 0.1954003870487213,
      "eval_runtime": 617.8209,
      "eval_samples_per_second": 8.371,
      "eval_wer": 0.2904322359647517,
      "step": 11600
    },
    {
      "epoch": 29.54,
      "learning_rate": 4.745166959578207e-06,
      "loss": NaN,
      "step": 11700
    },
    {
      "epoch": 29.8,
      "learning_rate": 2.1089630931458696e-06,
      "loss": NaN,
      "step": 11800
    },
    {
      "epoch": 30.0,
      "step": 11880,
      "total_flos": 6.1923487227453555e+19,
      "train_runtime": 56662.4324,
      "train_samples_per_second": 0.21
    }
  ],
  "max_steps": 11880,
  "num_train_epochs": 30,
  "total_flos": 6.1923487227453555e+19,
  "trial_name": null,
  "trial_params": null
}