{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.38476337052712584,
  "eval_steps": 500,
  "global_step": 6000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03206361421059382,
      "grad_norm": 10.316516876220703,
      "learning_rate": 0.0002465,
      "loss": 4.7051,
      "step": 500
    },
    {
      "epoch": 0.03206361421059382,
      "eval_loss": 1.6561740636825562,
      "eval_runtime": 191.0416,
      "eval_samples_per_second": 36.756,
      "eval_steps_per_second": 2.298,
      "eval_wer": 0.9354894431230816,
      "step": 500
    },
    {
      "epoch": 0.06412722842118763,
      "grad_norm": 4.912766933441162,
      "learning_rate": 0.00027816666666666663,
      "loss": 1.0362,
      "step": 1000
    },
    {
      "epoch": 0.06412722842118763,
      "eval_loss": 1.1839842796325684,
      "eval_runtime": 175.5788,
      "eval_samples_per_second": 39.993,
      "eval_steps_per_second": 2.5,
      "eval_wer": 0.7977517639086356,
      "step": 1000
    },
    {
      "epoch": 0.09619084263178146,
      "grad_norm": 4.178534030914307,
      "learning_rate": 0.00025038888888888886,
      "loss": 0.811,
      "step": 1500
    },
    {
      "epoch": 0.09619084263178146,
      "eval_loss": 1.0081175565719604,
      "eval_runtime": 175.5534,
      "eval_samples_per_second": 39.999,
      "eval_steps_per_second": 2.501,
      "eval_wer": 0.7174690070290597,
      "step": 1500
    },
    {
      "epoch": 0.12825445684237527,
      "grad_norm": 7.957609176635742,
      "learning_rate": 0.0002226111111111111,
      "loss": 0.6903,
      "step": 2000
    },
    {
      "epoch": 0.12825445684237527,
      "eval_loss": 0.8935067057609558,
      "eval_runtime": 176.1028,
      "eval_samples_per_second": 39.874,
      "eval_steps_per_second": 2.493,
      "eval_wer": 0.6401094885661516,
      "step": 2000
    },
    {
      "epoch": 0.16031807105296908,
      "grad_norm": 6.541042327880859,
      "learning_rate": 0.00019483333333333332,
      "loss": 0.6238,
      "step": 2500
    },
    {
      "epoch": 0.16031807105296908,
      "eval_loss": 0.8059775233268738,
      "eval_runtime": 177.0587,
      "eval_samples_per_second": 39.659,
      "eval_steps_per_second": 2.479,
      "eval_wer": 0.5848868573858276,
      "step": 2500
    },
    {
      "epoch": 0.19238168526356292,
      "grad_norm": 5.036514759063721,
      "learning_rate": 0.00016705555555555554,
      "loss": 0.5649,
      "step": 3000
    },
    {
      "epoch": 0.19238168526356292,
      "eval_loss": 0.7770201563835144,
      "eval_runtime": 178.1454,
      "eval_samples_per_second": 39.417,
      "eval_steps_per_second": 2.464,
      "eval_wer": 0.5589231852668783,
      "step": 3000
    },
    {
      "epoch": 0.22444529947415673,
      "grad_norm": 4.140881538391113,
      "learning_rate": 0.00013927777777777777,
      "loss": 0.5309,
      "step": 3500
    },
    {
      "epoch": 0.22444529947415673,
      "eval_loss": 0.7264481782913208,
      "eval_runtime": 176.7301,
      "eval_samples_per_second": 39.733,
      "eval_steps_per_second": 2.484,
      "eval_wer": 0.5326539018589139,
      "step": 3500
    },
    {
      "epoch": 0.25650891368475054,
      "grad_norm": 6.205782413482666,
      "learning_rate": 0.00011155555555555555,
      "loss": 0.4892,
      "step": 4000
    },
    {
      "epoch": 0.25650891368475054,
      "eval_loss": 0.6865007877349854,
      "eval_runtime": 178.7785,
      "eval_samples_per_second": 39.278,
      "eval_steps_per_second": 2.456,
      "eval_wer": 0.5106366016024662,
      "step": 4000
    },
    {
      "epoch": 0.2885725278953444,
      "grad_norm": 9.618443489074707,
      "learning_rate": 8.388888888888888e-05,
      "loss": 0.4521,
      "step": 4500
    },
    {
      "epoch": 0.2885725278953444,
      "eval_loss": 0.6477864980697632,
      "eval_runtime": 177.7072,
      "eval_samples_per_second": 39.514,
      "eval_steps_per_second": 2.47,
      "eval_wer": 0.4861345486918508,
      "step": 4500
    },
    {
      "epoch": 0.32063614210593816,
      "grad_norm": 8.743291854858398,
      "learning_rate": 5.61111111111111e-05,
      "loss": 0.4309,
      "step": 5000
    },
    {
      "epoch": 0.32063614210593816,
      "eval_loss": 0.6221807599067688,
      "eval_runtime": 177.9115,
      "eval_samples_per_second": 39.469,
      "eval_steps_per_second": 2.468,
      "eval_wer": 0.4763416999960138,
      "step": 5000
    },
    {
      "epoch": 0.352699756316532,
      "grad_norm": 8.870036125183105,
      "learning_rate": 2.833333333333333e-05,
      "loss": 0.4055,
      "step": 5500
    },
    {
      "epoch": 0.352699756316532,
      "eval_loss": 0.5988152027130127,
      "eval_runtime": 178.6515,
      "eval_samples_per_second": 39.306,
      "eval_steps_per_second": 2.457,
      "eval_wer": 0.45257045668956536,
      "step": 5500
    },
    {
      "epoch": 0.38476337052712584,
      "grad_norm": 14.018965721130371,
      "learning_rate": 6.11111111111111e-07,
      "loss": 0.3896,
      "step": 6000
    },
    {
      "epoch": 0.38476337052712584,
      "eval_loss": 0.5865370631217957,
      "eval_runtime": 176.8507,
      "eval_samples_per_second": 39.706,
      "eval_steps_per_second": 2.482,
      "eval_wer": 0.44523578575319894,
      "step": 6000
    },
    {
      "epoch": 0.38476337052712584,
      "step": 6000,
      "total_flos": 6.835515215409967e+18,
      "train_loss": 0.9274428431193034,
      "train_runtime": 4544.4955,
      "train_samples_per_second": 10.562,
      "train_steps_per_second": 1.32
    }
  ],
  "logging_steps": 500,
  "max_steps": 6000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.835515215409967e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}