{
  "best_metric": 0.7102803738317757,
  "best_model_checkpoint": "wav2vec2-large-ft-fake-detection/checkpoint-301",
  "epoch": 9.850746268656717,
  "eval_steps": 500,
  "global_step": 330,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.29850746268656714,
      "grad_norm": 0.7566494941711426,
      "learning_rate": 9.090909090909091e-06,
      "loss": 0.6829,
      "step": 10
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 0.6273395419120789,
      "learning_rate": 1.8181818181818182e-05,
      "loss": 0.6512,
      "step": 20
    },
    {
      "epoch": 0.8955223880597015,
      "grad_norm": 1.1124577522277832,
      "learning_rate": 2.7272727272727273e-05,
      "loss": 0.6274,
      "step": 30
    },
    {
      "epoch": 0.9850746268656716,
      "eval_accuracy": 0.6205607476635514,
      "eval_loss": 0.62535560131073,
      "eval_runtime": 39.1441,
      "eval_samples_per_second": 27.335,
      "eval_steps_per_second": 1.712,
      "step": 33
    },
    {
      "epoch": 1.1940298507462686,
      "grad_norm": 1.8781670331954956,
      "learning_rate": 2.9292929292929294e-05,
      "loss": 0.577,
      "step": 40
    },
    {
      "epoch": 1.4925373134328357,
      "grad_norm": 6.504702568054199,
      "learning_rate": 2.8282828282828285e-05,
      "loss": 0.5106,
      "step": 50
    },
    {
      "epoch": 1.7910447761194028,
      "grad_norm": 2.83394455909729,
      "learning_rate": 2.7272727272727273e-05,
      "loss": 0.4961,
      "step": 60
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.6158878504672897,
      "eval_loss": 0.947680652141571,
      "eval_runtime": 43.4278,
      "eval_samples_per_second": 24.639,
      "eval_steps_per_second": 1.543,
      "step": 67
    },
    {
      "epoch": 2.08955223880597,
      "grad_norm": 2.453080177307129,
      "learning_rate": 2.6262626262626265e-05,
      "loss": 0.4237,
      "step": 70
    },
    {
      "epoch": 2.388059701492537,
      "grad_norm": 2.8955702781677246,
      "learning_rate": 2.5252525252525256e-05,
      "loss": 0.3841,
      "step": 80
    },
    {
      "epoch": 2.6865671641791042,
      "grad_norm": 2.1122732162475586,
      "learning_rate": 2.4242424242424244e-05,
      "loss": 0.3749,
      "step": 90
    },
    {
      "epoch": 2.9850746268656714,
      "grad_norm": 3.88417649269104,
      "learning_rate": 2.3232323232323235e-05,
      "loss": 0.3391,
      "step": 100
    },
    {
      "epoch": 2.9850746268656714,
      "eval_accuracy": 0.6411214953271028,
      "eval_loss": 0.9273380041122437,
      "eval_runtime": 39.2854,
      "eval_samples_per_second": 27.237,
      "eval_steps_per_second": 1.705,
      "step": 100
    },
    {
      "epoch": 3.283582089552239,
      "grad_norm": 2.406618595123291,
      "learning_rate": 2.222222222222222e-05,
      "loss": 0.3291,
      "step": 110
    },
    {
      "epoch": 3.582089552238806,
      "grad_norm": 2.385477066040039,
      "learning_rate": 2.121212121212121e-05,
      "loss": 0.3334,
      "step": 120
    },
    {
      "epoch": 3.8805970149253732,
      "grad_norm": 5.02505350112915,
      "learning_rate": 2.0202020202020203e-05,
      "loss": 0.2857,
      "step": 130
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6616822429906543,
      "eval_loss": 0.6611053347587585,
      "eval_runtime": 39.7207,
      "eval_samples_per_second": 26.938,
      "eval_steps_per_second": 1.687,
      "step": 134
    },
    {
      "epoch": 4.17910447761194,
      "grad_norm": 2.3081231117248535,
      "learning_rate": 1.919191919191919e-05,
      "loss": 0.3724,
      "step": 140
    },
    {
      "epoch": 4.477611940298507,
      "grad_norm": 3.455447196960449,
      "learning_rate": 1.8181818181818182e-05,
      "loss": 0.2929,
      "step": 150
    },
    {
      "epoch": 4.776119402985074,
      "grad_norm": 3.234449625015259,
      "learning_rate": 1.717171717171717e-05,
      "loss": 0.3186,
      "step": 160
    },
    {
      "epoch": 4.985074626865671,
      "eval_accuracy": 0.6214953271028038,
      "eval_loss": 0.7654162049293518,
      "eval_runtime": 40.6872,
      "eval_samples_per_second": 26.298,
      "eval_steps_per_second": 1.647,
      "step": 167
    },
    {
      "epoch": 5.074626865671641,
      "grad_norm": 1.7067972421646118,
      "learning_rate": 1.616161616161616e-05,
      "loss": 0.308,
      "step": 170
    },
    {
      "epoch": 5.373134328358209,
      "grad_norm": 2.3431663513183594,
      "learning_rate": 1.5151515151515153e-05,
      "loss": 0.2597,
      "step": 180
    },
    {
      "epoch": 5.6716417910447765,
      "grad_norm": 5.564834117889404,
      "learning_rate": 1.4141414141414143e-05,
      "loss": 0.2341,
      "step": 190
    },
    {
      "epoch": 5.970149253731344,
      "grad_norm": 4.282162189483643,
      "learning_rate": 1.3131313131313132e-05,
      "loss": 0.2483,
      "step": 200
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.6224299065420561,
      "eval_loss": 0.9394506216049194,
      "eval_runtime": 37.7276,
      "eval_samples_per_second": 28.361,
      "eval_steps_per_second": 1.776,
      "step": 201
    },
    {
      "epoch": 6.268656716417911,
      "grad_norm": 3.2581288814544678,
      "learning_rate": 1.2121212121212122e-05,
      "loss": 0.2363,
      "step": 210
    },
    {
      "epoch": 6.567164179104478,
      "grad_norm": 3.974890947341919,
      "learning_rate": 1.111111111111111e-05,
      "loss": 0.2808,
      "step": 220
    },
    {
      "epoch": 6.865671641791045,
      "grad_norm": 2.1842823028564453,
      "learning_rate": 1.0101010101010101e-05,
      "loss": 0.239,
      "step": 230
    },
    {
      "epoch": 6.985074626865671,
      "eval_accuracy": 0.6542056074766355,
      "eval_loss": 0.8366522192955017,
      "eval_runtime": 42.0615,
      "eval_samples_per_second": 25.439,
      "eval_steps_per_second": 1.593,
      "step": 234
    },
    {
      "epoch": 7.164179104477612,
      "grad_norm": 2.455822706222534,
      "learning_rate": 9.090909090909091e-06,
      "loss": 0.2156,
      "step": 240
    },
    {
      "epoch": 7.462686567164179,
      "grad_norm": 4.788252830505371,
      "learning_rate": 8.08080808080808e-06,
      "loss": 0.2297,
      "step": 250
    },
    {
      "epoch": 7.7611940298507465,
      "grad_norm": 3.2945239543914795,
      "learning_rate": 7.070707070707071e-06,
      "loss": 0.2049,
      "step": 260
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.685981308411215,
      "eval_loss": 0.7708710432052612,
      "eval_runtime": 43.0532,
      "eval_samples_per_second": 24.853,
      "eval_steps_per_second": 1.556,
      "step": 268
    },
    {
      "epoch": 8.059701492537313,
      "grad_norm": 3.367969512939453,
      "learning_rate": 6.060606060606061e-06,
      "loss": 0.2485,
      "step": 270
    },
    {
      "epoch": 8.35820895522388,
      "grad_norm": 2.869590997695923,
      "learning_rate": 5.050505050505051e-06,
      "loss": 0.2318,
      "step": 280
    },
    {
      "epoch": 8.656716417910447,
      "grad_norm": 3.1148149967193604,
      "learning_rate": 4.04040404040404e-06,
      "loss": 0.2058,
      "step": 290
    },
    {
      "epoch": 8.955223880597014,
      "grad_norm": 4.1323065757751465,
      "learning_rate": 3.0303030303030305e-06,
      "loss": 0.224,
      "step": 300
    },
    {
      "epoch": 8.985074626865671,
      "eval_accuracy": 0.7102803738317757,
      "eval_loss": 0.669384777545929,
      "eval_runtime": 42.9166,
      "eval_samples_per_second": 24.932,
      "eval_steps_per_second": 1.561,
      "step": 301
    },
    {
      "epoch": 9.253731343283581,
      "grad_norm": 3.609107494354248,
      "learning_rate": 2.02020202020202e-06,
      "loss": 0.2291,
      "step": 310
    },
    {
      "epoch": 9.552238805970148,
      "grad_norm": 2.175992250442505,
      "learning_rate": 1.01010101010101e-06,
      "loss": 0.2151,
      "step": 320
    },
    {
      "epoch": 9.850746268656717,
      "grad_norm": 3.807249069213867,
      "learning_rate": 0.0,
      "loss": 0.2279,
      "step": 330
    },
    {
      "epoch": 9.850746268656717,
      "eval_accuracy": 0.6822429906542056,
      "eval_loss": 0.786749541759491,
      "eval_runtime": 39.843,
      "eval_samples_per_second": 26.855,
      "eval_steps_per_second": 1.682,
      "step": 330
    },
    {
      "epoch": 9.850746268656717,
      "step": 330,
      "total_flos": 1.277142970631616e+18,
      "train_loss": 0.33447390686381945,
      "train_runtime": 2006.3999,
      "train_samples_per_second": 21.317,
      "train_steps_per_second": 0.164
    }
  ],
  "logging_steps": 10,
  "max_steps": 330,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.277142970631616e+18,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}