{
  "best_metric": 0.6523364485981309,
  "best_model_checkpoint": "wav2vec2-base-ft-fake-detection/checkpoint-33",
  "epoch": 9.850746268656717,
  "eval_steps": 500,
  "global_step": 330,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.29850746268656714,
      "grad_norm": 0.6689966320991516,
      "learning_rate": 9.090909090909091e-06,
      "loss": 0.696,
      "step": 10
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 0.972612738609314,
      "learning_rate": 1.8181818181818182e-05,
      "loss": 0.6649,
      "step": 20
    },
    {
      "epoch": 0.8955223880597015,
      "grad_norm": 0.47211921215057373,
      "learning_rate": 2.7272727272727273e-05,
      "loss": 0.6253,
      "step": 30
    },
    {
      "epoch": 0.9850746268656716,
      "eval_accuracy": 0.6523364485981309,
      "eval_loss": 0.6260867714881897,
      "eval_runtime": 42.0969,
      "eval_samples_per_second": 25.418,
      "eval_steps_per_second": 1.592,
      "step": 33
    },
    {
      "epoch": 1.1940298507462686,
      "grad_norm": 1.006468653678894,
      "learning_rate": 2.9292929292929294e-05,
      "loss": 0.5877,
      "step": 40
    },
    {
      "epoch": 1.4925373134328357,
      "grad_norm": 7.379218101501465,
      "learning_rate": 2.8282828282828285e-05,
      "loss": 0.4855,
      "step": 50
    },
    {
      "epoch": 1.7910447761194028,
      "grad_norm": 3.237988233566284,
      "learning_rate": 2.7272727272727273e-05,
      "loss": 0.4394,
      "step": 60
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.5644859813084112,
      "eval_loss": 0.7140001058578491,
      "eval_runtime": 39.3742,
      "eval_samples_per_second": 27.175,
      "eval_steps_per_second": 1.702,
      "step": 67
    },
    {
      "epoch": 2.08955223880597,
      "grad_norm": 2.6573326587677,
      "learning_rate": 2.6262626262626265e-05,
      "loss": 0.402,
      "step": 70
    },
    {
      "epoch": 2.388059701492537,
      "grad_norm": 3.1143898963928223,
      "learning_rate": 2.5252525252525256e-05,
      "loss": 0.3622,
      "step": 80
    },
    {
      "epoch": 2.6865671641791042,
      "grad_norm": 3.858957529067993,
      "learning_rate": 2.4242424242424244e-05,
      "loss": 0.3682,
      "step": 90
    },
    {
      "epoch": 2.9850746268656714,
      "grad_norm": 2.6959176063537598,
      "learning_rate": 2.3232323232323235e-05,
      "loss": 0.3685,
      "step": 100
    },
    {
      "epoch": 2.9850746268656714,
      "eval_accuracy": 0.5850467289719626,
      "eval_loss": 0.7180566191673279,
      "eval_runtime": 34.4048,
      "eval_samples_per_second": 31.1,
      "eval_steps_per_second": 1.947,
      "step": 100
    },
    {
      "epoch": 3.283582089552239,
      "grad_norm": 7.184162616729736,
      "learning_rate": 2.222222222222222e-05,
      "loss": 0.3469,
      "step": 110
    },
    {
      "epoch": 3.582089552238806,
      "grad_norm": 3.8342511653900146,
      "learning_rate": 2.121212121212121e-05,
      "loss": 0.3181,
      "step": 120
    },
    {
      "epoch": 3.8805970149253732,
      "grad_norm": 3.2687244415283203,
      "learning_rate": 2.0202020202020203e-05,
      "loss": 0.317,
      "step": 130
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6149532710280374,
      "eval_loss": 0.7291006445884705,
      "eval_runtime": 38.9708,
      "eval_samples_per_second": 27.456,
      "eval_steps_per_second": 1.719,
      "step": 134
    },
    {
      "epoch": 4.17910447761194,
      "grad_norm": 2.616014003753662,
      "learning_rate": 1.919191919191919e-05,
      "loss": 0.3229,
      "step": 140
    },
    {
      "epoch": 4.477611940298507,
      "grad_norm": 6.417969226837158,
      "learning_rate": 1.8181818181818182e-05,
      "loss": 0.2673,
      "step": 150
    },
    {
      "epoch": 4.776119402985074,
      "grad_norm": 6.794923782348633,
      "learning_rate": 1.717171717171717e-05,
      "loss": 0.3027,
      "step": 160
    },
    {
      "epoch": 4.985074626865671,
      "eval_accuracy": 0.6158878504672897,
      "eval_loss": 0.7457267045974731,
      "eval_runtime": 37.5396,
      "eval_samples_per_second": 28.503,
      "eval_steps_per_second": 1.785,
      "step": 167
    },
    {
      "epoch": 5.074626865671641,
      "grad_norm": 2.6645431518554688,
      "learning_rate": 1.616161616161616e-05,
      "loss": 0.2804,
      "step": 170
    },
    {
      "epoch": 5.373134328358209,
      "grad_norm": 3.8583788871765137,
      "learning_rate": 1.5151515151515153e-05,
      "loss": 0.2613,
      "step": 180
    },
    {
      "epoch": 5.6716417910447765,
      "grad_norm": 2.621940851211548,
      "learning_rate": 1.4141414141414143e-05,
      "loss": 0.2578,
      "step": 190
    },
    {
      "epoch": 5.970149253731344,
      "grad_norm": 4.610651969909668,
      "learning_rate": 1.3131313131313132e-05,
      "loss": 0.2672,
      "step": 200
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.6242990654205608,
      "eval_loss": 0.7805104851722717,
      "eval_runtime": 35.9857,
      "eval_samples_per_second": 29.734,
      "eval_steps_per_second": 1.862,
      "step": 201
    },
    {
      "epoch": 6.268656716417911,
      "grad_norm": 3.4527761936187744,
      "learning_rate": 1.2121212121212122e-05,
      "loss": 0.2166,
      "step": 210
    },
    {
      "epoch": 6.567164179104478,
      "grad_norm": 3.452103614807129,
      "learning_rate": 1.111111111111111e-05,
      "loss": 0.2513,
      "step": 220
    },
    {
      "epoch": 6.865671641791045,
      "grad_norm": 3.1861703395843506,
      "learning_rate": 1.0101010101010101e-05,
      "loss": 0.2711,
      "step": 230
    },
    {
      "epoch": 6.985074626865671,
      "eval_accuracy": 0.6214953271028038,
      "eval_loss": 0.8112902641296387,
      "eval_runtime": 36.6515,
      "eval_samples_per_second": 29.194,
      "eval_steps_per_second": 1.828,
      "step": 234
    },
    {
      "epoch": 7.164179104477612,
      "grad_norm": 4.2410993576049805,
      "learning_rate": 9.090909090909091e-06,
      "loss": 0.23,
      "step": 240
    },
    {
      "epoch": 7.462686567164179,
      "grad_norm": 2.9806125164031982,
      "learning_rate": 8.08080808080808e-06,
      "loss": 0.2123,
      "step": 250
    },
    {
      "epoch": 7.7611940298507465,
      "grad_norm": 2.821526527404785,
      "learning_rate": 7.070707070707071e-06,
      "loss": 0.2086,
      "step": 260
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.5962616822429907,
      "eval_loss": 0.9130397439002991,
      "eval_runtime": 34.0895,
      "eval_samples_per_second": 31.388,
      "eval_steps_per_second": 1.965,
      "step": 268
    },
    {
      "epoch": 8.059701492537313,
      "grad_norm": 4.247228622436523,
      "learning_rate": 6.060606060606061e-06,
      "loss": 0.2475,
      "step": 270
    },
    {
      "epoch": 8.35820895522388,
      "grad_norm": 2.185816764831543,
      "learning_rate": 5.050505050505051e-06,
      "loss": 0.208,
      "step": 280
    },
    {
      "epoch": 8.656716417910447,
      "grad_norm": 4.324727535247803,
      "learning_rate": 4.04040404040404e-06,
      "loss": 0.1971,
      "step": 290
    },
    {
      "epoch": 8.955223880597014,
      "grad_norm": 4.010946750640869,
      "learning_rate": 3.0303030303030305e-06,
      "loss": 0.2077,
      "step": 300
    },
    {
      "epoch": 8.985074626865671,
      "eval_accuracy": 0.616822429906542,
      "eval_loss": 0.9042268991470337,
      "eval_runtime": 33.3964,
      "eval_samples_per_second": 32.039,
      "eval_steps_per_second": 2.006,
      "step": 301
    },
    {
      "epoch": 9.253731343283581,
      "grad_norm": 5.064040184020996,
      "learning_rate": 2.02020202020202e-06,
      "loss": 0.2026,
      "step": 310
    },
    {
      "epoch": 9.552238805970148,
      "grad_norm": 2.497706890106201,
      "learning_rate": 1.01010101010101e-06,
      "loss": 0.202,
      "step": 320
    },
    {
      "epoch": 9.850746268656717,
      "grad_norm": 4.643800735473633,
      "learning_rate": 0.0,
      "loss": 0.223,
      "step": 330
    },
    {
      "epoch": 9.850746268656717,
      "eval_accuracy": 0.6177570093457944,
      "eval_loss": 0.8923965096473694,
      "eval_runtime": 38.1063,
      "eval_samples_per_second": 28.079,
      "eval_steps_per_second": 1.758,
      "step": 330
    },
    {
      "epoch": 9.850746268656717,
      "step": 330,
      "total_flos": 3.8258265808224e+17,
      "train_loss": 0.3278515060742696,
      "train_runtime": 1794.8751,
      "train_samples_per_second": 23.829,
      "train_steps_per_second": 0.184
    }
  ],
  "logging_steps": 10,
  "max_steps": 330,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.8258265808224e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}