{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.0350938761186173,
  "eval_steps": 5,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0007018775223723461,
      "eval_loss": 10.378314971923828,
      "eval_runtime": 5.1789,
      "eval_samples_per_second": 463.422,
      "eval_steps_per_second": 57.928,
      "step": 1
    },
    {
      "epoch": 0.002105632567117038,
      "grad_norm": 0.07256367057561874,
      "learning_rate": 3e-05,
      "loss": 10.3792,
      "step": 3
    },
    {
      "epoch": 0.00350938761186173,
      "eval_loss": 10.377887725830078,
      "eval_runtime": 5.1938,
      "eval_samples_per_second": 462.089,
      "eval_steps_per_second": 57.761,
      "step": 5
    },
    {
      "epoch": 0.004211265134234076,
      "grad_norm": 0.06716205924749374,
      "learning_rate": 6e-05,
      "loss": 10.3798,
      "step": 6
    },
    {
      "epoch": 0.0063168977013511145,
      "grad_norm": 0.07351335883140564,
      "learning_rate": 9e-05,
      "loss": 10.3792,
      "step": 9
    },
    {
      "epoch": 0.00701877522372346,
      "eval_loss": 10.376435279846191,
      "eval_runtime": 5.235,
      "eval_samples_per_second": 458.451,
      "eval_steps_per_second": 57.306,
      "step": 10
    },
    {
      "epoch": 0.008422530268468151,
      "grad_norm": 0.05700007453560829,
      "learning_rate": 9.938441702975689e-05,
      "loss": 10.3739,
      "step": 12
    },
    {
      "epoch": 0.01052816283558519,
      "grad_norm": 0.07175807654857635,
      "learning_rate": 9.619397662556435e-05,
      "loss": 10.3717,
      "step": 15
    },
    {
      "epoch": 0.01052816283558519,
      "eval_loss": 10.374283790588379,
      "eval_runtime": 5.229,
      "eval_samples_per_second": 458.977,
      "eval_steps_per_second": 57.372,
      "step": 15
    },
    {
      "epoch": 0.012633795402702229,
      "grad_norm": 0.09788131713867188,
      "learning_rate": 9.045084971874738e-05,
      "loss": 10.375,
      "step": 18
    },
    {
      "epoch": 0.01403755044744692,
      "eval_loss": 10.371757507324219,
      "eval_runtime": 5.1826,
      "eval_samples_per_second": 463.084,
      "eval_steps_per_second": 57.886,
      "step": 20
    },
    {
      "epoch": 0.014739427969819266,
      "grad_norm": 0.08976900577545166,
      "learning_rate": 8.247240241650918e-05,
      "loss": 10.3723,
      "step": 21
    },
    {
      "epoch": 0.016845060536936303,
      "grad_norm": 0.10850024223327637,
      "learning_rate": 7.269952498697734e-05,
      "loss": 10.3716,
      "step": 24
    },
    {
      "epoch": 0.01754693805930865,
      "eval_loss": 10.368788719177246,
      "eval_runtime": 5.2374,
      "eval_samples_per_second": 458.24,
      "eval_steps_per_second": 57.28,
      "step": 25
    },
    {
      "epoch": 0.018950693104053343,
      "grad_norm": 0.12474418431520462,
      "learning_rate": 6.167226819279528e-05,
      "loss": 10.3719,
      "step": 27
    },
    {
      "epoch": 0.02105632567117038,
      "grad_norm": 0.1344069540500641,
      "learning_rate": 5e-05,
      "loss": 10.3664,
      "step": 30
    },
    {
      "epoch": 0.02105632567117038,
      "eval_loss": 10.365849494934082,
      "eval_runtime": 5.3008,
      "eval_samples_per_second": 452.766,
      "eval_steps_per_second": 56.596,
      "step": 30
    },
    {
      "epoch": 0.023161958238287417,
      "grad_norm": 0.12643229961395264,
      "learning_rate": 3.832773180720475e-05,
      "loss": 10.3652,
      "step": 33
    },
    {
      "epoch": 0.02456571328303211,
      "eval_loss": 10.363306999206543,
      "eval_runtime": 5.2135,
      "eval_samples_per_second": 460.341,
      "eval_steps_per_second": 57.543,
      "step": 35
    },
    {
      "epoch": 0.025267590805404458,
      "grad_norm": 0.12015967071056366,
      "learning_rate": 2.7300475013022663e-05,
      "loss": 10.366,
      "step": 36
    },
    {
      "epoch": 0.027373223372521495,
      "grad_norm": 0.1509571224451065,
      "learning_rate": 1.7527597583490822e-05,
      "loss": 10.3625,
      "step": 39
    },
    {
      "epoch": 0.02807510089489384,
      "eval_loss": 10.361713409423828,
      "eval_runtime": 5.2202,
      "eval_samples_per_second": 459.755,
      "eval_steps_per_second": 57.469,
      "step": 40
    },
    {
      "epoch": 0.029478855939638532,
      "grad_norm": 0.15704847872257233,
      "learning_rate": 9.549150281252633e-06,
      "loss": 10.3618,
      "step": 42
    },
    {
      "epoch": 0.03158448850675557,
      "grad_norm": 0.1968822330236435,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 10.3589,
      "step": 45
    },
    {
      "epoch": 0.03158448850675557,
      "eval_loss": 10.36101245880127,
      "eval_runtime": 5.1901,
      "eval_samples_per_second": 462.416,
      "eval_steps_per_second": 57.802,
      "step": 45
    },
    {
      "epoch": 0.033690121073872606,
      "grad_norm": 0.18043430149555206,
      "learning_rate": 6.15582970243117e-07,
      "loss": 10.3625,
      "step": 48
    },
    {
      "epoch": 0.0350938761186173,
      "eval_loss": 10.3608980178833,
      "eval_runtime": 5.2384,
      "eval_samples_per_second": 458.151,
      "eval_steps_per_second": 57.269,
      "step": 50
    }
  ],
  "logging_steps": 3,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5828522016768.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}