{
  "best_metric": 0.7155137062072754,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 3.1870967741935483,
  "eval_steps": 25,
  "global_step": 29,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1032258064516129,
      "grad_norm": 32.42584228515625,
      "learning_rate": 5e-05,
      "loss": 9.994,
      "step": 1
    },
    {
      "epoch": 0.1032258064516129,
      "eval_loss": 10.080071449279785,
      "eval_runtime": 1.4018,
      "eval_samples_per_second": 46.368,
      "eval_steps_per_second": 6.42,
      "step": 1
    },
    {
      "epoch": 0.2064516129032258,
      "grad_norm": 31.778467178344727,
      "learning_rate": 0.0001,
      "loss": 10.2471,
      "step": 2
    },
    {
      "epoch": 0.3096774193548387,
      "grad_norm": 28.872024536132812,
      "learning_rate": 9.966191788709716e-05,
      "loss": 8.9676,
      "step": 3
    },
    {
      "epoch": 0.4129032258064516,
      "grad_norm": 24.91484832763672,
      "learning_rate": 9.865224352899119e-05,
      "loss": 5.6914,
      "step": 4
    },
    {
      "epoch": 0.5161290322580645,
      "grad_norm": 19.651994705200195,
      "learning_rate": 9.698463103929542e-05,
      "loss": 3.4858,
      "step": 5
    },
    {
      "epoch": 0.6193548387096774,
      "grad_norm": 12.760610580444336,
      "learning_rate": 9.468163201617062e-05,
      "loss": 2.0323,
      "step": 6
    },
    {
      "epoch": 0.7225806451612903,
      "grad_norm": 8.97213363647461,
      "learning_rate": 9.177439057064683e-05,
      "loss": 1.2647,
      "step": 7
    },
    {
      "epoch": 0.8258064516129032,
      "grad_norm": 7.663300514221191,
      "learning_rate": 8.83022221559489e-05,
      "loss": 1.0998,
      "step": 8
    },
    {
      "epoch": 0.9290322580645162,
      "grad_norm": 4.413909912109375,
      "learning_rate": 8.43120818934367e-05,
      "loss": 0.8302,
      "step": 9
    },
    {
      "epoch": 1.096774193548387,
      "grad_norm": 7.911396503448486,
      "learning_rate": 7.985792958513931e-05,
      "loss": 1.5907,
      "step": 10
    },
    {
      "epoch": 1.2,
      "grad_norm": 1.8574804067611694,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.8178,
      "step": 11
    },
    {
      "epoch": 1.303225806451613,
      "grad_norm": 2.2993381023406982,
      "learning_rate": 6.980398830195785e-05,
      "loss": 0.7621,
      "step": 12
    },
    {
      "epoch": 1.4064516129032258,
      "grad_norm": 1.3951315879821777,
      "learning_rate": 6.434016163555452e-05,
      "loss": 0.8151,
      "step": 13
    },
    {
      "epoch": 1.5096774193548388,
      "grad_norm": 1.6730773448944092,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.7631,
      "step": 14
    },
    {
      "epoch": 1.6129032258064515,
      "grad_norm": 2.9503276348114014,
      "learning_rate": 5.290724144552379e-05,
      "loss": 0.831,
      "step": 15
    },
    {
      "epoch": 1.7161290322580647,
      "grad_norm": 2.8496549129486084,
      "learning_rate": 4.709275855447621e-05,
      "loss": 0.7962,
      "step": 16
    },
    {
      "epoch": 1.8193548387096774,
      "grad_norm": 2.765697479248047,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.7946,
      "step": 17
    },
    {
      "epoch": 1.9225806451612903,
      "grad_norm": 2.062514066696167,
      "learning_rate": 3.5659838364445505e-05,
      "loss": 0.7552,
      "step": 18
    },
    {
      "epoch": 2.0903225806451613,
      "grad_norm": 5.532806396484375,
      "learning_rate": 3.019601169804216e-05,
      "loss": 1.3519,
      "step": 19
    },
    {
      "epoch": 2.193548387096774,
      "grad_norm": 1.1353923082351685,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.7581,
      "step": 20
    },
    {
      "epoch": 2.296774193548387,
      "grad_norm": 1.3633109331130981,
      "learning_rate": 2.0142070414860704e-05,
      "loss": 0.7257,
      "step": 21
    },
    {
      "epoch": 2.4,
      "grad_norm": 3.0388028621673584,
      "learning_rate": 1.5687918106563326e-05,
      "loss": 0.8124,
      "step": 22
    },
    {
      "epoch": 2.5032258064516126,
      "grad_norm": 1.115830421447754,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.7593,
      "step": 23
    },
    {
      "epoch": 2.606451612903226,
      "grad_norm": 3.475860118865967,
      "learning_rate": 8.225609429353187e-06,
      "loss": 0.8125,
      "step": 24
    },
    {
      "epoch": 2.709677419354839,
      "grad_norm": 1.7441613674163818,
      "learning_rate": 5.318367983829392e-06,
      "loss": 0.787,
      "step": 25
    },
    {
      "epoch": 2.709677419354839,
      "eval_loss": 0.7155137062072754,
      "eval_runtime": 1.3781,
      "eval_samples_per_second": 47.165,
      "eval_steps_per_second": 6.531,
      "step": 25
    },
    {
      "epoch": 2.8129032258064517,
      "grad_norm": 1.4053550958633423,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.8174,
      "step": 26
    },
    {
      "epoch": 2.9161290322580644,
      "grad_norm": 2.1084353923797607,
      "learning_rate": 1.3477564710088098e-06,
      "loss": 0.7691,
      "step": 27
    },
    {
      "epoch": 3.0838709677419356,
      "grad_norm": 5.000555515289307,
      "learning_rate": 3.380821129028489e-07,
      "loss": 1.2892,
      "step": 28
    },
    {
      "epoch": 3.1870967741935483,
      "grad_norm": 2.501575469970703,
      "learning_rate": 0.0,
      "loss": 0.8028,
      "step": 29
    }
  ],
  "logging_steps": 1,
  "max_steps": 29,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.541648635271578e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}