{
  "best_metric": 1.7455496788024902,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 2.1382978723404253,
  "eval_steps": 25,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0851063829787234,
      "grad_norm": 0.337377667427063,
      "learning_rate": 5e-05,
      "loss": 1.543,
      "step": 1
    },
    {
      "epoch": 0.0851063829787234,
      "eval_loss": 2.8193228244781494,
      "eval_runtime": 1.9155,
      "eval_samples_per_second": 26.103,
      "eval_steps_per_second": 6.787,
      "step": 1
    },
    {
      "epoch": 0.1702127659574468,
      "grad_norm": 1.0103968381881714,
      "learning_rate": 0.0001,
      "loss": 2.3898,
      "step": 2
    },
    {
      "epoch": 0.2553191489361702,
      "grad_norm": 0.40538832545280457,
      "learning_rate": 9.980803793327656e-05,
      "loss": 1.3125,
      "step": 3
    },
    {
      "epoch": 0.3404255319148936,
      "grad_norm": 0.30337655544281006,
      "learning_rate": 9.923378948577559e-05,
      "loss": 1.6227,
      "step": 4
    },
    {
      "epoch": 0.425531914893617,
      "grad_norm": 2.6769063472747803,
      "learning_rate": 9.828215394277687e-05,
      "loss": 2.5671,
      "step": 5
    },
    {
      "epoch": 0.5106382978723404,
      "grad_norm": 0.2441849261522293,
      "learning_rate": 9.6961250323196e-05,
      "loss": 1.1992,
      "step": 6
    },
    {
      "epoch": 0.5957446808510638,
      "grad_norm": 0.3681361973285675,
      "learning_rate": 9.528234811097782e-05,
      "loss": 1.7804,
      "step": 7
    },
    {
      "epoch": 0.6808510638297872,
      "grad_norm": 1.3270093202590942,
      "learning_rate": 9.325977110783264e-05,
      "loss": 2.2679,
      "step": 8
    },
    {
      "epoch": 0.7659574468085106,
      "grad_norm": 0.2342531681060791,
      "learning_rate": 9.091077522761079e-05,
      "loss": 1.1517,
      "step": 9
    },
    {
      "epoch": 0.851063829787234,
      "grad_norm": 1.1225703954696655,
      "learning_rate": 8.825540127492967e-05,
      "loss": 1.8651,
      "step": 10
    },
    {
      "epoch": 0.9361702127659575,
      "grad_norm": 1.0222384929656982,
      "learning_rate": 8.531630396409507e-05,
      "loss": 2.0007,
      "step": 11
    },
    {
      "epoch": 1.0265957446808511,
      "grad_norm": 0.36375167965888977,
      "learning_rate": 8.211855863706654e-05,
      "loss": 1.9288,
      "step": 12
    },
    {
      "epoch": 1.1117021276595744,
      "grad_norm": 0.33239567279815674,
      "learning_rate": 7.868944732948101e-05,
      "loss": 1.6168,
      "step": 13
    },
    {
      "epoch": 1.196808510638298,
      "grad_norm": 0.26805180311203003,
      "learning_rate": 7.505822600994424e-05,
      "loss": 1.0579,
      "step": 14
    },
    {
      "epoch": 1.2819148936170213,
      "grad_norm": 0.3009487986564636,
      "learning_rate": 7.12558749784219e-05,
      "loss": 1.5312,
      "step": 15
    },
    {
      "epoch": 1.3670212765957448,
      "grad_norm": 0.39399126172065735,
      "learning_rate": 6.731483455324374e-05,
      "loss": 1.8618,
      "step": 16
    },
    {
      "epoch": 1.452127659574468,
      "grad_norm": 0.34218811988830566,
      "learning_rate": 6.326872830174567e-05,
      "loss": 0.9687,
      "step": 17
    },
    {
      "epoch": 1.5372340425531914,
      "grad_norm": 0.3016256093978882,
      "learning_rate": 5.9152076175848594e-05,
      "loss": 1.8985,
      "step": 18
    },
    {
      "epoch": 1.622340425531915,
      "grad_norm": 0.38652870059013367,
      "learning_rate": 5.500000000000001e-05,
      "loss": 1.827,
      "step": 19
    },
    {
      "epoch": 1.7074468085106385,
      "grad_norm": 0.4191890060901642,
      "learning_rate": 5.0847923824151424e-05,
      "loss": 1.2199,
      "step": 20
    },
    {
      "epoch": 1.7925531914893615,
      "grad_norm": 0.278137743473053,
      "learning_rate": 4.673127169825433e-05,
      "loss": 1.7785,
      "step": 21
    },
    {
      "epoch": 1.877659574468085,
      "grad_norm": 0.4404103755950928,
      "learning_rate": 4.268516544675628e-05,
      "loss": 2.0553,
      "step": 22
    },
    {
      "epoch": 1.9627659574468086,
      "grad_norm": 0.537523627281189,
      "learning_rate": 3.8744125021578126e-05,
      "loss": 2.2151,
      "step": 23
    },
    {
      "epoch": 2.0531914893617023,
      "grad_norm": 0.16800740361213684,
      "learning_rate": 3.494177399005578e-05,
      "loss": 1.1205,
      "step": 24
    },
    {
      "epoch": 2.1382978723404253,
      "grad_norm": 0.34991270303726196,
      "learning_rate": 3.1310552670518986e-05,
      "loss": 1.9167,
      "step": 25
    },
    {
      "epoch": 2.1382978723404253,
      "eval_loss": 1.7455496788024902,
      "eval_runtime": 0.9406,
      "eval_samples_per_second": 53.157,
      "eval_steps_per_second": 13.821,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 36,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.91521434075136e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|