{
  "best_metric": 3.6852939128875732,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 0.0017573148229505316,
  "eval_steps": 25,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 7.029259291802126e-05,
      "grad_norm": 9.411466598510742,
      "learning_rate": 0.0001,
      "loss": 6.8067,
      "step": 1
    },
    {
      "epoch": 7.029259291802126e-05,
      "eval_loss": 7.7762651443481445,
      "eval_runtime": 4432.0766,
      "eval_samples_per_second": 1.352,
      "eval_steps_per_second": 0.676,
      "step": 1
    },
    {
      "epoch": 0.00014058518583604253,
      "grad_norm": 9.495089530944824,
      "learning_rate": 0.0002,
      "loss": 6.2361,
      "step": 2
    },
    {
      "epoch": 0.00021087777875406378,
      "grad_norm": 8.74798583984375,
      "learning_rate": 0.00019978589232386035,
      "loss": 5.1303,
      "step": 3
    },
    {
      "epoch": 0.00028117037167208506,
      "grad_norm": 4.712350845336914,
      "learning_rate": 0.00019914448613738106,
      "loss": 4.4543,
      "step": 4
    },
    {
      "epoch": 0.00035146296459010633,
      "grad_norm": 4.159116744995117,
      "learning_rate": 0.00019807852804032305,
      "loss": 4.0895,
      "step": 5
    },
    {
      "epoch": 0.00042175555750812756,
      "grad_norm": 3.948648691177368,
      "learning_rate": 0.00019659258262890683,
      "loss": 3.9215,
      "step": 6
    },
    {
      "epoch": 0.0004920481504261489,
      "grad_norm": 4.281744003295898,
      "learning_rate": 0.0001946930129495106,
      "loss": 4.1163,
      "step": 7
    },
    {
      "epoch": 0.0005623407433441701,
      "grad_norm": 4.81821346282959,
      "learning_rate": 0.0001923879532511287,
      "loss": 3.53,
      "step": 8
    },
    {
      "epoch": 0.0006326333362621913,
      "grad_norm": 4.640680313110352,
      "learning_rate": 0.00018968727415326884,
      "loss": 3.731,
      "step": 9
    },
    {
      "epoch": 0.0007029259291802127,
      "grad_norm": 4.920532703399658,
      "learning_rate": 0.00018660254037844388,
      "loss": 3.3617,
      "step": 10
    },
    {
      "epoch": 0.0007732185220982339,
      "grad_norm": 4.455087661743164,
      "learning_rate": 0.00018314696123025454,
      "loss": 3.7523,
      "step": 11
    },
    {
      "epoch": 0.0008435111150162551,
      "grad_norm": 3.681004285812378,
      "learning_rate": 0.00017933533402912354,
      "loss": 3.9495,
      "step": 12
    },
    {
      "epoch": 0.0009138037079342764,
      "grad_norm": 3.6873984336853027,
      "learning_rate": 0.00017518398074789775,
      "loss": 3.2534,
      "step": 13
    },
    {
      "epoch": 0.0009840963008522978,
      "grad_norm": 4.170083522796631,
      "learning_rate": 0.00017071067811865476,
      "loss": 3.6071,
      "step": 14
    },
    {
      "epoch": 0.001054388893770319,
      "grad_norm": 3.7524523735046387,
      "learning_rate": 0.00016593458151000688,
      "loss": 3.2183,
      "step": 15
    },
    {
      "epoch": 0.0011246814866883402,
      "grad_norm": 3.95082426071167,
      "learning_rate": 0.00016087614290087208,
      "loss": 3.6198,
      "step": 16
    },
    {
      "epoch": 0.0011949740796063616,
      "grad_norm": 4.456296920776367,
      "learning_rate": 0.00015555702330196023,
      "loss": 3.3993,
      "step": 17
    },
    {
      "epoch": 0.0012652666725243827,
      "grad_norm": 4.209654808044434,
      "learning_rate": 0.00015000000000000001,
      "loss": 3.1265,
      "step": 18
    },
    {
      "epoch": 0.001335559265442404,
      "grad_norm": 4.037990093231201,
      "learning_rate": 0.00014422886902190014,
      "loss": 3.7803,
      "step": 19
    },
    {
      "epoch": 0.0014058518583604253,
      "grad_norm": 4.132318496704102,
      "learning_rate": 0.000138268343236509,
      "loss": 3.8329,
      "step": 20
    },
    {
      "epoch": 0.0014761444512784464,
      "grad_norm": 4.986948490142822,
      "learning_rate": 0.00013214394653031616,
      "loss": 4.0136,
      "step": 21
    },
    {
      "epoch": 0.0015464370441964678,
      "grad_norm": 3.795536518096924,
      "learning_rate": 0.00012588190451025207,
      "loss": 3.6991,
      "step": 22
    },
    {
      "epoch": 0.0016167296371144891,
      "grad_norm": 4.052053451538086,
      "learning_rate": 0.00011950903220161285,
      "loss": 3.9433,
      "step": 23
    },
    {
      "epoch": 0.0016870222300325102,
      "grad_norm": 3.4409587383270264,
      "learning_rate": 0.00011305261922200519,
      "loss": 3.2573,
      "step": 24
    },
    {
      "epoch": 0.0017573148229505316,
      "grad_norm": 4.064533710479736,
      "learning_rate": 0.00010654031292301432,
      "loss": 3.4339,
      "step": 25
    },
    {
      "epoch": 0.0017573148229505316,
      "eval_loss": 3.6852939128875732,
      "eval_runtime": 4443.7138,
      "eval_samples_per_second": 1.348,
      "eval_steps_per_second": 0.674,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.11806271012864e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}