{
  "best_metric": 2.112521171569824,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.04958472790380563,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0009916945580761127,
      "grad_norm": 1.8448106050491333,
      "learning_rate": 5e-05,
      "loss": 2.9265,
      "step": 1
    },
    {
      "epoch": 0.0009916945580761127,
      "eval_loss": 2.9538509845733643,
      "eval_runtime": 1.4353,
      "eval_samples_per_second": 34.836,
      "eval_steps_per_second": 9.057,
      "step": 1
    },
    {
      "epoch": 0.0019833891161522253,
      "grad_norm": 1.9753819704055786,
      "learning_rate": 0.0001,
      "loss": 2.842,
      "step": 2
    },
    {
      "epoch": 0.0029750836742283376,
      "grad_norm": 1.7899694442749023,
      "learning_rate": 9.990365154573717e-05,
      "loss": 2.776,
      "step": 3
    },
    {
      "epoch": 0.003966778232304451,
      "grad_norm": 1.7245805263519287,
      "learning_rate": 9.961501876182148e-05,
      "loss": 2.5315,
      "step": 4
    },
    {
      "epoch": 0.0049584727903805624,
      "grad_norm": 1.708620309829712,
      "learning_rate": 9.913533761814537e-05,
      "loss": 2.4045,
      "step": 5
    },
    {
      "epoch": 0.005950167348456675,
      "grad_norm": 1.0672885179519653,
      "learning_rate": 9.846666218300807e-05,
      "loss": 2.3025,
      "step": 6
    },
    {
      "epoch": 0.006941861906532788,
      "grad_norm": 0.992870032787323,
      "learning_rate": 9.761185582727977e-05,
      "loss": 2.2333,
      "step": 7
    },
    {
      "epoch": 0.007933556464608901,
      "grad_norm": 0.9287258386611938,
      "learning_rate": 9.657457896300791e-05,
      "loss": 2.1955,
      "step": 8
    },
    {
      "epoch": 0.008925251022685012,
      "grad_norm": 0.9728707075119019,
      "learning_rate": 9.535927336897098e-05,
      "loss": 2.1102,
      "step": 9
    },
    {
      "epoch": 0.009916945580761125,
      "grad_norm": 1.126459002494812,
      "learning_rate": 9.397114317029975e-05,
      "loss": 2.3005,
      "step": 10
    },
    {
      "epoch": 0.010908640138837238,
      "grad_norm": 0.9696996808052063,
      "learning_rate": 9.241613255361455e-05,
      "loss": 2.185,
      "step": 11
    },
    {
      "epoch": 0.01190033469691335,
      "grad_norm": 1.0382564067840576,
      "learning_rate": 9.070090031310558e-05,
      "loss": 2.1723,
      "step": 12
    },
    {
      "epoch": 0.012892029254989463,
      "grad_norm": 0.8205527663230896,
      "learning_rate": 8.883279133655399e-05,
      "loss": 2.2143,
      "step": 13
    },
    {
      "epoch": 0.013883723813065576,
      "grad_norm": 0.7997962236404419,
      "learning_rate": 8.681980515339464e-05,
      "loss": 2.1577,
      "step": 14
    },
    {
      "epoch": 0.014875418371141688,
      "grad_norm": 0.7631952166557312,
      "learning_rate": 8.467056167950311e-05,
      "loss": 2.1757,
      "step": 15
    },
    {
      "epoch": 0.015867112929217803,
      "grad_norm": 0.7147464752197266,
      "learning_rate": 8.239426430539243e-05,
      "loss": 2.2479,
      "step": 16
    },
    {
      "epoch": 0.016858807487293914,
      "grad_norm": 0.6940491795539856,
      "learning_rate": 8.000066048588211e-05,
      "loss": 2.0892,
      "step": 17
    },
    {
      "epoch": 0.017850502045370024,
      "grad_norm": 0.6920349597930908,
      "learning_rate": 7.75e-05,
      "loss": 2.1457,
      "step": 18
    },
    {
      "epoch": 0.01884219660344614,
      "grad_norm": 0.6729762554168701,
      "learning_rate": 7.490299105985507e-05,
      "loss": 2.0448,
      "step": 19
    },
    {
      "epoch": 0.01983389116152225,
      "grad_norm": 0.673019289970398,
      "learning_rate": 7.222075445642904e-05,
      "loss": 2.1296,
      "step": 20
    },
    {
      "epoch": 0.020825585719598364,
      "grad_norm": 0.6313472986221313,
      "learning_rate": 6.946477593864228e-05,
      "loss": 2.1292,
      "step": 21
    },
    {
      "epoch": 0.021817280277674475,
      "grad_norm": 0.6517207622528076,
      "learning_rate": 6.664685702961344e-05,
      "loss": 2.111,
      "step": 22
    },
    {
      "epoch": 0.02280897483575059,
      "grad_norm": 0.6630841493606567,
      "learning_rate": 6.377906449072578e-05,
      "loss": 2.1643,
      "step": 23
    },
    {
      "epoch": 0.0238006693938267,
      "grad_norm": 0.6734326481819153,
      "learning_rate": 6.087367864990233e-05,
      "loss": 2.1137,
      "step": 24
    },
    {
      "epoch": 0.024792363951902815,
      "grad_norm": 0.6314123272895813,
      "learning_rate": 5.794314081535644e-05,
      "loss": 2.0499,
      "step": 25
    },
    {
      "epoch": 0.024792363951902815,
      "eval_loss": 2.1400809288024902,
      "eval_runtime": 1.4334,
      "eval_samples_per_second": 34.882,
      "eval_steps_per_second": 9.069,
      "step": 25
    },
    {
      "epoch": 0.025784058509978926,
      "grad_norm": 0.6622329950332642,
      "learning_rate": 5.500000000000001e-05,
      "loss": 2.1491,
      "step": 26
    },
    {
      "epoch": 0.02677575306805504,
      "grad_norm": 0.6250461339950562,
      "learning_rate": 5.205685918464356e-05,
      "loss": 2.1379,
      "step": 27
    },
    {
      "epoch": 0.02776744762613115,
      "grad_norm": 0.5818557143211365,
      "learning_rate": 4.912632135009769e-05,
      "loss": 2.0769,
      "step": 28
    },
    {
      "epoch": 0.028759142184207265,
      "grad_norm": 0.6050412058830261,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 2.135,
      "step": 29
    },
    {
      "epoch": 0.029750836742283376,
      "grad_norm": 0.634053111076355,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 2.0189,
      "step": 30
    },
    {
      "epoch": 0.03074253130035949,
      "grad_norm": 0.6110137701034546,
      "learning_rate": 4.053522406135775e-05,
      "loss": 2.0755,
      "step": 31
    },
    {
      "epoch": 0.031734225858435605,
      "grad_norm": 0.5948219299316406,
      "learning_rate": 3.777924554357096e-05,
      "loss": 2.0746,
      "step": 32
    },
    {
      "epoch": 0.03272592041651171,
      "grad_norm": 0.5863483548164368,
      "learning_rate": 3.509700894014496e-05,
      "loss": 2.1102,
      "step": 33
    },
    {
      "epoch": 0.03371761497458783,
      "grad_norm": 0.5736821293830872,
      "learning_rate": 3.250000000000001e-05,
      "loss": 1.9682,
      "step": 34
    },
    {
      "epoch": 0.03470930953266394,
      "grad_norm": 0.5776990652084351,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 2.0821,
      "step": 35
    },
    {
      "epoch": 0.03570100409074005,
      "grad_norm": 0.6396189332008362,
      "learning_rate": 2.760573569460757e-05,
      "loss": 2.1349,
      "step": 36
    },
    {
      "epoch": 0.03669269864881616,
      "grad_norm": 0.606553852558136,
      "learning_rate": 2.53294383204969e-05,
      "loss": 2.0829,
      "step": 37
    },
    {
      "epoch": 0.03768439320689228,
      "grad_norm": 0.6115500330924988,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 2.2099,
      "step": 38
    },
    {
      "epoch": 0.03867608776496839,
      "grad_norm": 0.606846272945404,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 2.1354,
      "step": 39
    },
    {
      "epoch": 0.0396677823230445,
      "grad_norm": 0.5994709134101868,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 2.1579,
      "step": 40
    },
    {
      "epoch": 0.040659476881120614,
      "grad_norm": 0.5895546674728394,
      "learning_rate": 1.758386744638546e-05,
      "loss": 2.0635,
      "step": 41
    },
    {
      "epoch": 0.04165117143919673,
      "grad_norm": 0.5925672650337219,
      "learning_rate": 1.602885682970026e-05,
      "loss": 2.0997,
      "step": 42
    },
    {
      "epoch": 0.04264286599727284,
      "grad_norm": 0.6125743985176086,
      "learning_rate": 1.464072663102903e-05,
      "loss": 2.0312,
      "step": 43
    },
    {
      "epoch": 0.04363456055534895,
      "grad_norm": 0.593887448310852,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 2.0319,
      "step": 44
    },
    {
      "epoch": 0.044626255113425065,
      "grad_norm": 0.5913119316101074,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 2.0568,
      "step": 45
    },
    {
      "epoch": 0.04561794967150118,
      "grad_norm": 0.6023686528205872,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 2.1055,
      "step": 46
    },
    {
      "epoch": 0.04660964422957729,
      "grad_norm": 0.5734410881996155,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 2.0804,
      "step": 47
    },
    {
      "epoch": 0.0476013387876534,
      "grad_norm": 0.601888120174408,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 2.1679,
      "step": 48
    },
    {
      "epoch": 0.048593033345729515,
      "grad_norm": 0.6172356605529785,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 2.2193,
      "step": 49
    },
    {
      "epoch": 0.04958472790380563,
      "grad_norm": 0.6247469782829285,
      "learning_rate": 1e-05,
      "loss": 2.0932,
      "step": 50
    },
    {
      "epoch": 0.04958472790380563,
      "eval_loss": 2.112521171569824,
      "eval_runtime": 1.4403,
      "eval_samples_per_second": 34.715,
      "eval_steps_per_second": 9.026,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.83042868150272e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}