{
  "best_metric": 0.8075627684593201,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 0.05259870475689536,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0010519740951379073,
      "grad_norm": 37.66078567504883,
      "learning_rate": 5e-05,
      "loss": 14.8109,
      "step": 1
    },
    {
      "epoch": 0.0010519740951379073,
      "eval_loss": 13.268001556396484,
      "eval_runtime": 2.0272,
      "eval_samples_per_second": 24.664,
      "eval_steps_per_second": 6.413,
      "step": 1
    },
    {
      "epoch": 0.0021039481902758147,
      "grad_norm": 48.14912033081055,
      "learning_rate": 0.0001,
      "loss": 14.1398,
      "step": 2
    },
    {
      "epoch": 0.0031559222854137215,
      "grad_norm": 41.73773956298828,
      "learning_rate": 9.990365154573717e-05,
      "loss": 12.5357,
      "step": 3
    },
    {
      "epoch": 0.004207896380551629,
      "grad_norm": 39.7030029296875,
      "learning_rate": 9.961501876182148e-05,
      "loss": 7.3614,
      "step": 4
    },
    {
      "epoch": 0.005259870475689536,
      "grad_norm": 23.408571243286133,
      "learning_rate": 9.913533761814537e-05,
      "loss": 3.6285,
      "step": 5
    },
    {
      "epoch": 0.006311844570827443,
      "grad_norm": 23.954805374145508,
      "learning_rate": 9.846666218300807e-05,
      "loss": 2.3564,
      "step": 6
    },
    {
      "epoch": 0.007363818665965351,
      "grad_norm": 12.64648723602295,
      "learning_rate": 9.761185582727977e-05,
      "loss": 1.4816,
      "step": 7
    },
    {
      "epoch": 0.008415792761103259,
      "grad_norm": 7.248321533203125,
      "learning_rate": 9.657457896300791e-05,
      "loss": 1.0535,
      "step": 8
    },
    {
      "epoch": 0.009467766856241165,
      "grad_norm": 4.235668659210205,
      "learning_rate": 9.535927336897098e-05,
      "loss": 0.8985,
      "step": 9
    },
    {
      "epoch": 0.010519740951379072,
      "grad_norm": 3.8979365825653076,
      "learning_rate": 9.397114317029975e-05,
      "loss": 0.8346,
      "step": 10
    },
    {
      "epoch": 0.01157171504651698,
      "grad_norm": 6.265169620513916,
      "learning_rate": 9.241613255361455e-05,
      "loss": 0.853,
      "step": 11
    },
    {
      "epoch": 0.012623689141654886,
      "grad_norm": 6.652163028717041,
      "learning_rate": 9.070090031310558e-05,
      "loss": 0.8676,
      "step": 12
    },
    {
      "epoch": 0.013675663236792795,
      "grad_norm": 12.269131660461426,
      "learning_rate": 8.883279133655399e-05,
      "loss": 1.1406,
      "step": 13
    },
    {
      "epoch": 0.014727637331930702,
      "grad_norm": 6.396784782409668,
      "learning_rate": 8.681980515339464e-05,
      "loss": 0.9183,
      "step": 14
    },
    {
      "epoch": 0.01577961142706861,
      "grad_norm": 3.7699356079101562,
      "learning_rate": 8.467056167950311e-05,
      "loss": 0.827,
      "step": 15
    },
    {
      "epoch": 0.016831585522206517,
      "grad_norm": 2.2097628116607666,
      "learning_rate": 8.239426430539243e-05,
      "loss": 0.8275,
      "step": 16
    },
    {
      "epoch": 0.017883559617344422,
      "grad_norm": 1.7083959579467773,
      "learning_rate": 8.000066048588211e-05,
      "loss": 0.8225,
      "step": 17
    },
    {
      "epoch": 0.01893553371248233,
      "grad_norm": 2.439757823944092,
      "learning_rate": 7.75e-05,
      "loss": 0.828,
      "step": 18
    },
    {
      "epoch": 0.019987507807620236,
      "grad_norm": 2.4405243396759033,
      "learning_rate": 7.490299105985507e-05,
      "loss": 0.812,
      "step": 19
    },
    {
      "epoch": 0.021039481902758145,
      "grad_norm": 2.1622493267059326,
      "learning_rate": 7.222075445642904e-05,
      "loss": 0.8154,
      "step": 20
    },
    {
      "epoch": 0.022091455997896053,
      "grad_norm": 2.0287113189697266,
      "learning_rate": 6.946477593864228e-05,
      "loss": 0.8343,
      "step": 21
    },
    {
      "epoch": 0.02314343009303396,
      "grad_norm": 2.6538174152374268,
      "learning_rate": 6.664685702961344e-05,
      "loss": 0.8351,
      "step": 22
    },
    {
      "epoch": 0.024195404188171867,
      "grad_norm": 2.780940294265747,
      "learning_rate": 6.377906449072578e-05,
      "loss": 0.8283,
      "step": 23
    },
    {
      "epoch": 0.025247378283309772,
      "grad_norm": 2.008140802383423,
      "learning_rate": 6.087367864990233e-05,
      "loss": 0.8269,
      "step": 24
    },
    {
      "epoch": 0.02629935237844768,
      "grad_norm": 1.1456310749053955,
      "learning_rate": 5.794314081535644e-05,
      "loss": 0.8082,
      "step": 25
    },
    {
      "epoch": 0.02629935237844768,
      "eval_loss": 0.8075627684593201,
      "eval_runtime": 2.0782,
      "eval_samples_per_second": 24.06,
      "eval_steps_per_second": 6.256,
      "step": 25
    },
    {
      "epoch": 0.02735132647358559,
      "grad_norm": 1.6255582571029663,
      "learning_rate": 5.500000000000001e-05,
      "loss": 0.8066,
      "step": 26
    },
    {
      "epoch": 0.028403300568723495,
      "grad_norm": 0.809135377407074,
      "learning_rate": 5.205685918464356e-05,
      "loss": 0.8051,
      "step": 27
    },
    {
      "epoch": 0.029455274663861403,
      "grad_norm": 1.7985752820968628,
      "learning_rate": 4.912632135009769e-05,
      "loss": 0.8179,
      "step": 28
    },
    {
      "epoch": 0.03050724875899931,
      "grad_norm": 1.2974883317947388,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 0.808,
      "step": 29
    },
    {
      "epoch": 0.03155922285413722,
      "grad_norm": 1.3518906831741333,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 0.8031,
      "step": 30
    },
    {
      "epoch": 0.032611196949275126,
      "grad_norm": 1.5183818340301514,
      "learning_rate": 4.053522406135775e-05,
      "loss": 0.8073,
      "step": 31
    },
    {
      "epoch": 0.033663171044413034,
      "grad_norm": 1.6260573863983154,
      "learning_rate": 3.777924554357096e-05,
      "loss": 0.8144,
      "step": 32
    },
    {
      "epoch": 0.034715145139550936,
      "grad_norm": 2.1896719932556152,
      "learning_rate": 3.509700894014496e-05,
      "loss": 0.8099,
      "step": 33
    },
    {
      "epoch": 0.035767119234688845,
      "grad_norm": 1.9014267921447754,
      "learning_rate": 3.250000000000001e-05,
      "loss": 0.8069,
      "step": 34
    },
    {
      "epoch": 0.03681909332982675,
      "grad_norm": 1.9700180292129517,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 0.8166,
      "step": 35
    },
    {
      "epoch": 0.03787106742496466,
      "grad_norm": 1.4387903213500977,
      "learning_rate": 2.760573569460757e-05,
      "loss": 0.8091,
      "step": 36
    },
    {
      "epoch": 0.03892304152010257,
      "grad_norm": 1.0182123184204102,
      "learning_rate": 2.53294383204969e-05,
      "loss": 0.8039,
      "step": 37
    },
    {
      "epoch": 0.03997501561524047,
      "grad_norm": 1.6459559202194214,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 0.8475,
      "step": 38
    },
    {
      "epoch": 0.04102698971037838,
      "grad_norm": 1.9654988050460815,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 0.8169,
      "step": 39
    },
    {
      "epoch": 0.04207896380551629,
      "grad_norm": 1.2888116836547852,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 0.8084,
      "step": 40
    },
    {
      "epoch": 0.0431309379006542,
      "grad_norm": 1.0997812747955322,
      "learning_rate": 1.758386744638546e-05,
      "loss": 0.794,
      "step": 41
    },
    {
      "epoch": 0.04418291199579211,
      "grad_norm": 1.8465800285339355,
      "learning_rate": 1.602885682970026e-05,
      "loss": 0.8217,
      "step": 42
    },
    {
      "epoch": 0.04523488609093001,
      "grad_norm": 1.5212135314941406,
      "learning_rate": 1.464072663102903e-05,
      "loss": 0.8128,
      "step": 43
    },
    {
      "epoch": 0.04628686018606792,
      "grad_norm": 1.6653895378112793,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 0.8139,
      "step": 44
    },
    {
      "epoch": 0.047338834281205826,
      "grad_norm": 1.3936947584152222,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 0.7985,
      "step": 45
    },
    {
      "epoch": 0.048390808376343734,
      "grad_norm": 2.1122493743896484,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 0.8154,
      "step": 46
    },
    {
      "epoch": 0.04944278247148164,
      "grad_norm": 0.8528788685798645,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 0.8162,
      "step": 47
    },
    {
      "epoch": 0.050494756566619545,
      "grad_norm": 1.5338293313980103,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 0.7937,
      "step": 48
    },
    {
      "epoch": 0.05154673066175745,
      "grad_norm": 1.9110890626907349,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 0.8225,
      "step": 49
    },
    {
      "epoch": 0.05259870475689536,
      "grad_norm": 1.324634313583374,
      "learning_rate": 1e-05,
      "loss": 0.8084,
      "step": 50
    },
    {
      "epoch": 0.05259870475689536,
      "eval_loss": 0.8132852911949158,
      "eval_runtime": 2.0735,
      "eval_samples_per_second": 24.114,
      "eval_steps_per_second": 6.27,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 1
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.6522972753402266e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}