{
  "best_metric": 0.3330279588699341,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.06415911460421846,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0012831822920843692,
      "grad_norm": 9.69926643371582,
      "learning_rate": 5e-05,
      "loss": 3.4686,
      "step": 1
    },
    {
      "epoch": 0.0012831822920843692,
      "eval_loss": 4.248011589050293,
      "eval_runtime": 0.4792,
      "eval_samples_per_second": 104.347,
      "eval_steps_per_second": 27.13,
      "step": 1
    },
    {
      "epoch": 0.0025663645841687384,
      "grad_norm": 9.604711532592773,
      "learning_rate": 0.0001,
      "loss": 3.5721,
      "step": 2
    },
    {
      "epoch": 0.0038495468762531075,
      "grad_norm": 9.658062934875488,
      "learning_rate": 9.990365154573717e-05,
      "loss": 3.5483,
      "step": 3
    },
    {
      "epoch": 0.005132729168337477,
      "grad_norm": 8.972732543945312,
      "learning_rate": 9.961501876182148e-05,
      "loss": 2.7079,
      "step": 4
    },
    {
      "epoch": 0.006415911460421846,
      "grad_norm": 7.0797810554504395,
      "learning_rate": 9.913533761814537e-05,
      "loss": 1.846,
      "step": 5
    },
    {
      "epoch": 0.007699093752506215,
      "grad_norm": 5.179475784301758,
      "learning_rate": 9.846666218300807e-05,
      "loss": 1.4564,
      "step": 6
    },
    {
      "epoch": 0.008982276044590584,
      "grad_norm": 4.027368068695068,
      "learning_rate": 9.761185582727977e-05,
      "loss": 1.2109,
      "step": 7
    },
    {
      "epoch": 0.010265458336674953,
      "grad_norm": 3.563283681869507,
      "learning_rate": 9.657457896300791e-05,
      "loss": 0.9135,
      "step": 8
    },
    {
      "epoch": 0.011548640628759323,
      "grad_norm": 2.7347819805145264,
      "learning_rate": 9.535927336897098e-05,
      "loss": 0.7636,
      "step": 9
    },
    {
      "epoch": 0.012831822920843693,
      "grad_norm": 2.4964144229888916,
      "learning_rate": 9.397114317029975e-05,
      "loss": 0.6791,
      "step": 10
    },
    {
      "epoch": 0.014115005212928062,
      "grad_norm": 2.7495129108428955,
      "learning_rate": 9.241613255361455e-05,
      "loss": 0.6407,
      "step": 11
    },
    {
      "epoch": 0.01539818750501243,
      "grad_norm": 2.3513643741607666,
      "learning_rate": 9.070090031310558e-05,
      "loss": 0.5507,
      "step": 12
    },
    {
      "epoch": 0.0166813697970968,
      "grad_norm": 1.8642207384109497,
      "learning_rate": 8.883279133655399e-05,
      "loss": 0.5893,
      "step": 13
    },
    {
      "epoch": 0.017964552089181168,
      "grad_norm": 2.0092697143554688,
      "learning_rate": 8.681980515339464e-05,
      "loss": 0.6107,
      "step": 14
    },
    {
      "epoch": 0.01924773438126554,
      "grad_norm": 1.567659616470337,
      "learning_rate": 8.467056167950311e-05,
      "loss": 0.494,
      "step": 15
    },
    {
      "epoch": 0.020530916673349907,
      "grad_norm": 1.9570163488388062,
      "learning_rate": 8.239426430539243e-05,
      "loss": 0.4533,
      "step": 16
    },
    {
      "epoch": 0.021814098965434278,
      "grad_norm": 1.603574514389038,
      "learning_rate": 8.000066048588211e-05,
      "loss": 0.4829,
      "step": 17
    },
    {
      "epoch": 0.023097281257518646,
      "grad_norm": 1.8064993619918823,
      "learning_rate": 7.75e-05,
      "loss": 0.5042,
      "step": 18
    },
    {
      "epoch": 0.024380463549603014,
      "grad_norm": 1.494826078414917,
      "learning_rate": 7.490299105985507e-05,
      "loss": 0.4298,
      "step": 19
    },
    {
      "epoch": 0.025663645841687385,
      "grad_norm": 1.5904885530471802,
      "learning_rate": 7.222075445642904e-05,
      "loss": 0.5216,
      "step": 20
    },
    {
      "epoch": 0.026946828133771753,
      "grad_norm": 1.4508434534072876,
      "learning_rate": 6.946477593864228e-05,
      "loss": 0.4252,
      "step": 21
    },
    {
      "epoch": 0.028230010425856124,
      "grad_norm": 1.796913981437683,
      "learning_rate": 6.664685702961344e-05,
      "loss": 0.4087,
      "step": 22
    },
    {
      "epoch": 0.029513192717940492,
      "grad_norm": 1.3291597366333008,
      "learning_rate": 6.377906449072578e-05,
      "loss": 0.3526,
      "step": 23
    },
    {
      "epoch": 0.03079637501002486,
      "grad_norm": 1.1371921300888062,
      "learning_rate": 6.087367864990233e-05,
      "loss": 0.3505,
      "step": 24
    },
    {
      "epoch": 0.03207955730210923,
      "grad_norm": 1.614019513130188,
      "learning_rate": 5.794314081535644e-05,
      "loss": 0.6021,
      "step": 25
    },
    {
      "epoch": 0.03207955730210923,
      "eval_loss": 0.36776840686798096,
      "eval_runtime": 0.4735,
      "eval_samples_per_second": 105.59,
      "eval_steps_per_second": 27.453,
      "step": 25
    },
    {
      "epoch": 0.0333627395941936,
      "grad_norm": 1.6320197582244873,
      "learning_rate": 5.500000000000001e-05,
      "loss": 0.4499,
      "step": 26
    },
    {
      "epoch": 0.03464592188627797,
      "grad_norm": 1.1622369289398193,
      "learning_rate": 5.205685918464356e-05,
      "loss": 0.3862,
      "step": 27
    },
    {
      "epoch": 0.035929104178362335,
      "grad_norm": 1.5270744562149048,
      "learning_rate": 4.912632135009769e-05,
      "loss": 0.391,
      "step": 28
    },
    {
      "epoch": 0.03721228647044671,
      "grad_norm": 1.5034524202346802,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 0.3603,
      "step": 29
    },
    {
      "epoch": 0.03849546876253108,
      "grad_norm": 1.092508316040039,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 0.3308,
      "step": 30
    },
    {
      "epoch": 0.03977865105461545,
      "grad_norm": 1.2671431303024292,
      "learning_rate": 4.053522406135775e-05,
      "loss": 0.4037,
      "step": 31
    },
    {
      "epoch": 0.041061833346699814,
      "grad_norm": 1.2833945751190186,
      "learning_rate": 3.777924554357096e-05,
      "loss": 0.3982,
      "step": 32
    },
    {
      "epoch": 0.042345015638784185,
      "grad_norm": 1.186705231666565,
      "learning_rate": 3.509700894014496e-05,
      "loss": 0.362,
      "step": 33
    },
    {
      "epoch": 0.043628197930868556,
      "grad_norm": 1.2128040790557861,
      "learning_rate": 3.250000000000001e-05,
      "loss": 0.401,
      "step": 34
    },
    {
      "epoch": 0.04491138022295292,
      "grad_norm": 1.496416687965393,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 0.4697,
      "step": 35
    },
    {
      "epoch": 0.04619456251503729,
      "grad_norm": 1.348049521446228,
      "learning_rate": 2.760573569460757e-05,
      "loss": 0.3377,
      "step": 36
    },
    {
      "epoch": 0.04747774480712166,
      "grad_norm": 1.1160104274749756,
      "learning_rate": 2.53294383204969e-05,
      "loss": 0.341,
      "step": 37
    },
    {
      "epoch": 0.04876092709920603,
      "grad_norm": 1.1745514869689941,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 0.4423,
      "step": 38
    },
    {
      "epoch": 0.0500441093912904,
      "grad_norm": 1.4134280681610107,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 0.3774,
      "step": 39
    },
    {
      "epoch": 0.05132729168337477,
      "grad_norm": 1.0615042448043823,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 0.2585,
      "step": 40
    },
    {
      "epoch": 0.05261047397545914,
      "grad_norm": 1.5986738204956055,
      "learning_rate": 1.758386744638546e-05,
      "loss": 0.455,
      "step": 41
    },
    {
      "epoch": 0.053893656267543506,
      "grad_norm": 1.1567108631134033,
      "learning_rate": 1.602885682970026e-05,
      "loss": 0.3465,
      "step": 42
    },
    {
      "epoch": 0.05517683855962788,
      "grad_norm": 1.347766399383545,
      "learning_rate": 1.464072663102903e-05,
      "loss": 0.4256,
      "step": 43
    },
    {
      "epoch": 0.05646002085171225,
      "grad_norm": 1.1334179639816284,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 0.2994,
      "step": 44
    },
    {
      "epoch": 0.05774320314379661,
      "grad_norm": 1.1669671535491943,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 0.3003,
      "step": 45
    },
    {
      "epoch": 0.059026385435880985,
      "grad_norm": 1.4140655994415283,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 0.3947,
      "step": 46
    },
    {
      "epoch": 0.060309567727965356,
      "grad_norm": 1.5222396850585938,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 0.3928,
      "step": 47
    },
    {
      "epoch": 0.06159275002004972,
      "grad_norm": 1.328269600868225,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 0.4035,
      "step": 48
    },
    {
      "epoch": 0.06287593231213409,
      "grad_norm": 1.2904680967330933,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 0.37,
      "step": 49
    },
    {
      "epoch": 0.06415911460421846,
      "grad_norm": 1.46721613407135,
      "learning_rate": 1e-05,
      "loss": 0.4614,
      "step": 50
    },
    {
      "epoch": 0.06415911460421846,
      "eval_loss": 0.3330279588699341,
      "eval_runtime": 0.4741,
      "eval_samples_per_second": 105.458,
      "eval_steps_per_second": 27.419,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.544851626439475e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}