{ "best_metric": 3.5215563774108887, "best_model_checkpoint": "miner_id_24/checkpoint-50", "epoch": 0.003514629645901063, "eval_steps": 25, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 7.029259291802126e-05, "grad_norm": 9.411466598510742, "learning_rate": 0.0001, "loss": 6.8067, "step": 1 }, { "epoch": 7.029259291802126e-05, "eval_loss": 7.7762651443481445, "eval_runtime": 4432.0766, "eval_samples_per_second": 1.352, "eval_steps_per_second": 0.676, "step": 1 }, { "epoch": 0.00014058518583604253, "grad_norm": 9.495089530944824, "learning_rate": 0.0002, "loss": 6.2361, "step": 2 }, { "epoch": 0.00021087777875406378, "grad_norm": 8.74798583984375, "learning_rate": 0.00019978589232386035, "loss": 5.1303, "step": 3 }, { "epoch": 0.00028117037167208506, "grad_norm": 4.712350845336914, "learning_rate": 0.00019914448613738106, "loss": 4.4543, "step": 4 }, { "epoch": 0.00035146296459010633, "grad_norm": 4.159116744995117, "learning_rate": 0.00019807852804032305, "loss": 4.0895, "step": 5 }, { "epoch": 0.00042175555750812756, "grad_norm": 3.948648691177368, "learning_rate": 0.00019659258262890683, "loss": 3.9215, "step": 6 }, { "epoch": 0.0004920481504261489, "grad_norm": 4.281744003295898, "learning_rate": 0.0001946930129495106, "loss": 4.1163, "step": 7 }, { "epoch": 0.0005623407433441701, "grad_norm": 4.81821346282959, "learning_rate": 0.0001923879532511287, "loss": 3.53, "step": 8 }, { "epoch": 0.0006326333362621913, "grad_norm": 4.640680313110352, "learning_rate": 0.00018968727415326884, "loss": 3.731, "step": 9 }, { "epoch": 0.0007029259291802127, "grad_norm": 4.920532703399658, "learning_rate": 0.00018660254037844388, "loss": 3.3617, "step": 10 }, { "epoch": 0.0007732185220982339, "grad_norm": 4.455087661743164, "learning_rate": 0.00018314696123025454, "loss": 3.7523, "step": 11 }, { "epoch": 0.0008435111150162551, "grad_norm": 3.681004285812378, "learning_rate": 0.00017933533402912354, "loss": 3.9495, "step": 12 }, { "epoch": 0.0009138037079342764, "grad_norm": 3.6873984336853027, "learning_rate": 0.00017518398074789775, "loss": 3.2534, "step": 13 }, { "epoch": 0.0009840963008522978, "grad_norm": 4.170083522796631, "learning_rate": 0.00017071067811865476, "loss": 3.6071, "step": 14 }, { "epoch": 0.001054388893770319, "grad_norm": 3.7524523735046387, "learning_rate": 0.00016593458151000688, "loss": 3.2183, "step": 15 }, { "epoch": 0.0011246814866883402, "grad_norm": 3.95082426071167, "learning_rate": 0.00016087614290087208, "loss": 3.6198, "step": 16 }, { "epoch": 0.0011949740796063616, "grad_norm": 4.456296920776367, "learning_rate": 0.00015555702330196023, "loss": 3.3993, "step": 17 }, { "epoch": 0.0012652666725243827, "grad_norm": 4.209654808044434, "learning_rate": 0.00015000000000000001, "loss": 3.1265, "step": 18 }, { "epoch": 0.001335559265442404, "grad_norm": 4.037990093231201, "learning_rate": 0.00014422886902190014, "loss": 3.7803, "step": 19 }, { "epoch": 0.0014058518583604253, "grad_norm": 4.132318496704102, "learning_rate": 0.000138268343236509, "loss": 3.8329, "step": 20 }, { "epoch": 0.0014761444512784464, "grad_norm": 4.986948490142822, "learning_rate": 0.00013214394653031616, "loss": 4.0136, "step": 21 }, { "epoch": 0.0015464370441964678, "grad_norm": 3.795536518096924, "learning_rate": 0.00012588190451025207, "loss": 3.6991, "step": 22 }, { "epoch": 0.0016167296371144891, "grad_norm": 4.052053451538086, "learning_rate": 0.00011950903220161285, "loss": 3.9433, "step": 23 }, { 
"epoch": 0.0016870222300325102, "grad_norm": 3.4409587383270264, "learning_rate": 0.00011305261922200519, "loss": 3.2573, "step": 24 }, { "epoch": 0.0017573148229505316, "grad_norm": 4.064533710479736, "learning_rate": 0.00010654031292301432, "loss": 3.4339, "step": 25 }, { "epoch": 0.0017573148229505316, "eval_loss": 3.6852939128875732, "eval_runtime": 4443.7138, "eval_samples_per_second": 1.348, "eval_steps_per_second": 0.674, "step": 25 }, { "epoch": 0.001827607415868553, "grad_norm": 4.198983192443848, "learning_rate": 0.0001, "loss": 3.808, "step": 26 }, { "epoch": 0.001897900008786574, "grad_norm": 4.138834476470947, "learning_rate": 9.345968707698569e-05, "loss": 3.1475, "step": 27 }, { "epoch": 0.0019681926017045956, "grad_norm": 4.510487079620361, "learning_rate": 8.694738077799488e-05, "loss": 3.5543, "step": 28 }, { "epoch": 0.0020384851946226167, "grad_norm": 4.65043830871582, "learning_rate": 8.049096779838719e-05, "loss": 4.5274, "step": 29 }, { "epoch": 0.002108777787540638, "grad_norm": 4.022218704223633, "learning_rate": 7.411809548974792e-05, "loss": 3.8214, "step": 30 }, { "epoch": 0.0021790703804586593, "grad_norm": 3.9271130561828613, "learning_rate": 6.785605346968386e-05, "loss": 3.8296, "step": 31 }, { "epoch": 0.0022493629733766804, "grad_norm": 4.240511417388916, "learning_rate": 6.173165676349103e-05, "loss": 3.8647, "step": 32 }, { "epoch": 0.0023196555662947016, "grad_norm": 3.677292585372925, "learning_rate": 5.577113097809989e-05, "loss": 3.6044, "step": 33 }, { "epoch": 0.002389948159212723, "grad_norm": 4.133325099945068, "learning_rate": 5.000000000000002e-05, "loss": 4.2386, "step": 34 }, { "epoch": 0.0024602407521307442, "grad_norm": 4.031435012817383, "learning_rate": 4.444297669803981e-05, "loss": 3.8542, "step": 35 }, { "epoch": 0.0025305333450487653, "grad_norm": 3.931450843811035, "learning_rate": 3.9123857099127936e-05, "loss": 3.8882, "step": 36 }, { "epoch": 0.002600825937966787, "grad_norm": 4.674798488616943, "learning_rate": 3.406541848999312e-05, "loss": 4.5656, "step": 37 }, { "epoch": 0.002671118530884808, "grad_norm": 3.8759536743164062, "learning_rate": 2.9289321881345254e-05, "loss": 3.3743, "step": 38 }, { "epoch": 0.002741411123802829, "grad_norm": 3.7546091079711914, "learning_rate": 2.4816019252102273e-05, "loss": 3.6026, "step": 39 }, { "epoch": 0.0028117037167208507, "grad_norm": 4.065143585205078, "learning_rate": 2.0664665970876496e-05, "loss": 3.7303, "step": 40 }, { "epoch": 0.002881996309638872, "grad_norm": 4.17460298538208, "learning_rate": 1.6853038769745467e-05, "loss": 4.0344, "step": 41 }, { "epoch": 0.002952288902556893, "grad_norm": 4.61301851272583, "learning_rate": 1.339745962155613e-05, "loss": 3.849, "step": 42 }, { "epoch": 0.0030225814954749144, "grad_norm": 4.693392753601074, "learning_rate": 1.0312725846731175e-05, "loss": 4.1393, "step": 43 }, { "epoch": 0.0030928740883929356, "grad_norm": 4.002664566040039, "learning_rate": 7.612046748871327e-06, "loss": 3.7645, "step": 44 }, { "epoch": 0.0031631666813109567, "grad_norm": 3.897087812423706, "learning_rate": 5.306987050489442e-06, "loss": 3.599, "step": 45 }, { "epoch": 0.0032334592742289782, "grad_norm": 4.300354957580566, "learning_rate": 3.40741737109318e-06, "loss": 4.1152, "step": 46 }, { "epoch": 0.0033037518671469993, "grad_norm": 4.223507404327393, "learning_rate": 1.921471959676957e-06, "loss": 3.5119, "step": 47 }, { "epoch": 0.0033740444600650205, "grad_norm": 4.682356834411621, "learning_rate": 8.555138626189618e-07, "loss": 4.2198, "step": 48 }, { 
"epoch": 0.003444337052983042, "grad_norm": 4.126323223114014, "learning_rate": 2.141076761396521e-07, "loss": 3.8349, "step": 49 }, { "epoch": 0.003514629645901063, "grad_norm": 5.588482856750488, "learning_rate": 0.0, "loss": 4.7042, "step": 50 }, { "epoch": 0.003514629645901063, "eval_loss": 3.5215563774108887, "eval_runtime": 4445.4742, "eval_samples_per_second": 1.347, "eval_steps_per_second": 0.674, "step": 50 } ], "logging_steps": 1, "max_steps": 50, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 1, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 8.23612542025728e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }