{ "best_metric": 0.7244487404823303, "best_model_checkpoint": "miner_id_24/checkpoint-50", "epoch": 0.4048582995951417, "eval_steps": 50, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.008097165991902834, "grad_norm": 160.5043182373047, "learning_rate": 1e-06, "loss": 11.5445, "step": 1 }, { "epoch": 0.008097165991902834, "eval_loss": 11.895453453063965, "eval_runtime": 1.2561, "eval_samples_per_second": 41.398, "eval_steps_per_second": 10.349, "step": 1 }, { "epoch": 0.016194331983805668, "grad_norm": 113.18714904785156, "learning_rate": 2e-06, "loss": 12.2262, "step": 2 }, { "epoch": 0.024291497975708502, "grad_norm": 134.26119995117188, "learning_rate": 3e-06, "loss": 12.1703, "step": 3 }, { "epoch": 0.032388663967611336, "grad_norm": 151.49256896972656, "learning_rate": 4e-06, "loss": 11.2431, "step": 4 }, { "epoch": 0.04048582995951417, "grad_norm": 128.835205078125, "learning_rate": 4.9999999999999996e-06, "loss": 12.309, "step": 5 }, { "epoch": 0.048582995951417005, "grad_norm": 126.78450012207031, "learning_rate": 6e-06, "loss": 12.4684, "step": 6 }, { "epoch": 0.05668016194331984, "grad_norm": 128.61981201171875, "learning_rate": 7e-06, "loss": 11.275, "step": 7 }, { "epoch": 0.06477732793522267, "grad_norm": 132.9573516845703, "learning_rate": 8e-06, "loss": 9.7015, "step": 8 }, { "epoch": 0.0728744939271255, "grad_norm": 114.10719299316406, "learning_rate": 9e-06, "loss": 8.4523, "step": 9 }, { "epoch": 0.08097165991902834, "grad_norm": 102.33385467529297, "learning_rate": 9.999999999999999e-06, "loss": 7.9721, "step": 10 }, { "epoch": 0.08906882591093117, "grad_norm": 85.0543212890625, "learning_rate": 1.1e-05, "loss": 6.5994, "step": 11 }, { "epoch": 0.09716599190283401, "grad_norm": 114.57733917236328, "learning_rate": 1.2e-05, "loss": 5.7194, "step": 12 }, { "epoch": 0.10526315789473684, "grad_norm": 84.9945297241211, "learning_rate": 1.3000000000000001e-05, "loss": 4.7599, "step": 13 }, { "epoch": 0.11336032388663968, "grad_norm": 82.66494750976562, "learning_rate": 1.4e-05, "loss": 6.2553, "step": 14 }, { "epoch": 0.1214574898785425, "grad_norm": 66.0272445678711, "learning_rate": 1.5e-05, "loss": 3.5185, "step": 15 }, { "epoch": 0.12955465587044535, "grad_norm": 59.03416061401367, "learning_rate": 1.6e-05, "loss": 2.4052, "step": 16 }, { "epoch": 0.13765182186234817, "grad_norm": 60.23641586303711, "learning_rate": 1.7e-05, "loss": 2.7286, "step": 17 }, { "epoch": 0.145748987854251, "grad_norm": 37.41645431518555, "learning_rate": 1.8e-05, "loss": 2.7418, "step": 18 }, { "epoch": 0.15384615384615385, "grad_norm": 31.624427795410156, "learning_rate": 1.9e-05, "loss": 1.5455, "step": 19 }, { "epoch": 0.16194331983805668, "grad_norm": 33.63788986206055, "learning_rate": 1.9999999999999998e-05, "loss": 2.2783, "step": 20 }, { "epoch": 0.1700404858299595, "grad_norm": 35.38190841674805, "learning_rate": 2.1e-05, "loss": 1.6235, "step": 21 }, { "epoch": 0.17813765182186234, "grad_norm": 46.83808135986328, "learning_rate": 2.2e-05, "loss": 1.8509, "step": 22 }, { "epoch": 0.1862348178137652, "grad_norm": 21.023977279663086, "learning_rate": 2.3000000000000003e-05, "loss": 0.9755, "step": 23 }, { "epoch": 0.19433198380566802, "grad_norm": 34.79740905761719, "learning_rate": 2.4e-05, "loss": 0.9948, "step": 24 }, { "epoch": 0.20242914979757085, "grad_norm": 26.90549659729004, "learning_rate": 2.5e-05, "loss": 0.7971, "step": 25 }, { "epoch": 0.21052631578947367, "grad_norm": 
30.40579605102539, "learning_rate": 2.6000000000000002e-05, "loss": 1.1038, "step": 26 }, { "epoch": 0.21862348178137653, "grad_norm": 36.04926681518555, "learning_rate": 2.7000000000000002e-05, "loss": 0.7739, "step": 27 }, { "epoch": 0.22672064777327935, "grad_norm": 35.41645050048828, "learning_rate": 2.8e-05, "loss": 1.3239, "step": 28 }, { "epoch": 0.23481781376518218, "grad_norm": 26.374845504760742, "learning_rate": 2.9e-05, "loss": 1.2692, "step": 29 }, { "epoch": 0.242914979757085, "grad_norm": 24.77640151977539, "learning_rate": 3e-05, "loss": 0.8267, "step": 30 }, { "epoch": 0.25101214574898784, "grad_norm": 27.26534652709961, "learning_rate": 2.9984895998119723e-05, "loss": 0.8929, "step": 31 }, { "epoch": 0.2591093117408907, "grad_norm": 16.130844116210938, "learning_rate": 2.993961440992859e-05, "loss": 0.8517, "step": 32 }, { "epoch": 0.26720647773279355, "grad_norm": 18.54865074157715, "learning_rate": 2.9864246426519023e-05, "loss": 0.7742, "step": 33 }, { "epoch": 0.27530364372469635, "grad_norm": 12.706560134887695, "learning_rate": 2.9758943828979444e-05, "loss": 0.6831, "step": 34 }, { "epoch": 0.2834008097165992, "grad_norm": 12.227797508239746, "learning_rate": 2.9623918682727355e-05, "loss": 0.749, "step": 35 }, { "epoch": 0.291497975708502, "grad_norm": 17.94428062438965, "learning_rate": 2.9459442910437798e-05, "loss": 0.7489, "step": 36 }, { "epoch": 0.29959514170040485, "grad_norm": 12.291033744812012, "learning_rate": 2.9265847744427305e-05, "loss": 0.6317, "step": 37 }, { "epoch": 0.3076923076923077, "grad_norm": 23.2242431640625, "learning_rate": 2.904352305959606e-05, "loss": 0.9003, "step": 38 }, { "epoch": 0.3157894736842105, "grad_norm": 15.302254676818848, "learning_rate": 2.8792916588271762e-05, "loss": 0.6306, "step": 39 }, { "epoch": 0.32388663967611336, "grad_norm": 17.329072952270508, "learning_rate": 2.8514533018536286e-05, "loss": 0.8858, "step": 40 }, { "epoch": 0.3319838056680162, "grad_norm": 11.782532691955566, "learning_rate": 2.820893297785107e-05, "loss": 0.7977, "step": 41 }, { "epoch": 0.340080971659919, "grad_norm": 10.306597709655762, "learning_rate": 2.7876731904027994e-05, "loss": 0.6432, "step": 42 }, { "epoch": 0.3481781376518219, "grad_norm": 15.597854614257812, "learning_rate": 2.7518598805819542e-05, "loss": 0.6245, "step": 43 }, { "epoch": 0.3562753036437247, "grad_norm": 13.594710350036621, "learning_rate": 2.7135254915624213e-05, "loss": 0.7767, "step": 44 }, { "epoch": 0.3643724696356275, "grad_norm": 9.834576606750488, "learning_rate": 2.672747223702045e-05, "loss": 0.768, "step": 45 }, { "epoch": 0.3724696356275304, "grad_norm": 4.552821159362793, "learning_rate": 2.6296071990054167e-05, "loss": 0.6271, "step": 46 }, { "epoch": 0.3805668016194332, "grad_norm": 9.330282211303711, "learning_rate": 2.5841922957410875e-05, "loss": 0.7619, "step": 47 }, { "epoch": 0.38866396761133604, "grad_norm": 10.170756340026855, "learning_rate": 2.5365939734802973e-05, "loss": 0.7114, "step": 48 }, { "epoch": 0.3967611336032389, "grad_norm": 7.350127696990967, "learning_rate": 2.4869080889095693e-05, "loss": 0.7545, "step": 49 }, { "epoch": 0.4048582995951417, "grad_norm": 11.022176742553711, "learning_rate": 2.4352347027881003e-05, "loss": 0.7612, "step": 50 }, { "epoch": 0.4048582995951417, "eval_loss": 0.7244487404823303, "eval_runtime": 1.2651, "eval_samples_per_second": 41.102, "eval_steps_per_second": 10.275, "step": 50 } ], "logging_steps": 1, "max_steps": 100, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 50, 
"stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 5, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 966060303974400.0, "train_batch_size": 4, "trial_name": null, "trial_params": null }