{
  "best_metric": 2.345757484436035,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.31583103039873667,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006316620607974733,
      "grad_norm": 1.0546115636825562,
      "learning_rate": 5e-05,
      "loss": 3.1575,
      "step": 1
    },
    {
      "epoch": 0.006316620607974733,
      "eval_loss": 4.693485736846924,
      "eval_runtime": 12.1496,
      "eval_samples_per_second": 87.822,
      "eval_steps_per_second": 11.029,
      "step": 1
    },
    {
      "epoch": 0.012633241215949467,
      "grad_norm": 1.034112811088562,
      "learning_rate": 0.0001,
      "loss": 3.4964,
      "step": 2
    },
    {
      "epoch": 0.0189498618239242,
      "grad_norm": 1.0966734886169434,
      "learning_rate": 9.989294616193017e-05,
      "loss": 3.4648,
      "step": 3
    },
    {
      "epoch": 0.025266482431898933,
      "grad_norm": 1.093819260597229,
      "learning_rate": 9.957224306869053e-05,
      "loss": 3.257,
      "step": 4
    },
    {
      "epoch": 0.03158310303987367,
      "grad_norm": 1.0864986181259155,
      "learning_rate": 9.903926402016153e-05,
      "loss": 3.4582,
      "step": 5
    },
    {
      "epoch": 0.0378997236478484,
      "grad_norm": 1.3955763578414917,
      "learning_rate": 9.829629131445342e-05,
      "loss": 3.3022,
      "step": 6
    },
    {
      "epoch": 0.04421634425582313,
      "grad_norm": 1.4181126356124878,
      "learning_rate": 9.73465064747553e-05,
      "loss": 3.5895,
      "step": 7
    },
    {
      "epoch": 0.050532964863797866,
      "grad_norm": 1.4454867839813232,
      "learning_rate": 9.619397662556435e-05,
      "loss": 3.3887,
      "step": 8
    },
    {
      "epoch": 0.0568495854717726,
      "grad_norm": 1.712357759475708,
      "learning_rate": 9.484363707663442e-05,
      "loss": 3.6758,
      "step": 9
    },
    {
      "epoch": 0.06316620607974734,
      "grad_norm": 1.7403721809387207,
      "learning_rate": 9.330127018922194e-05,
      "loss": 3.4681,
      "step": 10
    },
    {
      "epoch": 0.06948282668772207,
      "grad_norm": 1.7750122547149658,
      "learning_rate": 9.157348061512727e-05,
      "loss": 3.5671,
      "step": 11
    },
    {
      "epoch": 0.0757994472956968,
      "grad_norm": 1.7761714458465576,
      "learning_rate": 8.966766701456177e-05,
      "loss": 3.612,
      "step": 12
    },
    {
      "epoch": 0.08211606790367154,
      "grad_norm": 1.295621633529663,
      "learning_rate": 8.759199037394887e-05,
      "loss": 2.9946,
      "step": 13
    },
    {
      "epoch": 0.08843268851164626,
      "grad_norm": 1.2954901456832886,
      "learning_rate": 8.535533905932738e-05,
      "loss": 2.5626,
      "step": 14
    },
    {
      "epoch": 0.094749309119621,
      "grad_norm": 1.24658203125,
      "learning_rate": 8.296729075500344e-05,
      "loss": 2.456,
      "step": 15
    },
    {
      "epoch": 0.10106592972759573,
      "grad_norm": 1.1547346115112305,
      "learning_rate": 8.043807145043604e-05,
      "loss": 2.6964,
      "step": 16
    },
    {
      "epoch": 0.10738255033557047,
      "grad_norm": 0.9776915311813354,
      "learning_rate": 7.777851165098012e-05,
      "loss": 2.5982,
      "step": 17
    },
    {
      "epoch": 0.1136991709435452,
      "grad_norm": 0.916656494140625,
      "learning_rate": 7.500000000000001e-05,
      "loss": 2.7325,
      "step": 18
    },
    {
      "epoch": 0.12001579155151994,
      "grad_norm": 1.0522651672363281,
      "learning_rate": 7.211443451095007e-05,
      "loss": 2.449,
      "step": 19
    },
    {
      "epoch": 0.12633241215949467,
      "grad_norm": 1.15105402469635,
      "learning_rate": 6.91341716182545e-05,
      "loss": 2.6179,
      "step": 20
    },
    {
      "epoch": 0.1326490327674694,
      "grad_norm": 1.0923116207122803,
      "learning_rate": 6.607197326515808e-05,
      "loss": 2.4007,
      "step": 21
    },
    {
      "epoch": 0.13896565337544414,
      "grad_norm": 1.48521089553833,
      "learning_rate": 6.294095225512603e-05,
      "loss": 2.6068,
      "step": 22
    },
    {
      "epoch": 0.14528227398341886,
      "grad_norm": 1.5622447729110718,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 3.1056,
      "step": 23
    },
    {
      "epoch": 0.1515988945913936,
      "grad_norm": 1.3226696252822876,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 2.8389,
      "step": 24
    },
    {
      "epoch": 0.15791551519936833,
      "grad_norm": 1.5947855710983276,
      "learning_rate": 5.327015646150716e-05,
      "loss": 3.1764,
      "step": 25
    },
    {
      "epoch": 0.15791551519936833,
      "eval_loss": 2.517580032348633,
      "eval_runtime": 12.3821,
      "eval_samples_per_second": 86.172,
      "eval_steps_per_second": 10.822,
      "step": 25
    },
    {
      "epoch": 0.16423213580734308,
      "grad_norm": 0.7472804188728333,
      "learning_rate": 5e-05,
      "loss": 2.1701,
      "step": 26
    },
    {
      "epoch": 0.1705487564153178,
      "grad_norm": 0.7590801119804382,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 2.3226,
      "step": 27
    },
    {
      "epoch": 0.17686537702329252,
      "grad_norm": 0.8624691367149353,
      "learning_rate": 4.347369038899744e-05,
      "loss": 2.1347,
      "step": 28
    },
    {
      "epoch": 0.18318199763126727,
      "grad_norm": 0.7706214189529419,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 2.2104,
      "step": 29
    },
    {
      "epoch": 0.189498618239242,
      "grad_norm": 0.8338087201118469,
      "learning_rate": 3.705904774487396e-05,
      "loss": 2.4114,
      "step": 30
    },
    {
      "epoch": 0.19581523884721674,
      "grad_norm": 0.9004456996917725,
      "learning_rate": 3.392802673484193e-05,
      "loss": 2.487,
      "step": 31
    },
    {
      "epoch": 0.20213185945519146,
      "grad_norm": 0.9136253595352173,
      "learning_rate": 3.086582838174551e-05,
      "loss": 2.4893,
      "step": 32
    },
    {
      "epoch": 0.2084484800631662,
      "grad_norm": 0.8845799565315247,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 2.2766,
      "step": 33
    },
    {
      "epoch": 0.21476510067114093,
      "grad_norm": 1.1320760250091553,
      "learning_rate": 2.500000000000001e-05,
      "loss": 2.6643,
      "step": 34
    },
    {
      "epoch": 0.22108172127911568,
      "grad_norm": 1.2713851928710938,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 2.7563,
      "step": 35
    },
    {
      "epoch": 0.2273983418870904,
      "grad_norm": 0.9692124128341675,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 2.7175,
      "step": 36
    },
    {
      "epoch": 0.23371496249506515,
      "grad_norm": 1.1749975681304932,
      "learning_rate": 1.703270924499656e-05,
      "loss": 2.5021,
      "step": 37
    },
    {
      "epoch": 0.24003158310303987,
      "grad_norm": 0.8900548219680786,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 2.3335,
      "step": 38
    },
    {
      "epoch": 0.2463482037110146,
      "grad_norm": 0.689755916595459,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 2.1621,
      "step": 39
    },
    {
      "epoch": 0.25266482431898935,
      "grad_norm": 0.7295820713043213,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 2.0389,
      "step": 40
    },
    {
      "epoch": 0.25898144492696407,
      "grad_norm": 0.7669286131858826,
      "learning_rate": 8.426519384872733e-06,
      "loss": 2.2391,
      "step": 41
    },
    {
      "epoch": 0.2652980655349388,
      "grad_norm": 0.7550666332244873,
      "learning_rate": 6.698729810778065e-06,
      "loss": 2.0155,
      "step": 42
    },
    {
      "epoch": 0.27161468614291356,
      "grad_norm": 0.7518640160560608,
      "learning_rate": 5.156362923365588e-06,
      "loss": 2.2435,
      "step": 43
    },
    {
      "epoch": 0.2779313067508883,
      "grad_norm": 0.8904653191566467,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 2.4775,
      "step": 44
    },
    {
      "epoch": 0.284247927358863,
      "grad_norm": 0.891883134841919,
      "learning_rate": 2.653493525244721e-06,
      "loss": 2.4781,
      "step": 45
    },
    {
      "epoch": 0.2905645479668377,
      "grad_norm": 0.884515106678009,
      "learning_rate": 1.70370868554659e-06,
      "loss": 2.2253,
      "step": 46
    },
    {
      "epoch": 0.29688116857481245,
      "grad_norm": 1.0977036952972412,
      "learning_rate": 9.607359798384785e-07,
      "loss": 2.4993,
      "step": 47
    },
    {
      "epoch": 0.3031977891827872,
      "grad_norm": 1.0684934854507446,
      "learning_rate": 4.277569313094809e-07,
      "loss": 2.4539,
      "step": 48
    },
    {
      "epoch": 0.30951440979076195,
      "grad_norm": 1.174559235572815,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 2.9581,
      "step": 49
    },
    {
      "epoch": 0.31583103039873667,
      "grad_norm": 1.3068698644638062,
      "learning_rate": 0.0,
      "loss": 2.9451,
      "step": 50
    },
    {
      "epoch": 0.31583103039873667,
      "eval_loss": 2.345757484436035,
      "eval_runtime": 12.1674,
      "eval_samples_per_second": 87.693,
      "eval_steps_per_second": 11.013,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.059536353886208e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}