{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.114864864864865,
  "eval_steps": 25,
  "global_step": 42,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07207207207207207,
      "grad_norm": 0.2711382210254669,
      "learning_rate": 5e-05,
      "loss": 10.3715,
      "step": 1
    },
    {
      "epoch": 0.07207207207207207,
      "eval_loss": NaN,
      "eval_runtime": 0.1178,
      "eval_samples_per_second": 424.357,
      "eval_steps_per_second": 110.333,
      "step": 1
    },
    {
      "epoch": 0.14414414414414414,
      "grad_norm": 0.2941465973854065,
      "learning_rate": 0.0001,
      "loss": 10.38,
      "step": 2
    },
    {
      "epoch": 0.21621621621621623,
      "grad_norm": 0.3280700147151947,
      "learning_rate": 9.986128001799077e-05,
      "loss": 10.3772,
      "step": 3
    },
    {
      "epoch": 0.2882882882882883,
      "grad_norm": 0.25955256819725037,
      "learning_rate": 9.94459753267812e-05,
      "loss": 10.3711,
      "step": 4
    },
    {
      "epoch": 0.36036036036036034,
      "grad_norm": 0.2933718264102936,
      "learning_rate": 9.875664641789545e-05,
      "loss": 10.3694,
      "step": 5
    },
    {
      "epoch": 0.43243243243243246,
      "grad_norm": 0.31816211342811584,
      "learning_rate": 9.779754323328192e-05,
      "loss": 10.3844,
      "step": 6
    },
    {
      "epoch": 0.5045045045045045,
      "grad_norm": 0.2863932251930237,
      "learning_rate": 9.657457896300791e-05,
      "loss": 10.3909,
      "step": 7
    },
    {
      "epoch": 0.5765765765765766,
      "grad_norm": 0.30337193608283997,
      "learning_rate": 9.509529358847655e-05,
      "loss": 10.3832,
      "step": 8
    },
    {
      "epoch": 0.6486486486486487,
      "grad_norm": 0.32591405510902405,
      "learning_rate": 9.336880739593416e-05,
      "loss": 10.3792,
      "step": 9
    },
    {
      "epoch": 0.7207207207207207,
      "grad_norm": 0.3342682719230652,
      "learning_rate": 9.140576474687264e-05,
      "loss": 10.3605,
      "step": 10
    },
    {
      "epoch": 0.7927927927927928,
      "grad_norm": 0.28455355763435364,
      "learning_rate": 8.921826845200139e-05,
      "loss": 10.3871,
      "step": 11
    },
    {
      "epoch": 0.8648648648648649,
      "grad_norm": 0.3578994870185852,
      "learning_rate": 8.681980515339464e-05,
      "loss": 10.3726,
      "step": 12
    },
    {
      "epoch": 0.9369369369369369,
      "grad_norm": 0.44790729880332947,
      "learning_rate": 8.422516217485826e-05,
      "loss": 10.3726,
      "step": 13
    },
    {
      "epoch": 1.0382882882882882,
      "grad_norm": 0.5053783059120178,
      "learning_rate": 8.14503363531613e-05,
      "loss": 15.9327,
      "step": 14
    },
    {
      "epoch": 1.1103603603603605,
      "grad_norm": 0.3347855806350708,
      "learning_rate": 7.85124354122177e-05,
      "loss": 9.7211,
      "step": 15
    },
    {
      "epoch": 1.1824324324324325,
      "grad_norm": 0.4577144384384155,
      "learning_rate": 7.542957248827961e-05,
      "loss": 12.3588,
      "step": 16
    },
    {
      "epoch": 1.2545045045045045,
      "grad_norm": 0.30119043588638306,
      "learning_rate": 7.222075445642904e-05,
      "loss": 8.5601,
      "step": 17
    },
    {
      "epoch": 1.3265765765765765,
      "grad_norm": 0.3898301124572754,
      "learning_rate": 6.890576474687263e-05,
      "loss": 10.5993,
      "step": 18
    },
    {
      "epoch": 1.3986486486486487,
      "grad_norm": 0.4105931222438812,
      "learning_rate": 6.550504137351576e-05,
      "loss": 10.4923,
      "step": 19
    },
    {
      "epoch": 1.4707207207207207,
      "grad_norm": 0.40458568930625916,
      "learning_rate": 6.203955092681039e-05,
      "loss": 9.1102,
      "step": 20
    },
    {
      "epoch": 1.5427927927927927,
      "grad_norm": 0.37626734375953674,
      "learning_rate": 5.8530659307753036e-05,
      "loss": 11.0968,
      "step": 21
    },
    {
      "epoch": 1.614864864864865,
      "grad_norm": 0.425431489944458,
      "learning_rate": 5.500000000000001e-05,
      "loss": 10.8335,
      "step": 22
    },
    {
      "epoch": 1.686936936936937,
      "grad_norm": 0.47822538018226624,
      "learning_rate": 5.1469340692246995e-05,
      "loss": 10.5662,
      "step": 23
    },
    {
      "epoch": 1.759009009009009,
      "grad_norm": 0.3679811656475067,
      "learning_rate": 4.7960449073189606e-05,
      "loss": 10.1607,
      "step": 24
    },
    {
      "epoch": 1.8310810810810811,
      "grad_norm": 0.4539155662059784,
      "learning_rate": 4.4494958626484276e-05,
      "loss": 10.2808,
      "step": 25
    },
    {
      "epoch": 1.8310810810810811,
      "eval_loss": NaN,
      "eval_runtime": 0.1008,
      "eval_samples_per_second": 495.946,
      "eval_steps_per_second": 128.946,
      "step": 25
    },
    {
      "epoch": 1.9031531531531531,
      "grad_norm": 0.6299550533294678,
      "learning_rate": 4.109423525312738e-05,
      "loss": 11.6034,
      "step": 26
    },
    {
      "epoch": 2.0045045045045047,
      "grad_norm": 0.6582178473472595,
      "learning_rate": 3.777924554357096e-05,
      "loss": 14.3429,
      "step": 27
    },
    {
      "epoch": 2.0765765765765765,
      "grad_norm": 0.4390489459037781,
      "learning_rate": 3.45704275117204e-05,
      "loss": 10.5127,
      "step": 28
    },
    {
      "epoch": 2.1486486486486487,
      "grad_norm": 0.5411266088485718,
      "learning_rate": 3.1487564587782306e-05,
      "loss": 10.7874,
      "step": 29
    },
    {
      "epoch": 2.220720720720721,
      "grad_norm": 0.5138953328132629,
      "learning_rate": 2.854966364683872e-05,
      "loss": 9.6413,
      "step": 30
    },
    {
      "epoch": 2.2927927927927927,
      "grad_norm": 0.45892640948295593,
      "learning_rate": 2.577483782514174e-05,
      "loss": 10.7246,
      "step": 31
    },
    {
      "epoch": 2.364864864864865,
      "grad_norm": 0.4862615466117859,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 10.6531,
      "step": 32
    },
    {
      "epoch": 2.436936936936937,
      "grad_norm": 0.5631672143936157,
      "learning_rate": 2.0781731547998614e-05,
      "loss": 10.1699,
      "step": 33
    },
    {
      "epoch": 2.509009009009009,
      "grad_norm": 0.4757976233959198,
      "learning_rate": 1.8594235253127375e-05,
      "loss": 9.9347,
      "step": 34
    },
    {
      "epoch": 2.581081081081081,
      "grad_norm": 0.5032361745834351,
      "learning_rate": 1.6631192604065855e-05,
      "loss": 10.5629,
      "step": 35
    },
    {
      "epoch": 2.653153153153153,
      "grad_norm": 0.514340877532959,
      "learning_rate": 1.490470641152345e-05,
      "loss": 10.7382,
      "step": 36
    },
    {
      "epoch": 2.725225225225225,
      "grad_norm": 0.501396656036377,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 10.0469,
      "step": 37
    },
    {
      "epoch": 2.7972972972972974,
      "grad_norm": 0.4644583463668823,
      "learning_rate": 1.2202456766718093e-05,
      "loss": 10.503,
      "step": 38
    },
    {
      "epoch": 2.8693693693693696,
      "grad_norm": 0.5667948126792908,
      "learning_rate": 1.1243353582104556e-05,
      "loss": 10.8346,
      "step": 39
    },
    {
      "epoch": 2.9414414414414414,
      "grad_norm": 0.5867165923118591,
      "learning_rate": 1.0554024673218807e-05,
      "loss": 10.6693,
      "step": 40
    },
    {
      "epoch": 3.0427927927927927,
      "grad_norm": 0.7453739047050476,
      "learning_rate": 1.0138719982009242e-05,
      "loss": 15.0657,
      "step": 41
    },
    {
      "epoch": 3.114864864864865,
      "grad_norm": 0.5105739235877991,
      "learning_rate": 1e-05,
      "loss": 10.1017,
      "step": 42
    }
  ],
  "logging_steps": 1,
  "max_steps": 42,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 35925170061312.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}