{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.7032967032967034, "eval_steps": 13, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.014065934065934066, "grad_norm": 0.37834057211875916, "learning_rate": 5e-05, "loss": 1.3066, "step": 1 }, { "epoch": 0.014065934065934066, "eval_loss": 1.3235194683074951, "eval_runtime": 41.1821, "eval_samples_per_second": 11.631, "eval_steps_per_second": 2.914, "step": 1 }, { "epoch": 0.028131868131868132, "grad_norm": 0.3664795160293579, "learning_rate": 0.0001, "loss": 1.3373, "step": 2 }, { "epoch": 0.0421978021978022, "grad_norm": 0.576968789100647, "learning_rate": 9.989294616193017e-05, "loss": 1.322, "step": 3 }, { "epoch": 0.056263736263736264, "grad_norm": 1.1225945949554443, "learning_rate": 9.957224306869053e-05, "loss": 1.2498, "step": 4 }, { "epoch": 0.07032967032967033, "grad_norm": 0.5157985687255859, "learning_rate": 9.903926402016153e-05, "loss": 1.1756, "step": 5 }, { "epoch": 0.0843956043956044, "grad_norm": 0.7155728936195374, "learning_rate": 9.829629131445342e-05, "loss": 1.1287, "step": 6 }, { "epoch": 0.09846153846153846, "grad_norm": 0.46939119696617126, "learning_rate": 9.73465064747553e-05, "loss": 1.0725, "step": 7 }, { "epoch": 0.11252747252747253, "grad_norm": 0.3746448755264282, "learning_rate": 9.619397662556435e-05, "loss": 1.009, "step": 8 }, { "epoch": 0.1265934065934066, "grad_norm": 0.3685707747936249, "learning_rate": 9.484363707663442e-05, "loss": 0.9849, "step": 9 }, { "epoch": 0.14065934065934066, "grad_norm": 0.3576546907424927, "learning_rate": 9.330127018922194e-05, "loss": 0.9647, "step": 10 }, { "epoch": 0.15472527472527473, "grad_norm": 0.2630157470703125, "learning_rate": 9.157348061512727e-05, "loss": 0.9024, "step": 11 }, { "epoch": 0.1687912087912088, "grad_norm": 0.22557401657104492, "learning_rate": 8.966766701456177e-05, "loss": 0.8968, "step": 12 }, { "epoch": 0.18285714285714286, "grad_norm": 0.2797858715057373, "learning_rate": 8.759199037394887e-05, "loss": 0.8649, "step": 13 }, { "epoch": 0.18285714285714286, "eval_loss": 0.8646656274795532, "eval_runtime": 41.2959, "eval_samples_per_second": 11.599, "eval_steps_per_second": 2.906, "step": 13 }, { "epoch": 0.19692307692307692, "grad_norm": 0.2747080326080322, "learning_rate": 8.535533905932738e-05, "loss": 0.8738, "step": 14 }, { "epoch": 0.210989010989011, "grad_norm": 0.2343476116657257, "learning_rate": 8.296729075500344e-05, "loss": 0.8745, "step": 15 }, { "epoch": 0.22505494505494505, "grad_norm": 0.2237587720155716, "learning_rate": 8.043807145043604e-05, "loss": 0.8282, "step": 16 }, { "epoch": 0.23912087912087912, "grad_norm": 0.19825835525989532, "learning_rate": 7.777851165098012e-05, "loss": 0.842, "step": 17 }, { "epoch": 0.2531868131868132, "grad_norm": 0.16660410165786743, "learning_rate": 7.500000000000001e-05, "loss": 0.8453, "step": 18 }, { "epoch": 0.2672527472527473, "grad_norm": 0.1577339917421341, "learning_rate": 7.211443451095007e-05, "loss": 0.844, "step": 19 }, { "epoch": 0.2813186813186813, "grad_norm": 0.1596086323261261, "learning_rate": 6.91341716182545e-05, "loss": 0.8101, "step": 20 }, { "epoch": 0.2953846153846154, "grad_norm": 0.1533115953207016, "learning_rate": 6.607197326515808e-05, "loss": 0.8229, "step": 21 }, { "epoch": 0.30945054945054945, "grad_norm": 0.1380361020565033, "learning_rate": 6.294095225512603e-05, "loss": 0.8142, "step": 22 }, { "epoch": 0.32351648351648354, "grad_norm": 
0.12135830521583557, "learning_rate": 5.9754516100806423e-05, "loss": 0.7935, "step": 23 }, { "epoch": 0.3375824175824176, "grad_norm": 0.12721355259418488, "learning_rate": 5.6526309611002594e-05, "loss": 0.8114, "step": 24 }, { "epoch": 0.3516483516483517, "grad_norm": 0.12719641625881195, "learning_rate": 5.327015646150716e-05, "loss": 0.8137, "step": 25 }, { "epoch": 0.3657142857142857, "grad_norm": 0.12431051582098007, "learning_rate": 5e-05, "loss": 0.8016, "step": 26 }, { "epoch": 0.3657142857142857, "eval_loss": 0.7906247973442078, "eval_runtime": 41.3118, "eval_samples_per_second": 11.595, "eval_steps_per_second": 2.905, "step": 26 }, { "epoch": 0.3797802197802198, "grad_norm": 0.1247800961136818, "learning_rate": 4.6729843538492847e-05, "loss": 0.7944, "step": 27 }, { "epoch": 0.39384615384615385, "grad_norm": 0.11753375828266144, "learning_rate": 4.347369038899744e-05, "loss": 0.7916, "step": 28 }, { "epoch": 0.40791208791208794, "grad_norm": 0.10586351901292801, "learning_rate": 4.0245483899193595e-05, "loss": 0.7824, "step": 29 }, { "epoch": 0.421978021978022, "grad_norm": 0.10135481506586075, "learning_rate": 3.705904774487396e-05, "loss": 0.7672, "step": 30 }, { "epoch": 0.43604395604395607, "grad_norm": 0.10301663726568222, "learning_rate": 3.392802673484193e-05, "loss": 0.7961, "step": 31 }, { "epoch": 0.4501098901098901, "grad_norm": 0.1041460931301117, "learning_rate": 3.086582838174551e-05, "loss": 0.7891, "step": 32 }, { "epoch": 0.4641758241758242, "grad_norm": 0.09919846802949905, "learning_rate": 2.7885565489049946e-05, "loss": 0.8119, "step": 33 }, { "epoch": 0.47824175824175824, "grad_norm": 0.09898246824741364, "learning_rate": 2.500000000000001e-05, "loss": 0.7787, "step": 34 }, { "epoch": 0.49230769230769234, "grad_norm": 0.10475801676511765, "learning_rate": 2.2221488349019903e-05, "loss": 0.7885, "step": 35 }, { "epoch": 0.5063736263736264, "grad_norm": 0.09680575132369995, "learning_rate": 1.9561928549563968e-05, "loss": 0.7795, "step": 36 }, { "epoch": 0.5204395604395604, "grad_norm": 0.10325780510902405, "learning_rate": 1.703270924499656e-05, "loss": 0.7897, "step": 37 }, { "epoch": 0.5345054945054946, "grad_norm": 0.09474309533834457, "learning_rate": 1.4644660940672627e-05, "loss": 0.7936, "step": 38 }, { "epoch": 0.5485714285714286, "grad_norm": 0.09674929827451706, "learning_rate": 1.2408009626051137e-05, "loss": 0.8068, "step": 39 }, { "epoch": 0.5485714285714286, "eval_loss": 0.7735550403594971, "eval_runtime": 41.3006, "eval_samples_per_second": 11.598, "eval_steps_per_second": 2.906, "step": 39 }, { "epoch": 0.5626373626373626, "grad_norm": 0.09625887870788574, "learning_rate": 1.0332332985438248e-05, "loss": 0.7666, "step": 40 }, { "epoch": 0.5767032967032967, "grad_norm": 0.09051655977964401, "learning_rate": 8.426519384872733e-06, "loss": 0.7939, "step": 41 }, { "epoch": 0.5907692307692308, "grad_norm": 0.09801479429006577, "learning_rate": 6.698729810778065e-06, "loss": 0.762, "step": 42 }, { "epoch": 0.6048351648351649, "grad_norm": 0.0950697809457779, "learning_rate": 5.156362923365588e-06, "loss": 0.8127, "step": 43 }, { "epoch": 0.6189010989010989, "grad_norm": 0.08845943212509155, "learning_rate": 3.8060233744356633e-06, "loss": 0.7691, "step": 44 }, { "epoch": 0.6329670329670329, "grad_norm": 0.09417203068733215, "learning_rate": 2.653493525244721e-06, "loss": 0.7847, "step": 45 }, { "epoch": 0.6470329670329671, "grad_norm": 0.09285993129014969, "learning_rate": 1.70370868554659e-06, "loss": 0.7798, "step": 46 }, { "epoch": 
0.6610989010989011, "grad_norm": 0.09379366785287857, "learning_rate": 9.607359798384785e-07, "loss": 0.7899, "step": 47 }, { "epoch": 0.6751648351648352, "grad_norm": 0.08996466547250748, "learning_rate": 4.277569313094809e-07, "loss": 0.8066, "step": 48 }, { "epoch": 0.6892307692307692, "grad_norm": 0.09068991243839264, "learning_rate": 1.0705383806982606e-07, "loss": 0.7763, "step": 49 }, { "epoch": 0.7032967032967034, "grad_norm": 0.089011549949646, "learning_rate": 0.0, "loss": 0.7753, "step": 50 } ], "logging_steps": 1, "max_steps": 50, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 13, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.0547690207182848e+18, "train_batch_size": 2, "trial_name": null, "trial_params": null }