{ "best_metric": null, "best_model_checkpoint": null, "epoch": 3.1422121896162527, "eval_steps": 25, "global_step": 42, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.07223476297968397, "grad_norm": 0.008988643996417522, "learning_rate": 5e-05, "loss": 11.9328, "step": 1 }, { "epoch": 0.07223476297968397, "eval_loss": null, "eval_runtime": 0.1775, "eval_samples_per_second": 281.628, "eval_steps_per_second": 73.223, "step": 1 }, { "epoch": 0.14446952595936793, "grad_norm": 0.009210865013301373, "learning_rate": 0.0001, "loss": 11.933, "step": 2 }, { "epoch": 0.21670428893905191, "grad_norm": 0.009502468630671501, "learning_rate": 9.986128001799077e-05, "loss": 11.9332, "step": 3 }, { "epoch": 0.28893905191873587, "grad_norm": 0.00922730565071106, "learning_rate": 9.94459753267812e-05, "loss": 11.9325, "step": 4 }, { "epoch": 0.3611738148984199, "grad_norm": 0.009089686907827854, "learning_rate": 9.875664641789545e-05, "loss": 11.9326, "step": 5 }, { "epoch": 0.43340857787810383, "grad_norm": 0.009873133152723312, "learning_rate": 9.779754323328192e-05, "loss": 11.933, "step": 6 }, { "epoch": 0.5056433408577878, "grad_norm": 0.011123728938400745, "learning_rate": 9.657457896300791e-05, "loss": 11.9327, "step": 7 }, { "epoch": 0.5778781038374717, "grad_norm": 0.012063764967024326, "learning_rate": 9.509529358847655e-05, "loss": 11.9327, "step": 8 }, { "epoch": 0.6501128668171557, "grad_norm": 0.011134549975395203, "learning_rate": 9.336880739593416e-05, "loss": 11.933, "step": 9 }, { "epoch": 0.7223476297968398, "grad_norm": 0.01320857834070921, "learning_rate": 9.140576474687264e-05, "loss": 11.9324, "step": 10 }, { "epoch": 0.7945823927765236, "grad_norm": 0.012652316130697727, "learning_rate": 8.921826845200139e-05, "loss": 11.9326, "step": 11 }, { "epoch": 0.8668171557562077, "grad_norm": 0.013287683948874474, "learning_rate": 8.681980515339464e-05, "loss": 11.9325, "step": 12 }, { 
"epoch": 0.9390519187358917, "grad_norm": 0.014345780946314335, "learning_rate": 8.422516217485826e-05, "loss": 11.9323, "step": 13 }, { "epoch": 1.0474040632054176, "grad_norm": 0.02747885324060917, "learning_rate": 8.14503363531613e-05, "loss": 20.5712, "step": 14 }, { "epoch": 1.1196388261851016, "grad_norm": 0.015461836010217667, "learning_rate": 7.85124354122177e-05, "loss": 11.7716, "step": 15 }, { "epoch": 1.1918735891647856, "grad_norm": 0.018558837473392487, "learning_rate": 7.542957248827961e-05, "loss": 13.575, "step": 16 }, { "epoch": 1.2641083521444696, "grad_norm": 0.015563758090138435, "learning_rate": 7.222075445642904e-05, "loss": 9.4655, "step": 17 }, { "epoch": 1.3363431151241536, "grad_norm": 0.01990383490920067, "learning_rate": 6.890576474687263e-05, "loss": 12.6392, "step": 18 }, { "epoch": 1.4085778781038374, "grad_norm": 0.021893231198191643, "learning_rate": 6.550504137351576e-05, "loss": 13.4199, "step": 19 }, { "epoch": 1.4808126410835214, "grad_norm": 0.011745378375053406, "learning_rate": 6.203955092681039e-05, "loss": 6.7549, "step": 20 }, { "epoch": 1.5530474040632054, "grad_norm": 0.026938999071717262, "learning_rate": 5.8530659307753036e-05, "loss": 15.6081, "step": 21 }, { "epoch": 1.6252821670428894, "grad_norm": 0.01988409459590912, "learning_rate": 5.500000000000001e-05, "loss": 11.9145, "step": 22 }, { "epoch": 1.6975169300225734, "grad_norm": 0.009685815311968327, "learning_rate": 5.1469340692246995e-05, "loss": 6.0489, "step": 23 }, { "epoch": 1.7697516930022572, "grad_norm": 0.030963227152824402, "learning_rate": 4.7960449073189606e-05, "loss": 17.7904, "step": 24 }, { "epoch": 1.8419864559819414, "grad_norm": 0.022139111533761024, "learning_rate": 4.4494958626484276e-05, "loss": 12.1609, "step": 25 }, { "epoch": 1.8419864559819414, "eval_loss": null, "eval_runtime": 0.1732, "eval_samples_per_second": 288.718, "eval_steps_per_second": 75.067, "step": 25 }, { "epoch": 1.9142212189616252, "grad_norm": 0.030451903119683266, 
"learning_rate": 4.109423525312738e-05, "loss": 15.3316, "step": 26 }, { "epoch": 2.0225733634311513, "grad_norm": 0.03348216786980629, "learning_rate": 3.777924554357096e-05, "loss": 16.8198, "step": 27 }, { "epoch": 2.094808126410835, "grad_norm": 0.02313702367246151, "learning_rate": 3.45704275117204e-05, "loss": 11.3532, "step": 28 }, { "epoch": 2.1670428893905194, "grad_norm": 0.02855922095477581, "learning_rate": 3.1487564587782306e-05, "loss": 13.9424, "step": 29 }, { "epoch": 2.239277652370203, "grad_norm": 0.013586718589067459, "learning_rate": 2.854966364683872e-05, "loss": 7.0922, "step": 30 }, { "epoch": 2.311512415349887, "grad_norm": 0.03476588800549507, "learning_rate": 2.577483782514174e-05, "loss": 14.6245, "step": 31 }, { "epoch": 2.383747178329571, "grad_norm": 0.0276983343064785, "learning_rate": 2.3180194846605367e-05, "loss": 12.0846, "step": 32 }, { "epoch": 2.455981941309255, "grad_norm": 0.017914237454533577, "learning_rate": 2.0781731547998614e-05, "loss": 7.768, "step": 33 }, { "epoch": 2.528216704288939, "grad_norm": 0.03817462921142578, "learning_rate": 1.8594235253127375e-05, "loss": 16.043, "step": 34 }, { "epoch": 2.600451467268623, "grad_norm": 0.02697724476456642, "learning_rate": 1.6631192604065855e-05, "loss": 11.9938, "step": 35 }, { "epoch": 2.672686230248307, "grad_norm": 0.01927335001528263, "learning_rate": 1.490470641152345e-05, "loss": 9.3809, "step": 36 }, { "epoch": 2.744920993227991, "grad_norm": 0.03292158618569374, "learning_rate": 1.3425421036992098e-05, "loss": 14.5154, "step": 37 }, { "epoch": 2.8171557562076748, "grad_norm": 0.03068559244275093, "learning_rate": 1.2202456766718093e-05, "loss": 12.0212, "step": 38 }, { "epoch": 2.889390519187359, "grad_norm": 0.03927991911768913, "learning_rate": 1.1243353582104556e-05, "loss": 16.2666, "step": 39 }, { "epoch": 2.961625282167043, "grad_norm": 0.032981183379888535, "learning_rate": 1.0554024673218807e-05, "loss": 14.3015, "step": 40 }, { "epoch": 3.069977426636569, 
"grad_norm": 0.02991604059934616, "learning_rate": 1.0138719982009242e-05, "loss": 12.4303, "step": 41 }, { "epoch": 3.1422121896162527, "grad_norm": 0.026301313191652298, "learning_rate": 1e-05, "loss": 11.9028, "step": 42 } ], "logging_steps": 1, "max_steps": 42, "num_input_tokens_seen": 0, "num_train_epochs": 4, "save_steps": 25, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 877486964736.0, "train_batch_size": 1, "trial_name": null, "trial_params": null }