|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.004297532500089532,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 5.730043333452709e-05,
      "grad_norm": 4.64981746673584,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 1.7731,
      "step": 1
    },
    {
      "epoch": 5.730043333452709e-05,
      "eval_loss": 0.5826753377914429,
      "eval_runtime": 1985.2029,
      "eval_samples_per_second": 7.403,
      "eval_steps_per_second": 3.702,
      "step": 1
    },
    {
      "epoch": 0.00011460086666905418,
      "grad_norm": 5.319647789001465,
      "learning_rate": 6.666666666666667e-05,
      "loss": 1.7419,
      "step": 2
    },
    {
      "epoch": 0.00017190130000358127,
      "grad_norm": 6.593022346496582,
      "learning_rate": 0.0001,
      "loss": 2.172,
      "step": 3
    },
    {
      "epoch": 0.00022920173333810837,
      "grad_norm": 5.512605667114258,
      "learning_rate": 9.99524110790929e-05,
      "loss": 2.0215,
      "step": 4
    },
    {
      "epoch": 0.00028650216667263544,
      "grad_norm": 4.8503241539001465,
      "learning_rate": 9.980973490458728e-05,
      "loss": 1.8746,
      "step": 5
    },
    {
      "epoch": 0.00034380260000716254,
      "grad_norm": 8.325742721557617,
      "learning_rate": 9.957224306869053e-05,
      "loss": 1.1677,
      "step": 6
    },
    {
      "epoch": 0.00040110303334168964,
      "grad_norm": 4.24470853805542,
      "learning_rate": 9.924038765061042e-05,
      "loss": 1.0449,
      "step": 7
    },
    {
      "epoch": 0.00045840346667621674,
      "grad_norm": 3.902250289916992,
      "learning_rate": 9.881480035599667e-05,
      "loss": 0.8211,
      "step": 8
    },
    {
      "epoch": 0.0005157039000107438,
      "grad_norm": 3.2968428134918213,
      "learning_rate": 9.829629131445342e-05,
      "loss": 0.9614,
      "step": 9
    },
    {
      "epoch": 0.0005730043333452709,
      "grad_norm": 3.920433282852173,
      "learning_rate": 9.768584753741134e-05,
      "loss": 0.8732,
      "step": 10
    },
    {
      "epoch": 0.000630304766679798,
      "grad_norm": 4.680196762084961,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.6169,
      "step": 11
    },
    {
      "epoch": 0.0006876052000143251,
      "grad_norm": 4.031309127807617,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.7551,
      "step": 12
    },
    {
      "epoch": 0.0007449056333488522,
      "grad_norm": 4.4254469871521,
      "learning_rate": 9.53153893518325e-05,
      "loss": 0.8059,
      "step": 13
    },
    {
      "epoch": 0.0008022060666833793,
      "grad_norm": 3.8273370265960693,
      "learning_rate": 9.435054165891109e-05,
      "loss": 0.7919,
      "step": 14
    },
    {
      "epoch": 0.0008595065000179064,
      "grad_norm": 3.6414921283721924,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.714,
      "step": 15
    },
    {
      "epoch": 0.0009168069333524335,
      "grad_norm": 4.078192234039307,
      "learning_rate": 9.21695722906443e-05,
      "loss": 0.8365,
      "step": 16
    },
    {
      "epoch": 0.0009741073666869605,
      "grad_norm": 4.631629943847656,
      "learning_rate": 9.09576022144496e-05,
      "loss": 0.8709,
      "step": 17
    },
    {
      "epoch": 0.0010314078000214877,
      "grad_norm": 3.5660946369171143,
      "learning_rate": 8.966766701456177e-05,
      "loss": 0.7732,
      "step": 18
    },
    {
      "epoch": 0.0010887082333560147,
      "grad_norm": 4.0769829750061035,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.8697,
      "step": 19
    },
    {
      "epoch": 0.0011460086666905418,
      "grad_norm": 3.174710273742676,
      "learning_rate": 8.68638668405062e-05,
      "loss": 0.6654,
      "step": 20
    },
    {
      "epoch": 0.001203309100025069,
      "grad_norm": 4.109514236450195,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.7679,
      "step": 21
    },
    {
      "epoch": 0.001260609533359596,
      "grad_norm": 3.642437696456909,
      "learning_rate": 8.377951038078302e-05,
      "loss": 0.734,
      "step": 22
    },
    {
      "epoch": 0.001317909966694123,
      "grad_norm": 4.460132598876953,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.951,
      "step": 23
    },
    {
      "epoch": 0.0013752104000286502,
      "grad_norm": 3.60257625579834,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.5561,
      "step": 24
    },
    {
      "epoch": 0.0014325108333631774,
      "grad_norm": 3.7138121128082275,
      "learning_rate": 7.86788218175523e-05,
      "loss": 0.7833,
      "step": 25
    },
    {
      "epoch": 0.0014325108333631774,
      "eval_loss": 0.1671205759048462,
      "eval_runtime": 1990.9333,
      "eval_samples_per_second": 7.382,
      "eval_steps_per_second": 3.691,
      "step": 25
    },
    {
      "epoch": 0.0014898112666977045,
      "grad_norm": 4.513851642608643,
      "learning_rate": 7.68649804173412e-05,
      "loss": 0.5433,
      "step": 26
    },
    {
      "epoch": 0.0015471117000322315,
      "grad_norm": 5.423880577087402,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.0937,
      "step": 27
    },
    {
      "epoch": 0.0016044121333667585,
      "grad_norm": 3.9187657833099365,
      "learning_rate": 7.308743066175172e-05,
      "loss": 0.8542,
      "step": 28
    },
    {
      "epoch": 0.0016617125667012856,
      "grad_norm": 3.9628372192382812,
      "learning_rate": 7.113091308703498e-05,
      "loss": 0.768,
      "step": 29
    },
    {
      "epoch": 0.0017190130000358129,
      "grad_norm": 5.155542850494385,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.8362,
      "step": 30
    },
    {
      "epoch": 0.00177631343337034,
      "grad_norm": 3.2812490463256836,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.7467,
      "step": 31
    },
    {
      "epoch": 0.001833613866704867,
      "grad_norm": 4.409944534301758,
      "learning_rate": 6.503528997521366e-05,
      "loss": 0.8352,
      "step": 32
    },
    {
      "epoch": 0.001890914300039394,
      "grad_norm": 4.593567371368408,
      "learning_rate": 6.294095225512603e-05,
      "loss": 1.0404,
      "step": 33
    },
    {
      "epoch": 0.001948214733373921,
      "grad_norm": 4.877197742462158,
      "learning_rate": 6.0821980696905146e-05,
      "loss": 1.1112,
      "step": 34
    },
    {
      "epoch": 0.002005515166708448,
      "grad_norm": 7.327693939208984,
      "learning_rate": 5.868240888334653e-05,
      "loss": 1.0144,
      "step": 35
    },
    {
      "epoch": 0.0020628156000429753,
      "grad_norm": 6.236719131469727,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 0.8903,
      "step": 36
    },
    {
      "epoch": 0.0021201160333775026,
      "grad_norm": 5.947874546051025,
      "learning_rate": 5.435778713738292e-05,
      "loss": 1.5181,
      "step": 37
    },
    {
      "epoch": 0.0021774164667120294,
      "grad_norm": 6.993929862976074,
      "learning_rate": 5.218096936826681e-05,
      "loss": 1.4933,
      "step": 38
    },
    {
      "epoch": 0.0022347169000465567,
      "grad_norm": 6.632877349853516,
      "learning_rate": 5e-05,
      "loss": 1.8576,
      "step": 39
    },
    {
      "epoch": 0.0022920173333810835,
      "grad_norm": 5.726492404937744,
      "learning_rate": 4.781903063173321e-05,
      "loss": 1.7028,
      "step": 40
    },
    {
      "epoch": 0.0023493177667156108,
      "grad_norm": 5.678798198699951,
      "learning_rate": 4.564221286261709e-05,
      "loss": 1.8054,
      "step": 41
    },
    {
      "epoch": 0.002406618200050138,
      "grad_norm": 6.203830242156982,
      "learning_rate": 4.347369038899744e-05,
      "loss": 1.858,
      "step": 42
    },
    {
      "epoch": 0.002463918633384665,
      "grad_norm": 12.835216522216797,
      "learning_rate": 4.131759111665349e-05,
      "loss": 2.839,
      "step": 43
    },
    {
      "epoch": 0.002521219066719192,
      "grad_norm": 6.748092174530029,
      "learning_rate": 3.917801930309486e-05,
      "loss": 1.9296,
      "step": 44
    },
    {
      "epoch": 0.002578519500053719,
      "grad_norm": 8.807928085327148,
      "learning_rate": 3.705904774487396e-05,
      "loss": 1.6976,
      "step": 45
    },
    {
      "epoch": 0.002635819933388246,
      "grad_norm": 7.636016845703125,
      "learning_rate": 3.4964710024786354e-05,
      "loss": 1.9868,
      "step": 46
    },
    {
      "epoch": 0.0026931203667227735,
      "grad_norm": 10.444618225097656,
      "learning_rate": 3.289899283371657e-05,
      "loss": 3.4253,
      "step": 47
    },
    {
      "epoch": 0.0027504208000573003,
      "grad_norm": 11.333175659179688,
      "learning_rate": 3.086582838174551e-05,
      "loss": 2.2715,
      "step": 48
    },
    {
      "epoch": 0.0028077212333918276,
      "grad_norm": 10.740086555480957,
      "learning_rate": 2.886908691296504e-05,
      "loss": 3.6215,
      "step": 49
    },
    {
      "epoch": 0.002865021666726355,
      "grad_norm": 14.465156555175781,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 6.0227,
      "step": 50
    },
    {
      "epoch": 0.002865021666726355,
      "eval_loss": 0.14761370420455933,
      "eval_runtime": 1992.1836,
      "eval_samples_per_second": 7.377,
      "eval_steps_per_second": 3.689,
      "step": 50
    },
    {
      "epoch": 0.0029223221000608817,
      "grad_norm": 2.3872838020324707,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.6123,
      "step": 51
    },
    {
      "epoch": 0.002979622533395409,
      "grad_norm": 2.11433482170105,
      "learning_rate": 2.3135019582658802e-05,
      "loss": 0.7429,
      "step": 52
    },
    {
      "epoch": 0.0030369229667299357,
      "grad_norm": 1.9935492277145386,
      "learning_rate": 2.132117818244771e-05,
      "loss": 0.4261,
      "step": 53
    },
    {
      "epoch": 0.003094223400064463,
      "grad_norm": 2.011484146118164,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 0.5963,
      "step": 54
    },
    {
      "epoch": 0.0031515238333989903,
      "grad_norm": 2.5177338123321533,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 0.4909,
      "step": 55
    },
    {
      "epoch": 0.003208824266733517,
      "grad_norm": 2.2302229404449463,
      "learning_rate": 1.622048961921699e-05,
      "loss": 0.5392,
      "step": 56
    },
    {
      "epoch": 0.0032661247000680444,
      "grad_norm": 3.0052223205566406,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.4753,
      "step": 57
    },
    {
      "epoch": 0.003323425133402571,
      "grad_norm": 2.423661947250366,
      "learning_rate": 1.3136133159493802e-05,
      "loss": 0.4055,
      "step": 58
    },
    {
      "epoch": 0.0033807255667370984,
      "grad_norm": 2.8571395874023438,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.7339,
      "step": 59
    },
    {
      "epoch": 0.0034380260000716257,
      "grad_norm": 3.235661268234253,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.7574,
      "step": 60
    },
    {
      "epoch": 0.0034953264334061525,
      "grad_norm": 2.264759063720703,
      "learning_rate": 9.042397785550405e-06,
      "loss": 0.5166,
      "step": 61
    },
    {
      "epoch": 0.00355262686674068,
      "grad_norm": 2.731968402862549,
      "learning_rate": 7.830427709355725e-06,
      "loss": 0.4679,
      "step": 62
    },
    {
      "epoch": 0.0036099273000752066,
      "grad_norm": 2.704561471939087,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.7767,
      "step": 63
    },
    {
      "epoch": 0.003667227733409734,
      "grad_norm": 1.7505345344543457,
      "learning_rate": 5.649458341088915e-06,
      "loss": 0.3995,
      "step": 64
    },
    {
      "epoch": 0.003724528166744261,
      "grad_norm": 2.4672224521636963,
      "learning_rate": 4.684610648167503e-06,
      "loss": 0.6333,
      "step": 65
    },
    {
      "epoch": 0.003781828600078788,
      "grad_norm": 3.6237540245056152,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.4109,
      "step": 66
    },
    {
      "epoch": 0.0038391290334133152,
      "grad_norm": 3.1202926635742188,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.8672,
      "step": 67
    },
    {
      "epoch": 0.003896429466747842,
      "grad_norm": 3.3872885704040527,
      "learning_rate": 2.314152462588659e-06,
      "loss": 0.753,
      "step": 68
    },
    {
      "epoch": 0.003953729900082369,
      "grad_norm": 3.216975212097168,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.6849,
      "step": 69
    },
    {
      "epoch": 0.004011030333416896,
      "grad_norm": 2.8667757511138916,
      "learning_rate": 1.1851996440033319e-06,
      "loss": 0.5677,
      "step": 70
    },
    {
      "epoch": 0.004068330766751424,
      "grad_norm": 4.095630645751953,
      "learning_rate": 7.596123493895991e-07,
      "loss": 0.645,
      "step": 71
    },
    {
      "epoch": 0.004125631200085951,
      "grad_norm": 3.189215898513794,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.6192,
      "step": 72
    },
    {
      "epoch": 0.0041829316334204775,
      "grad_norm": 2.886237144470215,
      "learning_rate": 1.9026509541272275e-07,
      "loss": 0.7971,
      "step": 73
    },
    {
      "epoch": 0.004240232066755005,
      "grad_norm": 2.6635372638702393,
      "learning_rate": 4.7588920907110094e-08,
      "loss": 0.6268,
      "step": 74
    },
    {
      "epoch": 0.004297532500089532,
      "grad_norm": 2.3732454776763916,
      "learning_rate": 0.0,
      "loss": 0.5992,
      "step": 75
    },
    {
      "epoch": 0.004297532500089532,
      "eval_loss": 0.14407220482826233,
      "eval_runtime": 1991.2821,
      "eval_samples_per_second": 7.381,
      "eval_steps_per_second": 3.691,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9.86082145468416e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|