{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.002531560116114224,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 3.3754134881522986e-05,
      "grad_norm": 4.89381217956543,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 1.8117,
      "step": 1
    },
    {
      "epoch": 3.3754134881522986e-05,
      "eval_loss": 1.4206814765930176,
      "eval_runtime": 2761.2734,
      "eval_samples_per_second": 9.035,
      "eval_steps_per_second": 4.518,
      "step": 1
    },
    {
      "epoch": 6.750826976304597e-05,
      "grad_norm": 3.2238664627075195,
      "learning_rate": 6.666666666666667e-05,
      "loss": 1.5532,
      "step": 2
    },
    {
      "epoch": 0.00010126240464456896,
      "grad_norm": 3.240180253982544,
      "learning_rate": 0.0001,
      "loss": 1.4341,
      "step": 3
    },
    {
      "epoch": 0.00013501653952609194,
      "grad_norm": 2.933100461959839,
      "learning_rate": 9.99524110790929e-05,
      "loss": 1.1723,
      "step": 4
    },
    {
      "epoch": 0.00016877067440761493,
      "grad_norm": 2.8057668209075928,
      "learning_rate": 9.980973490458728e-05,
      "loss": 1.012,
      "step": 5
    },
    {
      "epoch": 0.00020252480928913792,
      "grad_norm": 2.675827741622925,
      "learning_rate": 9.957224306869053e-05,
      "loss": 0.8673,
      "step": 6
    },
    {
      "epoch": 0.0002362789441706609,
      "grad_norm": 2.9692628383636475,
      "learning_rate": 9.924038765061042e-05,
      "loss": 0.743,
      "step": 7
    },
    {
      "epoch": 0.0002700330790521839,
      "grad_norm": 2.7837095260620117,
      "learning_rate": 9.881480035599667e-05,
      "loss": 0.704,
      "step": 8
    },
    {
      "epoch": 0.0003037872139337069,
      "grad_norm": 2.2439792156219482,
      "learning_rate": 9.829629131445342e-05,
      "loss": 0.6491,
      "step": 9
    },
    {
      "epoch": 0.00033754134881522986,
      "grad_norm": 1.8346399068832397,
      "learning_rate": 9.768584753741134e-05,
      "loss": 0.6559,
      "step": 10
    },
    {
      "epoch": 0.00037129548369675285,
      "grad_norm": 1.8229111433029175,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.6018,
      "step": 11
    },
    {
      "epoch": 0.00040504961857827583,
      "grad_norm": 2.420116901397705,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.666,
      "step": 12
    },
    {
      "epoch": 0.0004388037534597988,
      "grad_norm": 1.892528772354126,
      "learning_rate": 9.53153893518325e-05,
      "loss": 0.5869,
      "step": 13
    },
    {
      "epoch": 0.0004725578883413218,
      "grad_norm": 1.9345523118972778,
      "learning_rate": 9.435054165891109e-05,
      "loss": 0.6242,
      "step": 14
    },
    {
      "epoch": 0.0005063120232228448,
      "grad_norm": 1.8276655673980713,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.6091,
      "step": 15
    },
    {
      "epoch": 0.0005400661581043678,
      "grad_norm": 1.4461370706558228,
      "learning_rate": 9.21695722906443e-05,
      "loss": 0.5649,
      "step": 16
    },
    {
      "epoch": 0.0005738202929858908,
      "grad_norm": 2.1501457691192627,
      "learning_rate": 9.09576022144496e-05,
      "loss": 0.6251,
      "step": 17
    },
    {
      "epoch": 0.0006075744278674137,
      "grad_norm": 1.8506277799606323,
      "learning_rate": 8.966766701456177e-05,
      "loss": 0.6026,
      "step": 18
    },
    {
      "epoch": 0.0006413285627489368,
      "grad_norm": 1.8709357976913452,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.558,
      "step": 19
    },
    {
      "epoch": 0.0006750826976304597,
      "grad_norm": 1.5602527856826782,
      "learning_rate": 8.68638668405062e-05,
      "loss": 0.5937,
      "step": 20
    },
    {
      "epoch": 0.0007088368325119828,
      "grad_norm": 1.809094786643982,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.5577,
      "step": 21
    },
    {
      "epoch": 0.0007425909673935057,
      "grad_norm": 1.3502001762390137,
      "learning_rate": 8.377951038078302e-05,
      "loss": 0.5118,
      "step": 22
    },
    {
      "epoch": 0.0007763451022750287,
      "grad_norm": 1.3252819776535034,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.5418,
      "step": 23
    },
    {
      "epoch": 0.0008100992371565517,
      "grad_norm": 2.3276662826538086,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.5613,
      "step": 24
    },
    {
      "epoch": 0.0008438533720380747,
      "grad_norm": 1.6444724798202515,
      "learning_rate": 7.86788218175523e-05,
      "loss": 0.5491,
      "step": 25
    },
    {
      "epoch": 0.0008438533720380747,
      "eval_loss": 0.5483559370040894,
      "eval_runtime": 2777.4398,
      "eval_samples_per_second": 8.983,
      "eval_steps_per_second": 4.492,
      "step": 25
    },
    {
      "epoch": 0.0008776075069195976,
      "grad_norm": 1.2517900466918945,
      "learning_rate": 7.68649804173412e-05,
      "loss": 0.49,
      "step": 26
    },
    {
      "epoch": 0.0009113616418011207,
      "grad_norm": 1.1255788803100586,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.4652,
      "step": 27
    },
    {
      "epoch": 0.0009451157766826436,
      "grad_norm": 1.3350865840911865,
      "learning_rate": 7.308743066175172e-05,
      "loss": 0.5945,
      "step": 28
    },
    {
      "epoch": 0.0009788699115641666,
      "grad_norm": 1.255922555923462,
      "learning_rate": 7.113091308703498e-05,
      "loss": 0.4186,
      "step": 29
    },
    {
      "epoch": 0.0010126240464456897,
      "grad_norm": 1.1698178052902222,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.4548,
      "step": 30
    },
    {
      "epoch": 0.0010463781813272125,
      "grad_norm": 1.1624618768692017,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.5209,
      "step": 31
    },
    {
      "epoch": 0.0010801323162087356,
      "grad_norm": 1.9189029932022095,
      "learning_rate": 6.503528997521366e-05,
      "loss": 0.5466,
      "step": 32
    },
    {
      "epoch": 0.0011138864510902586,
      "grad_norm": 1.1308246850967407,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.445,
      "step": 33
    },
    {
      "epoch": 0.0011476405859717816,
      "grad_norm": 1.261982798576355,
      "learning_rate": 6.0821980696905146e-05,
      "loss": 0.4534,
      "step": 34
    },
    {
      "epoch": 0.0011813947208533045,
      "grad_norm": 1.2305161952972412,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.4823,
      "step": 35
    },
    {
      "epoch": 0.0012151488557348275,
      "grad_norm": 1.3532754182815552,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 0.4471,
      "step": 36
    },
    {
      "epoch": 0.0012489029906163505,
      "grad_norm": 1.5262309312820435,
      "learning_rate": 5.435778713738292e-05,
      "loss": 0.5651,
      "step": 37
    },
    {
      "epoch": 0.0012826571254978736,
      "grad_norm": 1.0819740295410156,
      "learning_rate": 5.218096936826681e-05,
      "loss": 0.5211,
      "step": 38
    },
    {
      "epoch": 0.0013164112603793964,
      "grad_norm": 1.3941339254379272,
      "learning_rate": 5e-05,
      "loss": 0.46,
      "step": 39
    },
    {
      "epoch": 0.0013501653952609194,
      "grad_norm": 1.6077882051467896,
      "learning_rate": 4.781903063173321e-05,
      "loss": 0.5456,
      "step": 40
    },
    {
      "epoch": 0.0013839195301424425,
      "grad_norm": 1.1812937259674072,
      "learning_rate": 4.564221286261709e-05,
      "loss": 0.4872,
      "step": 41
    },
    {
      "epoch": 0.0014176736650239655,
      "grad_norm": 1.304977536201477,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.4647,
      "step": 42
    },
    {
      "epoch": 0.0014514277999054883,
      "grad_norm": 1.7109375,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.5107,
      "step": 43
    },
    {
      "epoch": 0.0014851819347870114,
      "grad_norm": 1.3880994319915771,
      "learning_rate": 3.917801930309486e-05,
      "loss": 0.4708,
      "step": 44
    },
    {
      "epoch": 0.0015189360696685344,
      "grad_norm": 1.2330398559570312,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.4897,
      "step": 45
    },
    {
      "epoch": 0.0015526902045500575,
      "grad_norm": 1.0758552551269531,
      "learning_rate": 3.4964710024786354e-05,
      "loss": 0.4025,
      "step": 46
    },
    {
      "epoch": 0.0015864443394315803,
      "grad_norm": 1.2690551280975342,
      "learning_rate": 3.289899283371657e-05,
      "loss": 0.5129,
      "step": 47
    },
    {
      "epoch": 0.0016201984743131033,
      "grad_norm": 1.5134022235870361,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.3607,
      "step": 48
    },
    {
      "epoch": 0.0016539526091946264,
      "grad_norm": 1.6051034927368164,
      "learning_rate": 2.886908691296504e-05,
      "loss": 0.5501,
      "step": 49
    },
    {
      "epoch": 0.0016877067440761494,
      "grad_norm": 4.110562324523926,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 0.7889,
      "step": 50
    },
    {
      "epoch": 0.0016877067440761494,
      "eval_loss": 0.4801631271839142,
      "eval_runtime": 2778.0051,
      "eval_samples_per_second": 8.981,
      "eval_steps_per_second": 4.491,
      "step": 50
    },
    {
      "epoch": 0.0017214608789576722,
      "grad_norm": 2.1474967002868652,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.6069,
      "step": 51
    },
    {
      "epoch": 0.0017552150138391953,
      "grad_norm": 1.1795744895935059,
      "learning_rate": 2.3135019582658802e-05,
      "loss": 0.473,
      "step": 52
    },
    {
      "epoch": 0.0017889691487207183,
      "grad_norm": 1.3834023475646973,
      "learning_rate": 2.132117818244771e-05,
      "loss": 0.5656,
      "step": 53
    },
    {
      "epoch": 0.0018227232836022414,
      "grad_norm": 1.2573689222335815,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 0.5592,
      "step": 54
    },
    {
      "epoch": 0.0018564774184837642,
      "grad_norm": 1.0423628091812134,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 0.537,
      "step": 55
    },
    {
      "epoch": 0.0018902315533652872,
      "grad_norm": 1.135725736618042,
      "learning_rate": 1.622048961921699e-05,
      "loss": 0.4532,
      "step": 56
    },
    {
      "epoch": 0.0019239856882468103,
      "grad_norm": 1.1951916217803955,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.5232,
      "step": 57
    },
    {
      "epoch": 0.0019577398231283333,
      "grad_norm": 1.4549823999404907,
      "learning_rate": 1.3136133159493802e-05,
      "loss": 0.4819,
      "step": 58
    },
    {
      "epoch": 0.0019914939580098563,
      "grad_norm": 1.3570860624313354,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.4457,
      "step": 59
    },
    {
      "epoch": 0.0020252480928913794,
      "grad_norm": 1.134557843208313,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.4754,
      "step": 60
    },
    {
      "epoch": 0.002059002227772902,
      "grad_norm": 1.164859414100647,
      "learning_rate": 9.042397785550405e-06,
      "loss": 0.4898,
      "step": 61
    },
    {
      "epoch": 0.002092756362654425,
      "grad_norm": 1.095713496208191,
      "learning_rate": 7.830427709355725e-06,
      "loss": 0.5173,
      "step": 62
    },
    {
      "epoch": 0.002126510497535948,
      "grad_norm": 1.2956494092941284,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.4378,
      "step": 63
    },
    {
      "epoch": 0.002160264632417471,
      "grad_norm": 1.5956240892410278,
      "learning_rate": 5.649458341088915e-06,
      "loss": 0.4518,
      "step": 64
    },
    {
      "epoch": 0.002194018767298994,
      "grad_norm": 1.0486266613006592,
      "learning_rate": 4.684610648167503e-06,
      "loss": 0.4689,
      "step": 65
    },
    {
      "epoch": 0.002227772902180517,
      "grad_norm": 0.9784994721412659,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.4384,
      "step": 66
    },
    {
      "epoch": 0.0022615270370620402,
      "grad_norm": 1.2614351511001587,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.3726,
      "step": 67
    },
    {
      "epoch": 0.0022952811719435633,
      "grad_norm": 1.102088451385498,
      "learning_rate": 2.314152462588659e-06,
      "loss": 0.4358,
      "step": 68
    },
    {
      "epoch": 0.002329035306825086,
      "grad_norm": 1.3074065446853638,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.3848,
      "step": 69
    },
    {
      "epoch": 0.002362789441706609,
      "grad_norm": 1.1175683736801147,
      "learning_rate": 1.1851996440033319e-06,
      "loss": 0.4579,
      "step": 70
    },
    {
      "epoch": 0.002396543576588132,
      "grad_norm": 1.3784244060516357,
      "learning_rate": 7.596123493895991e-07,
      "loss": 0.5005,
      "step": 71
    },
    {
      "epoch": 0.002430297711469655,
      "grad_norm": 1.2491179704666138,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.4487,
      "step": 72
    },
    {
      "epoch": 0.002464051846351178,
      "grad_norm": 1.6952537298202515,
      "learning_rate": 1.9026509541272275e-07,
      "loss": 0.4381,
      "step": 73
    },
    {
      "epoch": 0.002497805981232701,
      "grad_norm": 1.4215257167816162,
      "learning_rate": 4.7588920907110094e-08,
      "loss": 0.4436,
      "step": 74
    },
    {
      "epoch": 0.002531560116114224,
      "grad_norm": 1.003847360610962,
      "learning_rate": 0.0,
      "loss": 0.4054,
      "step": 75
    },
    {
      "epoch": 0.002531560116114224,
      "eval_loss": 0.46333596110343933,
      "eval_runtime": 2779.4243,
      "eval_samples_per_second": 8.976,
      "eval_steps_per_second": 4.488,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.0150141079977984e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}