|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.06863417982155114,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0009151223976206818,
      "grad_norm": 53.509986877441406,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 3.9382,
      "step": 1
    },
    {
      "epoch": 0.0009151223976206818,
      "eval_loss": 5.209521770477295,
      "eval_runtime": 269.0963,
      "eval_samples_per_second": 3.423,
      "eval_steps_per_second": 1.713,
      "step": 1
    },
    {
      "epoch": 0.0018302447952413636,
      "grad_norm": 59.40241241455078,
      "learning_rate": 6.666666666666667e-05,
      "loss": 3.5577,
      "step": 2
    },
    {
      "epoch": 0.002745367192862045,
      "grad_norm": 34.23931884765625,
      "learning_rate": 0.0001,
      "loss": 1.6193,
      "step": 3
    },
    {
      "epoch": 0.0036604895904827272,
      "grad_norm": 22.23225975036621,
      "learning_rate": 9.99524110790929e-05,
      "loss": 0.8745,
      "step": 4
    },
    {
      "epoch": 0.004575611988103409,
      "grad_norm": 11.005436897277832,
      "learning_rate": 9.980973490458728e-05,
      "loss": 0.666,
      "step": 5
    },
    {
      "epoch": 0.00549073438572409,
      "grad_norm": 6.069633483886719,
      "learning_rate": 9.957224306869053e-05,
      "loss": 0.4651,
      "step": 6
    },
    {
      "epoch": 0.006405856783344772,
      "grad_norm": 6.48963737487793,
      "learning_rate": 9.924038765061042e-05,
      "loss": 0.4599,
      "step": 7
    },
    {
      "epoch": 0.0073209791809654545,
      "grad_norm": 5.265394687652588,
      "learning_rate": 9.881480035599667e-05,
      "loss": 0.5091,
      "step": 8
    },
    {
      "epoch": 0.008236101578586136,
      "grad_norm": 4.875738143920898,
      "learning_rate": 9.829629131445342e-05,
      "loss": 0.5463,
      "step": 9
    },
    {
      "epoch": 0.009151223976206819,
      "grad_norm": 4.376837253570557,
      "learning_rate": 9.768584753741134e-05,
      "loss": 0.5592,
      "step": 10
    },
    {
      "epoch": 0.010066346373827499,
      "grad_norm": 3.9858877658843994,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.4601,
      "step": 11
    },
    {
      "epoch": 0.01098146877144818,
      "grad_norm": 4.524520397186279,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.4564,
      "step": 12
    },
    {
      "epoch": 0.011896591169068863,
      "grad_norm": 4.058590888977051,
      "learning_rate": 9.53153893518325e-05,
      "loss": 0.3405,
      "step": 13
    },
    {
      "epoch": 0.012811713566689545,
      "grad_norm": 6.065700531005859,
      "learning_rate": 9.435054165891109e-05,
      "loss": 0.4246,
      "step": 14
    },
    {
      "epoch": 0.013726835964310227,
      "grad_norm": 3.748472213745117,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.2962,
      "step": 15
    },
    {
      "epoch": 0.014641958361930909,
      "grad_norm": 6.658496856689453,
      "learning_rate": 9.21695722906443e-05,
      "loss": 0.3704,
      "step": 16
    },
    {
      "epoch": 0.01555708075955159,
      "grad_norm": 5.924066066741943,
      "learning_rate": 9.09576022144496e-05,
      "loss": 0.4376,
      "step": 17
    },
    {
      "epoch": 0.016472203157172273,
      "grad_norm": 0.02715293876826763,
      "learning_rate": 8.966766701456177e-05,
      "loss": 0.0003,
      "step": 18
    },
    {
      "epoch": 0.017387325554792953,
      "grad_norm": 0.5976182818412781,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.02,
      "step": 19
    },
    {
      "epoch": 0.018302447952413637,
      "grad_norm": 0.17597460746765137,
      "learning_rate": 8.68638668405062e-05,
      "loss": 0.0018,
      "step": 20
    },
    {
      "epoch": 0.019217570350034317,
      "grad_norm": 4.291876792907715,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.1806,
      "step": 21
    },
    {
      "epoch": 0.020132692747654998,
      "grad_norm": 3.9051401615142822,
      "learning_rate": 8.377951038078302e-05,
      "loss": 0.157,
      "step": 22
    },
    {
      "epoch": 0.02104781514527568,
      "grad_norm": 2.4507651329040527,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.0579,
      "step": 23
    },
    {
      "epoch": 0.02196293754289636,
      "grad_norm": 2.8427188396453857,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.0496,
      "step": 24
    },
    {
      "epoch": 0.022878059940517045,
      "grad_norm": 1.8807505369186401,
      "learning_rate": 7.86788218175523e-05,
      "loss": 0.0158,
      "step": 25
    },
    {
      "epoch": 0.022878059940517045,
      "eval_loss": 0.29860904812812805,
      "eval_runtime": 271.9036,
      "eval_samples_per_second": 3.387,
      "eval_steps_per_second": 1.695,
      "step": 25
    },
    {
      "epoch": 0.023793182338137726,
      "grad_norm": 0.3018530309200287,
      "learning_rate": 7.68649804173412e-05,
      "loss": 0.0027,
      "step": 26
    },
    {
      "epoch": 0.024708304735758406,
      "grad_norm": 1.2422139644622803,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.0072,
      "step": 27
    },
    {
      "epoch": 0.02562342713337909,
      "grad_norm": 0.9640685319900513,
      "learning_rate": 7.308743066175172e-05,
      "loss": 0.009,
      "step": 28
    },
    {
      "epoch": 0.02653854953099977,
      "grad_norm": 4.360398769378662,
      "learning_rate": 7.113091308703498e-05,
      "loss": 0.1776,
      "step": 29
    },
    {
      "epoch": 0.027453671928620454,
      "grad_norm": 309.7273254394531,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.4983,
      "step": 30
    },
    {
      "epoch": 0.028368794326241134,
      "grad_norm": 3.7143096923828125,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.0492,
      "step": 31
    },
    {
      "epoch": 0.029283916723861818,
      "grad_norm": 0.2380424439907074,
      "learning_rate": 6.503528997521366e-05,
      "loss": 0.001,
      "step": 32
    },
    {
      "epoch": 0.030199039121482498,
      "grad_norm": 0.11934609711170197,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.0004,
      "step": 33
    },
    {
      "epoch": 0.03111416151910318,
      "grad_norm": 0.007320514414459467,
      "learning_rate": 6.0821980696905146e-05,
      "loss": 0.0,
      "step": 34
    },
    {
      "epoch": 0.03202928391672386,
      "grad_norm": 0.002803817857056856,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.0,
      "step": 35
    },
    {
      "epoch": 0.032944406314344546,
      "grad_norm": 0.0005631367093883455,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 0.0,
      "step": 36
    },
    {
      "epoch": 0.03385952871196522,
      "grad_norm": 0.02052667923271656,
      "learning_rate": 5.435778713738292e-05,
      "loss": 0.0001,
      "step": 37
    },
    {
      "epoch": 0.034774651109585906,
      "grad_norm": 0.0067352899350225925,
      "learning_rate": 5.218096936826681e-05,
      "loss": 0.0,
      "step": 38
    },
    {
      "epoch": 0.03568977350720659,
      "grad_norm": 0.0002736738242674619,
      "learning_rate": 5e-05,
      "loss": 0.0,
      "step": 39
    },
    {
      "epoch": 0.036604895904827274,
      "grad_norm": 0.00117769546341151,
      "learning_rate": 4.781903063173321e-05,
      "loss": 0.0,
      "step": 40
    },
    {
      "epoch": 0.03752001830244795,
      "grad_norm": 0.001372729311697185,
      "learning_rate": 4.564221286261709e-05,
      "loss": 0.0,
      "step": 41
    },
    {
      "epoch": 0.038435140700068635,
      "grad_norm": 0.00026712805265560746,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.0,
      "step": 42
    },
    {
      "epoch": 0.03935026309768932,
      "grad_norm": 0.00031787759508006275,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.0,
      "step": 43
    },
    {
      "epoch": 0.040265385495309995,
      "grad_norm": 0.000843716028612107,
      "learning_rate": 3.917801930309486e-05,
      "loss": 0.0,
      "step": 44
    },
    {
      "epoch": 0.04118050789293068,
      "grad_norm": 0.0002576544648036361,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.0,
      "step": 45
    },
    {
      "epoch": 0.04209563029055136,
      "grad_norm": 0.13201799988746643,
      "learning_rate": 3.4964710024786354e-05,
      "loss": 0.0004,
      "step": 46
    },
    {
      "epoch": 0.043010752688172046,
      "grad_norm": 0.0014015285996720195,
      "learning_rate": 3.289899283371657e-05,
      "loss": 0.0,
      "step": 47
    },
    {
      "epoch": 0.04392587508579272,
      "grad_norm": 0.0066895028576254845,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.0001,
      "step": 48
    },
    {
      "epoch": 0.04484099748341341,
      "grad_norm": 0.0023771661799401045,
      "learning_rate": 2.886908691296504e-05,
      "loss": 0.0,
      "step": 49
    },
    {
      "epoch": 0.04575611988103409,
      "grad_norm": 0.8348031640052795,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 0.0032,
      "step": 50
    },
    {
      "epoch": 0.04575611988103409,
      "eval_loss": 0.19106687605381012,
      "eval_runtime": 271.369,
      "eval_samples_per_second": 3.394,
      "eval_steps_per_second": 1.699,
      "step": 50
    },
    {
      "epoch": 0.04667124227865477,
      "grad_norm": 10.542644500732422,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.5443,
      "step": 51
    },
    {
      "epoch": 0.04758636467627545,
      "grad_norm": 5.551739692687988,
      "learning_rate": 2.3135019582658802e-05,
      "loss": 0.2044,
      "step": 52
    },
    {
      "epoch": 0.048501487073896135,
      "grad_norm": 7.962470054626465,
      "learning_rate": 2.132117818244771e-05,
      "loss": 0.3918,
      "step": 53
    },
    {
      "epoch": 0.04941660947151681,
      "grad_norm": 9.332234382629395,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 0.6089,
      "step": 54
    },
    {
      "epoch": 0.050331731869137496,
      "grad_norm": 7.265214443206787,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 0.4531,
      "step": 55
    },
    {
      "epoch": 0.05124685426675818,
      "grad_norm": 7.815890789031982,
      "learning_rate": 1.622048961921699e-05,
      "loss": 0.3726,
      "step": 56
    },
    {
      "epoch": 0.05216197666437886,
      "grad_norm": 7.1828765869140625,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.3523,
      "step": 57
    },
    {
      "epoch": 0.05307709906199954,
      "grad_norm": 8.059473037719727,
      "learning_rate": 1.3136133159493802e-05,
      "loss": 0.4862,
      "step": 58
    },
    {
      "epoch": 0.053992221459620224,
      "grad_norm": 6.242551326751709,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.4128,
      "step": 59
    },
    {
      "epoch": 0.05490734385724091,
      "grad_norm": 4.751944065093994,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.3559,
      "step": 60
    },
    {
      "epoch": 0.055822466254861584,
      "grad_norm": 3.4567954540252686,
      "learning_rate": 9.042397785550405e-06,
      "loss": 0.2912,
      "step": 61
    },
    {
      "epoch": 0.05673758865248227,
      "grad_norm": 5.8354692459106445,
      "learning_rate": 7.830427709355725e-06,
      "loss": 0.4136,
      "step": 62
    },
    {
      "epoch": 0.05765271105010295,
      "grad_norm": 5.292219638824463,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.3484,
      "step": 63
    },
    {
      "epoch": 0.058567833447723636,
      "grad_norm": 5.4359846115112305,
      "learning_rate": 5.649458341088915e-06,
      "loss": 0.4286,
      "step": 64
    },
    {
      "epoch": 0.05948295584534431,
      "grad_norm": 3.9820847511291504,
      "learning_rate": 4.684610648167503e-06,
      "loss": 0.2672,
      "step": 65
    },
    {
      "epoch": 0.060398078242964996,
      "grad_norm": 5.325780868530273,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.4854,
      "step": 66
    },
    {
      "epoch": 0.06131320064058568,
      "grad_norm": 2.7474725246429443,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.1493,
      "step": 67
    },
    {
      "epoch": 0.06222832303820636,
      "grad_norm": 0.031556036323308945,
      "learning_rate": 2.314152462588659e-06,
      "loss": 0.0002,
      "step": 68
    },
    {
      "epoch": 0.06314344543582705,
      "grad_norm": 1.6410260200500488,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.0273,
      "step": 69
    },
    {
      "epoch": 0.06405856783344772,
      "grad_norm": 0.6467976570129395,
      "learning_rate": 1.1851996440033319e-06,
      "loss": 0.0064,
      "step": 70
    },
    {
      "epoch": 0.0649736902310684,
      "grad_norm": 0.680183470249176,
      "learning_rate": 7.596123493895991e-07,
      "loss": 0.0023,
      "step": 71
    },
    {
      "epoch": 0.06588881262868909,
      "grad_norm": 3.648841619491577,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.0219,
      "step": 72
    },
    {
      "epoch": 0.06680393502630977,
      "grad_norm": 3.311021089553833,
      "learning_rate": 1.9026509541272275e-07,
      "loss": 0.0454,
      "step": 73
    },
    {
      "epoch": 0.06771905742393045,
      "grad_norm": 0.4128534495830536,
      "learning_rate": 4.7588920907110094e-08,
      "loss": 0.002,
      "step": 74
    },
    {
      "epoch": 0.06863417982155114,
      "grad_norm": 0.006412339396774769,
      "learning_rate": 0.0,
      "loss": 0.0001,
      "step": 75
    },
    {
      "epoch": 0.06863417982155114,
      "eval_loss": 0.11607871204614639,
      "eval_runtime": 271.5587,
      "eval_samples_per_second": 3.392,
      "eval_steps_per_second": 1.698,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.157714927419392e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|