{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.111358574610245,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0014847809948032665,
      "grad_norm": 1.4331752061843872,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.2034,
      "step": 1
    },
    {
      "epoch": 0.0014847809948032665,
      "eval_loss": 2.588484525680542,
      "eval_runtime": 141.9969,
      "eval_samples_per_second": 4.0,
      "eval_steps_per_second": 2.0,
      "step": 1
    },
    {
      "epoch": 0.002969561989606533,
      "grad_norm": 4.0620622634887695,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.3831,
      "step": 2
    },
    {
      "epoch": 0.004454342984409799,
      "grad_norm": 2.3476243019104004,
      "learning_rate": 0.0001,
      "loss": 0.4242,
      "step": 3
    },
    {
      "epoch": 0.005939123979213066,
      "grad_norm": 2.4534640312194824,
      "learning_rate": 9.99524110790929e-05,
      "loss": 0.2907,
      "step": 4
    },
    {
      "epoch": 0.007423904974016332,
      "grad_norm": 1.7481118440628052,
      "learning_rate": 9.980973490458728e-05,
      "loss": 0.1797,
      "step": 5
    },
    {
      "epoch": 0.008908685968819599,
      "grad_norm": 1.0210967063903809,
      "learning_rate": 9.957224306869053e-05,
      "loss": 0.1305,
      "step": 6
    },
    {
      "epoch": 0.010393466963622866,
      "grad_norm": 0.7026821970939636,
      "learning_rate": 9.924038765061042e-05,
      "loss": 0.0784,
      "step": 7
    },
    {
      "epoch": 0.011878247958426132,
      "grad_norm": 0.8235223293304443,
      "learning_rate": 9.881480035599667e-05,
      "loss": 0.1005,
      "step": 8
    },
    {
      "epoch": 0.013363028953229399,
      "grad_norm": 0.6765594482421875,
      "learning_rate": 9.829629131445342e-05,
      "loss": 0.0628,
      "step": 9
    },
    {
      "epoch": 0.014847809948032665,
      "grad_norm": 1.5128905773162842,
      "learning_rate": 9.768584753741134e-05,
      "loss": 0.0417,
      "step": 10
    },
    {
      "epoch": 0.016332590942835932,
      "grad_norm": 2.1222732067108154,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.1114,
      "step": 11
    },
    {
      "epoch": 0.017817371937639197,
      "grad_norm": 0.4752410650253296,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.0758,
      "step": 12
    },
    {
      "epoch": 0.019302152932442463,
      "grad_norm": 1.4983466863632202,
      "learning_rate": 9.53153893518325e-05,
      "loss": 0.0888,
      "step": 13
    },
    {
      "epoch": 0.020786933927245732,
      "grad_norm": 0.5699716806411743,
      "learning_rate": 9.435054165891109e-05,
      "loss": 0.0643,
      "step": 14
    },
    {
      "epoch": 0.022271714922048998,
      "grad_norm": 0.8198930025100708,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.0654,
      "step": 15
    },
    {
      "epoch": 0.023756495916852263,
      "grad_norm": 0.9171881675720215,
      "learning_rate": 9.21695722906443e-05,
      "loss": 0.054,
      "step": 16
    },
    {
      "epoch": 0.025241276911655532,
      "grad_norm": 0.6817424297332764,
      "learning_rate": 9.09576022144496e-05,
      "loss": 0.0594,
      "step": 17
    },
    {
      "epoch": 0.026726057906458798,
      "grad_norm": 0.9507274627685547,
      "learning_rate": 8.966766701456177e-05,
      "loss": 0.0351,
      "step": 18
    },
    {
      "epoch": 0.028210838901262063,
      "grad_norm": 0.40875244140625,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.0301,
      "step": 19
    },
    {
      "epoch": 0.02969561989606533,
      "grad_norm": 0.6699226498603821,
      "learning_rate": 8.68638668405062e-05,
      "loss": 0.076,
      "step": 20
    },
    {
      "epoch": 0.031180400890868598,
      "grad_norm": 0.41454505920410156,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.0228,
      "step": 21
    },
    {
      "epoch": 0.032665181885671864,
      "grad_norm": 0.40090444684028625,
      "learning_rate": 8.377951038078302e-05,
      "loss": 0.0329,
      "step": 22
    },
    {
      "epoch": 0.03414996288047513,
      "grad_norm": 0.3448033034801483,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.0547,
      "step": 23
    },
    {
      "epoch": 0.035634743875278395,
      "grad_norm": 0.5039708614349365,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.0548,
      "step": 24
    },
    {
      "epoch": 0.03711952487008166,
      "grad_norm": 0.28690701723098755,
      "learning_rate": 7.86788218175523e-05,
      "loss": 0.0293,
      "step": 25
    },
    {
      "epoch": 0.03711952487008166,
      "eval_loss": 0.10564589500427246,
      "eval_runtime": 143.498,
      "eval_samples_per_second": 3.958,
      "eval_steps_per_second": 1.979,
      "step": 25
    },
    {
      "epoch": 0.038604305864884926,
      "grad_norm": 0.3717527985572815,
      "learning_rate": 7.68649804173412e-05,
      "loss": 0.0425,
      "step": 26
    },
    {
      "epoch": 0.0400890868596882,
      "grad_norm": 0.81565260887146,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.0945,
      "step": 27
    },
    {
      "epoch": 0.041573867854491464,
      "grad_norm": 0.2406810075044632,
      "learning_rate": 7.308743066175172e-05,
      "loss": 0.0164,
      "step": 28
    },
    {
      "epoch": 0.04305864884929473,
      "grad_norm": 1.2528947591781616,
      "learning_rate": 7.113091308703498e-05,
      "loss": 0.0452,
      "step": 29
    },
    {
      "epoch": 0.044543429844097995,
      "grad_norm": 0.3788902163505554,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.031,
      "step": 30
    },
    {
      "epoch": 0.04602821083890126,
      "grad_norm": 0.4253796339035034,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.0581,
      "step": 31
    },
    {
      "epoch": 0.047512991833704527,
      "grad_norm": 0.251658171415329,
      "learning_rate": 6.503528997521366e-05,
      "loss": 0.012,
      "step": 32
    },
    {
      "epoch": 0.04899777282850779,
      "grad_norm": 0.9965800046920776,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.054,
      "step": 33
    },
    {
      "epoch": 0.050482553823311065,
      "grad_norm": 0.495941698551178,
      "learning_rate": 6.0821980696905146e-05,
      "loss": 0.0201,
      "step": 34
    },
    {
      "epoch": 0.05196733481811433,
      "grad_norm": 0.4241041839122772,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.0288,
      "step": 35
    },
    {
      "epoch": 0.053452115812917596,
      "grad_norm": 0.42201074957847595,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 0.0273,
      "step": 36
    },
    {
      "epoch": 0.05493689680772086,
      "grad_norm": 0.8984993100166321,
      "learning_rate": 5.435778713738292e-05,
      "loss": 0.0647,
      "step": 37
    },
    {
      "epoch": 0.05642167780252413,
      "grad_norm": 0.6671286821365356,
      "learning_rate": 5.218096936826681e-05,
      "loss": 0.0145,
      "step": 38
    },
    {
      "epoch": 0.05790645879732739,
      "grad_norm": 0.07203401625156403,
      "learning_rate": 5e-05,
      "loss": 0.0018,
      "step": 39
    },
    {
      "epoch": 0.05939123979213066,
      "grad_norm": 0.045727696269750595,
      "learning_rate": 4.781903063173321e-05,
      "loss": 0.0012,
      "step": 40
    },
    {
      "epoch": 0.06087602078693393,
      "grad_norm": 0.19696299731731415,
      "learning_rate": 4.564221286261709e-05,
      "loss": 0.0032,
      "step": 41
    },
    {
      "epoch": 0.062360801781737196,
      "grad_norm": 0.3413621485233307,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.0059,
      "step": 42
    },
    {
      "epoch": 0.06384558277654045,
      "grad_norm": 0.09776332229375839,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.0018,
      "step": 43
    },
    {
      "epoch": 0.06533036377134373,
      "grad_norm": 0.0692993700504303,
      "learning_rate": 3.917801930309486e-05,
      "loss": 0.001,
      "step": 44
    },
    {
      "epoch": 0.066815144766147,
      "grad_norm": 0.013437100686132908,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.0004,
      "step": 45
    },
    {
      "epoch": 0.06829992576095026,
      "grad_norm": 0.9036085605621338,
      "learning_rate": 3.4964710024786354e-05,
      "loss": 0.0024,
      "step": 46
    },
    {
      "epoch": 0.06978470675575353,
      "grad_norm": 14.660072326660156,
      "learning_rate": 3.289899283371657e-05,
      "loss": 0.4013,
      "step": 47
    },
    {
      "epoch": 0.07126948775055679,
      "grad_norm": 2.000504493713379,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.0157,
      "step": 48
    },
    {
      "epoch": 0.07275426874536006,
      "grad_norm": 0.019345076754689217,
      "learning_rate": 2.886908691296504e-05,
      "loss": 0.0004,
      "step": 49
    },
    {
      "epoch": 0.07423904974016332,
      "grad_norm": 3.370844841003418,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 0.2163,
      "step": 50
    },
    {
      "epoch": 0.07423904974016332,
      "eval_loss": 0.11560910195112228,
      "eval_runtime": 143.5027,
      "eval_samples_per_second": 3.958,
      "eval_steps_per_second": 1.979,
      "step": 50
    },
    {
      "epoch": 0.0757238307349666,
      "grad_norm": 2.123660087585449,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.1803,
      "step": 51
    },
    {
      "epoch": 0.07720861172976985,
      "grad_norm": 2.3457491397857666,
      "learning_rate": 2.3135019582658802e-05,
      "loss": 0.1052,
      "step": 52
    },
    {
      "epoch": 0.07869339272457312,
      "grad_norm": 0.9634860157966614,
      "learning_rate": 2.132117818244771e-05,
      "loss": 0.1048,
      "step": 53
    },
    {
      "epoch": 0.0801781737193764,
      "grad_norm": 1.2893617153167725,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 0.1053,
      "step": 54
    },
    {
      "epoch": 0.08166295471417966,
      "grad_norm": 1.0800879001617432,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 0.1001,
      "step": 55
    },
    {
      "epoch": 0.08314773570898293,
      "grad_norm": 1.4931827783584595,
      "learning_rate": 1.622048961921699e-05,
      "loss": 0.0806,
      "step": 56
    },
    {
      "epoch": 0.08463251670378619,
      "grad_norm": 3.529783248901367,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.1045,
      "step": 57
    },
    {
      "epoch": 0.08611729769858946,
      "grad_norm": 2.6875853538513184,
      "learning_rate": 1.3136133159493802e-05,
      "loss": 0.0877,
      "step": 58
    },
    {
      "epoch": 0.08760207869339272,
      "grad_norm": 1.4070323705673218,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.0469,
      "step": 59
    },
    {
      "epoch": 0.08908685968819599,
      "grad_norm": 0.904192328453064,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.037,
      "step": 60
    },
    {
      "epoch": 0.09057164068299926,
      "grad_norm": 1.2252767086029053,
      "learning_rate": 9.042397785550405e-06,
      "loss": 0.0616,
      "step": 61
    },
    {
      "epoch": 0.09205642167780252,
      "grad_norm": 0.8113102912902832,
      "learning_rate": 7.830427709355725e-06,
      "loss": 0.0507,
      "step": 62
    },
    {
      "epoch": 0.0935412026726058,
      "grad_norm": 0.6455811262130737,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.0617,
      "step": 63
    },
    {
      "epoch": 0.09502598366740905,
      "grad_norm": 0.48664721846580505,
      "learning_rate": 5.649458341088915e-06,
      "loss": 0.0352,
      "step": 64
    },
    {
      "epoch": 0.09651076466221233,
      "grad_norm": 3.366154909133911,
      "learning_rate": 4.684610648167503e-06,
      "loss": 0.0314,
      "step": 65
    },
    {
      "epoch": 0.09799554565701558,
      "grad_norm": 1.3770588636398315,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.0382,
      "step": 66
    },
    {
      "epoch": 0.09948032665181886,
      "grad_norm": 0.6862576007843018,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.0242,
      "step": 67
    },
    {
      "epoch": 0.10096510764662213,
      "grad_norm": 0.8646849393844604,
      "learning_rate": 2.314152462588659e-06,
      "loss": 0.0473,
      "step": 68
    },
    {
      "epoch": 0.10244988864142539,
      "grad_norm": 0.4752139151096344,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.0625,
      "step": 69
    },
    {
      "epoch": 0.10393466963622866,
      "grad_norm": 1.9038113355636597,
      "learning_rate": 1.1851996440033319e-06,
      "loss": 0.051,
      "step": 70
    },
    {
      "epoch": 0.10541945063103192,
      "grad_norm": 0.5322926044464111,
      "learning_rate": 7.596123493895991e-07,
      "loss": 0.0765,
      "step": 71
    },
    {
      "epoch": 0.10690423162583519,
      "grad_norm": 0.49304574728012085,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.0655,
      "step": 72
    },
    {
      "epoch": 0.10838901262063845,
      "grad_norm": 0.17058974504470825,
      "learning_rate": 1.9026509541272275e-07,
      "loss": 0.027,
      "step": 73
    },
    {
      "epoch": 0.10987379361544172,
      "grad_norm": 0.644688606262207,
      "learning_rate": 4.7588920907110094e-08,
      "loss": 0.0848,
      "step": 74
    },
    {
      "epoch": 0.111358574610245,
      "grad_norm": 0.3389902114868164,
      "learning_rate": 0.0,
      "loss": 0.0288,
      "step": 75
    },
    {
      "epoch": 0.111358574610245,
      "eval_loss": 0.11129310727119446,
      "eval_runtime": 143.6519,
      "eval_samples_per_second": 3.954,
      "eval_steps_per_second": 1.977,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1208807043773235e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}