{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.02635393332454869,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0003513857776606492,
      "grad_norm": 0.40566906332969666,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 4.8289,
      "step": 1
    },
    {
      "epoch": 0.0003513857776606492,
      "eval_loss": 1.173790454864502,
      "eval_runtime": 610.791,
      "eval_samples_per_second": 3.924,
      "eval_steps_per_second": 1.963,
      "step": 1
    },
    {
      "epoch": 0.0007027715553212984,
      "grad_norm": 0.5825751423835754,
      "learning_rate": 6.666666666666667e-05,
      "loss": 10.7893,
      "step": 2
    },
    {
      "epoch": 0.0010541573329819476,
      "grad_norm": 0.4203815460205078,
      "learning_rate": 0.0001,
      "loss": 5.0236,
      "step": 3
    },
    {
      "epoch": 0.0014055431106425968,
      "grad_norm": 0.5486876964569092,
      "learning_rate": 9.99524110790929e-05,
      "loss": 5.2274,
      "step": 4
    },
    {
      "epoch": 0.001756928888303246,
      "grad_norm": 0.49067986011505127,
      "learning_rate": 9.980973490458728e-05,
      "loss": 8.6525,
      "step": 5
    },
    {
      "epoch": 0.002108314665963895,
      "grad_norm": 0.5652139782905579,
      "learning_rate": 9.957224306869053e-05,
      "loss": 10.0269,
      "step": 6
    },
    {
      "epoch": 0.002459700443624544,
      "grad_norm": 0.6335968375205994,
      "learning_rate": 9.924038765061042e-05,
      "loss": 6.0283,
      "step": 7
    },
    {
      "epoch": 0.0028110862212851937,
      "grad_norm": 0.6399157047271729,
      "learning_rate": 9.881480035599667e-05,
      "loss": 5.6921,
      "step": 8
    },
    {
      "epoch": 0.0031624719989458427,
      "grad_norm": 0.7017378807067871,
      "learning_rate": 9.829629131445342e-05,
      "loss": 6.5049,
      "step": 9
    },
    {
      "epoch": 0.003513857776606492,
      "grad_norm": 0.6648010015487671,
      "learning_rate": 9.768584753741134e-05,
      "loss": 6.5068,
      "step": 10
    },
    {
      "epoch": 0.003865243554267141,
      "grad_norm": 0.7622893452644348,
      "learning_rate": 9.698463103929542e-05,
      "loss": 6.1024,
      "step": 11
    },
    {
      "epoch": 0.00421662933192779,
      "grad_norm": 0.8102121353149414,
      "learning_rate": 9.619397662556435e-05,
      "loss": 6.9054,
      "step": 12
    },
    {
      "epoch": 0.00456801510958844,
      "grad_norm": 1.0450242757797241,
      "learning_rate": 9.53153893518325e-05,
      "loss": 8.0875,
      "step": 13
    },
    {
      "epoch": 0.004919400887249088,
      "grad_norm": 1.1724853515625,
      "learning_rate": 9.435054165891109e-05,
      "loss": 6.9638,
      "step": 14
    },
    {
      "epoch": 0.005270786664909738,
      "grad_norm": 1.17697012424469,
      "learning_rate": 9.330127018922194e-05,
      "loss": 6.5686,
      "step": 15
    },
    {
      "epoch": 0.005622172442570387,
      "grad_norm": 0.9172914624214172,
      "learning_rate": 9.21695722906443e-05,
      "loss": 6.4815,
      "step": 16
    },
    {
      "epoch": 0.005973558220231036,
      "grad_norm": 1.8105238676071167,
      "learning_rate": 9.09576022144496e-05,
      "loss": 7.5522,
      "step": 17
    },
    {
      "epoch": 0.0063249439978916855,
      "grad_norm": 1.1030470132827759,
      "learning_rate": 8.966766701456177e-05,
      "loss": 7.9177,
      "step": 18
    },
    {
      "epoch": 0.006676329775552334,
      "grad_norm": 1.0749439001083374,
      "learning_rate": 8.83022221559489e-05,
      "loss": 8.2476,
      "step": 19
    },
    {
      "epoch": 0.007027715553212984,
      "grad_norm": 0.988822877407074,
      "learning_rate": 8.68638668405062e-05,
      "loss": 8.0408,
      "step": 20
    },
    {
      "epoch": 0.007379101330873633,
      "grad_norm": 1.0468634366989136,
      "learning_rate": 8.535533905932738e-05,
      "loss": 7.7576,
      "step": 21
    },
    {
      "epoch": 0.007730487108534282,
      "grad_norm": 1.0833945274353027,
      "learning_rate": 8.377951038078302e-05,
      "loss": 7.7106,
      "step": 22
    },
    {
      "epoch": 0.008081872886194932,
      "grad_norm": 1.1043288707733154,
      "learning_rate": 8.213938048432697e-05,
      "loss": 7.796,
      "step": 23
    },
    {
      "epoch": 0.00843325866385558,
      "grad_norm": 1.253221869468689,
      "learning_rate": 8.043807145043604e-05,
      "loss": 7.2421,
      "step": 24
    },
    {
      "epoch": 0.00878464444151623,
      "grad_norm": 1.5644135475158691,
      "learning_rate": 7.86788218175523e-05,
      "loss": 6.957,
      "step": 25
    },
    {
      "epoch": 0.00878464444151623,
      "eval_loss": 1.055617094039917,
      "eval_runtime": 614.7811,
      "eval_samples_per_second": 3.899,
      "eval_steps_per_second": 1.95,
      "step": 25
    },
    {
      "epoch": 0.00913603021917688,
      "grad_norm": 1.6291801929473877,
      "learning_rate": 7.68649804173412e-05,
      "loss": 7.8515,
      "step": 26
    },
    {
      "epoch": 0.009487415996837528,
      "grad_norm": 1.4865143299102783,
      "learning_rate": 7.500000000000001e-05,
      "loss": 6.6876,
      "step": 27
    },
    {
      "epoch": 0.009838801774498177,
      "grad_norm": 1.633550763130188,
      "learning_rate": 7.308743066175172e-05,
      "loss": 9.7649,
      "step": 28
    },
    {
      "epoch": 0.010190187552158827,
      "grad_norm": 1.6559158563613892,
      "learning_rate": 7.113091308703498e-05,
      "loss": 7.7611,
      "step": 29
    },
    {
      "epoch": 0.010541573329819476,
      "grad_norm": 1.56125807762146,
      "learning_rate": 6.91341716182545e-05,
      "loss": 8.1391,
      "step": 30
    },
    {
      "epoch": 0.010892959107480124,
      "grad_norm": 1.4842593669891357,
      "learning_rate": 6.710100716628344e-05,
      "loss": 7.1128,
      "step": 31
    },
    {
      "epoch": 0.011244344885140775,
      "grad_norm": 1.9677858352661133,
      "learning_rate": 6.503528997521366e-05,
      "loss": 8.2545,
      "step": 32
    },
    {
      "epoch": 0.011595730662801423,
      "grad_norm": 2.1278786659240723,
      "learning_rate": 6.294095225512603e-05,
      "loss": 8.5091,
      "step": 33
    },
    {
      "epoch": 0.011947116440462072,
      "grad_norm": 1.7648427486419678,
      "learning_rate": 6.0821980696905146e-05,
      "loss": 6.5063,
      "step": 34
    },
    {
      "epoch": 0.01229850221812272,
      "grad_norm": 1.6439030170440674,
      "learning_rate": 5.868240888334653e-05,
      "loss": 8.567,
      "step": 35
    },
    {
      "epoch": 0.012649887995783371,
      "grad_norm": 1.884887456893921,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 8.3356,
      "step": 36
    },
    {
      "epoch": 0.01300127377344402,
      "grad_norm": 1.6573904752731323,
      "learning_rate": 5.435778713738292e-05,
      "loss": 7.9903,
      "step": 37
    },
    {
      "epoch": 0.013352659551104668,
      "grad_norm": 1.8767001628875732,
      "learning_rate": 5.218096936826681e-05,
      "loss": 7.5882,
      "step": 38
    },
    {
      "epoch": 0.013704045328765319,
      "grad_norm": 2.158738374710083,
      "learning_rate": 5e-05,
      "loss": 9.144,
      "step": 39
    },
    {
      "epoch": 0.014055431106425967,
      "grad_norm": 2.622767448425293,
      "learning_rate": 4.781903063173321e-05,
      "loss": 9.0718,
      "step": 40
    },
    {
      "epoch": 0.014406816884086616,
      "grad_norm": 2.098249912261963,
      "learning_rate": 4.564221286261709e-05,
      "loss": 7.5903,
      "step": 41
    },
    {
      "epoch": 0.014758202661747266,
      "grad_norm": 2.695885419845581,
      "learning_rate": 4.347369038899744e-05,
      "loss": 9.2641,
      "step": 42
    },
    {
      "epoch": 0.015109588439407915,
      "grad_norm": 2.7287509441375732,
      "learning_rate": 4.131759111665349e-05,
      "loss": 9.99,
      "step": 43
    },
    {
      "epoch": 0.015460974217068563,
      "grad_norm": 2.987199068069458,
      "learning_rate": 3.917801930309486e-05,
      "loss": 9.1632,
      "step": 44
    },
    {
      "epoch": 0.015812359994729214,
      "grad_norm": 3.6520209312438965,
      "learning_rate": 3.705904774487396e-05,
      "loss": 10.3929,
      "step": 45
    },
    {
      "epoch": 0.016163745772389864,
      "grad_norm": 3.4548537731170654,
      "learning_rate": 3.4964710024786354e-05,
      "loss": 10.003,
      "step": 46
    },
    {
      "epoch": 0.01651513155005051,
      "grad_norm": 5.150744915008545,
      "learning_rate": 3.289899283371657e-05,
      "loss": 11.3511,
      "step": 47
    },
    {
      "epoch": 0.01686651732771116,
      "grad_norm": 7.644892692565918,
      "learning_rate": 3.086582838174551e-05,
      "loss": 10.2625,
      "step": 48
    },
    {
      "epoch": 0.01721790310537181,
      "grad_norm": 9.981356620788574,
      "learning_rate": 2.886908691296504e-05,
      "loss": 11.0407,
      "step": 49
    },
    {
      "epoch": 0.01756928888303246,
      "grad_norm": 21.54275131225586,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 13.8822,
      "step": 50
    },
    {
      "epoch": 0.01756928888303246,
      "eval_loss": 1.019070029258728,
      "eval_runtime": 614.5764,
      "eval_samples_per_second": 3.9,
      "eval_steps_per_second": 1.951,
      "step": 50
    },
    {
      "epoch": 0.01792067466069311,
      "grad_norm": 1.2253239154815674,
      "learning_rate": 2.500000000000001e-05,
      "loss": 5.3973,
      "step": 51
    },
    {
      "epoch": 0.01827206043835376,
      "grad_norm": 0.9925060868263245,
      "learning_rate": 2.3135019582658802e-05,
      "loss": 5.1567,
      "step": 52
    },
    {
      "epoch": 0.018623446216014406,
      "grad_norm": 1.3718881607055664,
      "learning_rate": 2.132117818244771e-05,
      "loss": 7.931,
      "step": 53
    },
    {
      "epoch": 0.018974831993675056,
      "grad_norm": 1.1585586071014404,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 9.9163,
      "step": 54
    },
    {
      "epoch": 0.019326217771335707,
      "grad_norm": 1.292351245880127,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 14.2114,
      "step": 55
    },
    {
      "epoch": 0.019677603548996354,
      "grad_norm": 0.7819482088088989,
      "learning_rate": 1.622048961921699e-05,
      "loss": 5.4162,
      "step": 56
    },
    {
      "epoch": 0.020028989326657004,
      "grad_norm": 0.9459342956542969,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 8.4571,
      "step": 57
    },
    {
      "epoch": 0.020380375104317654,
      "grad_norm": 0.8447139859199524,
      "learning_rate": 1.3136133159493802e-05,
      "loss": 4.3193,
      "step": 58
    },
    {
      "epoch": 0.0207317608819783,
      "grad_norm": 0.9083331823348999,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 5.212,
      "step": 59
    },
    {
      "epoch": 0.02108314665963895,
      "grad_norm": 0.9303866028785706,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 5.864,
      "step": 60
    },
    {
      "epoch": 0.021434532437299602,
      "grad_norm": 0.9800831079483032,
      "learning_rate": 9.042397785550405e-06,
      "loss": 5.7093,
      "step": 61
    },
    {
      "epoch": 0.02178591821496025,
      "grad_norm": 1.0370510816574097,
      "learning_rate": 7.830427709355725e-06,
      "loss": 7.2464,
      "step": 62
    },
    {
      "epoch": 0.0221373039926209,
      "grad_norm": 1.0886764526367188,
      "learning_rate": 6.698729810778065e-06,
      "loss": 6.793,
      "step": 63
    },
    {
      "epoch": 0.02248868977028155,
      "grad_norm": 1.2717602252960205,
      "learning_rate": 5.649458341088915e-06,
      "loss": 7.8938,
      "step": 64
    },
    {
      "epoch": 0.022840075547942196,
      "grad_norm": 1.13100004196167,
      "learning_rate": 4.684610648167503e-06,
      "loss": 6.6839,
      "step": 65
    },
    {
      "epoch": 0.023191461325602847,
      "grad_norm": 1.103505253791809,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 7.4892,
      "step": 66
    },
    {
      "epoch": 0.023542847103263497,
      "grad_norm": 1.0577113628387451,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 6.9858,
      "step": 67
    },
    {
      "epoch": 0.023894232880924144,
      "grad_norm": 1.1837400197982788,
      "learning_rate": 2.314152462588659e-06,
      "loss": 7.707,
      "step": 68
    },
    {
      "epoch": 0.024245618658584794,
      "grad_norm": 1.2462010383605957,
      "learning_rate": 1.70370868554659e-06,
      "loss": 8.0216,
      "step": 69
    },
    {
      "epoch": 0.02459700443624544,
      "grad_norm": 1.1442655324935913,
      "learning_rate": 1.1851996440033319e-06,
      "loss": 7.1856,
      "step": 70
    },
    {
      "epoch": 0.02494839021390609,
      "grad_norm": 1.1962558031082153,
      "learning_rate": 7.596123493895991e-07,
      "loss": 7.0156,
      "step": 71
    },
    {
      "epoch": 0.025299775991566742,
      "grad_norm": 1.2656418085098267,
      "learning_rate": 4.277569313094809e-07,
      "loss": 7.6485,
      "step": 72
    },
    {
      "epoch": 0.02565116176922739,
      "grad_norm": 1.0747231245040894,
      "learning_rate": 1.9026509541272275e-07,
      "loss": 6.3779,
      "step": 73
    },
    {
      "epoch": 0.02600254754688804,
      "grad_norm": 1.213733196258545,
      "learning_rate": 4.7588920907110094e-08,
      "loss": 8.1561,
      "step": 74
    },
    {
      "epoch": 0.02635393332454869,
      "grad_norm": 1.571469783782959,
      "learning_rate": 0.0,
      "loss": 8.2613,
      "step": 75
    },
    {
      "epoch": 0.02635393332454869,
      "eval_loss": 1.00751793384552,
      "eval_runtime": 614.6938,
      "eval_samples_per_second": 3.9,
      "eval_steps_per_second": 1.951,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.2197502476288e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}