{
"best_metric": 0.47327545285224915,
"best_model_checkpoint": "data/Llama-31-8B_task-3_60-samples_config-4/checkpoint-117",
"epoch": 48.0,
"eval_steps": 500,
"global_step": 138,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.34782608695652173,
"grad_norm": 2.713942527770996,
"learning_rate": 3.3333333333333335e-07,
"loss": 2.4937,
"step": 1
},
{
"epoch": 0.6956521739130435,
"grad_norm": 2.5404553413391113,
"learning_rate": 6.666666666666667e-07,
"loss": 2.4485,
"step": 2
},
{
"epoch": 0.6956521739130435,
"eval_loss": 2.47861909866333,
"eval_runtime": 11.1868,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 2
},
{
"epoch": 1.391304347826087,
"grad_norm": 2.537482976913452,
"learning_rate": 1.3333333333333334e-06,
"loss": 2.4413,
"step": 4
},
{
"epoch": 1.7391304347826086,
"eval_loss": 2.4666788578033447,
"eval_runtime": 11.1876,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 5
},
{
"epoch": 2.0869565217391304,
"grad_norm": 2.515000820159912,
"learning_rate": 2.0000000000000003e-06,
"loss": 2.4642,
"step": 6
},
{
"epoch": 2.782608695652174,
"grad_norm": 2.7089920043945312,
"learning_rate": 2.666666666666667e-06,
"loss": 2.6263,
"step": 8
},
{
"epoch": 2.782608695652174,
"eval_loss": 2.444336175918579,
"eval_runtime": 11.1858,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 8
},
{
"epoch": 3.4782608695652173,
"grad_norm": 2.3491318225860596,
"learning_rate": 3.3333333333333333e-06,
"loss": 2.1438,
"step": 10
},
{
"epoch": 3.8260869565217392,
"eval_loss": 2.41117262840271,
"eval_runtime": 11.1873,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 11
},
{
"epoch": 4.173913043478261,
"grad_norm": 2.7485859394073486,
"learning_rate": 4.000000000000001e-06,
"loss": 2.5153,
"step": 12
},
{
"epoch": 4.869565217391305,
"grad_norm": 2.9895129203796387,
"learning_rate": 4.666666666666667e-06,
"loss": 2.3995,
"step": 14
},
{
"epoch": 4.869565217391305,
"eval_loss": 2.3653676509857178,
"eval_runtime": 11.1932,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 14
},
{
"epoch": 5.565217391304348,
"grad_norm": 2.5411155223846436,
"learning_rate": 5.333333333333334e-06,
"loss": 2.2475,
"step": 16
},
{
"epoch": 5.913043478260869,
"eval_loss": 2.3020589351654053,
"eval_runtime": 11.1867,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 17
},
{
"epoch": 6.260869565217392,
"grad_norm": 2.7964119911193848,
"learning_rate": 6e-06,
"loss": 2.3758,
"step": 18
},
{
"epoch": 6.956521739130435,
"grad_norm": 2.7497153282165527,
"learning_rate": 6.666666666666667e-06,
"loss": 2.2194,
"step": 20
},
{
"epoch": 6.956521739130435,
"eval_loss": 2.2091970443725586,
"eval_runtime": 11.1867,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 20
},
{
"epoch": 7.6521739130434785,
"grad_norm": 2.986889362335205,
"learning_rate": 7.333333333333333e-06,
"loss": 2.2976,
"step": 22
},
{
"epoch": 8.0,
"eval_loss": 2.0988080501556396,
"eval_runtime": 11.191,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 23
},
{
"epoch": 8.347826086956522,
"grad_norm": 2.966017961502075,
"learning_rate": 8.000000000000001e-06,
"loss": 2.0386,
"step": 24
},
{
"epoch": 8.695652173913043,
"eval_loss": 2.0089728832244873,
"eval_runtime": 11.1873,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 25
},
{
"epoch": 9.043478260869565,
"grad_norm": 2.240626335144043,
"learning_rate": 8.666666666666668e-06,
"loss": 1.9381,
"step": 26
},
{
"epoch": 9.73913043478261,
"grad_norm": 2.0199873447418213,
"learning_rate": 9.333333333333334e-06,
"loss": 1.8757,
"step": 28
},
{
"epoch": 9.73913043478261,
"eval_loss": 1.8477638959884644,
"eval_runtime": 11.1884,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 28
},
{
"epoch": 10.434782608695652,
"grad_norm": 2.3196067810058594,
"learning_rate": 1e-05,
"loss": 1.753,
"step": 30
},
{
"epoch": 10.782608695652174,
"eval_loss": 1.661726951599121,
"eval_runtime": 11.1905,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 31
},
{
"epoch": 11.130434782608695,
"grad_norm": 2.170588970184326,
"learning_rate": 9.99864620589731e-06,
"loss": 1.6899,
"step": 32
},
{
"epoch": 11.826086956521738,
"grad_norm": 1.8977363109588623,
"learning_rate": 9.994585556692624e-06,
"loss": 1.5394,
"step": 34
},
{
"epoch": 11.826086956521738,
"eval_loss": 1.4736323356628418,
"eval_runtime": 11.1874,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 34
},
{
"epoch": 12.521739130434783,
"grad_norm": 2.0006208419799805,
"learning_rate": 9.987820251299121e-06,
"loss": 1.4055,
"step": 36
},
{
"epoch": 12.869565217391305,
"eval_loss": 1.2967798709869385,
"eval_runtime": 11.1868,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 37
},
{
"epoch": 13.217391304347826,
"grad_norm": 2.0926783084869385,
"learning_rate": 9.978353953249023e-06,
"loss": 1.3167,
"step": 38
},
{
"epoch": 13.91304347826087,
"grad_norm": 2.1667168140411377,
"learning_rate": 9.966191788709716e-06,
"loss": 1.1544,
"step": 40
},
{
"epoch": 13.91304347826087,
"eval_loss": 1.1374319791793823,
"eval_runtime": 11.1869,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 40
},
{
"epoch": 14.608695652173914,
"grad_norm": 1.5720164775848389,
"learning_rate": 9.951340343707852e-06,
"loss": 1.0965,
"step": 42
},
{
"epoch": 14.956521739130435,
"eval_loss": 0.9952384829521179,
"eval_runtime": 11.1859,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 43
},
{
"epoch": 15.304347826086957,
"grad_norm": 1.4824587106704712,
"learning_rate": 9.933807660562898e-06,
"loss": 0.9479,
"step": 44
},
{
"epoch": 16.0,
"grad_norm": 1.6556437015533447,
"learning_rate": 9.913603233532067e-06,
"loss": 0.8824,
"step": 46
},
{
"epoch": 16.0,
"eval_loss": 0.8632068634033203,
"eval_runtime": 11.1862,
"eval_samples_per_second": 1.073,
"eval_steps_per_second": 1.073,
"step": 46
},
{
"epoch": 16.695652173913043,
"grad_norm": 1.2123420238494873,
"learning_rate": 9.890738003669029e-06,
"loss": 0.8412,
"step": 48
},
{
"epoch": 16.695652173913043,
"eval_loss": 0.7849404811859131,
"eval_runtime": 11.1943,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 48
},
{
"epoch": 17.391304347826086,
"grad_norm": 1.1566580533981323,
"learning_rate": 9.86522435289912e-06,
"loss": 0.7232,
"step": 50
},
{
"epoch": 17.73913043478261,
"eval_loss": 0.7002378106117249,
"eval_runtime": 11.1971,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 51
},
{
"epoch": 18.08695652173913,
"grad_norm": 1.167207956314087,
"learning_rate": 9.83707609731432e-06,
"loss": 0.5363,
"step": 52
},
{
"epoch": 18.782608695652176,
"grad_norm": 1.0615767240524292,
"learning_rate": 9.806308479691595e-06,
"loss": 0.6275,
"step": 54
},
{
"epoch": 18.782608695652176,
"eval_loss": 0.6459404826164246,
"eval_runtime": 11.191,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 54
},
{
"epoch": 19.47826086956522,
"grad_norm": 0.6826199889183044,
"learning_rate": 9.77293816123866e-06,
"loss": 0.6078,
"step": 56
},
{
"epoch": 19.82608695652174,
"eval_loss": 0.6101614832878113,
"eval_runtime": 11.1924,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 57
},
{
"epoch": 20.17391304347826,
"grad_norm": 0.8720101118087769,
"learning_rate": 9.736983212571646e-06,
"loss": 0.6167,
"step": 58
},
{
"epoch": 20.869565217391305,
"grad_norm": 0.6700724959373474,
"learning_rate": 9.698463103929542e-06,
"loss": 0.5477,
"step": 60
},
{
"epoch": 20.869565217391305,
"eval_loss": 0.5827677845954895,
"eval_runtime": 11.192,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 60
},
{
"epoch": 21.565217391304348,
"grad_norm": 0.6034025549888611,
"learning_rate": 9.657398694630713e-06,
"loss": 0.4369,
"step": 62
},
{
"epoch": 21.91304347826087,
"eval_loss": 0.5586910843849182,
"eval_runtime": 11.2081,
"eval_samples_per_second": 1.071,
"eval_steps_per_second": 1.071,
"step": 63
},
{
"epoch": 22.26086956521739,
"grad_norm": 0.627003014087677,
"learning_rate": 9.613812221777212e-06,
"loss": 0.5656,
"step": 64
},
{
"epoch": 22.956521739130434,
"grad_norm": 0.5275210738182068,
"learning_rate": 9.567727288213005e-06,
"loss": 0.5025,
"step": 66
},
{
"epoch": 22.956521739130434,
"eval_loss": 0.5395612716674805,
"eval_runtime": 11.1941,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 66
},
{
"epoch": 23.652173913043477,
"grad_norm": 0.600711464881897,
"learning_rate": 9.519168849742603e-06,
"loss": 0.5043,
"step": 68
},
{
"epoch": 24.0,
"eval_loss": 0.5226072072982788,
"eval_runtime": 11.193,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 69
},
{
"epoch": 24.347826086956523,
"grad_norm": 0.4522433876991272,
"learning_rate": 9.468163201617063e-06,
"loss": 0.3742,
"step": 70
},
{
"epoch": 24.695652173913043,
"eval_loss": 0.5101320743560791,
"eval_runtime": 11.1941,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 71
},
{
"epoch": 25.043478260869566,
"grad_norm": 0.5010709166526794,
"learning_rate": 9.414737964294636e-06,
"loss": 0.5107,
"step": 72
},
{
"epoch": 25.73913043478261,
"grad_norm": 0.4562795162200928,
"learning_rate": 9.358922068483813e-06,
"loss": 0.449,
"step": 74
},
{
"epoch": 25.73913043478261,
"eval_loss": 0.5002694725990295,
"eval_runtime": 11.1989,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 74
},
{
"epoch": 26.434782608695652,
"grad_norm": 0.38633814454078674,
"learning_rate": 9.30074573947683e-06,
"loss": 0.3276,
"step": 76
},
{
"epoch": 26.782608695652176,
"eval_loss": 0.4924570322036743,
"eval_runtime": 11.1968,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 77
},
{
"epoch": 27.130434782608695,
"grad_norm": 0.46364274621009827,
"learning_rate": 9.24024048078213e-06,
"loss": 0.5073,
"step": 78
},
{
"epoch": 27.82608695652174,
"grad_norm": 0.4898723363876343,
"learning_rate": 9.177439057064684e-06,
"loss": 0.4754,
"step": 80
},
{
"epoch": 27.82608695652174,
"eval_loss": 0.493239164352417,
"eval_runtime": 11.1957,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 80
},
{
"epoch": 28.52173913043478,
"grad_norm": 0.5032973885536194,
"learning_rate": 9.112375476403313e-06,
"loss": 0.3724,
"step": 82
},
{
"epoch": 28.869565217391305,
"eval_loss": 0.48757660388946533,
"eval_runtime": 11.1963,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 83
},
{
"epoch": 29.217391304347824,
"grad_norm": 0.2735467553138733,
"learning_rate": 9.045084971874738e-06,
"loss": 0.338,
"step": 84
},
{
"epoch": 29.91304347826087,
"grad_norm": 0.4709406793117523,
"learning_rate": 8.97560398247424e-06,
"loss": 0.4679,
"step": 86
},
{
"epoch": 29.91304347826087,
"eval_loss": 0.48609817028045654,
"eval_runtime": 11.1994,
"eval_samples_per_second": 1.071,
"eval_steps_per_second": 1.071,
"step": 86
},
{
"epoch": 30.608695652173914,
"grad_norm": 0.25528159737586975,
"learning_rate": 8.903970133383297e-06,
"loss": 0.3245,
"step": 88
},
{
"epoch": 30.956521739130434,
"eval_loss": 0.4883822202682495,
"eval_runtime": 11.1995,
"eval_samples_per_second": 1.071,
"eval_steps_per_second": 1.071,
"step": 89
},
{
"epoch": 31.304347826086957,
"grad_norm": 0.36503228545188904,
"learning_rate": 8.83022221559489e-06,
"loss": 0.4829,
"step": 90
},
{
"epoch": 32.0,
"grad_norm": 0.3194330930709839,
"learning_rate": 8.754400164907496e-06,
"loss": 0.3613,
"step": 92
},
{
"epoch": 32.0,
"eval_loss": 0.4921656847000122,
"eval_runtime": 11.193,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 92
},
{
"epoch": 32.69565217391305,
"grad_norm": 0.24091415107250214,
"learning_rate": 8.676545040299145e-06,
"loss": 0.3511,
"step": 94
},
{
"epoch": 32.69565217391305,
"eval_loss": 0.48988497257232666,
"eval_runtime": 11.1916,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 94
},
{
"epoch": 33.391304347826086,
"grad_norm": 0.32847926020622253,
"learning_rate": 8.596699001693257e-06,
"loss": 0.5275,
"step": 96
},
{
"epoch": 33.73913043478261,
"eval_loss": 0.4931323528289795,
"eval_runtime": 11.1917,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 97
},
{
"epoch": 34.08695652173913,
"grad_norm": 0.229437455534935,
"learning_rate": 8.51490528712831e-06,
"loss": 0.2909,
"step": 98
},
{
"epoch": 34.78260869565217,
"grad_norm": 0.2619946002960205,
"learning_rate": 8.43120818934367e-06,
"loss": 0.3403,
"step": 100
},
{
"epoch": 34.78260869565217,
"eval_loss": 0.488254576921463,
"eval_runtime": 11.2025,
"eval_samples_per_second": 1.071,
"eval_steps_per_second": 1.071,
"step": 100
},
{
"epoch": 35.47826086956522,
"grad_norm": 0.2497745305299759,
"learning_rate": 8.345653031794292e-06,
"loss": 0.4209,
"step": 102
},
{
"epoch": 35.82608695652174,
"eval_loss": 0.4814887046813965,
"eval_runtime": 11.195,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 103
},
{
"epoch": 36.17391304347826,
"grad_norm": 0.2717205882072449,
"learning_rate": 8.258286144107277e-06,
"loss": 0.3889,
"step": 104
},
{
"epoch": 36.869565217391305,
"grad_norm": 0.2658683955669403,
"learning_rate": 8.16915483699355e-06,
"loss": 0.3543,
"step": 106
},
{
"epoch": 36.869565217391305,
"eval_loss": 0.48049914836883545,
"eval_runtime": 11.1957,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 106
},
{
"epoch": 37.56521739130435,
"grad_norm": 0.24305017292499542,
"learning_rate": 8.078307376628292e-06,
"loss": 0.4115,
"step": 108
},
{
"epoch": 37.91304347826087,
"eval_loss": 0.47674474120140076,
"eval_runtime": 11.1929,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 109
},
{
"epoch": 38.26086956521739,
"grad_norm": 0.27400490641593933,
"learning_rate": 7.985792958513932e-06,
"loss": 0.3339,
"step": 110
},
{
"epoch": 38.95652173913044,
"grad_norm": 0.26753950119018555,
"learning_rate": 7.891661680839932e-06,
"loss": 0.3902,
"step": 112
},
{
"epoch": 38.95652173913044,
"eval_loss": 0.47939857840538025,
"eval_runtime": 11.1926,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 112
},
{
"epoch": 39.65217391304348,
"grad_norm": 0.25980764627456665,
"learning_rate": 7.795964517353734e-06,
"loss": 0.3735,
"step": 114
},
{
"epoch": 40.0,
"eval_loss": 0.4776058495044708,
"eval_runtime": 11.1942,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 115
},
{
"epoch": 40.34782608695652,
"grad_norm": 0.2467641979455948,
"learning_rate": 7.698753289757565e-06,
"loss": 0.3227,
"step": 116
},
{
"epoch": 40.69565217391305,
"eval_loss": 0.47327545285224915,
"eval_runtime": 11.1958,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 117
},
{
"epoch": 41.04347826086956,
"grad_norm": 0.28041788935661316,
"learning_rate": 7.600080639646077e-06,
"loss": 0.385,
"step": 118
},
{
"epoch": 41.73913043478261,
"grad_norm": 0.21985159814357758,
"learning_rate": 7.500000000000001e-06,
"loss": 0.2983,
"step": 120
},
{
"epoch": 41.73913043478261,
"eval_loss": 0.47965100407600403,
"eval_runtime": 11.1988,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 120
},
{
"epoch": 42.43478260869565,
"grad_norm": 0.30970826745033264,
"learning_rate": 7.398565566251232e-06,
"loss": 0.4421,
"step": 122
},
{
"epoch": 42.78260869565217,
"eval_loss": 0.47911468148231506,
"eval_runtime": 11.2003,
"eval_samples_per_second": 1.071,
"eval_steps_per_second": 1.071,
"step": 123
},
{
"epoch": 43.130434782608695,
"grad_norm": 0.320541650056839,
"learning_rate": 7.295832266935059e-06,
"loss": 0.3315,
"step": 124
},
{
"epoch": 43.82608695652174,
"grad_norm": 0.3472091853618622,
"learning_rate": 7.191855733945388e-06,
"loss": 0.3819,
"step": 126
},
{
"epoch": 43.82608695652174,
"eval_loss": 0.47386252880096436,
"eval_runtime": 11.1952,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 126
},
{
"epoch": 44.52173913043478,
"grad_norm": 0.24557389318943024,
"learning_rate": 7.08669227240909e-06,
"loss": 0.2965,
"step": 128
},
{
"epoch": 44.869565217391305,
"eval_loss": 0.4763611853122711,
"eval_runtime": 11.1934,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 129
},
{
"epoch": 45.21739130434783,
"grad_norm": 0.39236781001091003,
"learning_rate": 6.980398830195785e-06,
"loss": 0.3882,
"step": 130
},
{
"epoch": 45.91304347826087,
"grad_norm": 0.2221372425556183,
"learning_rate": 6.873032967079562e-06,
"loss": 0.2661,
"step": 132
},
{
"epoch": 45.91304347826087,
"eval_loss": 0.4764821529388428,
"eval_runtime": 11.1966,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 132
},
{
"epoch": 46.608695652173914,
"grad_norm": 0.30951645970344543,
"learning_rate": 6.7646528235693445e-06,
"loss": 0.3827,
"step": 134
},
{
"epoch": 46.95652173913044,
"eval_loss": 0.4778183698654175,
"eval_runtime": 11.193,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 135
},
{
"epoch": 47.30434782608695,
"grad_norm": 0.3314763903617859,
"learning_rate": 6.655317089424791e-06,
"loss": 0.351,
"step": 136
},
{
"epoch": 48.0,
"grad_norm": 0.30320581793785095,
"learning_rate": 6.545084971874738e-06,
"loss": 0.3144,
"step": 138
},
{
"epoch": 48.0,
"eval_loss": 0.47970080375671387,
"eval_runtime": 11.1919,
"eval_samples_per_second": 1.072,
"eval_steps_per_second": 1.072,
"step": 138
},
{
"epoch": 48.0,
"step": 138,
"total_flos": 1.749470205265838e+17,
"train_loss": 0.9229606502297996,
"train_runtime": 6469.2828,
"train_samples_per_second": 1.067,
"train_steps_per_second": 0.046
}
],
"logging_steps": 2,
"max_steps": 300,
"num_input_tokens_seen": 0,
"num_train_epochs": 150,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 7,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.749470205265838e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}