{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 385,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 198.0,
      "learning_rate": 1.282051282051282e-07,
      "logits/chosen": -2.7358343601226807,
      "logits/rejected": -2.7480404376983643,
      "logps/chosen": -27.35565757751465,
      "logps/rejected": -21.06114387512207,
      "loss": 1.5625,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.03,
      "grad_norm": 306.0,
      "learning_rate": 1.282051282051282e-06,
      "logits/chosen": -3.009331703186035,
      "logits/rejected": -2.9980063438415527,
      "logps/chosen": -33.193904876708984,
      "logps/rejected": -32.01276779174805,
      "loss": 1.5179,
      "rewards/accuracies": 0.5416666865348816,
      "rewards/chosen": -0.005185974761843681,
      "rewards/margins": 0.01435802225023508,
      "rewards/rejected": -0.019543997943401337,
      "step": 10
    },
    {
      "epoch": 0.05,
      "grad_norm": 268.0,
      "learning_rate": 2.564102564102564e-06,
      "logits/chosen": -2.8995871543884277,
      "logits/rejected": -2.894530773162842,
      "logps/chosen": -32.46534729003906,
      "logps/rejected": -28.974437713623047,
      "loss": 1.6084,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.0038995600771158934,
      "rewards/margins": 0.0009288033470511436,
      "rewards/rejected": 0.002970754634588957,
      "step": 20
    },
    {
      "epoch": 0.08,
      "grad_norm": 221.0,
      "learning_rate": 3.846153846153847e-06,
      "logits/chosen": -3.097817897796631,
      "logits/rejected": -3.1089720726013184,
      "logps/chosen": -32.821983337402344,
      "logps/rejected": -30.1259708404541,
      "loss": 1.6124,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 0.0346810482442379,
      "rewards/margins": 0.006648649461567402,
      "rewards/rejected": 0.028032397851347923,
      "step": 30
    },
    {
      "epoch": 0.1,
      "grad_norm": 225.0,
      "learning_rate": 4.999896948438434e-06,
      "logits/chosen": -2.8635919094085693,
      "logits/rejected": -2.8542821407318115,
      "logps/chosen": -31.561412811279297,
      "logps/rejected": -32.368797302246094,
      "loss": 1.5659,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.10756368935108185,
      "rewards/margins": 0.11209961026906967,
      "rewards/rejected": -0.004535915795713663,
      "step": 40
    },
    {
      "epoch": 0.13,
      "grad_norm": 268.0,
      "learning_rate": 4.987541037542187e-06,
      "logits/chosen": -2.886489152908325,
      "logits/rejected": -2.884337902069092,
      "logps/chosen": -29.50502586364746,
      "logps/rejected": -30.12764549255371,
      "loss": 1.6791,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.0940544530749321,
      "rewards/margins": 0.10953446477651596,
      "rewards/rejected": -0.015479998663067818,
      "step": 50
    },
    {
      "epoch": 0.16,
      "grad_norm": 217.0,
      "learning_rate": 4.954691471941119e-06,
      "logits/chosen": -2.9178414344787598,
      "logits/rejected": -2.9194931983947754,
      "logps/chosen": -30.02047348022461,
      "logps/rejected": -28.11154556274414,
      "loss": 1.3051,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.04371771961450577,
      "rewards/margins": 0.10606314986944199,
      "rewards/rejected": -0.06234544515609741,
      "step": 60
    },
    {
      "epoch": 0.18,
      "grad_norm": 1032.0,
      "learning_rate": 4.901618883413549e-06,
      "logits/chosen": -3.0002479553222656,
      "logits/rejected": -3.007493495941162,
      "logps/chosen": -29.312774658203125,
      "logps/rejected": -31.054119110107422,
      "loss": 2.1422,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.012723004445433617,
      "rewards/margins": 0.05220501497387886,
      "rewards/rejected": -0.06492801755666733,
      "step": 70
    },
    {
      "epoch": 0.21,
      "grad_norm": 326.0,
      "learning_rate": 4.828760511501322e-06,
      "logits/chosen": -2.814807415008545,
      "logits/rejected": -2.8305888175964355,
      "logps/chosen": -29.402179718017578,
      "logps/rejected": -29.830846786499023,
      "loss": 1.5698,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.05541967228055,
      "rewards/margins": 0.11804421246051788,
      "rewards/rejected": -0.06262453645467758,
      "step": 80
    },
    {
      "epoch": 0.23,
      "grad_norm": 322.0,
      "learning_rate": 4.7367166013034295e-06,
      "logits/chosen": -2.9036896228790283,
      "logits/rejected": -2.885866641998291,
      "logps/chosen": -32.615840911865234,
      "logps/rejected": -30.156829833984375,
      "loss": 5.9533,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.08823554217815399,
      "rewards/margins": 0.1423090249300003,
      "rewards/rejected": -0.054073482751846313,
      "step": 90
    },
    {
      "epoch": 0.26,
      "grad_norm": 177.0,
      "learning_rate": 4.626245458345211e-06,
      "logits/chosen": -3.007761001586914,
      "logits/rejected": -3.0078930854797363,
      "logps/chosen": -31.708675384521484,
      "logps/rejected": -30.79708480834961,
      "loss": 1.7948,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.1392797976732254,
      "rewards/margins": 0.13164444267749786,
      "rewards/rejected": 0.007635355927050114,
      "step": 100
    },
    {
      "epoch": 0.26,
      "eval_logits/chosen": -2.8176231384277344,
      "eval_logits/rejected": -2.814725399017334,
      "eval_logps/chosen": -31.257057189941406,
      "eval_logps/rejected": -34.74238204956055,
      "eval_loss": 1.6240766048431396,
      "eval_rewards/accuracies": 0.5685215592384338,
      "eval_rewards/chosen": 0.010158155113458633,
      "eval_rewards/margins": 0.027848951518535614,
      "eval_rewards/rejected": -0.01769079640507698,
      "eval_runtime": 112.9872,
      "eval_samples_per_second": 3.036,
      "eval_steps_per_second": 0.381,
      "step": 100
    },
    {
      "epoch": 0.29,
      "grad_norm": 251.0,
      "learning_rate": 4.498257201263691e-06,
      "logits/chosen": -2.964388847351074,
      "logits/rejected": -2.9403655529022217,
      "logps/chosen": -31.717798233032227,
      "logps/rejected": -31.185800552368164,
      "loss": 1.9523,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.19960005581378937,
      "rewards/margins": 0.21038112044334412,
      "rewards/rejected": -0.010781067423522472,
      "step": 110
    },
    {
      "epoch": 0.31,
      "grad_norm": 182.0,
      "learning_rate": 4.353806263777678e-06,
      "logits/chosen": -3.0449271202087402,
      "logits/rejected": -3.074241876602173,
      "logps/chosen": -28.759485244750977,
      "logps/rejected": -34.1177864074707,
      "loss": 1.6223,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.1617673933506012,
      "rewards/margins": 0.1498604714870453,
      "rewards/rejected": 0.01190691627562046,
      "step": 120
    },
    {
      "epoch": 0.34,
      "grad_norm": 153.0,
      "learning_rate": 4.1940827077152755e-06,
      "logits/chosen": -2.745648145675659,
      "logits/rejected": -2.7412946224212646,
      "logps/chosen": -28.58340835571289,
      "logps/rejected": -30.125900268554688,
      "loss": 1.9846,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.15650880336761475,
      "rewards/margins": 0.1533457338809967,
      "rewards/rejected": 0.0031630918383598328,
      "step": 130
    },
    {
      "epoch": 0.36,
      "grad_norm": 128.0,
      "learning_rate": 4.0204024186666215e-06,
      "logits/chosen": -3.016637086868286,
      "logits/rejected": -3.014425277709961,
      "logps/chosen": -27.189367294311523,
      "logps/rejected": -31.768428802490234,
      "loss": 1.6015,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.1187439113855362,
      "rewards/margins": 0.16340722143650055,
      "rewards/rejected": -0.04466330260038376,
      "step": 140
    },
    {
      "epoch": 0.39,
      "grad_norm": 191.0,
      "learning_rate": 3.834196265035119e-06,
      "logits/chosen": -2.812203884124756,
      "logits/rejected": -2.8067917823791504,
      "logps/chosen": -27.404495239257812,
      "logps/rejected": -31.38730239868164,
      "loss": 1.2187,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.16840532422065735,
      "rewards/margins": 0.2338680922985077,
      "rewards/rejected": -0.06546276807785034,
      "step": 150
    },
    {
      "epoch": 0.42,
      "grad_norm": 223.0,
      "learning_rate": 3.636998309800573e-06,
      "logits/chosen": -3.127948522567749,
      "logits/rejected": -3.1103968620300293,
      "logps/chosen": -31.860748291015625,
      "logps/rejected": -29.207168579101562,
      "loss": 1.9596,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.2523886263370514,
      "rewards/margins": 0.3433467745780945,
      "rewards/rejected": -0.09095814824104309,
      "step": 160
    },
    {
      "epoch": 0.44,
      "grad_norm": 112.0,
      "learning_rate": 3.4304331721118078e-06,
      "logits/chosen": -2.9414854049682617,
      "logits/rejected": -2.9481499195098877,
      "logps/chosen": -29.37969970703125,
      "logps/rejected": -31.226505279541016,
      "loss": 1.5997,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.205966979265213,
      "rewards/margins": 0.25677576661109924,
      "rewards/rejected": -0.05080880969762802,
      "step": 170
    },
    {
      "epoch": 0.47,
      "grad_norm": 118.5,
      "learning_rate": 3.2162026428305436e-06,
      "logits/chosen": -2.789107322692871,
      "logits/rejected": -2.7866973876953125,
      "logps/chosen": -29.164358139038086,
      "logps/rejected": -29.882699966430664,
      "loss": 1.5371,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.2021961212158203,
      "rewards/margins": 0.24800649285316467,
      "rewards/rejected": -0.045810386538505554,
      "step": 180
    },
    {
      "epoch": 0.49,
      "grad_norm": 143.0,
      "learning_rate": 2.996071664294641e-06,
      "logits/chosen": -2.9042458534240723,
      "logits/rejected": -2.900311231613159,
      "logps/chosen": -29.820514678955078,
      "logps/rejected": -28.412799835205078,
      "loss": 2.882,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.14647993445396423,
      "rewards/margins": 0.1685885637998581,
      "rewards/rejected": -0.022108638659119606,
      "step": 190
    },
    {
      "epoch": 0.52,
      "grad_norm": 200.0,
      "learning_rate": 2.7718537898066833e-06,
      "logits/chosen": -2.9734153747558594,
      "logits/rejected": -2.9612646102905273,
      "logps/chosen": -32.977813720703125,
      "logps/rejected": -30.30489730834961,
      "loss": 2.6629,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.288481742143631,
      "rewards/margins": 0.23680701851844788,
      "rewards/rejected": 0.05167470499873161,
      "step": 200
    },
    {
      "epoch": 0.52,
      "eval_logits/chosen": -2.812067985534668,
      "eval_logits/rejected": -2.8095240592956543,
      "eval_logps/chosen": -31.236892700195312,
      "eval_logps/rejected": -34.73383712768555,
      "eval_loss": 1.5648894309997559,
      "eval_rewards/accuracies": 0.5456810593605042,
      "eval_rewards/chosen": 0.018223287537693977,
      "eval_rewards/margins": 0.032496776431798935,
      "eval_rewards/rejected": -0.014273490756750107,
      "eval_runtime": 112.7253,
      "eval_samples_per_second": 3.043,
      "eval_steps_per_second": 0.381,
      "step": 200
    },
    {
      "epoch": 0.55,
      "grad_norm": 214.0,
      "learning_rate": 2.5453962426402006e-06,
      "logits/chosen": -2.9090185165405273,
      "logits/rejected": -2.910029888153076,
      "logps/chosen": -32.432430267333984,
      "logps/rejected": -33.925514221191406,
      "loss": 1.49,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.2403920441865921,
      "rewards/margins": 0.26600998640060425,
      "rewards/rejected": -0.025617951527237892,
      "step": 210
    },
    {
      "epoch": 0.57,
      "grad_norm": 202.0,
      "learning_rate": 2.3185646976551794e-06,
      "logits/chosen": -2.8856301307678223,
      "logits/rejected": -2.9012038707733154,
      "logps/chosen": -29.458789825439453,
      "logps/rejected": -28.523406982421875,
      "loss": 2.1585,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.2876998782157898,
      "rewards/margins": 0.28575605154037476,
      "rewards/rejected": 0.001943854964338243,
      "step": 220
    },
    {
      "epoch": 0.6,
      "grad_norm": 150.0,
      "learning_rate": 2.0932279108998323e-06,
      "logits/chosen": -2.9351718425750732,
      "logits/rejected": -2.939762592315674,
      "logps/chosen": -30.686193466186523,
      "logps/rejected": -31.73343276977539,
      "loss": 1.6516,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.18626943230628967,
      "rewards/margins": 0.1763811856508255,
      "rewards/rejected": 0.00988826435059309,
      "step": 230
    },
    {
      "epoch": 0.62,
      "grad_norm": 117.0,
      "learning_rate": 1.8712423238279358e-06,
      "logits/chosen": -2.98549485206604,
      "logits/rejected": -2.9937961101531982,
      "logps/chosen": -30.101831436157227,
      "logps/rejected": -30.255285263061523,
      "loss": 0.8256,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": 0.32123786211013794,
      "rewards/margins": 0.329254686832428,
      "rewards/rejected": -0.008016781881451607,
      "step": 240
    },
    {
      "epoch": 0.65,
      "grad_norm": 350.0,
      "learning_rate": 1.6544367689701824e-06,
      "logits/chosen": -2.817945718765259,
      "logits/rejected": -2.808492660522461,
      "logps/chosen": -26.3896427154541,
      "logps/rejected": -29.31142807006836,
      "loss": 1.6788,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.22583059966564178,
      "rewards/margins": 0.13774575293064117,
      "rewards/rejected": 0.0880848616361618,
      "step": 250
    },
    {
      "epoch": 0.68,
      "grad_norm": 105.5,
      "learning_rate": 1.4445974030621963e-06,
      "logits/chosen": -2.7999472618103027,
      "logits/rejected": -2.8214704990386963,
      "logps/chosen": -29.094181060791016,
      "logps/rejected": -34.21991729736328,
      "loss": 1.111,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.294312059879303,
      "rewards/margins": 0.3126428425312042,
      "rewards/rejected": -0.01833074726164341,
      "step": 260
    },
    {
      "epoch": 0.7,
      "grad_norm": 124.0,
      "learning_rate": 1.243452991757889e-06,
      "logits/chosen": -2.942542791366577,
      "logits/rejected": -2.9476306438446045,
      "logps/chosen": -30.103805541992188,
      "logps/rejected": -29.8814640045166,
      "loss": 1.3753,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.27102118730545044,
      "rewards/margins": 0.27750203013420105,
      "rewards/rejected": -0.0064808703027665615,
      "step": 270
    },
    {
      "epoch": 0.73,
      "grad_norm": 96.0,
      "learning_rate": 1.0526606671603523e-06,
      "logits/chosen": -2.9563868045806885,
      "logits/rejected": -2.9434051513671875,
      "logps/chosen": -30.231460571289062,
      "logps/rejected": -28.510696411132812,
      "loss": 1.8713,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.196088969707489,
      "rewards/margins": 0.1478276252746582,
      "rewards/rejected": 0.0482613667845726,
      "step": 280
    },
    {
      "epoch": 0.75,
      "grad_norm": 176.0,
      "learning_rate": 8.737922755071455e-07,
      "logits/chosen": -2.884552478790283,
      "logits/rejected": -2.8662493228912354,
      "logps/chosen": -31.870113372802734,
      "logps/rejected": -30.56744384765625,
      "loss": 1.5277,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.3064747452735901,
      "rewards/margins": 0.32412776350975037,
      "rewards/rejected": -0.017653033137321472,
      "step": 290
    },
    {
      "epoch": 0.78,
      "grad_norm": 372.0,
      "learning_rate": 7.08321427484816e-07,
      "logits/chosen": -2.889061689376831,
      "logits/rejected": -2.885744571685791,
      "logps/chosen": -31.58562660217285,
      "logps/rejected": -27.505157470703125,
      "loss": 1.5067,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.254812091588974,
      "rewards/margins": 0.21544210612773895,
      "rewards/rejected": 0.039369963109493256,
      "step": 300
    },
    {
      "epoch": 0.78,
      "eval_logits/chosen": -2.80647873878479,
      "eval_logits/rejected": -2.8040761947631836,
      "eval_logps/chosen": -31.251041412353516,
      "eval_logps/rejected": -34.71211624145508,
      "eval_loss": 1.6903845071792603,
      "eval_rewards/accuracies": 0.5573089718818665,
      "eval_rewards/chosen": 0.012563018128275871,
      "eval_rewards/margins": 0.018146870657801628,
      "eval_rewards/rejected": -0.005583854392170906,
      "eval_runtime": 113.035,
      "eval_samples_per_second": 3.034,
      "eval_steps_per_second": 0.38,
      "step": 300
    },
    {
      "epoch": 0.81,
      "grad_norm": 129.0,
      "learning_rate": 5.576113578589035e-07,
      "logits/chosen": -2.7671875953674316,
      "logits/rejected": -2.785642147064209,
      "logps/chosen": -28.604686737060547,
      "logps/rejected": -30.709314346313477,
      "loss": 1.2664,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.24989323318004608,
      "rewards/margins": 0.24081555008888245,
      "rewards/rejected": 0.009077723138034344,
      "step": 310
    },
    {
      "epoch": 0.83,
      "grad_norm": 92.5,
      "learning_rate": 4.229036944380913e-07,
      "logits/chosen": -3.014265537261963,
      "logits/rejected": -2.9993736743927,
      "logps/chosen": -29.781946182250977,
      "logps/rejected": -28.38212013244629,
      "loss": 1.4826,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.17185598611831665,
      "rewards/margins": 0.19719351828098297,
      "rewards/rejected": -0.025337522849440575,
      "step": 320
    },
    {
      "epoch": 0.86,
      "grad_norm": 163.0,
      "learning_rate": 3.053082288996112e-07,
      "logits/chosen": -2.9294309616088867,
      "logits/rejected": -2.9114794731140137,
      "logps/chosen": -27.724899291992188,
      "logps/rejected": -30.527271270751953,
      "loss": 1.2966,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.2548690140247345,
      "rewards/margins": 0.30911877751350403,
      "rewards/rejected": -0.054249756038188934,
      "step": 330
    },
    {
      "epoch": 0.88,
      "grad_norm": 191.0,
      "learning_rate": 2.0579377374915805e-07,
      "logits/chosen": -3.144012928009033,
      "logits/rejected": -3.1506927013397217,
      "logps/chosen": -31.059814453125,
      "logps/rejected": -32.82073211669922,
      "loss": 1.5694,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.23175635933876038,
      "rewards/margins": 0.2816471457481384,
      "rewards/rejected": -0.04989078640937805,
      "step": 340
    },
    {
      "epoch": 0.91,
      "grad_norm": 112.5,
      "learning_rate": 1.2518018074041684e-07,
      "logits/chosen": -3.021515130996704,
      "logits/rejected": -3.02475643157959,
      "logps/chosen": -30.08322525024414,
      "logps/rejected": -31.63114356994629,
      "loss": 1.3735,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.2894637882709503,
      "rewards/margins": 0.3365970253944397,
      "rewards/rejected": -0.04713326320052147,
      "step": 350
    },
    {
      "epoch": 0.94,
      "grad_norm": 215.0,
      "learning_rate": 6.41315865106129e-08,
      "logits/chosen": -2.8619635105133057,
      "logits/rejected": -2.8640754222869873,
      "logps/chosen": -27.785686492919922,
      "logps/rejected": -29.807357788085938,
      "loss": 1.4796,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.34561628103256226,
      "rewards/margins": 0.33618348836898804,
      "rewards/rejected": 0.00943276472389698,
      "step": 360
    },
    {
      "epoch": 0.96,
      "grad_norm": 127.5,
      "learning_rate": 2.3150941078050325e-08,
      "logits/chosen": -2.9357876777648926,
      "logits/rejected": -2.9344675540924072,
      "logps/chosen": -30.16611671447754,
      "logps/rejected": -32.088932037353516,
      "loss": 1.1067,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.24821257591247559,
      "rewards/margins": 0.31256765127182007,
      "rewards/rejected": -0.06435510516166687,
      "step": 370
    },
    {
      "epoch": 0.99,
      "grad_norm": 552.0,
      "learning_rate": 2.575864278703266e-09,
      "logits/chosen": -2.89326548576355,
      "logits/rejected": -2.8758132457733154,
      "logps/chosen": -28.290267944335938,
      "logps/rejected": -27.925792694091797,
      "loss": 1.6812,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.2619072496891022,
      "rewards/margins": 0.27637776732444763,
      "rewards/rejected": -0.01447053998708725,
      "step": 380
    },
    {
      "epoch": 1.0,
      "step": 385,
      "total_flos": 0.0,
      "train_loss": 1.7377146052075672,
      "train_runtime": 2719.6001,
      "train_samples_per_second": 1.132,
      "train_steps_per_second": 0.142
    }
  ],
  "logging_steps": 10,
  "max_steps": 385,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}