{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 385,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 15.8125,
      "learning_rate": 1.282051282051282e-07,
      "logits/chosen": -2.7358343601226807,
      "logits/rejected": -2.7480404376983643,
      "logps/chosen": -27.35565757751465,
      "logps/rejected": -21.06114387512207,
      "loss": 1.0,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.03,
      "grad_norm": 27.375,
      "learning_rate": 1.282051282051282e-06,
      "logits/chosen": -3.0097196102142334,
      "logits/rejected": -2.998093605041504,
      "logps/chosen": -33.20063781738281,
      "logps/rejected": -31.99871826171875,
      "loss": 0.997,
      "rewards/accuracies": 0.4444444477558136,
      "rewards/chosen": -0.003939596936106682,
      "rewards/margins": 0.0030229436233639717,
      "rewards/rejected": -0.006962540093809366,
      "step": 10
    },
    {
      "epoch": 0.05,
      "grad_norm": 18.375,
      "learning_rate": 2.564102564102564e-06,
      "logits/chosen": -2.899087429046631,
      "logits/rejected": -2.8937907218933105,
      "logps/chosen": -32.46463394165039,
      "logps/rejected": -28.943897247314453,
      "loss": 1.0055,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": 0.0020922694820910692,
      "rewards/margins": -0.005501673556864262,
      "rewards/rejected": 0.0075939432717859745,
      "step": 20
    },
    {
      "epoch": 0.08,
      "grad_norm": 17.625,
      "learning_rate": 3.846153846153847e-06,
      "logits/chosen": -3.096761465072632,
      "logits/rejected": -3.1090149879455566,
      "logps/chosen": -32.796714782714844,
      "logps/rejected": -30.1170597076416,
      "loss": 0.9934,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.022394269704818726,
      "rewards/margins": 0.006596512161195278,
      "rewards/rejected": 0.015797756612300873,
      "step": 30
    },
    {
      "epoch": 0.1,
      "grad_norm": 18.125,
      "learning_rate": 4.999896948438434e-06,
      "logits/chosen": -2.862929105758667,
      "logits/rejected": -2.8537039756774902,
      "logps/chosen": -31.520278930664062,
      "logps/rejected": -32.38086700439453,
      "loss": 0.9333,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.062008898705244064,
      "rewards/margins": 0.06669147312641144,
      "rewards/rejected": -0.004682569764554501,
      "step": 40
    },
    {
      "epoch": 0.13,
      "grad_norm": 12.4375,
      "learning_rate": 4.987541037542187e-06,
      "logits/chosen": -2.8800158500671387,
      "logits/rejected": -2.877554416656494,
      "logps/chosen": -29.325756072998047,
      "logps/rejected": -30.063373565673828,
      "loss": 0.9279,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.08288151025772095,
      "rewards/margins": 0.07776711881160736,
      "rewards/rejected": 0.005114386323839426,
      "step": 50
    },
    {
      "epoch": 0.16,
      "grad_norm": 16.125,
      "learning_rate": 4.954691471941119e-06,
      "logits/chosen": -2.905949115753174,
      "logits/rejected": -2.9077162742614746,
      "logps/chosen": -29.794702529907227,
      "logps/rejected": -27.984210968017578,
      "loss": 0.9273,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.06701362133026123,
      "rewards/margins": 0.07271867990493774,
      "rewards/rejected": -0.005705048330128193,
      "step": 60
    },
    {
      "epoch": 0.18,
      "grad_norm": 21.5,
      "learning_rate": 4.901618883413549e-06,
      "logits/chosen": -2.988873243331909,
      "logits/rejected": -2.995783567428589,
      "logps/chosen": -29.117477416992188,
      "logps/rejected": -30.8828067779541,
      "loss": 0.9691,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.03269868344068527,
      "rewards/margins": 0.030899733304977417,
      "rewards/rejected": 0.001798946992494166,
      "step": 70
    },
    {
      "epoch": 0.21,
      "grad_norm": 18.75,
      "learning_rate": 4.828760511501322e-06,
      "logits/chosen": -2.804410457611084,
      "logits/rejected": -2.819267988204956,
      "logps/chosen": -29.285558700561523,
      "logps/rejected": -29.831363677978516,
      "loss": 0.9175,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.0510343499481678,
      "rewards/margins": 0.0824500098824501,
      "rewards/rejected": -0.0314156636595726,
      "step": 80
    },
    {
      "epoch": 0.23,
      "grad_norm": 18.875,
      "learning_rate": 4.7367166013034295e-06,
      "logits/chosen": -2.893660068511963,
      "logits/rejected": -2.876103401184082,
      "logps/chosen": -32.681396484375,
      "logps/rejected": -30.351022720336914,
      "loss": 0.9037,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.03100699558854103,
      "rewards/margins": 0.09688206017017365,
      "rewards/rejected": -0.06587505340576172,
      "step": 90
    },
    {
      "epoch": 0.26,
      "grad_norm": 16.75,
      "learning_rate": 4.626245458345211e-06,
      "logits/chosen": -2.998610734939575,
      "logits/rejected": -3.0000457763671875,
      "logps/chosen": -31.857208251953125,
      "logps/rejected": -30.99690818786621,
      "loss": 0.9239,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.0399332270026207,
      "rewards/margins": 0.07608047872781754,
      "rewards/rejected": -0.03614724427461624,
      "step": 100
    },
    {
      "epoch": 0.26,
      "eval_logits/chosen": -2.8054628372192383,
      "eval_logits/rejected": -2.8027477264404297,
      "eval_logps/chosen": -31.287212371826172,
      "eval_logps/rejected": -34.87384033203125,
      "eval_loss": 0.9661039710044861,
      "eval_rewards/accuracies": 0.6013289093971252,
      "eval_rewards/chosen": -0.0009517524158582091,
      "eval_rewards/margins": 0.03418472036719322,
      "eval_rewards/rejected": -0.035136472433805466,
      "eval_runtime": 113.2847,
      "eval_samples_per_second": 3.028,
      "eval_steps_per_second": 0.38,
      "step": 100
    },
    {
      "epoch": 0.29,
      "grad_norm": 20.875,
      "learning_rate": 4.498257201263691e-06,
      "logits/chosen": -2.9547083377838135,
      "logits/rejected": -2.9303781986236572,
      "logps/chosen": -31.887340545654297,
      "logps/rejected": -31.57730484008789,
      "loss": 0.8618,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.0658910945057869,
      "rewards/margins": 0.1495821326971054,
      "rewards/rejected": -0.08369103074073792,
      "step": 110
    },
    {
      "epoch": 0.31,
      "grad_norm": 17.75,
      "learning_rate": 4.353806263777678e-06,
      "logits/chosen": -3.036729335784912,
      "logits/rejected": -3.066075563430786,
      "logps/chosen": -28.683155059814453,
      "logps/rejected": -34.55553436279297,
      "loss": 0.8329,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.09614940732717514,
      "rewards/margins": 0.1777469962835312,
      "rewards/rejected": -0.08159758895635605,
      "step": 120
    },
    {
      "epoch": 0.34,
      "grad_norm": 17.0,
      "learning_rate": 4.1940827077152755e-06,
      "logits/chosen": -2.742605209350586,
      "logits/rejected": -2.7394351959228516,
      "logps/chosen": -28.72283935546875,
      "logps/rejected": -30.73993492126465,
      "loss": 0.8454,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.050367455929517746,
      "rewards/margins": 0.17159387469291687,
      "rewards/rejected": -0.12122640758752823,
      "step": 130
    },
    {
      "epoch": 0.36,
      "grad_norm": 17.25,
      "learning_rate": 4.0204024186666215e-06,
      "logits/chosen": -3.0202817916870117,
      "logits/rejected": -3.0177478790283203,
      "logps/chosen": -27.38101577758789,
      "logps/rejected": -32.12105178833008,
      "loss": 0.9013,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.021041864529252052,
      "rewards/margins": 0.11389818042516708,
      "rewards/rejected": -0.09285632520914078,
      "step": 140
    },
    {
      "epoch": 0.39,
      "grad_norm": 17.5,
      "learning_rate": 3.834196265035119e-06,
      "logits/chosen": -2.815894603729248,
      "logits/rejected": -2.8112242221832275,
      "logps/chosen": -27.407995223999023,
      "logps/rejected": -31.938213348388672,
      "loss": 0.785,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.083502396941185,
      "rewards/margins": 0.22641611099243164,
      "rewards/rejected": -0.14291372895240784,
      "step": 150
    },
    {
      "epoch": 0.42,
      "grad_norm": 20.25,
      "learning_rate": 3.636998309800573e-06,
      "logits/chosen": -3.133810043334961,
      "logits/rejected": -3.1171538829803467,
      "logps/chosen": -31.85055160522461,
      "logps/rejected": -30.0231990814209,
      "loss": 0.7145,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.1282336711883545,
      "rewards/margins": 0.33691832423210144,
      "rewards/rejected": -0.20868465304374695,
      "step": 160
    },
    {
      "epoch": 0.44,
      "grad_norm": 18.875,
      "learning_rate": 3.4304331721118078e-06,
      "logits/chosen": -2.946866989135742,
      "logits/rejected": -2.954603910446167,
      "logps/chosen": -29.613689422607422,
      "logps/rejected": -32.301788330078125,
      "loss": 0.7392,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.05618465691804886,
      "rewards/margins": 0.2966458201408386,
      "rewards/rejected": -0.24046114087104797,
      "step": 170
    },
    {
      "epoch": 0.47,
      "grad_norm": 16.375,
      "learning_rate": 3.2162026428305436e-06,
      "logits/chosen": -2.803523540496826,
      "logits/rejected": -2.8009111881256104,
      "logps/chosen": -29.574106216430664,
      "logps/rejected": -30.58365249633789,
      "loss": 0.839,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.019148189574480057,
      "rewards/margins": 0.1822437047958374,
      "rewards/rejected": -0.16309550404548645,
      "step": 180
    },
    {
      "epoch": 0.49,
      "grad_norm": 12.3125,
      "learning_rate": 2.996071664294641e-06,
      "logits/chosen": -2.913604497909546,
      "logits/rejected": -2.909559726715088,
      "logps/chosen": -30.189212799072266,
      "logps/rejected": -29.22299575805664,
      "loss": 0.8485,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.000499642628710717,
      "rewards/margins": 0.17259356379508972,
      "rewards/rejected": -0.17309322953224182,
      "step": 190
    },
    {
      "epoch": 0.52,
      "grad_norm": 13.5625,
      "learning_rate": 2.7718537898066833e-06,
      "logits/chosen": -2.9829163551330566,
      "logits/rejected": -2.9701950550079346,
      "logps/chosen": -33.297157287597656,
      "logps/rejected": -31.153818130493164,
      "loss": 0.8146,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.08037153631448746,
      "rewards/margins": 0.22431731224060059,
      "rewards/rejected": -0.14394578337669373,
      "step": 200
    },
    {
      "epoch": 0.52,
      "eval_logits/chosen": -2.8232696056365967,
      "eval_logits/rejected": -2.820622444152832,
      "eval_logps/chosen": -31.65613555908203,
      "eval_logps/rejected": -35.39246368408203,
      "eval_loss": 0.9363206028938293,
      "eval_rewards/accuracies": 0.6183555126190186,
      "eval_rewards/chosen": -0.07473766803741455,
      "eval_rewards/margins": 0.06412449479103088,
      "eval_rewards/rejected": -0.13886214792728424,
      "eval_runtime": 113.1194,
      "eval_samples_per_second": 3.032,
      "eval_steps_per_second": 0.38,
      "step": 200
    },
    {
      "epoch": 0.55,
      "grad_norm": 22.0,
      "learning_rate": 2.5453962426402006e-06,
      "logits/chosen": -2.921879768371582,
      "logits/rejected": -2.923065185546875,
      "logps/chosen": -32.841976165771484,
      "logps/rejected": -35.08049011230469,
      "loss": 0.7569,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.03828667476773262,
      "rewards/margins": 0.28209152817726135,
      "rewards/rejected": -0.24380484223365784,
      "step": 210
    },
    {
      "epoch": 0.57,
      "grad_norm": 16.875,
      "learning_rate": 2.3185646976551794e-06,
      "logits/chosen": -2.905679225921631,
      "logits/rejected": -2.9223124980926514,
      "logps/chosen": -29.705821990966797,
      "logps/rejected": -29.646404266357422,
      "loss": 0.7313,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.09444301575422287,
      "rewards/margins": 0.3180699944496155,
      "rewards/rejected": -0.223626971244812,
      "step": 220
    },
    {
      "epoch": 0.6,
      "grad_norm": 17.5,
      "learning_rate": 2.0932279108998323e-06,
      "logits/chosen": -2.9525959491729736,
      "logits/rejected": -2.9570093154907227,
      "logps/chosen": -30.988698959350586,
      "logps/rejected": -32.67749786376953,
      "loss": 0.8272,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.032633669674396515,
      "rewards/margins": 0.21650373935699463,
      "rewards/rejected": -0.1838700771331787,
      "step": 230
    },
    {
      "epoch": 0.62,
      "grad_norm": 17.875,
      "learning_rate": 1.8712423238279358e-06,
      "logits/chosen": -3.010042667388916,
      "logits/rejected": -3.0168490409851074,
      "logps/chosen": -30.514179229736328,
      "logps/rejected": -31.29245948791504,
      "loss": 0.7261,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.07814909517765045,
      "rewards/margins": 0.2895917594432831,
      "rewards/rejected": -0.21144266426563263,
      "step": 240
    },
    {
      "epoch": 0.65,
      "grad_norm": 17.5,
      "learning_rate": 1.6544367689701824e-06,
      "logits/chosen": -2.8428425788879395,
      "logits/rejected": -2.8320090770721436,
      "logps/chosen": -26.695632934570312,
      "logps/rejected": -30.281723022460938,
      "loss": 0.8345,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.051716603338718414,
      "rewards/margins": 0.20173287391662598,
      "rewards/rejected": -0.15001627802848816,
      "step": 250
    },
    {
      "epoch": 0.68,
      "grad_norm": 18.25,
      "learning_rate": 1.4445974030621963e-06,
      "logits/chosen": -2.8217809200286865,
      "logits/rejected": -2.8434066772460938,
      "logps/chosen": -29.24324607849121,
      "logps/rejected": -35.42533874511719,
      "loss": 0.6631,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.11734296381473541,
      "rewards/margins": 0.36759305000305176,
      "rewards/rejected": -0.25025007128715515,
      "step": 260
    },
    {
      "epoch": 0.7,
      "grad_norm": 20.875,
      "learning_rate": 1.243452991757889e-06,
      "logits/chosen": -2.964048147201538,
      "logits/rejected": -2.9707446098327637,
      "logps/chosen": -30.70973777770996,
      "logps/rejected": -31.365869522094727,
      "loss": 0.6964,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 0.014324212446808815,
      "rewards/margins": 0.3144455850124359,
      "rewards/rejected": -0.3001214265823364,
      "step": 270
    },
    {
      "epoch": 0.73,
      "grad_norm": 17.375,
      "learning_rate": 1.0526606671603523e-06,
      "logits/chosen": -2.9795358180999756,
      "logits/rejected": -2.9657275676727295,
      "logps/chosen": -30.683574676513672,
      "logps/rejected": -29.658838272094727,
      "loss": 0.811,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.007621827535331249,
      "rewards/margins": 0.21311911940574646,
      "rewards/rejected": -0.20549730956554413,
      "step": 280
    },
    {
      "epoch": 0.75,
      "grad_norm": 17.875,
      "learning_rate": 8.737922755071455e-07,
      "logits/chosen": -2.908496618270874,
      "logits/rejected": -2.89042592048645,
      "logps/chosen": -31.784709930419922,
      "logps/rejected": -32.081748962402344,
      "loss": 0.5814,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.17031878232955933,
      "rewards/margins": 0.48200663924217224,
      "rewards/rejected": -0.3116878867149353,
      "step": 290
    },
    {
      "epoch": 0.78,
      "grad_norm": 16.875,
      "learning_rate": 7.08321427484816e-07,
      "logits/chosen": -2.909313440322876,
      "logits/rejected": -2.9058613777160645,
      "logps/chosen": -31.75840187072754,
      "logps/rejected": -28.829761505126953,
      "loss": 0.7173,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.09285064786672592,
      "rewards/margins": 0.3380856215953827,
      "rewards/rejected": -0.24523496627807617,
      "step": 300
    },
    {
      "epoch": 0.78,
      "eval_logits/chosen": -2.8267321586608887,
      "eval_logits/rejected": -2.8247475624084473,
      "eval_logps/chosen": -31.700992584228516,
      "eval_logps/rejected": -35.481746673583984,
      "eval_loss": 0.9279295802116394,
      "eval_rewards/accuracies": 0.6125415563583374,
      "eval_rewards/chosen": -0.08370788395404816,
      "eval_rewards/margins": 0.07301054149866104,
      "eval_rewards/rejected": -0.1567184180021286,
      "eval_runtime": 112.8754,
      "eval_samples_per_second": 3.039,
      "eval_steps_per_second": 0.381,
      "step": 300
    },
    {
      "epoch": 0.81,
      "grad_norm": 18.875,
      "learning_rate": 5.576113578589035e-07,
      "logits/chosen": -2.7911877632141113,
      "logits/rejected": -2.8082525730133057,
      "logps/chosen": -29.013874053955078,
      "logps/rejected": -32.23237228393555,
      "loss": 0.6971,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.04310917109251022,
      "rewards/margins": 0.34318238496780396,
      "rewards/rejected": -0.3000732362270355,
      "step": 310
    },
    {
      "epoch": 0.83,
      "grad_norm": 17.875,
      "learning_rate": 4.229036944380913e-07,
      "logits/chosen": -3.034618616104126,
      "logits/rejected": -3.0209412574768066,
      "logps/chosen": -29.51166343688965,
      "logps/rejected": -29.482135772705078,
      "loss": 0.6758,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.13998422026634216,
      "rewards/margins": 0.3726558983325958,
      "rewards/rejected": -0.23267166316509247,
      "step": 320
    },
    {
      "epoch": 0.86,
      "grad_norm": 16.375,
      "learning_rate": 3.053082288996112e-07,
      "logits/chosen": -2.9494917392730713,
      "logits/rejected": -2.9318935871124268,
      "logps/chosen": -27.68900489807129,
      "logps/rejected": -31.994842529296875,
      "loss": 0.6228,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.1346132755279541,
      "rewards/margins": 0.4552524983882904,
      "rewards/rejected": -0.3206392228603363,
      "step": 330
    },
    {
      "epoch": 0.88,
      "grad_norm": 20.25,
      "learning_rate": 2.0579377374915805e-07,
      "logits/chosen": -3.168849468231201,
      "logits/rejected": -3.174539089202881,
      "logps/chosen": -31.354122161865234,
      "logps/rejected": -34.371620178222656,
      "loss": 0.6799,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.057016681879758835,
      "rewards/margins": 0.39213961362838745,
      "rewards/rejected": -0.3351229131221771,
      "step": 340
    },
    {
      "epoch": 0.91,
      "grad_norm": 18.75,
      "learning_rate": 1.2518018074041684e-07,
      "logits/chosen": -3.0446276664733887,
      "logits/rejected": -3.0476937294006348,
      "logps/chosen": -30.175695419311523,
      "logps/rejected": -32.66219711303711,
      "loss": 0.6878,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.12623663246631622,
      "rewards/margins": 0.35601407289505005,
      "rewards/rejected": -0.22977742552757263,
      "step": 350
    },
    {
      "epoch": 0.94,
      "grad_norm": 20.25,
      "learning_rate": 6.41315865106129e-08,
      "logits/chosen": -2.8835089206695557,
      "logits/rejected": -2.8853554725646973,
      "logps/chosen": -27.79741859436035,
      "logps/rejected": -30.80864906311035,
      "loss": 0.6737,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.1704617589712143,
      "rewards/margins": 0.3660039007663727,
      "rewards/rejected": -0.19554206728935242,
      "step": 360
    },
    {
      "epoch": 0.96,
      "grad_norm": 24.375,
      "learning_rate": 2.3150941078050325e-08,
      "logits/chosen": -2.960698127746582,
      "logits/rejected": -2.958723306655884,
      "logps/chosen": -30.233112335205078,
      "logps/rejected": -33.06101608276367,
      "loss": 0.7086,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.1107078567147255,
      "rewards/margins": 0.33730247616767883,
      "rewards/rejected": -0.22659464180469513,
      "step": 370
    },
    {
      "epoch": 0.99,
      "grad_norm": 18.25,
      "learning_rate": 2.575864278703266e-09,
      "logits/chosen": -2.9156675338745117,
      "logits/rejected": -2.898934841156006,
      "logps/chosen": -28.734481811523438,
      "logps/rejected": -29.24869728088379,
      "loss": 0.7185,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.04211098328232765,
      "rewards/margins": 0.31392616033554077,
      "rewards/rejected": -0.2718151807785034,
      "step": 380
    },
    {
      "epoch": 1.0,
      "step": 385,
      "total_flos": 0.0,
      "train_loss": 0.8011291454364727,
      "train_runtime": 2726.0249,
      "train_samples_per_second": 1.129,
      "train_steps_per_second": 0.141
    }
  ],
  "logging_steps": 10,
  "max_steps": 385,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}