|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 4.0, |
|
"eval_steps": 100, |
|
"global_step": 1540, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 1.282051282051282e-07, |
|
"logits/chosen": -1.7278180122375488, |
|
"logits/rejected": -1.7377450466156006, |
|
"logps/chosen": -29.553977966308594, |
|
"logps/rejected": -42.813133239746094, |
|
"loss": 0.5102, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.282051282051282e-06, |
|
"logits/chosen": -1.866772174835205, |
|
"logits/rejected": -1.8710780143737793, |
|
"logps/chosen": -36.99943161010742, |
|
"logps/rejected": -33.657470703125, |
|
"loss": 0.4858, |
|
"rewards/accuracies": 0.5277777910232544, |
|
"rewards/chosen": 0.005016062408685684, |
|
"rewards/margins": 0.02419126406311989, |
|
"rewards/rejected": -0.019175197929143906, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 2.564102564102564e-06, |
|
"logits/chosen": -1.9975645542144775, |
|
"logits/rejected": -2.0002217292785645, |
|
"logps/chosen": -29.651592254638672, |
|
"logps/rejected": -29.066898345947266, |
|
"loss": 0.5542, |
|
"rewards/accuracies": 0.5, |
|
"rewards/chosen": -0.006579822860658169, |
|
"rewards/margins": -0.004162783268839121, |
|
"rewards/rejected": -0.0024170405231416225, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 3.846153846153847e-06, |
|
"logits/chosen": -1.9212992191314697, |
|
"logits/rejected": -1.918600082397461, |
|
"logps/chosen": -31.42719078063965, |
|
"logps/rejected": -33.23390579223633, |
|
"loss": 0.5492, |
|
"rewards/accuracies": 0.5249999761581421, |
|
"rewards/chosen": -0.00776728754863143, |
|
"rewards/margins": 0.0030429032631218433, |
|
"rewards/rejected": -0.010810190811753273, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 4.999896948438434e-06, |
|
"logits/chosen": -2.0178468227386475, |
|
"logits/rejected": -2.009111166000366, |
|
"logps/chosen": -32.579471588134766, |
|
"logps/rejected": -32.50147247314453, |
|
"loss": 0.5878, |
|
"rewards/accuracies": 0.44999998807907104, |
|
"rewards/chosen": -0.001865685684606433, |
|
"rewards/margins": -0.009705344215035439, |
|
"rewards/rejected": 0.007839656434953213, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 4.987541037542187e-06, |
|
"logits/chosen": -1.8617979288101196, |
|
"logits/rejected": -1.8510282039642334, |
|
"logps/chosen": -33.562862396240234, |
|
"logps/rejected": -35.4472770690918, |
|
"loss": 0.5775, |
|
"rewards/accuracies": 0.42500001192092896, |
|
"rewards/chosen": -0.0037666684947907925, |
|
"rewards/margins": -0.007314275950193405, |
|
"rewards/rejected": 0.0035476074554026127, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 4.954691471941119e-06, |
|
"logits/chosen": -1.9399009943008423, |
|
"logits/rejected": -1.9418485164642334, |
|
"logps/chosen": -32.5726203918457, |
|
"logps/rejected": -33.18496322631836, |
|
"loss": 0.5967, |
|
"rewards/accuracies": 0.5249999761581421, |
|
"rewards/chosen": 0.019294817000627518, |
|
"rewards/margins": 0.028803948312997818, |
|
"rewards/rejected": -0.009509134106338024, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 4.901618883413549e-06, |
|
"logits/chosen": -2.0729470252990723, |
|
"logits/rejected": -2.077918529510498, |
|
"logps/chosen": -33.93268585205078, |
|
"logps/rejected": -36.637229919433594, |
|
"loss": 0.5297, |
|
"rewards/accuracies": 0.5874999761581421, |
|
"rewards/chosen": 0.039063483476638794, |
|
"rewards/margins": 0.08691467344760895, |
|
"rewards/rejected": -0.04785118252038956, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 4.828760511501322e-06, |
|
"logits/chosen": -1.9366486072540283, |
|
"logits/rejected": -1.939780592918396, |
|
"logps/chosen": -34.30228805541992, |
|
"logps/rejected": -34.652069091796875, |
|
"loss": 0.4209, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": 0.08786438405513763, |
|
"rewards/margins": 0.12962770462036133, |
|
"rewards/rejected": -0.041763320565223694, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 4.7367166013034295e-06, |
|
"logits/chosen": -1.946979284286499, |
|
"logits/rejected": -1.951521635055542, |
|
"logps/chosen": -32.4210205078125, |
|
"logps/rejected": -32.337799072265625, |
|
"loss": 0.6191, |
|
"rewards/accuracies": 0.5375000238418579, |
|
"rewards/chosen": 0.03248247504234314, |
|
"rewards/margins": 0.017702888697385788, |
|
"rewards/rejected": 0.014779585413634777, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 4.626245458345211e-06, |
|
"logits/chosen": -2.045017719268799, |
|
"logits/rejected": -2.043015241622925, |
|
"logps/chosen": -32.16361618041992, |
|
"logps/rejected": -31.273128509521484, |
|
"loss": 0.4711, |
|
"rewards/accuracies": 0.5874999761581421, |
|
"rewards/chosen": 0.05405203625559807, |
|
"rewards/margins": 0.05889582633972168, |
|
"rewards/rejected": -0.004843792412430048, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"eval_logits/chosen": -2.240097761154175, |
|
"eval_logits/rejected": -2.2352383136749268, |
|
"eval_logps/chosen": -34.01129150390625, |
|
"eval_logps/rejected": -37.49793243408203, |
|
"eval_loss": 0.5754798054695129, |
|
"eval_rewards/accuracies": 0.5195183157920837, |
|
"eval_rewards/chosen": 0.016279777511954308, |
|
"eval_rewards/margins": 0.0031977586913853884, |
|
"eval_rewards/rejected": 0.013082021847367287, |
|
"eval_runtime": 145.9275, |
|
"eval_samples_per_second": 2.35, |
|
"eval_steps_per_second": 0.295, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 4.498257201263691e-06, |
|
"logits/chosen": -2.0017762184143066, |
|
"logits/rejected": -1.9993689060211182, |
|
"logps/chosen": -33.13564682006836, |
|
"logps/rejected": -33.997642517089844, |
|
"loss": 0.6759, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": 0.07571890205144882, |
|
"rewards/margins": 0.05350743606686592, |
|
"rewards/rejected": 0.022211460396647453, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 4.353806263777678e-06, |
|
"logits/chosen": -2.013158082962036, |
|
"logits/rejected": -2.004814624786377, |
|
"logps/chosen": -32.32666015625, |
|
"logps/rejected": -32.12488555908203, |
|
"loss": 0.5572, |
|
"rewards/accuracies": 0.550000011920929, |
|
"rewards/chosen": 0.08277235180139542, |
|
"rewards/margins": 0.05714136362075806, |
|
"rewards/rejected": 0.02563098631799221, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 4.1940827077152755e-06, |
|
"logits/chosen": -2.039836883544922, |
|
"logits/rejected": -2.031825542449951, |
|
"logps/chosen": -30.349987030029297, |
|
"logps/rejected": -32.029136657714844, |
|
"loss": 0.5109, |
|
"rewards/accuracies": 0.574999988079071, |
|
"rewards/chosen": 0.0880090743303299, |
|
"rewards/margins": 0.07130132615566254, |
|
"rewards/rejected": 0.016707751899957657, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 4.0204024186666215e-06, |
|
"logits/chosen": -1.9694139957427979, |
|
"logits/rejected": -1.9796711206436157, |
|
"logps/chosen": -31.24441909790039, |
|
"logps/rejected": -32.56440353393555, |
|
"loss": 0.5037, |
|
"rewards/accuracies": 0.6499999761581421, |
|
"rewards/chosen": 0.12266886234283447, |
|
"rewards/margins": 0.14333835244178772, |
|
"rewards/rejected": -0.02066950313746929, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 3.834196265035119e-06, |
|
"logits/chosen": -1.8828538656234741, |
|
"logits/rejected": -1.8840004205703735, |
|
"logps/chosen": -34.006561279296875, |
|
"logps/rejected": -34.81753921508789, |
|
"loss": 0.4564, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": 0.13028928637504578, |
|
"rewards/margins": 0.17248663306236267, |
|
"rewards/rejected": -0.04219735041260719, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 3.636998309800573e-06, |
|
"logits/chosen": -1.9349613189697266, |
|
"logits/rejected": -1.9315464496612549, |
|
"logps/chosen": -36.059181213378906, |
|
"logps/rejected": -32.72267150878906, |
|
"loss": 0.487, |
|
"rewards/accuracies": 0.6625000238418579, |
|
"rewards/chosen": 0.07874591648578644, |
|
"rewards/margins": 0.07869003713130951, |
|
"rewards/rejected": 5.587190389633179e-05, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 3.4304331721118078e-06, |
|
"logits/chosen": -2.036032199859619, |
|
"logits/rejected": -2.028651714324951, |
|
"logps/chosen": -33.573509216308594, |
|
"logps/rejected": -31.36956214904785, |
|
"loss": 0.4465, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": 0.15704451501369476, |
|
"rewards/margins": 0.16944527626037598, |
|
"rewards/rejected": -0.012400749139487743, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 3.2162026428305436e-06, |
|
"logits/chosen": -2.042617082595825, |
|
"logits/rejected": -2.0478556156158447, |
|
"logps/chosen": -32.33074188232422, |
|
"logps/rejected": -32.472496032714844, |
|
"loss": 0.3727, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": 0.14983226358890533, |
|
"rewards/margins": 0.14098060131072998, |
|
"rewards/rejected": 0.008851657621562481, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 2.996071664294641e-06, |
|
"logits/chosen": -2.0445618629455566, |
|
"logits/rejected": -2.0417864322662354, |
|
"logps/chosen": -31.317855834960938, |
|
"logps/rejected": -31.348400115966797, |
|
"loss": 0.4609, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": 0.11976919323205948, |
|
"rewards/margins": 0.14090058207511902, |
|
"rewards/rejected": -0.021131375804543495, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 2.7718537898066833e-06, |
|
"logits/chosen": -1.9139589071273804, |
|
"logits/rejected": -1.9186115264892578, |
|
"logps/chosen": -31.420101165771484, |
|
"logps/rejected": -32.79216766357422, |
|
"loss": 0.5061, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.13032536208629608, |
|
"rewards/margins": 0.13119173049926758, |
|
"rewards/rejected": -0.0008663811022415757, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"eval_logits/chosen": -2.2385354042053223, |
|
"eval_logits/rejected": -2.2336885929107666, |
|
"eval_logps/chosen": -34.05002975463867, |
|
"eval_logps/rejected": -37.54545974731445, |
|
"eval_loss": 0.5877167582511902, |
|
"eval_rewards/accuracies": 0.4991694688796997, |
|
"eval_rewards/chosen": -0.010832725092768669, |
|
"eval_rewards/margins": 0.009353035129606724, |
|
"eval_rewards/rejected": -0.020185761153697968, |
|
"eval_runtime": 145.6958, |
|
"eval_samples_per_second": 2.354, |
|
"eval_steps_per_second": 0.295, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 2.5453962426402006e-06, |
|
"logits/chosen": -2.026780366897583, |
|
"logits/rejected": -2.0374534130096436, |
|
"logps/chosen": -31.786846160888672, |
|
"logps/rejected": -33.922447204589844, |
|
"loss": 0.4225, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.12911871075630188, |
|
"rewards/margins": 0.17094141244888306, |
|
"rewards/rejected": -0.041822709143161774, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 2.3185646976551794e-06, |
|
"logits/chosen": -1.9182548522949219, |
|
"logits/rejected": -1.933038353919983, |
|
"logps/chosen": -29.936147689819336, |
|
"logps/rejected": -31.559993743896484, |
|
"loss": 0.4429, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": 0.12277615070343018, |
|
"rewards/margins": 0.12404797971248627, |
|
"rewards/rejected": -0.0012718416983261704, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 2.0932279108998323e-06, |
|
"logits/chosen": -1.9737575054168701, |
|
"logits/rejected": -1.9777438640594482, |
|
"logps/chosen": -33.20977783203125, |
|
"logps/rejected": -31.5734920501709, |
|
"loss": 0.3768, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": 0.14538228511810303, |
|
"rewards/margins": 0.16231416165828705, |
|
"rewards/rejected": -0.016931891441345215, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 1.8712423238279358e-06, |
|
"logits/chosen": -1.9738047122955322, |
|
"logits/rejected": -1.9518985748291016, |
|
"logps/chosen": -33.94301223754883, |
|
"logps/rejected": -35.023075103759766, |
|
"loss": 0.4304, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": 0.1475195735692978, |
|
"rewards/margins": 0.19811663031578064, |
|
"rewards/rejected": -0.05059707164764404, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 1.6544367689701824e-06, |
|
"logits/chosen": -2.0155911445617676, |
|
"logits/rejected": -2.0122878551483154, |
|
"logps/chosen": -32.76276397705078, |
|
"logps/rejected": -36.25225830078125, |
|
"loss": 0.4155, |
|
"rewards/accuracies": 0.637499988079071, |
|
"rewards/chosen": 0.11361332982778549, |
|
"rewards/margins": 0.1327473670244217, |
|
"rewards/rejected": -0.019134048372507095, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 1.4445974030621963e-06, |
|
"logits/chosen": -1.8830368518829346, |
|
"logits/rejected": -1.880629539489746, |
|
"logps/chosen": -34.01710891723633, |
|
"logps/rejected": -35.53314971923828, |
|
"loss": 0.4219, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": 0.11990444362163544, |
|
"rewards/margins": 0.1398761123418808, |
|
"rewards/rejected": -0.019971664994955063, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 1.243452991757889e-06, |
|
"logits/chosen": -1.8685131072998047, |
|
"logits/rejected": -1.8660367727279663, |
|
"logps/chosen": -34.2363395690918, |
|
"logps/rejected": -31.768321990966797, |
|
"loss": 0.4918, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": 0.11288031190633774, |
|
"rewards/margins": 0.11077453196048737, |
|
"rewards/rejected": 0.0021057710982859135, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 1.0526606671603523e-06, |
|
"logits/chosen": -1.9722087383270264, |
|
"logits/rejected": -1.961612343788147, |
|
"logps/chosen": -35.04926300048828, |
|
"logps/rejected": -31.826486587524414, |
|
"loss": 0.3417, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": 0.21516695618629456, |
|
"rewards/margins": 0.20501188933849335, |
|
"rewards/rejected": 0.010155050083994865, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 8.737922755071455e-07, |
|
"logits/chosen": -2.068380832672119, |
|
"logits/rejected": -2.05340313911438, |
|
"logps/chosen": -30.748706817626953, |
|
"logps/rejected": -32.63469314575195, |
|
"loss": 0.4741, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": 0.12279413640499115, |
|
"rewards/margins": 0.11079368740320206, |
|
"rewards/rejected": 0.012000440619885921, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 7.08321427484816e-07, |
|
"logits/chosen": -1.9397351741790771, |
|
"logits/rejected": -1.9371974468231201, |
|
"logps/chosen": -32.603660583496094, |
|
"logps/rejected": -30.863941192626953, |
|
"loss": 0.3371, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": 0.2264125794172287, |
|
"rewards/margins": 0.25940603017807007, |
|
"rewards/rejected": -0.03299345448613167, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"eval_logits/chosen": -2.2370686531066895, |
|
"eval_logits/rejected": -2.2322022914886475, |
|
"eval_logps/chosen": -34.03439712524414, |
|
"eval_logps/rejected": -37.53529357910156, |
|
"eval_loss": 0.5842657089233398, |
|
"eval_rewards/accuracies": 0.5278239250183105, |
|
"eval_rewards/chosen": 0.00010742468293756247, |
|
"eval_rewards/margins": 0.013178782537579536, |
|
"eval_rewards/rejected": -0.013071359135210514, |
|
"eval_runtime": 145.5131, |
|
"eval_samples_per_second": 2.357, |
|
"eval_steps_per_second": 0.296, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"grad_norm": 101.5, |
|
"learning_rate": 4.84533120650964e-06, |
|
"logits/chosen": -2.0717549324035645, |
|
"logits/rejected": -2.058924913406372, |
|
"logps/chosen": -32.21288299560547, |
|
"logps/rejected": -32.803016662597656, |
|
"loss": 0.2644, |
|
"rewards/accuracies": 0.887499988079071, |
|
"rewards/chosen": 0.22085602581501007, |
|
"rewards/margins": 0.3342258632183075, |
|
"rewards/rejected": -0.11336983740329742, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"grad_norm": 28.625, |
|
"learning_rate": 4.825108134172131e-06, |
|
"logits/chosen": -1.98604416847229, |
|
"logits/rejected": -1.9773375988006592, |
|
"logps/chosen": -32.002044677734375, |
|
"logps/rejected": -30.32120132446289, |
|
"loss": 0.2537, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": 0.24706146121025085, |
|
"rewards/margins": 0.3029271960258484, |
|
"rewards/rejected": -0.055865705013275146, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"grad_norm": 23.125, |
|
"learning_rate": 4.80369052967602e-06, |
|
"logits/chosen": -1.923841118812561, |
|
"logits/rejected": -1.935959815979004, |
|
"logps/chosen": -30.132516860961914, |
|
"logps/rejected": -33.53127670288086, |
|
"loss": 0.3016, |
|
"rewards/accuracies": 0.824999988079071, |
|
"rewards/chosen": 0.2427288293838501, |
|
"rewards/margins": 0.3330441117286682, |
|
"rewards/rejected": -0.0903153270483017, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 56.5, |
|
"learning_rate": 4.781089396387968e-06, |
|
"logits/chosen": -1.8918178081512451, |
|
"logits/rejected": -1.8825798034667969, |
|
"logps/chosen": -34.266075134277344, |
|
"logps/rejected": -36.046722412109375, |
|
"loss": 0.283, |
|
"rewards/accuracies": 0.887499988079071, |
|
"rewards/chosen": 0.3029957413673401, |
|
"rewards/margins": 0.4359920620918274, |
|
"rewards/rejected": -0.1329963356256485, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 44.0, |
|
"learning_rate": 4.757316345716554e-06, |
|
"logits/chosen": -1.950615644454956, |
|
"logits/rejected": -1.951266884803772, |
|
"logps/chosen": -34.006526947021484, |
|
"logps/rejected": -33.97644805908203, |
|
"loss": 0.2869, |
|
"rewards/accuracies": 0.862500011920929, |
|
"rewards/chosen": 0.2791282534599304, |
|
"rewards/margins": 0.3683601915836334, |
|
"rewards/rejected": -0.08923190832138062, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 54.25, |
|
"learning_rate": 4.73238359114687e-06, |
|
"logits/chosen": -2.07924222946167, |
|
"logits/rejected": -2.0854721069335938, |
|
"logps/chosen": -31.282617568969727, |
|
"logps/rejected": -32.800941467285156, |
|
"loss": 0.3061, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": 0.2034403532743454, |
|
"rewards/margins": 0.296302855014801, |
|
"rewards/rejected": -0.09286249428987503, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 172.0, |
|
"learning_rate": 4.706303941965804e-06, |
|
"logits/chosen": -2.011671781539917, |
|
"logits/rejected": -2.0111806392669678, |
|
"logps/chosen": -33.01143264770508, |
|
"logps/rejected": -36.115821838378906, |
|
"loss": 0.4197, |
|
"rewards/accuracies": 0.8374999761581421, |
|
"rewards/chosen": 0.3034539818763733, |
|
"rewards/margins": 0.349683940410614, |
|
"rewards/rejected": -0.04622991383075714, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 32.5, |
|
"learning_rate": 4.679090796681225e-06, |
|
"logits/chosen": -2.047156572341919, |
|
"logits/rejected": -2.04251766204834, |
|
"logps/chosen": -30.326885223388672, |
|
"logps/rejected": -29.325164794921875, |
|
"loss": 0.2942, |
|
"rewards/accuracies": 0.887499988079071, |
|
"rewards/chosen": 0.26859989762306213, |
|
"rewards/margins": 0.3464903235435486, |
|
"rewards/rejected": -0.07789044082164764, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"grad_norm": 52.25, |
|
"learning_rate": 4.650758136138454e-06, |
|
"logits/chosen": -1.8196346759796143, |
|
"logits/rejected": -1.8260679244995117, |
|
"logps/chosen": -32.0097541809082, |
|
"logps/rejected": -36.38646697998047, |
|
"loss": 0.4763, |
|
"rewards/accuracies": 0.887499988079071, |
|
"rewards/chosen": 0.26317089796066284, |
|
"rewards/margins": 0.450895220041275, |
|
"rewards/rejected": -0.1877242624759674, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"grad_norm": 55.75, |
|
"learning_rate": 4.621320516337559e-06, |
|
"logits/chosen": -1.9777911901474, |
|
"logits/rejected": -1.9714463949203491, |
|
"logps/chosen": -33.3974609375, |
|
"logps/rejected": -32.44751739501953, |
|
"loss": 0.4001, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": 0.3046652674674988, |
|
"rewards/margins": 0.40171337127685547, |
|
"rewards/rejected": -0.09704810380935669, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"eval_logits/chosen": -2.2401604652404785, |
|
"eval_logits/rejected": -2.235311508178711, |
|
"eval_logps/chosen": -34.04499435424805, |
|
"eval_logps/rejected": -37.51197052001953, |
|
"eval_loss": 0.6349518299102783, |
|
"eval_rewards/accuracies": 0.48380398750305176, |
|
"eval_rewards/chosen": -0.00731015345081687, |
|
"eval_rewards/margins": -0.010565591044723988, |
|
"eval_rewards/rejected": 0.003255437593907118, |
|
"eval_runtime": 145.4219, |
|
"eval_samples_per_second": 2.359, |
|
"eval_steps_per_second": 0.296, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"grad_norm": 32.75, |
|
"learning_rate": 4.590793060955158e-06, |
|
"logits/chosen": -1.9763590097427368, |
|
"logits/rejected": -1.9835665225982666, |
|
"logps/chosen": -28.756824493408203, |
|
"logps/rejected": -29.339588165283203, |
|
"loss": 0.3007, |
|
"rewards/accuracies": 0.9375, |
|
"rewards/chosen": 0.2544856667518616, |
|
"rewards/margins": 0.42344123125076294, |
|
"rewards/rejected": -0.16895556449890137, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"grad_norm": 83.0, |
|
"learning_rate": 4.559191453574582e-06, |
|
"logits/chosen": -1.9966684579849243, |
|
"logits/rejected": -1.9955562353134155, |
|
"logps/chosen": -33.83961868286133, |
|
"logps/rejected": -31.041629791259766, |
|
"loss": 0.3581, |
|
"rewards/accuracies": 0.824999988079071, |
|
"rewards/chosen": 0.28776243329048157, |
|
"rewards/margins": 0.3567716181278229, |
|
"rewards/rejected": -0.06900922954082489, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"grad_norm": 96.0, |
|
"learning_rate": 4.52653192962838e-06, |
|
"logits/chosen": -1.9937080144882202, |
|
"logits/rejected": -1.9763784408569336, |
|
"logps/chosen": -30.642507553100586, |
|
"logps/rejected": -33.117218017578125, |
|
"loss": 0.5267, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.2342870533466339, |
|
"rewards/margins": 0.36793988943099976, |
|
"rewards/rejected": -0.13365286588668823, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"grad_norm": 43.75, |
|
"learning_rate": 4.492831268057307e-06, |
|
"logits/chosen": -2.0257625579833984, |
|
"logits/rejected": -2.0276401042938232, |
|
"logps/chosen": -36.22648239135742, |
|
"logps/rejected": -35.008148193359375, |
|
"loss": 0.361, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 0.2701130509376526, |
|
"rewards/margins": 0.37383943796157837, |
|
"rewards/rejected": -0.10372640937566757, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"grad_norm": 24.875, |
|
"learning_rate": 4.458106782690094e-06, |
|
"logits/chosen": -2.1039767265319824, |
|
"logits/rejected": -2.103851079940796, |
|
"logps/chosen": -32.201255798339844, |
|
"logps/rejected": -33.596595764160156, |
|
"loss": 0.3431, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": 0.30246877670288086, |
|
"rewards/margins": 0.3280053436756134, |
|
"rewards/rejected": -0.025536546483635902, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"grad_norm": 73.0, |
|
"learning_rate": 4.422376313348405e-06, |
|
"logits/chosen": -2.0463452339172363, |
|
"logits/rejected": -2.0388851165771484, |
|
"logps/chosen": -32.02504348754883, |
|
"logps/rejected": -36.26923370361328, |
|
"loss": 0.504, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": 0.2666401267051697, |
|
"rewards/margins": 0.41727694869041443, |
|
"rewards/rejected": -0.15063682198524475, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"grad_norm": 74.0, |
|
"learning_rate": 4.3856582166815696e-06, |
|
"logits/chosen": -1.9466873407363892, |
|
"logits/rejected": -1.9430792331695557, |
|
"logps/chosen": -33.49795913696289, |
|
"logps/rejected": -33.04600143432617, |
|
"loss": 0.2362, |
|
"rewards/accuracies": 0.887499988079071, |
|
"rewards/chosen": 0.3040338456630707, |
|
"rewards/margins": 0.38951820135116577, |
|
"rewards/rejected": -0.08548439294099808, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"grad_norm": 74.0, |
|
"learning_rate": 4.347971356735789e-06, |
|
"logits/chosen": -2.0715551376342773, |
|
"logits/rejected": -2.0645508766174316, |
|
"logps/chosen": -30.481578826904297, |
|
"logps/rejected": -32.14142990112305, |
|
"loss": 0.4149, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": 0.242868572473526, |
|
"rewards/margins": 0.3464292287826538, |
|
"rewards/rejected": -0.10356064140796661, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"grad_norm": 97.5, |
|
"learning_rate": 4.309335095262675e-06, |
|
"logits/chosen": -2.018632411956787, |
|
"logits/rejected": -2.020221710205078, |
|
"logps/chosen": -34.90156936645508, |
|
"logps/rejected": -33.80845260620117, |
|
"loss": 0.3635, |
|
"rewards/accuracies": 0.824999988079071, |
|
"rewards/chosen": 0.3161340653896332, |
|
"rewards/margins": 0.4329928457736969, |
|
"rewards/rejected": -0.11685874313116074, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"grad_norm": 64.0, |
|
"learning_rate": 4.269769281772082e-06, |
|
"logits/chosen": -1.9072506427764893, |
|
"logits/rejected": -1.9048874378204346, |
|
"logps/chosen": -32.74415588378906, |
|
"logps/rejected": -36.70649337768555, |
|
"loss": 0.3401, |
|
"rewards/accuracies": 0.9125000238418579, |
|
"rewards/chosen": 0.2996928095817566, |
|
"rewards/margins": 0.469214528799057, |
|
"rewards/rejected": -0.16952167451381683, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"eval_logits/chosen": -2.2401528358459473, |
|
"eval_logits/rejected": -2.235276937484741, |
|
"eval_logps/chosen": -34.05385971069336, |
|
"eval_logps/rejected": -37.54425811767578, |
|
"eval_loss": 0.623750627040863, |
|
"eval_rewards/accuracies": 0.5141196250915527, |
|
"eval_rewards/chosen": -0.013515940867364407, |
|
"eval_rewards/margins": 0.005829531699419022, |
|
"eval_rewards/rejected": -0.019345473498106003, |
|
"eval_runtime": 145.1627, |
|
"eval_samples_per_second": 2.363, |
|
"eval_steps_per_second": 0.296, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"grad_norm": 43.75, |
|
"learning_rate": 4.22929424333435e-06, |
|
"logits/chosen": -2.0122382640838623, |
|
"logits/rejected": -2.0169613361358643, |
|
"logps/chosen": -33.01191329956055, |
|
"logps/rejected": -31.77316665649414, |
|
"loss": 0.3867, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": 0.31180983781814575, |
|
"rewards/margins": 0.40075111389160156, |
|
"rewards/rejected": -0.08894126117229462, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"grad_norm": 47.0, |
|
"learning_rate": 4.1879307741372085e-06, |
|
"logits/chosen": -2.045281171798706, |
|
"logits/rejected": -2.0561985969543457, |
|
"logps/chosen": -30.94211196899414, |
|
"logps/rejected": -31.70987892150879, |
|
"loss": 0.4115, |
|
"rewards/accuracies": 0.862500011920929, |
|
"rewards/chosen": 0.31381234526634216, |
|
"rewards/margins": 0.455346018075943, |
|
"rewards/rejected": -0.14153364300727844, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"grad_norm": 69.0, |
|
"learning_rate": 4.145700124802693e-06, |
|
"logits/chosen": -1.9737331867218018, |
|
"logits/rejected": -1.9702527523040771, |
|
"logps/chosen": -31.990686416625977, |
|
"logps/rejected": -32.61437225341797, |
|
"loss": 0.4131, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.3008785843849182, |
|
"rewards/margins": 0.4769964814186096, |
|
"rewards/rejected": -0.1761178821325302, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"grad_norm": 52.75, |
|
"learning_rate": 4.102623991469562e-06, |
|
"logits/chosen": -1.8443644046783447, |
|
"logits/rejected": -1.853546142578125, |
|
"logps/chosen": -32.251625061035156, |
|
"logps/rejected": -32.053138732910156, |
|
"loss": 0.3856, |
|
"rewards/accuracies": 0.824999988079071, |
|
"rewards/chosen": 0.22243666648864746, |
|
"rewards/margins": 0.3604408800601959, |
|
"rewards/rejected": -0.13800422847270966, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"grad_norm": 58.75, |
|
"learning_rate": 4.058724504646834e-06, |
|
"logits/chosen": -1.9435665607452393, |
|
"logits/rejected": -1.9371446371078491, |
|
"logps/chosen": -33.34047317504883, |
|
"logps/rejected": -31.121173858642578, |
|
"loss": 0.3465, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": 0.2574843168258667, |
|
"rewards/margins": 0.34836047887802124, |
|
"rewards/rejected": -0.09087616950273514, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"grad_norm": 135.0, |
|
"learning_rate": 4.014024217844167e-06, |
|
"logits/chosen": -2.0361862182617188, |
|
"logits/rejected": -2.0341010093688965, |
|
"logps/chosen": -34.12422561645508, |
|
"logps/rejected": -31.634075164794922, |
|
"loss": 0.6034, |
|
"rewards/accuracies": 0.8374999761581421, |
|
"rewards/chosen": 0.3128809928894043, |
|
"rewards/margins": 0.40856558084487915, |
|
"rewards/rejected": -0.09568462520837784, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"grad_norm": 52.75, |
|
"learning_rate": 3.968546095984911e-06, |
|
"logits/chosen": -1.8710432052612305, |
|
"logits/rejected": -1.8688808679580688, |
|
"logps/chosen": -32.49556350708008, |
|
"logps/rejected": -31.1130428314209, |
|
"loss": 0.4937, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": 0.21059513092041016, |
|
"rewards/margins": 0.2881118357181549, |
|
"rewards/rejected": -0.07751675695180893, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"grad_norm": 95.5, |
|
"learning_rate": 3.922313503607806e-06, |
|
"logits/chosen": -2.005981922149658, |
|
"logits/rejected": -2.0027260780334473, |
|
"logps/chosen": -30.789072036743164, |
|
"logps/rejected": -34.820518493652344, |
|
"loss": 0.4043, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": 0.22090163826942444, |
|
"rewards/margins": 0.356947124004364, |
|
"rewards/rejected": -0.13604548573493958, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"grad_norm": 20.25, |
|
"learning_rate": 3.875350192863368e-06, |
|
"logits/chosen": -1.948760986328125, |
|
"logits/rejected": -1.9524637460708618, |
|
"logps/chosen": -29.25394630432129, |
|
"logps/rejected": -30.857736587524414, |
|
"loss": 0.3681, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": 0.2527596354484558, |
|
"rewards/margins": 0.3919641375541687, |
|
"rewards/rejected": -0.1392044574022293, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"grad_norm": 28.125, |
|
"learning_rate": 3.8276802913111436e-06, |
|
"logits/chosen": -1.9878416061401367, |
|
"logits/rejected": -1.9875671863555908, |
|
"logps/chosen": -31.794857025146484, |
|
"logps/rejected": -31.33709716796875, |
|
"loss": 0.433, |
|
"rewards/accuracies": 0.887499988079071, |
|
"rewards/chosen": 0.2986726462841034, |
|
"rewards/margins": 0.3995324373245239, |
|
"rewards/rejected": -0.10085982084274292, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"eval_logits/chosen": -2.2469263076782227, |
|
"eval_logits/rejected": -2.2420639991760254, |
|
"eval_logps/chosen": -34.01606369018555, |
|
"eval_logps/rejected": -37.50114822387695, |
|
"eval_loss": 0.6143299341201782, |
|
"eval_rewards/accuracies": 0.5245016813278198, |
|
"eval_rewards/chosen": 0.012940882705152035, |
|
"eval_rewards/margins": 0.002107805572450161, |
|
"eval_rewards/rejected": 0.010833078064024448, |
|
"eval_runtime": 145.3485, |
|
"eval_samples_per_second": 2.36, |
|
"eval_steps_per_second": 0.296, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"grad_norm": 77.5, |
|
"learning_rate": 3.7793282895240927e-06, |
|
"logits/chosen": -2.05083966255188, |
|
"logits/rejected": -2.0515644550323486, |
|
"logps/chosen": -34.6113395690918, |
|
"logps/rejected": -33.167572021484375, |
|
"loss": 0.3998, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": 0.29316502809524536, |
|
"rewards/margins": 0.3515344560146332, |
|
"rewards/rejected": -0.058369409292936325, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"grad_norm": 91.5, |
|
"learning_rate": 3.730319028506478e-06, |
|
"logits/chosen": -2.01389741897583, |
|
"logits/rejected": -2.0112407207489014, |
|
"logps/chosen": -32.689491271972656, |
|
"logps/rejected": -32.25727081298828, |
|
"loss": 0.3825, |
|
"rewards/accuracies": 0.862500011920929, |
|
"rewards/chosen": 0.3051331341266632, |
|
"rewards/margins": 0.4362831711769104, |
|
"rewards/rejected": -0.1311500072479248, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"grad_norm": 39.25, |
|
"learning_rate": 3.6806776869317074e-06, |
|
"logits/chosen": -2.028050184249878, |
|
"logits/rejected": -2.018892765045166, |
|
"logps/chosen": -32.39860153198242, |
|
"logps/rejected": -30.971405029296875, |
|
"loss": 0.5713, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": 0.26426073908805847, |
|
"rewards/margins": 0.28948652744293213, |
|
"rewards/rejected": -0.02522580698132515, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"grad_norm": 32.25, |
|
"learning_rate": 3.6304297682067146e-06, |
|
"logits/chosen": -2.0308046340942383, |
|
"logits/rejected": -2.027677059173584, |
|
"logps/chosen": -31.706838607788086, |
|
"logps/rejected": -32.60099792480469, |
|
"loss": 0.2067, |
|
"rewards/accuracies": 0.9624999761581421, |
|
"rewards/chosen": 0.3441086411476135, |
|
"rewards/margins": 0.4712149500846863, |
|
"rewards/rejected": -0.12710630893707275, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"grad_norm": 27.875, |
|
"learning_rate": 3.579601087369492e-06, |
|
"logits/chosen": -2.03548264503479, |
|
"logits/rejected": -2.0378546714782715, |
|
"logps/chosen": -33.151695251464844, |
|
"logps/rejected": -33.8459587097168, |
|
"loss": 0.3123, |
|
"rewards/accuracies": 0.9125000238418579, |
|
"rewards/chosen": 0.3514065742492676, |
|
"rewards/margins": 0.465198814868927, |
|
"rewards/rejected": -0.11379221826791763, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"grad_norm": 47.5, |
|
"learning_rate": 3.5282177578265295e-06, |
|
"logits/chosen": -1.9493051767349243, |
|
"logits/rejected": -1.9498169422149658, |
|
"logps/chosen": -33.52117156982422, |
|
"logps/rejected": -31.683589935302734, |
|
"loss": 0.3393, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": 0.2979589104652405, |
|
"rewards/margins": 0.35139504075050354, |
|
"rewards/rejected": -0.053436122834682465, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"grad_norm": 60.25, |
|
"learning_rate": 3.476306177936961e-06, |
|
"logits/chosen": -1.9889039993286133, |
|
"logits/rejected": -1.9790817499160767, |
|
"logps/chosen": -33.58037567138672, |
|
"logps/rejected": -32.429481506347656, |
|
"loss": 0.2419, |
|
"rewards/accuracies": 0.887499988079071, |
|
"rewards/chosen": 0.2946351170539856, |
|
"rewards/margins": 0.37981870770454407, |
|
"rewards/rejected": -0.08518362790346146, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"grad_norm": 58.75, |
|
"learning_rate": 3.423893017450324e-06, |
|
"logits/chosen": -1.8874107599258423, |
|
"logits/rejected": -1.8842309713363647, |
|
"logps/chosen": -30.960819244384766, |
|
"logps/rejected": -34.40943145751953, |
|
"loss": 0.2598, |
|
"rewards/accuracies": 0.862500011920929, |
|
"rewards/chosen": 0.35009804368019104, |
|
"rewards/margins": 0.41369539499282837, |
|
"rewards/rejected": -0.06359735876321793, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"grad_norm": 114.5, |
|
"learning_rate": 3.3710052038048794e-06, |
|
"logits/chosen": -1.944596290588379, |
|
"logits/rejected": -1.9437839984893799, |
|
"logps/chosen": -34.76592254638672, |
|
"logps/rejected": -35.450462341308594, |
|
"loss": 0.3155, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.3530573844909668, |
|
"rewards/margins": 0.42070499062538147, |
|
"rewards/rejected": -0.06764759868383408, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"grad_norm": 35.25, |
|
"learning_rate": 3.3176699082935546e-06, |
|
"logits/chosen": -1.9258012771606445, |
|
"logits/rejected": -1.928860068321228, |
|
"logps/chosen": -32.108642578125, |
|
"logps/rejected": -35.734214782714844, |
|
"loss": 0.3298, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 0.3459445536136627, |
|
"rewards/margins": 0.4107227325439453, |
|
"rewards/rejected": -0.06477821618318558, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"eval_logits/chosen": -2.244976043701172, |
|
"eval_logits/rejected": -2.240100622177124, |
|
"eval_logps/chosen": -33.94416046142578, |
|
"eval_logps/rejected": -37.445308685302734, |
|
"eval_loss": 0.579022228717804, |
|
"eval_rewards/accuracies": 0.5195183157920837, |
|
"eval_rewards/chosen": 0.06327207386493683, |
|
"eval_rewards/margins": 0.013353652320802212, |
|
"eval_rewards/rejected": 0.049918416887521744, |
|
"eval_runtime": 145.2212, |
|
"eval_samples_per_second": 2.362, |
|
"eval_steps_per_second": 0.296, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"grad_norm": 43.5, |
|
"learning_rate": 3.2639145321045933e-06, |
|
"logits/chosen": -2.0379347801208496, |
|
"logits/rejected": -2.0407185554504395, |
|
"logps/chosen": -34.414249420166016, |
|
"logps/rejected": -34.340797424316406, |
|
"loss": 0.4057, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": 0.3369380831718445, |
|
"rewards/margins": 0.38638633489608765, |
|
"rewards/rejected": -0.04944825917482376, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"grad_norm": 105.5, |
|
"learning_rate": 3.2097666922441107e-06, |
|
"logits/chosen": -1.8936197757720947, |
|
"logits/rejected": -1.8877111673355103, |
|
"logps/chosen": -34.38829040527344, |
|
"logps/rejected": -32.45317459106445, |
|
"loss": 0.3661, |
|
"rewards/accuracies": 0.862500011920929, |
|
"rewards/chosen": 0.31145918369293213, |
|
"rewards/margins": 0.3823915123939514, |
|
"rewards/rejected": -0.0709322914481163, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"grad_norm": 34.75, |
|
"learning_rate": 3.1552542073477554e-06, |
|
"logits/chosen": -2.068476438522339, |
|
"logits/rejected": -2.065448045730591, |
|
"logps/chosen": -30.17218017578125, |
|
"logps/rejected": -31.612863540649414, |
|
"loss": 0.3787, |
|
"rewards/accuracies": 0.9125000238418579, |
|
"rewards/chosen": 0.3365642726421356, |
|
"rewards/margins": 0.45183300971984863, |
|
"rewards/rejected": -0.11526870727539062, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"grad_norm": 77.0, |
|
"learning_rate": 3.100405083388799e-06, |
|
"logits/chosen": -1.911435842514038, |
|
"logits/rejected": -1.9112478494644165, |
|
"logps/chosen": -32.99172592163086, |
|
"logps/rejected": -37.709041595458984, |
|
"loss": 0.3501, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": 0.3300129473209381, |
|
"rewards/margins": 0.3868759870529175, |
|
"rewards/rejected": -0.05686303228139877, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"grad_norm": 93.0, |
|
"learning_rate": 3.0452474992899645e-06, |
|
"logits/chosen": -1.7933385372161865, |
|
"logits/rejected": -1.7985591888427734, |
|
"logps/chosen": -36.41147994995117, |
|
"logps/rejected": -33.99577713012695, |
|
"loss": 0.3683, |
|
"rewards/accuracies": 0.8374999761581421, |
|
"rewards/chosen": 0.364685595035553, |
|
"rewards/margins": 0.38109368085861206, |
|
"rewards/rejected": -0.016408095136284828, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"grad_norm": 111.5, |
|
"learning_rate": 2.989809792446417e-06, |
|
"logits/chosen": -2.0044448375701904, |
|
"logits/rejected": -2.0057899951934814, |
|
"logps/chosen": -32.149024963378906, |
|
"logps/rejected": -32.881797790527344, |
|
"loss": 0.3432, |
|
"rewards/accuracies": 0.862500011920929, |
|
"rewards/chosen": 0.3196606934070587, |
|
"rewards/margins": 0.39146822690963745, |
|
"rewards/rejected": -0.07180755585432053, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"grad_norm": 71.0, |
|
"learning_rate": 2.9341204441673267e-06, |
|
"logits/chosen": -1.9735772609710693, |
|
"logits/rejected": -1.9727319478988647, |
|
"logps/chosen": -31.752365112304688, |
|
"logps/rejected": -35.02272033691406, |
|
"loss": 0.3104, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": 0.2962704002857208, |
|
"rewards/margins": 0.29246559739112854, |
|
"rewards/rejected": 0.0038047723937779665, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 2.03, |
|
"grad_norm": 27.0, |
|
"learning_rate": 2.878208065043501e-06, |
|
"logits/chosen": -1.9740774631500244, |
|
"logits/rejected": -1.9732996225357056, |
|
"logps/chosen": -34.172630310058594, |
|
"logps/rejected": -31.525737762451172, |
|
"loss": 0.2392, |
|
"rewards/accuracies": 0.862500011920929, |
|
"rewards/chosen": 0.33308273553848267, |
|
"rewards/margins": 0.40743428468704224, |
|
"rewards/rejected": -0.07435150444507599, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"grad_norm": 32.0, |
|
"learning_rate": 2.8221013802485974e-06, |
|
"logits/chosen": -2.00411057472229, |
|
"logits/rejected": -2.0026121139526367, |
|
"logps/chosen": -29.090576171875, |
|
"logps/rejected": -32.80150604248047, |
|
"loss": 0.166, |
|
"rewards/accuracies": 0.9624999761581421, |
|
"rewards/chosen": 0.3308223783969879, |
|
"rewards/margins": 0.4464591145515442, |
|
"rewards/rejected": -0.11563672870397568, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"grad_norm": 28.625, |
|
"learning_rate": 2.76582921478147e-06, |
|
"logits/chosen": -2.0544497966766357, |
|
"logits/rejected": -2.05083966255188, |
|
"logps/chosen": -31.90934181213379, |
|
"logps/rejected": -34.358177185058594, |
|
"loss": 0.14, |
|
"rewards/accuracies": 0.925000011920929, |
|
"rewards/chosen": 0.36250725388526917, |
|
"rewards/margins": 0.4739067554473877, |
|
"rewards/rejected": -0.11139953136444092, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"eval_logits/chosen": -2.247178077697754, |
|
"eval_logits/rejected": -2.2423007488250732, |
|
"eval_logps/chosen": -33.95090103149414, |
|
"eval_logps/rejected": -37.43887710571289, |
|
"eval_loss": 0.5903980731964111, |
|
"eval_rewards/accuracies": 0.5161960124969482, |
|
"eval_rewards/chosen": 0.05855472758412361, |
|
"eval_rewards/margins": 0.0041350084356963634, |
|
"eval_rewards/rejected": 0.05441971868276596, |
|
"eval_runtime": 145.1787, |
|
"eval_samples_per_second": 2.363, |
|
"eval_steps_per_second": 0.296, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"grad_norm": 104.0, |
|
"learning_rate": 2.7094204786572254e-06, |
|
"logits/chosen": -1.8908456563949585, |
|
"logits/rejected": -1.8829033374786377, |
|
"logps/chosen": -33.954002380371094, |
|
"logps/rejected": -34.61323165893555, |
|
"loss": 0.3154, |
|
"rewards/accuracies": 0.8374999761581421, |
|
"rewards/chosen": 0.3515668511390686, |
|
"rewards/margins": 0.38875049352645874, |
|
"rewards/rejected": -0.03718366473913193, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"grad_norm": 43.75, |
|
"learning_rate": 2.6529041520546072e-06, |
|
"logits/chosen": -1.9656623601913452, |
|
"logits/rejected": -1.976292371749878, |
|
"logps/chosen": -35.201332092285156, |
|
"logps/rejected": -32.56475067138672, |
|
"loss": 0.2191, |
|
"rewards/accuracies": 0.925000011920929, |
|
"rewards/chosen": 0.3460533618927002, |
|
"rewards/margins": 0.4393738806247711, |
|
"rewards/rejected": -0.09332051128149033, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"grad_norm": 58.0, |
|
"learning_rate": 2.5963092704273302e-06, |
|
"logits/chosen": -2.0145046710968018, |
|
"logits/rejected": -2.018724203109741, |
|
"logps/chosen": -34.39170455932617, |
|
"logps/rejected": -29.336902618408203, |
|
"loss": 0.196, |
|
"rewards/accuracies": 0.9125000238418579, |
|
"rewards/chosen": 0.357754647731781, |
|
"rewards/margins": 0.4058148264884949, |
|
"rewards/rejected": -0.04806024581193924, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"grad_norm": 28.5, |
|
"learning_rate": 2.53966490958702e-06, |
|
"logits/chosen": -2.012115240097046, |
|
"logits/rejected": -2.0198771953582764, |
|
"logps/chosen": -34.340667724609375, |
|
"logps/rejected": -29.972625732421875, |
|
"loss": 0.375, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.3131526708602905, |
|
"rewards/margins": 0.40060001611709595, |
|
"rewards/rejected": -0.08744733780622482, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"grad_norm": 48.75, |
|
"learning_rate": 2.4830001707654135e-06, |
|
"logits/chosen": -1.942496657371521, |
|
"logits/rejected": -1.932924509048462, |
|
"logps/chosen": -31.318111419677734, |
|
"logps/rejected": -32.194252014160156, |
|
"loss": 0.2437, |
|
"rewards/accuracies": 0.9375, |
|
"rewards/chosen": 0.3325595259666443, |
|
"rewards/margins": 0.4588742256164551, |
|
"rewards/rejected": -0.1263146996498108, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"grad_norm": 57.0, |
|
"learning_rate": 2.4263441656635054e-06, |
|
"logits/chosen": -2.0852370262145996, |
|
"logits/rejected": -2.075453281402588, |
|
"logps/chosen": -25.809616088867188, |
|
"logps/rejected": -30.046661376953125, |
|
"loss": 0.2, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 0.29011884331703186, |
|
"rewards/margins": 0.4121415615081787, |
|
"rewards/rejected": -0.12202271074056625, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"grad_norm": 62.0, |
|
"learning_rate": 2.3697260014953107e-06, |
|
"logits/chosen": -1.9381517171859741, |
|
"logits/rejected": -1.9393552541732788, |
|
"logps/chosen": -33.79442596435547, |
|
"logps/rejected": -30.08365821838379, |
|
"loss": 0.3179, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.37700217962265015, |
|
"rewards/margins": 0.4478107988834381, |
|
"rewards/rejected": -0.07080861181020737, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"grad_norm": 25.25, |
|
"learning_rate": 2.3131747660339396e-06, |
|
"logits/chosen": -1.9556992053985596, |
|
"logits/rejected": -1.9564440250396729, |
|
"logps/chosen": -31.94332504272461, |
|
"logps/rejected": -33.166770935058594, |
|
"loss": 0.2801, |
|
"rewards/accuracies": 0.887499988079071, |
|
"rewards/chosen": 0.3121483623981476, |
|
"rewards/margins": 0.3932458460330963, |
|
"rewards/rejected": -0.08109745383262634, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"grad_norm": 42.25, |
|
"learning_rate": 2.256719512667651e-06, |
|
"logits/chosen": -1.858231782913208, |
|
"logits/rejected": -1.856436014175415, |
|
"logps/chosen": -35.18943405151367, |
|
"logps/rejected": -36.062679290771484, |
|
"loss": 0.2634, |
|
"rewards/accuracies": 0.9125000238418579, |
|
"rewards/chosen": 0.3328378200531006, |
|
"rewards/margins": 0.46265101432800293, |
|
"rewards/rejected": -0.12981316447257996, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"grad_norm": 79.0, |
|
"learning_rate": 2.2003892454735786e-06, |
|
"logits/chosen": -2.0039381980895996, |
|
"logits/rejected": -1.9971100091934204, |
|
"logps/chosen": -31.39545249938965, |
|
"logps/rejected": -32.833343505859375, |
|
"loss": 0.2302, |
|
"rewards/accuracies": 0.9375, |
|
"rewards/chosen": 0.4250277578830719, |
|
"rewards/margins": 0.47772127389907837, |
|
"rewards/rejected": -0.05269356444478035, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"eval_logits/chosen": -2.2496516704559326, |
|
"eval_logits/rejected": -2.2447705268859863, |
|
"eval_logps/chosen": -33.913028717041016, |
|
"eval_logps/rejected": -37.41090393066406, |
|
"eval_loss": 0.5758183598518372, |
|
"eval_rewards/accuracies": 0.5544019937515259, |
|
"eval_rewards/chosen": 0.08506669104099274, |
|
"eval_rewards/margins": 0.011066065169870853, |
|
"eval_rewards/rejected": 0.07400061935186386, |
|
"eval_runtime": 145.1902, |
|
"eval_samples_per_second": 2.362, |
|
"eval_steps_per_second": 0.296, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"grad_norm": 21.875, |
|
"learning_rate": 2.1442129043167877e-06, |
|
"logits/chosen": -2.0508811473846436, |
|
"logits/rejected": -2.0461061000823975, |
|
"logps/chosen": -33.02284240722656, |
|
"logps/rejected": -34.64262390136719, |
|
"loss": 0.2017, |
|
"rewards/accuracies": 0.9375, |
|
"rewards/chosen": 0.3743865489959717, |
|
"rewards/margins": 0.45418882369995117, |
|
"rewards/rejected": -0.0798022598028183, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"grad_norm": 31.875, |
|
"learning_rate": 2.088219349982323e-06, |
|
"logits/chosen": -1.9676059484481812, |
|
"logits/rejected": -1.972545862197876, |
|
"logps/chosen": -34.80748748779297, |
|
"logps/rejected": -32.547122955322266, |
|
"loss": 0.297, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 0.3787701427936554, |
|
"rewards/margins": 0.41182270646095276, |
|
"rewards/rejected": -0.03305256366729736, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"grad_norm": 45.0, |
|
"learning_rate": 2.0324373493478803e-06, |
|
"logits/chosen": -2.0639491081237793, |
|
"logits/rejected": -2.054579257965088, |
|
"logps/chosen": -31.739765167236328, |
|
"logps/rejected": -34.55535125732422, |
|
"loss": 0.1863, |
|
"rewards/accuracies": 0.9750000238418579, |
|
"rewards/chosen": 0.3816167712211609, |
|
"rewards/margins": 0.4604073166847229, |
|
"rewards/rejected": -0.07879054546356201, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"grad_norm": 63.5, |
|
"learning_rate": 1.976895560604729e-06, |
|
"logits/chosen": -1.9907861948013306, |
|
"logits/rejected": -1.987722635269165, |
|
"logps/chosen": -30.749156951904297, |
|
"logps/rejected": -32.56981658935547, |
|
"loss": 0.2258, |
|
"rewards/accuracies": 0.9125000238418579, |
|
"rewards/chosen": 0.3775084614753723, |
|
"rewards/margins": 0.4506160318851471, |
|
"rewards/rejected": -0.07310755550861359, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"grad_norm": 29.875, |
|
"learning_rate": 1.921622518534466e-06, |
|
"logits/chosen": -1.9317022562026978, |
|
"logits/rejected": -1.9392095804214478, |
|
"logps/chosen": -32.34041976928711, |
|
"logps/rejected": -35.695228576660156, |
|
"loss": 0.2412, |
|
"rewards/accuracies": 0.887499988079071, |
|
"rewards/chosen": 0.3714412748813629, |
|
"rewards/margins": 0.47314995527267456, |
|
"rewards/rejected": -0.10170867294073105, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"grad_norm": 114.5, |
|
"learning_rate": 1.8666466198491794e-06, |
|
"logits/chosen": -1.9403190612792969, |
|
"logits/rejected": -1.9334567785263062, |
|
"logps/chosen": -33.3146858215332, |
|
"logps/rejected": -36.74799346923828, |
|
"loss": 0.3338, |
|
"rewards/accuracies": 0.887499988079071, |
|
"rewards/chosen": 0.3596806824207306, |
|
"rewards/margins": 0.46411022543907166, |
|
"rewards/rejected": -0.1044294610619545, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"grad_norm": 30.375, |
|
"learning_rate": 1.8119961086025376e-06, |
|
"logits/chosen": -1.9422130584716797, |
|
"logits/rejected": -1.942911148071289, |
|
"logps/chosen": -30.72060203552246, |
|
"logps/rejected": -33.139339447021484, |
|
"loss": 0.2958, |
|
"rewards/accuracies": 0.925000011920929, |
|
"rewards/chosen": 0.3610595762729645, |
|
"rewards/margins": 0.4717164933681488, |
|
"rewards/rejected": -0.11065696179866791, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"grad_norm": 49.75, |
|
"learning_rate": 1.7576990616793139e-06, |
|
"logits/chosen": -1.9468282461166382, |
|
"logits/rejected": -1.9580564498901367, |
|
"logps/chosen": -32.50783157348633, |
|
"logps/rejected": -34.23973083496094, |
|
"loss": 0.2541, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.3981999158859253, |
|
"rewards/margins": 0.47416257858276367, |
|
"rewards/rejected": -0.07596264779567719, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"grad_norm": 83.5, |
|
"learning_rate": 1.7037833743707892e-06, |
|
"logits/chosen": -2.0208706855773926, |
|
"logits/rejected": -2.018265962600708, |
|
"logps/chosen": -35.62385940551758, |
|
"logps/rejected": -32.450279235839844, |
|
"loss": 0.3659, |
|
"rewards/accuracies": 0.8374999761581421, |
|
"rewards/chosen": 0.34803569316864014, |
|
"rewards/margins": 0.38377222418785095, |
|
"rewards/rejected": -0.03573652729392052, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"grad_norm": 24.375, |
|
"learning_rate": 1.6502767460434588e-06, |
|
"logits/chosen": -1.9889123439788818, |
|
"logits/rejected": -1.9926494359970093, |
|
"logps/chosen": -34.00056457519531, |
|
"logps/rejected": -34.569400787353516, |
|
"loss": 0.2296, |
|
"rewards/accuracies": 0.9125000238418579, |
|
"rewards/chosen": 0.32390862703323364, |
|
"rewards/margins": 0.38225558400154114, |
|
"rewards/rejected": -0.05834692716598511, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"eval_logits/chosen": -2.248878240585327, |
|
"eval_logits/rejected": -2.2439959049224854, |
|
"eval_logps/chosen": -33.944374084472656, |
|
"eval_logps/rejected": -37.437828063964844, |
|
"eval_loss": 0.5750337243080139, |
|
"eval_rewards/accuracies": 0.5074750781059265, |
|
"eval_rewards/chosen": 0.06312327831983566, |
|
"eval_rewards/margins": 0.007969892583787441, |
|
"eval_rewards/rejected": 0.05515338107943535, |
|
"eval_runtime": 145.3511, |
|
"eval_samples_per_second": 2.36, |
|
"eval_steps_per_second": 0.296, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"grad_norm": 75.0, |
|
"learning_rate": 1.5972066659083796e-06, |
|
"logits/chosen": -1.9976009130477905, |
|
"logits/rejected": -2.0026943683624268, |
|
"logps/chosen": -32.26424789428711, |
|
"logps/rejected": -32.64023208618164, |
|
"loss": 0.204, |
|
"rewards/accuracies": 0.925000011920929, |
|
"rewards/chosen": 0.3753224313259125, |
|
"rewards/margins": 0.45860418677330017, |
|
"rewards/rejected": -0.08328177034854889, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"grad_norm": 58.5, |
|
"learning_rate": 1.5446003988985041e-06, |
|
"logits/chosen": -1.9610779285430908, |
|
"logits/rejected": -1.9637727737426758, |
|
"logps/chosen": -29.9860897064209, |
|
"logps/rejected": -31.74285316467285, |
|
"loss": 0.2786, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 0.32168638706207275, |
|
"rewards/margins": 0.3522700071334839, |
|
"rewards/rejected": -0.03058364987373352, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"grad_norm": 24.0, |
|
"learning_rate": 1.4924849716612211e-06, |
|
"logits/chosen": -2.0177597999572754, |
|
"logits/rejected": -2.011871814727783, |
|
"logps/chosen": -33.44524383544922, |
|
"logps/rejected": -33.608463287353516, |
|
"loss": 0.1846, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 0.38070768117904663, |
|
"rewards/margins": 0.44403451681137085, |
|
"rewards/rejected": -0.06332685798406601, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"grad_norm": 46.25, |
|
"learning_rate": 1.440887158673332e-06, |
|
"logits/chosen": -1.945654273033142, |
|
"logits/rejected": -1.9493560791015625, |
|
"logps/chosen": -35.2274284362793, |
|
"logps/rejected": -34.58420181274414, |
|
"loss": 0.2056, |
|
"rewards/accuracies": 0.9750000238418579, |
|
"rewards/chosen": 0.36841046810150146, |
|
"rewards/margins": 0.483646959066391, |
|
"rewards/rejected": -0.11523648351430893, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"grad_norm": 17.375, |
|
"learning_rate": 1.3898334684855647e-06, |
|
"logits/chosen": -1.9452226161956787, |
|
"logits/rejected": -1.958367109298706, |
|
"logps/chosen": -31.376358032226562, |
|
"logps/rejected": -33.11796951293945, |
|
"loss": 0.2082, |
|
"rewards/accuracies": 0.887499988079071, |
|
"rewards/chosen": 0.34057068824768066, |
|
"rewards/margins": 0.40912023186683655, |
|
"rewards/rejected": -0.06854955106973648, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"grad_norm": 49.5, |
|
"learning_rate": 1.3393501301037245e-06, |
|
"logits/chosen": -1.9365898370742798, |
|
"logits/rejected": -1.9300063848495483, |
|
"logps/chosen": -31.5865478515625, |
|
"logps/rejected": -33.410743713378906, |
|
"loss": 0.1983, |
|
"rewards/accuracies": 0.9375, |
|
"rewards/chosen": 0.4009482264518738, |
|
"rewards/margins": 0.46589574217796326, |
|
"rewards/rejected": -0.06494755297899246, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"grad_norm": 30.125, |
|
"learning_rate": 1.2894630795134454e-06, |
|
"logits/chosen": -2.062471866607666, |
|
"logits/rejected": -2.063298225402832, |
|
"logps/chosen": -33.060279846191406, |
|
"logps/rejected": -33.027076721191406, |
|
"loss": 0.1608, |
|
"rewards/accuracies": 0.9375, |
|
"rewards/chosen": 0.3407577872276306, |
|
"rewards/margins": 0.46842724084854126, |
|
"rewards/rejected": -0.12766942381858826, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"grad_norm": 19.125, |
|
"learning_rate": 1.2401979463554984e-06, |
|
"logits/chosen": -1.9935426712036133, |
|
"logits/rejected": -1.9923900365829468, |
|
"logps/chosen": -33.55720901489258, |
|
"logps/rejected": -33.266090393066406, |
|
"loss": 0.2082, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.372835636138916, |
|
"rewards/margins": 0.42481860518455505, |
|
"rewards/rejected": -0.051982950419187546, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"grad_norm": 37.0, |
|
"learning_rate": 1.1915800407584705e-06, |
|
"logits/chosen": -2.021810531616211, |
|
"logits/rejected": -2.01427960395813, |
|
"logps/chosen": -33.71491622924805, |
|
"logps/rejected": -31.135467529296875, |
|
"loss": 0.1958, |
|
"rewards/accuracies": 0.9375, |
|
"rewards/chosen": 0.3576733469963074, |
|
"rewards/margins": 0.42342647910118103, |
|
"rewards/rejected": -0.06575315445661545, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"grad_norm": 67.5, |
|
"learning_rate": 1.1436343403356019e-06, |
|
"logits/chosen": -1.9883337020874023, |
|
"logits/rejected": -1.9875253438949585, |
|
"logps/chosen": -34.86940002441406, |
|
"logps/rejected": -36.43136978149414, |
|
"loss": 0.2798, |
|
"rewards/accuracies": 0.925000011920929, |
|
"rewards/chosen": 0.3843076229095459, |
|
"rewards/margins": 0.4393698275089264, |
|
"rewards/rejected": -0.055062174797058105, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"eval_logits/chosen": -2.2467923164367676, |
|
"eval_logits/rejected": -2.2419159412384033, |
|
"eval_logps/chosen": -33.93034744262695, |
|
"eval_logps/rejected": -37.438724517822266, |
|
"eval_loss": 0.5483447909355164, |
|
"eval_rewards/accuracies": 0.5427741408348083, |
|
"eval_rewards/chosen": 0.07294297218322754, |
|
"eval_rewards/margins": 0.018412673845887184, |
|
"eval_rewards/rejected": 0.05453029274940491, |
|
"eval_runtime": 145.1205, |
|
"eval_samples_per_second": 2.364, |
|
"eval_steps_per_second": 0.296, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"grad_norm": 54.25, |
|
"learning_rate": 1.0963854773524548e-06, |
|
"logits/chosen": -2.0355682373046875, |
|
"logits/rejected": -2.0409066677093506, |
|
"logps/chosen": -34.98146057128906, |
|
"logps/rejected": -35.63863754272461, |
|
"loss": 0.2279, |
|
"rewards/accuracies": 0.862500011920929, |
|
"rewards/chosen": 0.3399600386619568, |
|
"rewards/margins": 0.40649813413619995, |
|
"rewards/rejected": -0.06653807312250137, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"grad_norm": 71.5, |
|
"learning_rate": 1.049857726072005e-06, |
|
"logits/chosen": -2.0084500312805176, |
|
"logits/rejected": -2.006545066833496, |
|
"logps/chosen": -32.13376235961914, |
|
"logps/rejected": -33.14947509765625, |
|
"loss": 0.2249, |
|
"rewards/accuracies": 0.925000011920929, |
|
"rewards/chosen": 0.33771127462387085, |
|
"rewards/margins": 0.45441800355911255, |
|
"rewards/rejected": -0.11670677363872528, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"grad_norm": 17.875, |
|
"learning_rate": 1.0040749902836508e-06, |
|
"logits/chosen": -1.9112991094589233, |
|
"logits/rejected": -1.9141557216644287, |
|
"logps/chosen": -29.258800506591797, |
|
"logps/rejected": -30.369312286376953, |
|
"loss": 0.2421, |
|
"rewards/accuracies": 0.887499988079071, |
|
"rewards/chosen": 0.3428861200809479, |
|
"rewards/margins": 0.429875910282135, |
|
"rewards/rejected": -0.08698974549770355, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"grad_norm": 42.25, |
|
"learning_rate": 9.59060791022566e-07, |
|
"logits/chosen": -2.0088257789611816, |
|
"logits/rejected": -2.005708694458008, |
|
"logps/chosen": -32.668514251708984, |
|
"logps/rejected": -32.6502799987793, |
|
"loss": 0.2007, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.37466686964035034, |
|
"rewards/margins": 0.461048424243927, |
|
"rewards/rejected": -0.08638156205415726, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 2.99, |
|
"grad_norm": 29.25, |
|
"learning_rate": 9.148382544856885e-07, |
|
"logits/chosen": -1.930190086364746, |
|
"logits/rejected": -1.9238803386688232, |
|
"logps/chosen": -27.67626953125, |
|
"logps/rejected": -32.34447479248047, |
|
"loss": 0.1585, |
|
"rewards/accuracies": 0.9624999761581421, |
|
"rewards/chosen": 0.32320261001586914, |
|
"rewards/margins": 0.4605270028114319, |
|
"rewards/rejected": -0.13732439279556274, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"grad_norm": 35.75, |
|
"learning_rate": 8.714301001505568e-07, |
|
"logits/chosen": -2.04903507232666, |
|
"logits/rejected": -2.0465400218963623, |
|
"logps/chosen": -32.619815826416016, |
|
"logps/rejected": -35.1733283996582, |
|
"loss": 0.1505, |
|
"rewards/accuracies": 0.925000011920929, |
|
"rewards/chosen": 0.3333774507045746, |
|
"rewards/margins": 0.39063215255737305, |
|
"rewards/rejected": -0.05725465342402458, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"grad_norm": 15.625, |
|
"learning_rate": 8.288586291031025e-07, |
|
"logits/chosen": -2.0296478271484375, |
|
"logits/rejected": -2.0277647972106934, |
|
"logps/chosen": -31.087743759155273, |
|
"logps/rejected": -32.51514434814453, |
|
"loss": 0.0912, |
|
"rewards/accuracies": 0.9624999761581421, |
|
"rewards/chosen": 0.36866381764411926, |
|
"rewards/margins": 0.4744475483894348, |
|
"rewards/rejected": -0.10578374564647675, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 3.06, |
|
"grad_norm": 35.75, |
|
"learning_rate": 7.871457125803897e-07, |
|
"logits/chosen": -2.0225229263305664, |
|
"logits/rejected": -2.0101773738861084, |
|
"logps/chosen": -35.537513732910156, |
|
"logps/rejected": -34.152286529541016, |
|
"loss": 0.183, |
|
"rewards/accuracies": 0.9125000238418579, |
|
"rewards/chosen": 0.35545071959495544, |
|
"rewards/margins": 0.4120173454284668, |
|
"rewards/rejected": -0.056566618382930756, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 3.09, |
|
"grad_norm": 30.5, |
|
"learning_rate": 7.463127807341966e-07, |
|
"logits/chosen": -1.9124174118041992, |
|
"logits/rejected": -1.907052755355835, |
|
"logps/chosen": -34.80583953857422, |
|
"logps/rejected": -34.315452575683594, |
|
"loss": 0.1388, |
|
"rewards/accuracies": 0.9375, |
|
"rewards/chosen": 0.3806923031806946, |
|
"rewards/margins": 0.43503332138061523, |
|
"rewards/rejected": -0.05434093996882439, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"grad_norm": 30.75, |
|
"learning_rate": 7.063808116212021e-07, |
|
"logits/chosen": -1.9541196823120117, |
|
"logits/rejected": -1.9571189880371094, |
|
"logps/chosen": -32.08814239501953, |
|
"logps/rejected": -31.591243743896484, |
|
"loss": 0.1195, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 0.3688638210296631, |
|
"rewards/margins": 0.4424096941947937, |
|
"rewards/rejected": -0.07354583591222763, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"eval_logits/chosen": -2.247305393218994, |
|
"eval_logits/rejected": -2.242413282394409, |
|
"eval_logps/chosen": -33.93861770629883, |
|
"eval_logps/rejected": -37.42906188964844, |
|
"eval_loss": 0.5758681297302246, |
|
"eval_rewards/accuracies": 0.5137043595314026, |
|
"eval_rewards/chosen": 0.06715311110019684, |
|
"eval_rewards/margins": 0.005864266771823168, |
|
"eval_rewards/rejected": 0.06128884479403496, |
|
"eval_runtime": 145.3379, |
|
"eval_samples_per_second": 2.36, |
|
"eval_steps_per_second": 0.296, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 3.14, |
|
"grad_norm": 32.75, |
|
"learning_rate": 6.673703204254348e-07, |
|
"logits/chosen": -1.986507773399353, |
|
"logits/rejected": -1.9819329977035522, |
|
"logps/chosen": -30.773900985717773, |
|
"logps/rejected": -31.090429306030273, |
|
"loss": 0.117, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 0.349838525056839, |
|
"rewards/margins": 0.4774731695652008, |
|
"rewards/rejected": -0.12763459980487823, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"grad_norm": 33.75, |
|
"learning_rate": 6.293013489185315e-07, |
|
"logits/chosen": -1.9981073141098022, |
|
"logits/rejected": -1.9925674200057983, |
|
"logps/chosen": -34.77349090576172, |
|
"logps/rejected": -34.63701248168945, |
|
"loss": 0.1218, |
|
"rewards/accuracies": 0.9624999761581421, |
|
"rewards/chosen": 0.35031452775001526, |
|
"rewards/margins": 0.3973073959350586, |
|
"rewards/rejected": -0.046992845833301544, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 3.19, |
|
"grad_norm": 172.0, |
|
"learning_rate": 5.921934551632086e-07, |
|
"logits/chosen": -1.9999059438705444, |
|
"logits/rejected": -1.9867637157440186, |
|
"logps/chosen": -32.57126998901367, |
|
"logps/rejected": -34.21943283081055, |
|
"loss": 0.1846, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.3659234941005707, |
|
"rewards/margins": 0.4356323182582855, |
|
"rewards/rejected": -0.06970882415771484, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 3.22, |
|
"grad_norm": 36.25, |
|
"learning_rate": 5.560657034652405e-07, |
|
"logits/chosen": -2.0442328453063965, |
|
"logits/rejected": -2.041350841522217, |
|
"logps/chosen": -34.520362854003906, |
|
"logps/rejected": -31.955829620361328, |
|
"loss": 0.1368, |
|
"rewards/accuracies": 0.925000011920929, |
|
"rewards/chosen": 0.3587776720523834, |
|
"rewards/margins": 0.4399491250514984, |
|
"rewards/rejected": -0.08117148280143738, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 3.25, |
|
"grad_norm": 95.5, |
|
"learning_rate": 5.2093665457911e-07, |
|
"logits/chosen": -1.97548508644104, |
|
"logits/rejected": -1.9726585149765015, |
|
"logps/chosen": -33.8781623840332, |
|
"logps/rejected": -34.95212173461914, |
|
"loss": 0.1618, |
|
"rewards/accuracies": 0.925000011920929, |
|
"rewards/chosen": 0.3625948429107666, |
|
"rewards/margins": 0.47109127044677734, |
|
"rewards/rejected": -0.10849642753601074, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"grad_norm": 26.25, |
|
"learning_rate": 4.868243561723535e-07, |
|
"logits/chosen": -2.038349151611328, |
|
"logits/rejected": -2.033210277557373, |
|
"logps/chosen": -30.266530990600586, |
|
"logps/rejected": -32.47600555419922, |
|
"loss": 0.1155, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 0.3459872007369995, |
|
"rewards/margins": 0.4114041328430176, |
|
"rewards/rejected": -0.06541696935892105, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"grad_norm": 17.75, |
|
"learning_rate": 4.537463335535161e-07, |
|
"logits/chosen": -2.0647287368774414, |
|
"logits/rejected": -2.069408416748047, |
|
"logps/chosen": -32.192989349365234, |
|
"logps/rejected": -31.8017578125, |
|
"loss": 0.126, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 0.3626050055027008, |
|
"rewards/margins": 0.446880578994751, |
|
"rewards/rejected": -0.08427558094263077, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 3.32, |
|
"grad_norm": 42.0, |
|
"learning_rate": 4.217195806684629e-07, |
|
"logits/chosen": -1.9595832824707031, |
|
"logits/rejected": -1.966700792312622, |
|
"logps/chosen": -34.40987777709961, |
|
"logps/rejected": -32.77952575683594, |
|
"loss": 0.1119, |
|
"rewards/accuracies": 0.9624999761581421, |
|
"rewards/chosen": 0.3481543958187103, |
|
"rewards/margins": 0.4750334620475769, |
|
"rewards/rejected": -0.126879021525383, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"grad_norm": 30.375, |
|
"learning_rate": 3.907605513696808e-07, |
|
"logits/chosen": -1.8622219562530518, |
|
"logits/rejected": -1.8643817901611328, |
|
"logps/chosen": -32.59587860107422, |
|
"logps/rejected": -36.642635345458984, |
|
"loss": 0.1373, |
|
"rewards/accuracies": 0.9624999761581421, |
|
"rewards/chosen": 0.3752126097679138, |
|
"rewards/margins": 0.4370684027671814, |
|
"rewards/rejected": -0.06185578554868698, |
|
"step": 1290 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"grad_norm": 89.5, |
|
"learning_rate": 3.6088515096305675e-07, |
|
"logits/chosen": -1.9182116985321045, |
|
"logits/rejected": -1.9216159582138062, |
|
"logps/chosen": -32.71792221069336, |
|
"logps/rejected": -32.585205078125, |
|
"loss": 0.1371, |
|
"rewards/accuracies": 0.9375, |
|
"rewards/chosen": 0.3491230309009552, |
|
"rewards/margins": 0.4338512420654297, |
|
"rewards/rejected": -0.08472825586795807, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"eval_logits/chosen": -2.2483434677124023, |
|
"eval_logits/rejected": -2.243445634841919, |
|
"eval_logps/chosen": -33.929874420166016, |
|
"eval_logps/rejected": -37.43461990356445, |
|
"eval_loss": 0.5592050552368164, |
|
"eval_rewards/accuracies": 0.5494186282157898, |
|
"eval_rewards/chosen": 0.0732741430401802, |
|
"eval_rewards/margins": 0.01587507873773575, |
|
"eval_rewards/rejected": 0.05739906057715416, |
|
"eval_runtime": 145.2128, |
|
"eval_samples_per_second": 2.362, |
|
"eval_steps_per_second": 0.296, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"grad_norm": 22.125, |
|
"learning_rate": 3.321087280364757e-07, |
|
"logits/chosen": -2.0270915031433105, |
|
"logits/rejected": -2.006920337677002, |
|
"logps/chosen": -30.607580184936523, |
|
"logps/rejected": -35.26466369628906, |
|
"loss": 0.14, |
|
"rewards/accuracies": 0.925000011920929, |
|
"rewards/chosen": 0.35407310724258423, |
|
"rewards/margins": 0.42285603284835815, |
|
"rewards/rejected": -0.0687829852104187, |
|
"step": 1310 |
|
}, |
|
{ |
|
"epoch": 3.43, |
|
"grad_norm": 22.875, |
|
"learning_rate": 3.044460665744284e-07, |
|
"logits/chosen": -2.006091594696045, |
|
"logits/rejected": -2.0118560791015625, |
|
"logps/chosen": -32.49515914916992, |
|
"logps/rejected": -32.58808517456055, |
|
"loss": 0.1516, |
|
"rewards/accuracies": 0.9375, |
|
"rewards/chosen": 0.37704116106033325, |
|
"rewards/margins": 0.4451761245727539, |
|
"rewards/rejected": -0.06813497841358185, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"grad_norm": 58.25, |
|
"learning_rate": 2.779113783626916e-07, |
|
"logits/chosen": -1.9745457172393799, |
|
"logits/rejected": -1.969630479812622, |
|
"logps/chosen": -32.551475524902344, |
|
"logps/rejected": -34.6931266784668, |
|
"loss": 0.1414, |
|
"rewards/accuracies": 0.925000011920929, |
|
"rewards/chosen": 0.3297252357006073, |
|
"rewards/margins": 0.4143126606941223, |
|
"rewards/rejected": -0.08458739519119263, |
|
"step": 1330 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"grad_norm": 38.5, |
|
"learning_rate": 2.5251829568697204e-07, |
|
"logits/chosen": -1.840356469154358, |
|
"logits/rejected": -1.8500515222549438, |
|
"logps/chosen": -33.240806579589844, |
|
"logps/rejected": -31.120624542236328, |
|
"loss": 0.1088, |
|
"rewards/accuracies": 0.9624999761581421, |
|
"rewards/chosen": 0.391147643327713, |
|
"rewards/margins": 0.4517039358615875, |
|
"rewards/rejected": -0.0605563148856163, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 3.51, |
|
"grad_norm": 25.75, |
|
"learning_rate": 2.2827986432927774e-07, |
|
"logits/chosen": -1.9017702341079712, |
|
"logits/rejected": -1.906445860862732, |
|
"logps/chosen": -33.377037048339844, |
|
"logps/rejected": -33.292083740234375, |
|
"loss": 0.0844, |
|
"rewards/accuracies": 0.9750000238418579, |
|
"rewards/chosen": 0.4017297625541687, |
|
"rewards/margins": 0.4628194272518158, |
|
"rewards/rejected": -0.06108973175287247, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 3.53, |
|
"grad_norm": 70.5, |
|
"learning_rate": 2.0520853686560177e-07, |
|
"logits/chosen": -1.9277992248535156, |
|
"logits/rejected": -1.9333375692367554, |
|
"logps/chosen": -33.33270263671875, |
|
"logps/rejected": -34.68195343017578, |
|
"loss": 0.13, |
|
"rewards/accuracies": 0.925000011920929, |
|
"rewards/chosen": 0.3492715060710907, |
|
"rewards/margins": 0.43249478936195374, |
|
"rewards/rejected": -0.08322323858737946, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 3.56, |
|
"grad_norm": 45.0, |
|
"learning_rate": 1.833161662683672e-07, |
|
"logits/chosen": -2.017637252807617, |
|
"logits/rejected": -2.0131232738494873, |
|
"logps/chosen": -32.15598678588867, |
|
"logps/rejected": -30.987085342407227, |
|
"loss": 0.1109, |
|
"rewards/accuracies": 0.9375, |
|
"rewards/chosen": 0.335787296295166, |
|
"rewards/margins": 0.38841742277145386, |
|
"rewards/rejected": -0.052630096673965454, |
|
"step": 1370 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"grad_norm": 23.75, |
|
"learning_rate": 1.626139998169246e-07, |
|
"logits/chosen": -1.9399824142456055, |
|
"logits/rejected": -1.9417117834091187, |
|
"logps/chosen": -29.205978393554688, |
|
"logps/rejected": -30.845836639404297, |
|
"loss": 0.1135, |
|
"rewards/accuracies": 0.925000011920929, |
|
"rewards/chosen": 0.3173268437385559, |
|
"rewards/margins": 0.3884861469268799, |
|
"rewards/rejected": -0.07115931063890457, |
|
"step": 1380 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"grad_norm": 36.5, |
|
"learning_rate": 1.4311267331922535e-07, |
|
"logits/chosen": -1.9257915019989014, |
|
"logits/rejected": -1.9303070306777954, |
|
"logps/chosen": -31.455486297607422, |
|
"logps/rejected": -33.11508560180664, |
|
"loss": 0.0911, |
|
"rewards/accuracies": 0.9750000238418579, |
|
"rewards/chosen": 0.39480385184288025, |
|
"rewards/margins": 0.4837164878845215, |
|
"rewards/rejected": -0.08891263604164124, |
|
"step": 1390 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"grad_norm": 60.0, |
|
"learning_rate": 1.2482220564763669e-07, |
|
"logits/chosen": -1.99787175655365, |
|
"logits/rejected": -2.0011351108551025, |
|
"logps/chosen": -34.36697006225586, |
|
"logps/rejected": -33.57072448730469, |
|
"loss": 0.0993, |
|
"rewards/accuracies": 0.9750000238418579, |
|
"rewards/chosen": 0.3945205807685852, |
|
"rewards/margins": 0.4975336194038391, |
|
"rewards/rejected": -0.10301297903060913, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"eval_logits/chosen": -2.247082471847534, |
|
"eval_logits/rejected": -2.242189884185791, |
|
"eval_logps/chosen": -33.95657730102539, |
|
"eval_logps/rejected": -37.431129455566406, |
|
"eval_loss": 0.6129570007324219, |
|
"eval_rewards/accuracies": 0.4871262311935425, |
|
"eval_rewards/chosen": 0.05458100512623787, |
|
"eval_rewards/margins": -0.005263464525341988, |
|
"eval_rewards/rejected": 0.05984446778893471, |
|
"eval_runtime": 145.3553, |
|
"eval_samples_per_second": 2.36, |
|
"eval_steps_per_second": 0.296, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"grad_norm": 64.5, |
|
"learning_rate": 1.0775199359171346e-07, |
|
"logits/chosen": -2.0669209957122803, |
|
"logits/rejected": -2.0600974559783936, |
|
"logps/chosen": -33.28879928588867, |
|
"logps/rejected": -34.330623626708984, |
|
"loss": 0.14, |
|
"rewards/accuracies": 0.9375, |
|
"rewards/chosen": 0.3596227765083313, |
|
"rewards/margins": 0.4198623299598694, |
|
"rewards/rejected": -0.06023954600095749, |
|
"step": 1410 |
|
}, |
|
{ |
|
"epoch": 3.69, |
|
"grad_norm": 25.625, |
|
"learning_rate": 9.191080703056604e-08, |
|
"logits/chosen": -1.9519445896148682, |
|
"logits/rejected": -1.9626754522323608, |
|
"logps/chosen": -33.65924072265625, |
|
"logps/rejected": -33.730491638183594, |
|
"loss": 0.0849, |
|
"rewards/accuracies": 0.9750000238418579, |
|
"rewards/chosen": 0.34304356575012207, |
|
"rewards/margins": 0.4406338632106781, |
|
"rewards/rejected": -0.09759029000997543, |
|
"step": 1420 |
|
}, |
|
{ |
|
"epoch": 3.71, |
|
"grad_norm": 58.0, |
|
"learning_rate": 7.730678442730539e-08, |
|
"logits/chosen": -2.0385050773620605, |
|
"logits/rejected": -2.0503756999969482, |
|
"logps/chosen": -34.54597473144531, |
|
"logps/rejected": -33.77598571777344, |
|
"loss": 0.1741, |
|
"rewards/accuracies": 0.9125000238418579, |
|
"rewards/chosen": 0.36038076877593994, |
|
"rewards/margins": 0.4567357897758484, |
|
"rewards/rejected": -0.09635499119758606, |
|
"step": 1430 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"grad_norm": 18.875, |
|
"learning_rate": 6.394742864787806e-08, |
|
"logits/chosen": -2.0281505584716797, |
|
"logits/rejected": -2.030089855194092, |
|
"logps/chosen": -32.70575714111328, |
|
"logps/rejected": -33.9802360534668, |
|
"loss": 0.1205, |
|
"rewards/accuracies": 0.9624999761581421, |
|
"rewards/chosen": 0.35183992981910706, |
|
"rewards/margins": 0.4360295832157135, |
|
"rewards/rejected": -0.08418963849544525, |
|
"step": 1440 |
|
}, |
|
{ |
|
"epoch": 3.77, |
|
"grad_norm": 20.5, |
|
"learning_rate": 5.183960310644748e-08, |
|
"logits/chosen": -2.0094611644744873, |
|
"logits/rejected": -2.0022339820861816, |
|
"logps/chosen": -35.01443099975586, |
|
"logps/rejected": -33.88014221191406, |
|
"loss": 0.1175, |
|
"rewards/accuracies": 0.9624999761581421, |
|
"rewards/chosen": 0.38772544264793396, |
|
"rewards/margins": 0.464539110660553, |
|
"rewards/rejected": -0.07681362330913544, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"grad_norm": 25.625, |
|
"learning_rate": 4.098952823928693e-08, |
|
"logits/chosen": -1.9764325618743896, |
|
"logits/rejected": -1.9825315475463867, |
|
"logps/chosen": -30.76479148864746, |
|
"logps/rejected": -33.833831787109375, |
|
"loss": 0.1383, |
|
"rewards/accuracies": 0.9375, |
|
"rewards/chosen": 0.31386876106262207, |
|
"rewards/margins": 0.4285518229007721, |
|
"rewards/rejected": -0.11468305438756943, |
|
"step": 1460 |
|
}, |
|
{ |
|
"epoch": 3.82, |
|
"grad_norm": 21.125, |
|
"learning_rate": 3.1402778309014284e-08, |
|
"logits/chosen": -1.923027753829956, |
|
"logits/rejected": -1.9207102060317993, |
|
"logps/chosen": -30.280017852783203, |
|
"logps/rejected": -30.681386947631836, |
|
"loss": 0.1347, |
|
"rewards/accuracies": 0.925000011920929, |
|
"rewards/chosen": 0.3958771824836731, |
|
"rewards/margins": 0.4568881392478943, |
|
"rewards/rejected": -0.061010997742414474, |
|
"step": 1470 |
|
}, |
|
{ |
|
"epoch": 3.84, |
|
"grad_norm": 18.25, |
|
"learning_rate": 2.3084278540791427e-08, |
|
"logits/chosen": -2.0964748859405518, |
|
"logits/rejected": -2.090756893157959, |
|
"logps/chosen": -34.767372131347656, |
|
"logps/rejected": -32.20579528808594, |
|
"loss": 0.1015, |
|
"rewards/accuracies": 0.987500011920929, |
|
"rewards/chosen": 0.3625258803367615, |
|
"rewards/margins": 0.4668874740600586, |
|
"rewards/rejected": -0.1043616384267807, |
|
"step": 1480 |
|
}, |
|
{ |
|
"epoch": 3.87, |
|
"grad_norm": 21.375, |
|
"learning_rate": 1.6038302591975807e-08, |
|
"logits/chosen": -1.9879157543182373, |
|
"logits/rejected": -1.9899780750274658, |
|
"logps/chosen": -27.98676109313965, |
|
"logps/rejected": -28.119998931884766, |
|
"loss": 0.1002, |
|
"rewards/accuracies": 0.9375, |
|
"rewards/chosen": 0.34273290634155273, |
|
"rewards/margins": 0.41113558411598206, |
|
"rewards/rejected": -0.06840268522500992, |
|
"step": 1490 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"grad_norm": 59.75, |
|
"learning_rate": 1.0268470356514237e-08, |
|
"logits/chosen": -1.9974746704101562, |
|
"logits/rejected": -1.9917857646942139, |
|
"logps/chosen": -32.35822677612305, |
|
"logps/rejected": -32.09123611450195, |
|
"loss": 0.18, |
|
"rewards/accuracies": 0.9375, |
|
"rewards/chosen": 0.3097951412200928, |
|
"rewards/margins": 0.3953918516635895, |
|
"rewards/rejected": -0.0855967253446579, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"eval_logits/chosen": -2.2471930980682373, |
|
"eval_logits/rejected": -2.242299795150757, |
|
"eval_logps/chosen": -33.923362731933594, |
|
"eval_logps/rejected": -37.430606842041016, |
|
"eval_loss": 0.556613028049469, |
|
"eval_rewards/accuracies": 0.5049834251403809, |
|
"eval_rewards/chosen": 0.07783377170562744, |
|
"eval_rewards/margins": 0.01762736216187477, |
|
"eval_rewards/rejected": 0.06020640954375267, |
|
"eval_runtime": 145.2496, |
|
"eval_samples_per_second": 2.361, |
|
"eval_steps_per_second": 0.296, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"grad_norm": 31.25, |
|
"learning_rate": 5.777746105209147e-09, |
|
"logits/chosen": -1.9237276315689087, |
|
"logits/rejected": -1.9281084537506104, |
|
"logps/chosen": -33.68144989013672, |
|
"logps/rejected": -34.5217399597168, |
|
"loss": 0.1599, |
|
"rewards/accuracies": 0.8999999761581421, |
|
"rewards/chosen": 0.34268397092819214, |
|
"rewards/margins": 0.4206327497959137, |
|
"rewards/rejected": -0.07794882357120514, |
|
"step": 1510 |
|
}, |
|
{ |
|
"epoch": 3.95, |
|
"grad_norm": 44.25, |
|
"learning_rate": 2.5684369628148352e-09, |
|
"logits/chosen": -1.9838800430297852, |
|
"logits/rejected": -1.9821369647979736, |
|
"logps/chosen": -30.222864151000977, |
|
"logps/rejected": -32.720550537109375, |
|
"loss": 0.1456, |
|
"rewards/accuracies": 0.949999988079071, |
|
"rewards/chosen": 0.3579188287258148, |
|
"rewards/margins": 0.44949913024902344, |
|
"rewards/rejected": -0.09158027172088623, |
|
"step": 1520 |
|
}, |
|
{ |
|
"epoch": 3.97, |
|
"grad_norm": 40.75, |
|
"learning_rate": 6.421917227455999e-10, |
|
"logits/chosen": -2.0813286304473877, |
|
"logits/rejected": -2.0737102031707764, |
|
"logps/chosen": -27.602453231811523, |
|
"logps/rejected": -28.810745239257812, |
|
"loss": 0.091, |
|
"rewards/accuracies": 0.987500011920929, |
|
"rewards/chosen": 0.32234111428260803, |
|
"rewards/margins": 0.41973695158958435, |
|
"rewards/rejected": -0.09739579260349274, |
|
"step": 1530 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"grad_norm": 19.0, |
|
"learning_rate": 0.0, |
|
"logits/chosen": -1.979434609413147, |
|
"logits/rejected": -1.9692825078964233, |
|
"logps/chosen": -32.49375534057617, |
|
"logps/rejected": -35.5590705871582, |
|
"loss": 0.1005, |
|
"rewards/accuracies": 0.987500011920929, |
|
"rewards/chosen": 0.40070921182632446, |
|
"rewards/margins": 0.5042387247085571, |
|
"rewards/rejected": -0.10352955758571625, |
|
"step": 1540 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"step": 1540, |
|
"total_flos": 0.0, |
|
"train_loss": 0.20226844080856868, |
|
"train_runtime": 10768.956, |
|
"train_samples_per_second": 1.144, |
|
"train_steps_per_second": 0.143 |
|
} |
|
], |
|
"logging_steps": 10, |
|
"max_steps": 1540, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 4, |
|
"save_steps": 100, |
|
"total_flos": 0.0, |
|
"train_batch_size": 4, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|