{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 385,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 55.25,
      "learning_rate": 1.282051282051282e-07,
      "logits/chosen": -2.7358343601226807,
      "logits/rejected": -2.7480404376983643,
      "logps/chosen": -27.35565757751465,
      "logps/rejected": -21.06114387512207,
      "loss": 1.0,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.03,
      "grad_norm": 94.0,
      "learning_rate": 1.282051282051282e-06,
      "logits/chosen": -3.009542942047119,
      "logits/rejected": -2.998302698135376,
      "logps/chosen": -33.17807388305664,
      "logps/rejected": -31.984642028808594,
      "loss": 0.9835,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.002009392250329256,
      "rewards/margins": 0.016525447368621826,
      "rewards/rejected": -0.014516051858663559,
      "step": 10
    },
    {
      "epoch": 0.05,
      "grad_norm": 64.5,
      "learning_rate": 2.564102564102564e-06,
      "logits/chosen": -2.899103879928589,
      "logits/rejected": -2.8936753273010254,
      "logps/chosen": -32.4957160949707,
      "logps/rejected": -28.984477996826172,
      "loss": 1.0126,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.014431963674724102,
      "rewards/margins": -0.012606342323124409,
      "rewards/rejected": -0.0018256217008456588,
      "step": 20
    },
    {
      "epoch": 0.08,
      "grad_norm": 62.0,
      "learning_rate": 3.846153846153847e-06,
      "logits/chosen": -3.0970749855041504,
      "logits/rejected": -3.1094558238983154,
      "logps/chosen": -32.78588104248047,
      "logps/rejected": -30.1519775390625,
      "loss": 0.9449,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.08596263825893402,
      "rewards/margins": 0.05510985851287842,
      "rewards/rejected": 0.030852774158120155,
      "step": 30
    },
    {
      "epoch": 0.1,
      "grad_norm": 50.5,
      "learning_rate": 4.999896948438434e-06,
      "logits/chosen": -2.862823486328125,
      "logits/rejected": -2.853950023651123,
      "logps/chosen": -31.538238525390625,
      "logps/rejected": -32.37445068359375,
      "loss": 0.8574,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.20445938408374786,
      "rewards/margins": 0.21635785698890686,
      "rewards/rejected": -0.011898472905158997,
      "step": 40
    },
    {
      "epoch": 0.13,
      "grad_norm": 39.0,
      "learning_rate": 4.987541037542187e-06,
      "logits/chosen": -2.881490707397461,
      "logits/rejected": -2.8795459270477295,
      "logps/chosen": -29.380319595336914,
      "logps/rejected": -30.1256046295166,
      "loss": 0.8579,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.2518887519836426,
      "rewards/margins": 0.2775513231754303,
      "rewards/rejected": -0.025662561878561974,
      "step": 50
    },
    {
      "epoch": 0.16,
      "grad_norm": 55.75,
      "learning_rate": 4.954691471941119e-06,
      "logits/chosen": -2.9107794761657715,
      "logits/rejected": -2.912201166152954,
      "logps/chosen": -29.885101318359375,
      "logps/rejected": -28.084728240966797,
      "loss": 0.804,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.17126679420471191,
      "rewards/margins": 0.26159828901290894,
      "rewards/rejected": -0.0903315395116806,
      "step": 60
    },
    {
      "epoch": 0.18,
      "grad_norm": 72.0,
      "learning_rate": 4.901618883413549e-06,
      "logits/chosen": -2.9932219982147217,
      "logits/rejected": -2.999392032623291,
      "logps/chosen": -29.26175308227539,
      "logps/rejected": -31.045358657836914,
      "loss": 0.8981,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.013451325707137585,
      "rewards/margins": 0.12094201892614365,
      "rewards/rejected": -0.10749070346355438,
      "step": 70
    },
    {
      "epoch": 0.21,
      "grad_norm": 61.75,
      "learning_rate": 4.828760511501322e-06,
      "logits/chosen": -2.808314561843872,
      "logits/rejected": -2.8239831924438477,
      "logps/chosen": -29.382043838500977,
      "logps/rejected": -29.915264129638672,
      "loss": 0.8143,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.11108281463384628,
      "rewards/margins": 0.27977070212364197,
      "rewards/rejected": -0.1686878651380539,
      "step": 80
    },
    {
      "epoch": 0.23,
      "grad_norm": 48.75,
      "learning_rate": 4.7367166013034295e-06,
      "logits/chosen": -2.898421049118042,
      "logits/rejected": -2.8805272579193115,
      "logps/chosen": -32.720703125,
      "logps/rejected": -30.371923446655273,
      "loss": 0.8446,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.08101295679807663,
      "rewards/margins": 0.3262065052986145,
      "rewards/rejected": -0.2451934814453125,
      "step": 90
    },
    {
      "epoch": 0.26,
      "grad_norm": 48.25,
      "learning_rate": 4.626245458345211e-06,
      "logits/chosen": -3.005709409713745,
      "logits/rejected": -3.0060715675354004,
      "logps/chosen": -31.795307159423828,
      "logps/rejected": -30.9265193939209,
      "loss": 0.8179,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.18309839069843292,
      "rewards/margins": 0.26034125685691833,
      "rewards/rejected": -0.07724283635616302,
      "step": 100
    },
    {
      "epoch": 0.26,
      "eval_logits/chosen": -2.813856840133667,
      "eval_logits/rejected": -2.8111777305603027,
      "eval_logps/chosen": -31.290462493896484,
      "eval_logps/rejected": -34.86981201171875,
      "eval_loss": 0.8994626402854919,
      "eval_rewards/accuracies": 0.610049843788147,
      "eval_rewards/chosen": -0.005606824532151222,
      "eval_rewards/margins": 0.11455333232879639,
      "eval_rewards/rejected": -0.12016014754772186,
      "eval_runtime": 113.4039,
      "eval_samples_per_second": 3.025,
      "eval_steps_per_second": 0.379,
      "step": 100
    },
    {
      "epoch": 0.29,
      "grad_norm": 73.0,
      "learning_rate": 4.498257201263691e-06,
      "logits/chosen": -2.9637725353240967,
      "logits/rejected": -2.9400086402893066,
      "logps/chosen": -31.802236557006836,
      "logps/rejected": -31.42850685119629,
      "loss": 0.7127,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.29019027948379517,
      "rewards/margins": 0.47895127534866333,
      "rewards/rejected": -0.18876102566719055,
      "step": 110
    },
    {
      "epoch": 0.31,
      "grad_norm": 57.25,
      "learning_rate": 4.353806263777678e-06,
      "logits/chosen": -3.0474579334259033,
      "logits/rejected": -3.076024055480957,
      "logps/chosen": -28.733871459960938,
      "logps/rejected": -34.316707611083984,
      "loss": 0.7493,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.3010229170322418,
      "rewards/margins": 0.41943326592445374,
      "rewards/rejected": -0.11841034889221191,
      "step": 120
    },
    {
      "epoch": 0.34,
      "grad_norm": 46.25,
      "learning_rate": 4.1940827077152755e-06,
      "logits/chosen": -2.7504048347473145,
      "logits/rejected": -2.7461349964141846,
      "logps/chosen": -28.568979263305664,
      "logps/rejected": -30.313528060913086,
      "loss": 0.7475,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.28399184346199036,
      "rewards/margins": 0.4097979664802551,
      "rewards/rejected": -0.12580609321594238,
      "step": 130
    },
    {
      "epoch": 0.36,
      "grad_norm": 40.75,
      "learning_rate": 4.0204024186666215e-06,
      "logits/chosen": -3.0258471965789795,
      "logits/rejected": -3.022930860519409,
      "logps/chosen": -27.234683990478516,
      "logps/rejected": -31.9457950592041,
      "loss": 0.7968,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.17608027160167694,
      "rewards/margins": 0.3783981502056122,
      "rewards/rejected": -0.20231786370277405,
      "step": 140
    },
    {
      "epoch": 0.39,
      "grad_norm": 47.25,
      "learning_rate": 3.834196265035119e-06,
      "logits/chosen": -2.823291063308716,
      "logits/rejected": -2.818061113357544,
      "logps/chosen": -27.286752700805664,
      "logps/rejected": -31.543277740478516,
      "loss": 0.6353,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.3771280348300934,
      "rewards/margins": 0.6008729934692383,
      "rewards/rejected": -0.22374498844146729,
      "step": 150
    },
    {
      "epoch": 0.42,
      "grad_norm": 61.0,
      "learning_rate": 3.636998309800573e-06,
      "logits/chosen": -3.141061305999756,
      "logits/rejected": -3.1238036155700684,
      "logps/chosen": -31.78842544555664,
      "logps/rejected": -29.447246551513672,
      "loss": 0.5473,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.4923074245452881,
      "rewards/margins": 0.8195418119430542,
      "rewards/rejected": -0.32723429799079895,
      "step": 160
    },
    {
      "epoch": 0.44,
      "grad_norm": 47.25,
      "learning_rate": 3.4304331721118078e-06,
      "logits/chosen": -2.9534718990325928,
      "logits/rejected": -2.960083484649658,
      "logps/chosen": -29.32253074645996,
      "logps/rejected": -31.597339630126953,
      "loss": 0.5934,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.40046125650405884,
      "rewards/margins": 0.7489619255065918,
      "rewards/rejected": -0.34850066900253296,
      "step": 170
    },
    {
      "epoch": 0.47,
      "grad_norm": 51.0,
      "learning_rate": 3.2162026428305436e-06,
      "logits/chosen": -2.8042290210723877,
      "logits/rejected": -2.801795244216919,
      "logps/chosen": -29.153839111328125,
      "logps/rejected": -30.13584327697754,
      "loss": 0.6384,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.36120903491973877,
      "rewards/margins": 0.618577778339386,
      "rewards/rejected": -0.2573687434196472,
      "step": 180
    },
    {
      "epoch": 0.49,
      "grad_norm": 31.125,
      "learning_rate": 2.996071664294641e-06,
      "logits/chosen": -2.9180445671081543,
      "logits/rejected": -2.914367914199829,
      "logps/chosen": -29.715240478515625,
      "logps/rejected": -28.60836410522461,
      "loss": 0.7637,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.33003363013267517,
      "rewards/margins": 0.5056184530258179,
      "rewards/rejected": -0.1755848377943039,
      "step": 190
    },
    {
      "epoch": 0.52,
      "grad_norm": 46.0,
      "learning_rate": 2.7718537898066833e-06,
      "logits/chosen": -2.987438201904297,
      "logits/rejected": -2.97584867477417,
      "logps/chosen": -32.835445404052734,
      "logps/rejected": -30.47810935974121,
      "loss": 0.7257,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.6045005917549133,
      "rewards/margins": 0.6353167295455933,
      "rewards/rejected": -0.030816053971648216,
      "step": 200
    },
    {
      "epoch": 0.52,
      "eval_logits/chosen": -2.8270444869995117,
      "eval_logits/rejected": -2.8244259357452393,
      "eval_logps/chosen": -31.380773544311523,
      "eval_logps/rejected": -34.95420837402344,
      "eval_loss": 0.9027834534645081,
      "eval_rewards/accuracies": 0.5573089718818665,
      "eval_rewards/chosen": -0.06882722675800323,
      "eval_rewards/margins": 0.11040809750556946,
      "eval_rewards/rejected": -0.1792353093624115,
      "eval_runtime": 113.148,
      "eval_samples_per_second": 3.031,
      "eval_steps_per_second": 0.38,
      "step": 200
    },
    {
      "epoch": 0.55,
      "grad_norm": 56.25,
      "learning_rate": 2.5453962426402006e-06,
      "logits/chosen": -2.9236907958984375,
      "logits/rejected": -2.9235434532165527,
      "logps/chosen": -32.24640655517578,
      "logps/rejected": -34.160125732421875,
      "loss": 0.5593,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.5509027242660522,
      "rewards/margins": 0.7599622011184692,
      "rewards/rejected": -0.2090594321489334,
      "step": 210
    },
    {
      "epoch": 0.57,
      "grad_norm": 42.25,
      "learning_rate": 2.3185646976551794e-06,
      "logits/chosen": -2.90382981300354,
      "logits/rejected": -2.9191505908966064,
      "logps/chosen": -29.37839698791504,
      "logps/rejected": -28.82822036743164,
      "loss": 0.6297,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.5597518682479858,
      "rewards/margins": 0.7697170972824097,
      "rewards/rejected": -0.20996519923210144,
      "step": 220
    },
    {
      "epoch": 0.6,
      "grad_norm": 44.5,
      "learning_rate": 2.0932279108998323e-06,
      "logits/chosen": -2.9497718811035156,
      "logits/rejected": -2.954267740249634,
      "logps/chosen": -30.704570770263672,
      "logps/rejected": -32.00530242919922,
      "loss": 0.7476,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.3131088614463806,
      "rewards/margins": 0.48611265420913696,
      "rewards/rejected": -0.17300380766391754,
      "step": 230
    },
    {
      "epoch": 0.62,
      "grad_norm": 41.75,
      "learning_rate": 1.8712423238279358e-06,
      "logits/chosen": -3.004119396209717,
      "logits/rejected": -3.012087106704712,
      "logps/chosen": -30.007003784179688,
      "logps/rejected": -30.454036712646484,
      "loss": 0.4699,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": 0.6285442113876343,
      "rewards/margins": 0.7816993594169617,
      "rewards/rejected": -0.15315520763397217,
      "step": 240
    },
    {
      "epoch": 0.65,
      "grad_norm": 48.25,
      "learning_rate": 1.6544367689701824e-06,
      "logits/chosen": -2.8357105255126953,
      "logits/rejected": -2.825700283050537,
      "logps/chosen": -26.410411834716797,
      "logps/rejected": -29.5693416595459,
      "loss": 0.719,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.3806636929512024,
      "rewards/margins": 0.40705281496047974,
      "rewards/rejected": -0.02638910338282585,
      "step": 250
    },
    {
      "epoch": 0.68,
      "grad_norm": 19.25,
      "learning_rate": 1.4445974030621963e-06,
      "logits/chosen": -2.817229747772217,
      "logits/rejected": -2.837646245956421,
      "logps/chosen": -28.973642349243164,
      "logps/rejected": -34.52573013305664,
      "loss": 0.5428,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": 0.599422812461853,
      "rewards/margins": 0.8455693125724792,
      "rewards/rejected": -0.24614660441875458,
      "step": 260
    },
    {
      "epoch": 0.7,
      "grad_norm": 62.25,
      "learning_rate": 1.243452991757889e-06,
      "logits/chosen": -2.959622859954834,
      "logits/rejected": -2.9659740924835205,
      "logps/chosen": -30.064538955688477,
      "logps/rejected": -30.272811889648438,
      "loss": 0.534,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.5017729997634888,
      "rewards/margins": 0.7870587110519409,
      "rewards/rejected": -0.28528574109077454,
      "step": 270
    },
    {
      "epoch": 0.73,
      "grad_norm": 31.75,
      "learning_rate": 1.0526606671603523e-06,
      "logits/chosen": -2.974242687225342,
      "logits/rejected": -2.960573673248291,
      "logps/chosen": -30.0527400970459,
      "logps/rejected": -28.732250213623047,
      "loss": 0.7261,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.4682597219944,
      "rewards/margins": 0.5388898253440857,
      "rewards/rejected": -0.07063011080026627,
      "step": 280
    },
    {
      "epoch": 0.75,
      "grad_norm": 32.25,
      "learning_rate": 8.737922755071455e-07,
      "logits/chosen": -2.901106595993042,
      "logits/rejected": -2.8834261894226074,
      "logps/chosen": -31.619491577148438,
      "logps/rejected": -31.00750160217285,
      "loss": 0.4327,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": 0.7117676734924316,
      "rewards/margins": 1.0507009029388428,
      "rewards/rejected": -0.3389332592487335,
      "step": 290
    },
    {
      "epoch": 0.78,
      "grad_norm": 43.5,
      "learning_rate": 7.08321427484816e-07,
      "logits/chosen": -2.9032962322235107,
      "logits/rejected": -2.8999171257019043,
      "logps/chosen": -31.358158111572266,
      "logps/rejected": -27.814193725585938,
      "loss": 0.6308,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.6051477789878845,
      "rewards/margins": 0.7525756359100342,
      "rewards/rejected": -0.14742788672447205,
      "step": 300
    },
    {
      "epoch": 0.78,
      "eval_logits/chosen": -2.8207788467407227,
      "eval_logits/rejected": -2.8184332847595215,
      "eval_logps/chosen": -31.37415885925293,
      "eval_logps/rejected": -34.95107650756836,
      "eval_loss": 0.9011654257774353,
      "eval_rewards/accuracies": 0.565614640712738,
      "eval_rewards/chosen": -0.06419765949249268,
      "eval_rewards/margins": 0.11284750699996948,
      "eval_rewards/rejected": -0.17704515159130096,
      "eval_runtime": 113.1332,
      "eval_samples_per_second": 3.032,
      "eval_steps_per_second": 0.38,
      "step": 300
    },
    {
      "epoch": 0.81,
      "grad_norm": 52.0,
      "learning_rate": 5.576113578589035e-07,
      "logits/chosen": -2.7831432819366455,
      "logits/rejected": -2.801016092300415,
      "logps/chosen": -28.50387191772461,
      "logps/rejected": -31.167369842529297,
      "loss": 0.4507,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": 0.5078839659690857,
      "rewards/margins": 0.8126392364501953,
      "rewards/rejected": -0.30475518107414246,
      "step": 310
    },
    {
      "epoch": 0.83,
      "grad_norm": 31.625,
      "learning_rate": 4.229036944380913e-07,
      "logits/chosen": -3.029501438140869,
      "logits/rejected": -3.0155110359191895,
      "logps/chosen": -29.30743408203125,
      "logps/rejected": -28.680965423583984,
      "loss": 0.4592,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.6329048871994019,
      "rewards/margins": 0.8864381909370422,
      "rewards/rejected": -0.25353336334228516,
      "step": 320
    },
    {
      "epoch": 0.86,
      "grad_norm": 17.0,
      "learning_rate": 3.053082288996112e-07,
      "logits/chosen": -2.943554401397705,
      "logits/rejected": -2.925654888153076,
      "logps/chosen": -27.233322143554688,
      "logps/rejected": -30.869421005249023,
      "loss": 0.4325,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.7901242971420288,
      "rewards/margins": 1.1245664358139038,
      "rewards/rejected": -0.33444222807884216,
      "step": 330
    },
    {
      "epoch": 0.88,
      "grad_norm": 53.5,
      "learning_rate": 2.0579377374915805e-07,
      "logits/chosen": -3.1611580848693848,
      "logits/rejected": -3.167454242706299,
      "logps/chosen": -30.857864379882812,
      "logps/rejected": -33.212623596191406,
      "loss": 0.4854,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": 0.546938955783844,
      "rewards/margins": 0.908575177192688,
      "rewards/rejected": -0.3616361618041992,
      "step": 340
    },
    {
      "epoch": 0.91,
      "grad_norm": 40.0,
      "learning_rate": 1.2518018074041684e-07,
      "logits/chosen": -3.037867546081543,
      "logits/rejected": -3.040760040283203,
      "logps/chosen": -29.823516845703125,
      "logps/rejected": -31.82476806640625,
      "loss": 0.5086,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.6883540153503418,
      "rewards/margins": 0.9063750505447388,
      "rewards/rejected": -0.21802091598510742,
      "step": 350
    },
    {
      "epoch": 0.94,
      "grad_norm": 41.5,
      "learning_rate": 6.41315865106129e-08,
      "logits/chosen": -2.877352476119995,
      "logits/rejected": -2.878804922103882,
      "logps/chosen": -27.442968368530273,
      "logps/rejected": -30.033910751342773,
      "loss": 0.4586,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": 0.8447319865226746,
      "rewards/margins": 0.986809253692627,
      "rewards/rejected": -0.1420772522687912,
      "step": 360
    },
    {
      "epoch": 0.96,
      "grad_norm": 66.5,
      "learning_rate": 2.3150941078050325e-08,
      "logits/chosen": -2.9520726203918457,
      "logits/rejected": -2.949944257736206,
      "logps/chosen": -29.80544090270996,
      "logps/rejected": -32.19409942626953,
      "loss": 0.5126,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.6868453025817871,
      "rewards/margins": 0.873086154460907,
      "rewards/rejected": -0.1862407922744751,
      "step": 370
    },
    {
      "epoch": 0.99,
      "grad_norm": 49.5,
      "learning_rate": 2.575864278703266e-09,
      "logits/chosen": -2.908616542816162,
      "logits/rejected": -2.890885829925537,
      "logps/chosen": -28.092891693115234,
      "logps/rejected": -28.3051700592041,
      "loss": 0.4503,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": 0.5965021252632141,
      "rewards/margins": 0.8873867988586426,
      "rewards/rejected": -0.290884792804718,
      "step": 380
    },
    {
      "epoch": 1.0,
      "step": 385,
      "total_flos": 0.0,
      "train_loss": 0.6684874992866021,
      "train_runtime": 2718.5428,
      "train_samples_per_second": 1.133,
      "train_steps_per_second": 0.142
    }
  ],
  "logging_steps": 10,
  "max_steps": 385,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}