{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 385,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 792.0,
      "learning_rate": 1.282051282051282e-07,
      "logits/chosen": -2.7358343601226807,
      "logits/rejected": -2.7480404376983643,
      "logps/chosen": -27.35565757751465,
      "logps/rejected": -21.06114387512207,
      "loss": 25.0,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.03,
      "grad_norm": 1384.0,
      "learning_rate": 1.282051282051282e-06,
      "logits/chosen": -3.0097055435180664,
      "logits/rejected": -2.9980692863464355,
      "logps/chosen": -33.19136047363281,
      "logps/rejected": -31.962371826171875,
      "loss": 25.1536,
      "rewards/accuracies": 0.4166666567325592,
      "rewards/chosen": -0.0010418971069157124,
      "rewards/margins": -0.001195515040308237,
      "rewards/rejected": 0.00015361772966571152,
      "step": 10
    },
    {
      "epoch": 0.05,
      "grad_norm": 940.0,
      "learning_rate": 2.564102564102564e-06,
      "logits/chosen": -2.899199962615967,
      "logits/rejected": -2.893846273422241,
      "logps/chosen": -32.49421691894531,
      "logps/rejected": -28.94989013671875,
      "loss": 25.5664,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.0019120589131489396,
      "rewards/margins": -0.005109794903546572,
      "rewards/rejected": 0.0031977363396435976,
      "step": 20
    },
    {
      "epoch": 0.08,
      "grad_norm": 872.0,
      "learning_rate": 3.846153846153847e-06,
      "logits/chosen": -3.0976572036743164,
      "logits/rejected": -3.1090171337127686,
      "logps/chosen": -32.78887176513672,
      "logps/rejected": -30.141361236572266,
      "loss": 24.4486,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.011981222778558731,
      "rewards/margins": 0.006511862389743328,
      "rewards/rejected": 0.005469360388815403,
      "step": 30
    },
    {
      "epoch": 0.1,
      "grad_norm": 788.0,
      "learning_rate": 4.999896948438434e-06,
      "logits/chosen": -2.8625879287719727,
      "logits/rejected": -2.853339195251465,
      "logps/chosen": -31.531911849975586,
      "logps/rejected": -32.36937713623047,
      "loss": 22.726,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.02984103001654148,
      "rewards/margins": 0.031033098697662354,
      "rewards/rejected": -0.001192068331874907,
      "step": 40
    },
    {
      "epoch": 0.13,
      "grad_norm": 536.0,
      "learning_rate": 4.987541037542187e-06,
      "logits/chosen": -2.880608558654785,
      "logits/rejected": -2.8780317306518555,
      "logps/chosen": -29.381378173828125,
      "logps/rejected": -30.077362060546875,
      "loss": 23.1409,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.035878658294677734,
      "rewards/margins": 0.034720782190561295,
      "rewards/rejected": 0.0011578750563785434,
      "step": 50
    },
    {
      "epoch": 0.16,
      "grad_norm": 784.0,
      "learning_rate": 4.954691471941119e-06,
      "logits/chosen": -2.9093430042266846,
      "logits/rejected": -2.910740375518799,
      "logps/chosen": -29.860986709594727,
      "logps/rejected": -28.04913902282715,
      "loss": 22.1643,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.02687840536236763,
      "rewards/margins": 0.036224402487277985,
      "rewards/rejected": -0.00934599619358778,
      "step": 60
    },
    {
      "epoch": 0.18,
      "grad_norm": 1600.0,
      "learning_rate": 4.901618883413549e-06,
      "logits/chosen": -2.9914920330047607,
      "logits/rejected": -2.99847674369812,
      "logps/chosen": -29.173349380493164,
      "logps/rejected": -30.959280014038086,
      "loss": 24.455,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.01076198648661375,
      "rewards/margins": 0.017510101199150085,
      "rewards/rejected": -0.006748115178197622,
      "step": 70
    },
    {
      "epoch": 0.21,
      "grad_norm": 1072.0,
      "learning_rate": 4.828760511501322e-06,
      "logits/chosen": -2.8084774017333984,
      "logits/rejected": -2.823390007019043,
      "logps/chosen": -29.325841903686523,
      "logps/rejected": -29.870441436767578,
      "loss": 22.0145,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.021488705649971962,
      "rewards/margins": 0.041104547679424286,
      "rewards/rejected": -0.01961584761738777,
      "step": 80
    },
    {
      "epoch": 0.23,
      "grad_norm": 740.0,
      "learning_rate": 4.7367166013034295e-06,
      "logits/chosen": -2.8982763290405273,
      "logits/rejected": -2.8804843425750732,
      "logps/chosen": -32.65864181518555,
      "logps/rejected": -30.32712745666504,
      "loss": 25.4545,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.017779098823666573,
      "rewards/margins": 0.04832718148827553,
      "rewards/rejected": -0.030548080801963806,
      "step": 90
    },
    {
      "epoch": 0.26,
      "grad_norm": 720.0,
      "learning_rate": 4.626245458345211e-06,
      "logits/chosen": -3.0048553943634033,
      "logits/rejected": -3.004960298538208,
      "logps/chosen": -31.79574966430664,
      "logps/rejected": -30.897558212280273,
      "loss": 22.8693,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.026112746447324753,
      "rewards/margins": 0.034251101315021515,
      "rewards/rejected": -0.008138353936374187,
      "step": 100
    },
    {
      "epoch": 0.26,
      "eval_logits/chosen": -2.8112006187438965,
      "eval_logits/rejected": -2.8086960315704346,
      "eval_logps/chosen": -31.26334571838379,
      "eval_logps/rejected": -34.84490203857422,
      "eval_loss": 23.988525390625,
      "eval_rewards/accuracies": 0.6129568219184875,
      "eval_rewards/chosen": 0.0019106707768514752,
      "eval_rewards/margins": 0.016585489735007286,
      "eval_rewards/rejected": -0.014674817211925983,
      "eval_runtime": 113.1272,
      "eval_samples_per_second": 3.032,
      "eval_steps_per_second": 0.38,
      "step": 100
    },
    {
      "epoch": 0.29,
      "grad_norm": 1008.0,
      "learning_rate": 4.498257201263691e-06,
      "logits/chosen": -2.9598517417907715,
      "logits/rejected": -2.9358468055725098,
      "logps/chosen": -31.7904109954834,
      "logps/rejected": -31.376705169677734,
      "loss": 21.3073,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.042638350278139114,
      "rewards/margins": 0.06442411243915558,
      "rewards/rejected": -0.02178577333688736,
      "step": 110
    },
    {
      "epoch": 0.31,
      "grad_norm": 792.0,
      "learning_rate": 4.353806263777678e-06,
      "logits/chosen": -3.0428009033203125,
      "logits/rejected": -3.071193218231201,
      "logps/chosen": -28.65635108947754,
      "logps/rejected": -34.34435272216797,
      "loss": 20.586,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.05075501650571823,
      "rewards/margins": 0.07043538987636566,
      "rewards/rejected": -0.01968037523329258,
      "step": 120
    },
    {
      "epoch": 0.34,
      "grad_norm": 668.0,
      "learning_rate": 4.1940827077152755e-06,
      "logits/chosen": -2.744912624359131,
      "logits/rejected": -2.740849494934082,
      "logps/chosen": -28.600677490234375,
      "logps/rejected": -30.34041404724121,
      "loss": 21.4152,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.037400223314762115,
      "rewards/margins": 0.05806100368499756,
      "rewards/rejected": -0.020660774782299995,
      "step": 130
    },
    {
      "epoch": 0.36,
      "grad_norm": 624.0,
      "learning_rate": 4.0204024186666215e-06,
      "logits/chosen": -3.0207009315490723,
      "logits/rejected": -3.0183839797973633,
      "logps/chosen": -27.157459259033203,
      "logps/rejected": -31.912410736083984,
      "loss": 22.1262,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.03287653252482414,
      "rewards/margins": 0.05844072252511978,
      "rewards/rejected": -0.02556418441236019,
      "step": 140
    },
    {
      "epoch": 0.39,
      "grad_norm": 668.0,
      "learning_rate": 3.834196265035119e-06,
      "logits/chosen": -2.8160903453826904,
      "logits/rejected": -2.8107659816741943,
      "logps/chosen": -27.24477195739746,
      "logps/rejected": -31.60110092163086,
      "loss": 18.6765,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.05807359144091606,
      "rewards/margins": 0.09581951797008514,
      "rewards/rejected": -0.03774593397974968,
      "step": 150
    },
    {
      "epoch": 0.42,
      "grad_norm": 872.0,
      "learning_rate": 3.636998309800573e-06,
      "logits/chosen": -3.134445905685425,
      "logits/rejected": -3.1172776222229004,
      "logps/chosen": -31.69814109802246,
      "logps/rejected": -29.522869110107422,
      "loss": 16.4703,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 0.07935762405395508,
      "rewards/margins": 0.1336670219898224,
      "rewards/rejected": -0.054309405386447906,
      "step": 160
    },
    {
      "epoch": 0.44,
      "grad_norm": 688.0,
      "learning_rate": 3.4304331721118078e-06,
      "logits/chosen": -2.9469733238220215,
      "logits/rejected": -2.9541804790496826,
      "logps/chosen": -29.33526039123535,
      "logps/rejected": -31.71769142150879,
      "loss": 17.9149,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.0559355802834034,
      "rewards/margins": 0.11775630712509155,
      "rewards/rejected": -0.06182072311639786,
      "step": 170
    },
    {
      "epoch": 0.47,
      "grad_norm": 688.0,
      "learning_rate": 3.2162026428305436e-06,
      "logits/chosen": -2.798689126968384,
      "logits/rejected": -2.7966549396514893,
      "logps/chosen": -29.22808265686035,
      "logps/rejected": -30.211177825927734,
      "loss": 19.5177,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.04417658969759941,
      "rewards/margins": 0.088477224111557,
      "rewards/rejected": -0.044300634413957596,
      "step": 180
    },
    {
      "epoch": 0.49,
      "grad_norm": 496.0,
      "learning_rate": 2.996071664294641e-06,
      "logits/chosen": -2.9117610454559326,
      "logits/rejected": -2.9078633785247803,
      "logps/chosen": -29.73740577697754,
      "logps/rejected": -28.741764068603516,
      "loss": 22.4526,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.044930893927812576,
      "rewards/margins": 0.08335400372743607,
      "rewards/rejected": -0.03842311352491379,
      "step": 190
    },
    {
      "epoch": 0.52,
      "grad_norm": 446.0,
      "learning_rate": 2.7718537898066833e-06,
      "logits/chosen": -2.9809372425079346,
      "logits/rejected": -2.9683079719543457,
      "logps/chosen": -32.74761962890625,
      "logps/rejected": -30.57464027404785,
      "loss": 19.5946,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.09513962268829346,
      "rewards/margins": 0.10919489711523056,
      "rewards/rejected": -0.01405528374016285,
      "step": 200
    },
    {
      "epoch": 0.52,
      "eval_logits/chosen": -2.8196890354156494,
      "eval_logits/rejected": -2.817232370376587,
      "eval_logps/chosen": -31.39967155456543,
      "eval_logps/rejected": -35.00971221923828,
      "eval_loss": 23.757455825805664,
      "eval_rewards/accuracies": 0.5718438625335693,
      "eval_rewards/chosen": -0.011722045950591564,
      "eval_rewards/margins": 0.019433511421084404,
      "eval_rewards/rejected": -0.03115556202828884,
      "eval_runtime": 112.9888,
      "eval_samples_per_second": 3.036,
      "eval_steps_per_second": 0.381,
      "step": 200
    },
    {
      "epoch": 0.55,
      "grad_norm": 756.0,
      "learning_rate": 2.5453962426402006e-06,
      "logits/chosen": -2.9178690910339355,
      "logits/rejected": -2.9186930656433105,
      "logps/chosen": -32.30227279663086,
      "logps/rejected": -34.28144073486328,
      "loss": 17.7864,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.0731135681271553,
      "rewards/margins": 0.1151106134057045,
      "rewards/rejected": -0.041997045278549194,
      "step": 210
    },
    {
      "epoch": 0.57,
      "grad_norm": 584.0,
      "learning_rate": 2.3185646976551794e-06,
      "logits/chosen": -2.8974623680114746,
      "logits/rejected": -2.9132611751556396,
      "logps/chosen": -29.29262924194336,
      "logps/rejected": -28.876379013061523,
      "loss": 18.1243,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.08854089677333832,
      "rewards/margins": 0.12335184961557388,
      "rewards/rejected": -0.03481093421578407,
      "step": 220
    },
    {
      "epoch": 0.6,
      "grad_norm": 644.0,
      "learning_rate": 2.0932279108998323e-06,
      "logits/chosen": -2.9454588890075684,
      "logits/rejected": -2.9493844509124756,
      "logps/chosen": -30.615680694580078,
      "logps/rejected": -32.01018142700195,
      "loss": 20.5727,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.05361897870898247,
      "rewards/margins": 0.07882187515497208,
      "rewards/rejected": -0.02520289644598961,
      "step": 230
    },
    {
      "epoch": 0.62,
      "grad_norm": 692.0,
      "learning_rate": 1.8712423238279358e-06,
      "logits/chosen": -3.000526189804077,
      "logits/rejected": -3.007200241088867,
      "logps/chosen": -29.994653701782227,
      "logps/rejected": -30.485973358154297,
      "loss": 16.6669,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.09102725982666016,
      "rewards/margins": 0.11610045284032822,
      "rewards/rejected": -0.02507319487631321,
      "step": 240
    },
    {
      "epoch": 0.65,
      "grad_norm": 828.0,
      "learning_rate": 1.6544367689701824e-06,
      "logits/chosen": -2.8318004608154297,
      "logits/rejected": -2.821593761444092,
      "logps/chosen": -26.242666244506836,
      "logps/rejected": -29.69923973083496,
      "loss": 19.8026,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.07115494459867477,
      "rewards/margins": 0.08791451901197433,
      "rewards/rejected": -0.016759568825364113,
      "step": 250
    },
    {
      "epoch": 0.68,
      "grad_norm": 482.0,
      "learning_rate": 1.4445974030621963e-06,
      "logits/chosen": -2.813377857208252,
      "logits/rejected": -2.8331103324890137,
      "logps/chosen": -28.772018432617188,
      "logps/rejected": -34.57004165649414,
      "loss": 16.4634,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.1057942658662796,
      "rewards/margins": 0.14538946747779846,
      "rewards/rejected": -0.039595186710357666,
      "step": 260
    },
    {
      "epoch": 0.7,
      "grad_norm": 792.0,
      "learning_rate": 1.243452991757889e-06,
      "logits/chosen": -2.955620527267456,
      "logits/rejected": -2.9620866775512695,
      "logps/chosen": -30.066015243530273,
      "logps/rejected": -30.39284324645996,
      "loss": 16.2985,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.071534164249897,
      "rewards/margins": 0.1242922693490982,
      "rewards/rejected": -0.0527581050992012,
      "step": 270
    },
    {
      "epoch": 0.73,
      "grad_norm": 552.0,
      "learning_rate": 1.0526606671603523e-06,
      "logits/chosen": -2.9709465503692627,
      "logits/rejected": -2.957740306854248,
      "logps/chosen": -30.085498809814453,
      "logps/rejected": -28.866928100585938,
      "loss": 21.3586,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.06361865997314453,
      "rewards/margins": 0.08717648684978485,
      "rewards/rejected": -0.02355783060193062,
      "step": 280
    },
    {
      "epoch": 0.75,
      "grad_norm": 616.0,
      "learning_rate": 8.737922755071455e-07,
      "logits/chosen": -2.89894437789917,
      "logits/rejected": -2.881261110305786,
      "logps/chosen": -31.43949317932129,
      "logps/rejected": -31.19020652770996,
      "loss": 13.7665,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": 0.11968123912811279,
      "rewards/margins": 0.1863711178302765,
      "rewards/rejected": -0.0666898787021637,
      "step": 290
    },
    {
      "epoch": 0.78,
      "grad_norm": 760.0,
      "learning_rate": 7.08321427484816e-07,
      "logits/chosen": -2.9006505012512207,
      "logits/rejected": -2.896925449371338,
      "logps/chosen": -31.2346134185791,
      "logps/rejected": -27.882694244384766,
      "loss": 18.7484,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.0988042801618576,
      "rewards/margins": 0.12671498954296112,
      "rewards/rejected": -0.027910714969038963,
      "step": 300
    },
    {
      "epoch": 0.78,
      "eval_logits/chosen": -2.818225860595703,
      "eval_logits/rejected": -2.81588077545166,
      "eval_logps/chosen": -31.401628494262695,
      "eval_logps/rejected": -35.010406494140625,
      "eval_loss": 23.85563087463379,
      "eval_rewards/accuracies": 0.5718438625335693,
      "eval_rewards/chosen": -0.011917698197066784,
      "eval_rewards/margins": 0.019307522103190422,
      "eval_rewards/rejected": -0.03122522123157978,
      "eval_runtime": 113.0254,
      "eval_samples_per_second": 3.035,
      "eval_steps_per_second": 0.38,
      "step": 300
    },
    {
      "epoch": 0.81,
      "grad_norm": 784.0,
      "learning_rate": 5.576113578589035e-07,
      "logits/chosen": -2.781919002532959,
      "logits/rejected": -2.7987494468688965,
      "logps/chosen": -28.510242462158203,
      "logps/rejected": -31.248886108398438,
      "loss": 17.0117,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": 0.07191774249076843,
      "rewards/margins": 0.1236056461930275,
      "rewards/rejected": -0.051687903702259064,
      "step": 310
    },
    {
      "epoch": 0.83,
      "grad_norm": 524.0,
      "learning_rate": 4.229036944380913e-07,
      "logits/chosen": -3.0269644260406494,
      "logits/rejected": -3.012279748916626,
      "logps/chosen": -29.241586685180664,
      "logps/rejected": -28.835968017578125,
      "loss": 15.086,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.09699983894824982,
      "rewards/margins": 0.1487189531326294,
      "rewards/rejected": -0.051719121634960175,
      "step": 320
    },
    {
      "epoch": 0.86,
      "grad_norm": 360.0,
      "learning_rate": 3.053082288996112e-07,
      "logits/chosen": -2.9412882328033447,
      "logits/rejected": -2.9239907264709473,
      "logps/chosen": -27.266399383544922,
      "logps/rejected": -31.0549259185791,
      "loss": 14.0005,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": 0.10956673324108124,
      "rewards/margins": 0.1758948117494583,
      "rewards/rejected": -0.06632807105779648,
      "step": 330
    },
    {
      "epoch": 0.88,
      "grad_norm": 716.0,
      "learning_rate": 2.0579377374915805e-07,
      "logits/chosen": -3.158176898956299,
      "logits/rejected": -3.163879871368408,
      "logps/chosen": -30.825565338134766,
      "logps/rejected": -33.42596435546875,
      "loss": 15.6936,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.08136396110057831,
      "rewards/margins": 0.15435989201068878,
      "rewards/rejected": -0.07299593091011047,
      "step": 340
    },
    {
      "epoch": 0.91,
      "grad_norm": 552.0,
      "learning_rate": 1.2518018074041684e-07,
      "logits/chosen": -3.034761428833008,
      "logits/rejected": -3.038020372390747,
      "logps/chosen": -29.743371963500977,
      "logps/rejected": -31.885990142822266,
      "loss": 17.0818,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.1063508540391922,
      "rewards/margins": 0.1436188519001007,
      "rewards/rejected": -0.03726799413561821,
      "step": 350
    },
    {
      "epoch": 0.94,
      "grad_norm": 608.0,
      "learning_rate": 6.41315865106129e-08,
      "logits/chosen": -2.8735318183898926,
      "logits/rejected": -2.8751091957092285,
      "logps/chosen": -27.433670043945312,
      "logps/rejected": -30.212413787841797,
      "loss": 15.0018,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.12160570919513702,
      "rewards/margins": 0.1597532331943512,
      "rewards/rejected": -0.038147542625665665,
      "step": 360
    },
    {
      "epoch": 0.96,
      "grad_norm": 984.0,
      "learning_rate": 2.3150941078050325e-08,
      "logits/chosen": -2.950293779373169,
      "logits/rejected": -2.947977066040039,
      "logps/chosen": -29.780099868774414,
      "logps/rejected": -32.294410705566406,
      "loss": 17.366,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.10065505653619766,
      "rewards/margins": 0.13729141652584076,
      "rewards/rejected": -0.03663638234138489,
      "step": 370
    },
    {
      "epoch": 0.99,
      "grad_norm": 556.0,
      "learning_rate": 2.575864278703266e-09,
      "logits/chosen": -2.906440258026123,
      "logits/rejected": -2.8900303840637207,
      "logps/chosen": -28.174755096435547,
      "logps/rejected": -28.421972274780273,
      "loss": 16.0238,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": 0.07702798396348953,
      "rewards/margins": 0.13026344776153564,
      "rewards/rejected": -0.05323547124862671,
      "step": 380
    },
    {
      "epoch": 1.0,
      "step": 385,
      "total_flos": 0.0,
      "train_loss": 19.552858486423244,
      "train_runtime": 2721.3646,
      "train_samples_per_second": 1.131,
      "train_steps_per_second": 0.141
    }
  ],
  "logging_steps": 10,
  "max_steps": 385,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}