{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9995638901003053,
  "eval_steps": 100,
  "global_step": 573,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0017444395987788923,
      "grad_norm": 5.784088092395373,
      "learning_rate": 3.4482758620689654e-09,
      "logits/chosen": -2.946424722671509,
      "logits/rejected": -2.985557794570923,
      "logps/chosen": -261.0296630859375,
      "logps/rejected": -338.7343444824219,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.01744439598778892,
      "grad_norm": 6.628089918841342,
      "learning_rate": 3.448275862068966e-08,
      "logits/chosen": -2.282945156097412,
      "logits/rejected": -2.351466178894043,
      "logps/chosen": -388.3266906738281,
      "logps/rejected": -407.86871337890625,
      "loss": 0.6931,
      "rewards/accuracies": 0.4097222089767456,
      "rewards/chosen": 0.0005646743229590356,
      "rewards/margins": -0.0001476690813433379,
      "rewards/rejected": 0.0007123433169908822,
      "step": 10
    },
    {
      "epoch": 0.03488879197557784,
      "grad_norm": 5.358446945552647,
      "learning_rate": 6.896551724137931e-08,
      "logits/chosen": -2.507432222366333,
      "logits/rejected": -2.6201975345611572,
      "logps/chosen": -354.7762145996094,
      "logps/rejected": -351.57476806640625,
      "loss": 0.693,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.00036403987905941904,
      "rewards/margins": 0.00059556431369856,
      "rewards/rejected": -0.00023152439098339528,
      "step": 20
    },
    {
      "epoch": 0.05233318796336677,
      "grad_norm": 6.200484723185969,
      "learning_rate": 1.0344827586206897e-07,
      "logits/chosen": -2.2877583503723145,
      "logits/rejected": -2.38248348236084,
      "logps/chosen": -374.0684509277344,
      "logps/rejected": -363.9664001464844,
      "loss": 0.6921,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -0.003327027428895235,
      "rewards/margins": 0.0030093365348875523,
      "rewards/rejected": -0.0063363634981215,
      "step": 30
    },
    {
      "epoch": 0.06977758395115569,
      "grad_norm": 6.01035041441692,
      "learning_rate": 1.3793103448275863e-07,
      "logits/chosen": -2.264878988265991,
      "logits/rejected": -2.4066078662872314,
      "logps/chosen": -391.4086608886719,
      "logps/rejected": -380.48248291015625,
      "loss": 0.6893,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.01258127111941576,
      "rewards/margins": 0.0077340505085885525,
      "rewards/rejected": -0.020315321162343025,
      "step": 40
    },
    {
      "epoch": 0.08722197993894461,
      "grad_norm": 5.071749818122234,
      "learning_rate": 1.7241379310344825e-07,
      "logits/chosen": -2.1940102577209473,
      "logits/rejected": -2.205831289291382,
      "logps/chosen": -352.8985290527344,
      "logps/rejected": -375.4139099121094,
      "loss": 0.6837,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.03738312050700188,
      "rewards/margins": 0.01618872582912445,
      "rewards/rejected": -0.053571850061416626,
      "step": 50
    },
    {
      "epoch": 0.10466637592673354,
      "grad_norm": 5.2912013186214075,
      "learning_rate": 1.9999255765332945e-07,
      "logits/chosen": -2.174111843109131,
      "logits/rejected": -2.182332992553711,
      "logps/chosen": -343.0633850097656,
      "logps/rejected": -346.3739318847656,
      "loss": 0.67,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.07623813301324844,
      "rewards/margins": 0.036114297807216644,
      "rewards/rejected": -0.11235243082046509,
      "step": 60
    },
    {
      "epoch": 0.12211077191452246,
      "grad_norm": 6.100737724237624,
      "learning_rate": 1.9973219181729437e-07,
      "logits/chosen": -2.14943790435791,
      "logits/rejected": -2.206282138824463,
      "logps/chosen": -377.2199401855469,
      "logps/rejected": -405.7254943847656,
      "loss": 0.6554,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -0.15880194306373596,
      "rewards/margins": 0.06730278581380844,
      "rewards/rejected": -0.2261047065258026,
      "step": 70
    },
    {
      "epoch": 0.13955516790231137,
      "grad_norm": 6.8468151533929635,
      "learning_rate": 1.9910081567726745e-07,
      "logits/chosen": -2.1643874645233154,
      "logits/rejected": -2.2666897773742676,
      "logps/chosen": -394.8481140136719,
      "logps/rejected": -411.7466735839844,
      "loss": 0.6296,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.32392174005508423,
      "rewards/margins": 0.18173113465309143,
      "rewards/rejected": -0.5056527853012085,
      "step": 80
    },
    {
      "epoch": 0.1569995638901003,
      "grad_norm": 9.734003752103645,
      "learning_rate": 1.9810077799395846e-07,
      "logits/chosen": -2.2468771934509277,
      "logits/rejected": -2.21939754486084,
      "logps/chosen": -451.7411193847656,
      "logps/rejected": -489.8981018066406,
      "loss": 0.6041,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.6296826004981995,
      "rewards/margins": 0.2538929581642151,
      "rewards/rejected": -0.8835756182670593,
      "step": 90
    },
    {
      "epoch": 0.17444395987788922,
      "grad_norm": 9.198990938758325,
      "learning_rate": 1.9673579897323202e-07,
      "logits/chosen": -2.2583627700805664,
      "logits/rejected": -2.319267749786377,
      "logps/chosen": -449.26287841796875,
      "logps/rejected": -485.2509765625,
      "loss": 0.6011,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.8181831240653992,
      "rewards/margins": 0.3021746277809143,
      "rewards/rejected": -1.1203577518463135,
      "step": 100
    },
    {
      "epoch": 0.17444395987788922,
      "eval_logits/chosen": -2.4041965007781982,
      "eval_logits/rejected": -2.4543516635894775,
      "eval_logps/chosen": -468.4564514160156,
      "eval_logps/rejected": -492.26025390625,
      "eval_loss": 0.5738404989242554,
      "eval_rewards/accuracies": 0.6987951993942261,
      "eval_rewards/chosen": -0.8769527673721313,
      "eval_rewards/margins": 0.403839111328125,
      "eval_rewards/rejected": -1.2807917594909668,
      "eval_runtime": 115.5904,
      "eval_samples_per_second": 22.865,
      "eval_steps_per_second": 0.718,
      "step": 100
    },
    {
      "epoch": 0.19188835586567815,
      "grad_norm": 8.908627437983846,
      "learning_rate": 1.9501095642669734e-07,
      "logits/chosen": -2.5096914768218994,
      "logits/rejected": -2.509361982345581,
      "logps/chosen": -456.6226501464844,
      "logps/rejected": -513.7633056640625,
      "loss": 0.5838,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.8924768567085266,
      "rewards/margins": 0.5146722793579102,
      "rewards/rejected": -1.407149076461792,
      "step": 110
    },
    {
      "epoch": 0.2093327518534671,
      "grad_norm": 10.31116892436808,
      "learning_rate": 1.9293266688191557e-07,
      "logits/chosen": -2.586188793182373,
      "logits/rejected": -2.6450071334838867,
      "logps/chosen": -438.7850646972656,
      "logps/rejected": -467.4371032714844,
      "loss": 0.5873,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.8641183972358704,
      "rewards/margins": 0.36442071199417114,
      "rewards/rejected": -1.2285391092300415,
      "step": 120
    },
    {
      "epoch": 0.226777147841256,
      "grad_norm": 13.457335930423358,
      "learning_rate": 1.9050866171249576e-07,
      "logits/chosen": -2.546307325363159,
      "logits/rejected": -2.5156311988830566,
      "logps/chosen": -459.08306884765625,
      "logps/rejected": -529.2793579101562,
      "loss": 0.5508,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.9206414222717285,
      "rewards/margins": 0.5198907256126404,
      "rewards/rejected": -1.4405320882797241,
      "step": 130
    },
    {
      "epoch": 0.24422154382904493,
      "grad_norm": 13.212896873797849,
      "learning_rate": 1.8774795837687736e-07,
      "logits/chosen": -2.499363422393799,
      "logits/rejected": -2.6360957622528076,
      "logps/chosen": -512.037353515625,
      "logps/rejected": -548.9447631835938,
      "loss": 0.5728,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.1992404460906982,
      "rewards/margins": 0.5107727646827698,
      "rewards/rejected": -1.7100131511688232,
      "step": 140
    },
    {
      "epoch": 0.26166593981683384,
      "grad_norm": 13.642312982739075,
      "learning_rate": 1.8466082687279243e-07,
      "logits/chosen": -2.574205160140991,
      "logits/rejected": -2.595555305480957,
      "logps/chosen": -434.45709228515625,
      "logps/rejected": -513.8720703125,
      "loss": 0.572,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.979955792427063,
      "rewards/margins": 0.5753864645957947,
      "rewards/rejected": -1.5553423166275024,
      "step": 150
    },
    {
      "epoch": 0.27911033580462274,
      "grad_norm": 12.825272909657283,
      "learning_rate": 1.8125875153219963e-07,
      "logits/chosen": -2.561600923538208,
      "logits/rejected": -2.64467716217041,
      "logps/chosen": -466.68121337890625,
      "logps/rejected": -522.5623168945312,
      "loss": 0.5687,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.9761050939559937,
      "rewards/margins": 0.5850359797477722,
      "rewards/rejected": -1.561141014099121,
      "step": 160
    },
    {
      "epoch": 0.2965547317924117,
      "grad_norm": 10.830977614727727,
      "learning_rate": 1.77554388298815e-07,
      "logits/chosen": -2.541180372238159,
      "logits/rejected": -2.5329298973083496,
      "logps/chosen": -469.1494140625,
      "logps/rejected": -527.3177490234375,
      "loss": 0.5516,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.9942892789840698,
      "rewards/margins": 0.6251423954963684,
      "rewards/rejected": -1.6194318532943726,
      "step": 170
    },
    {
      "epoch": 0.3139991277802006,
      "grad_norm": 11.736716858310073,
      "learning_rate": 1.735615176471701e-07,
      "logits/chosen": -2.4211761951446533,
      "logits/rejected": -2.5089921951293945,
      "logps/chosen": -524.66259765625,
      "logps/rejected": -578.3092651367188,
      "loss": 0.5538,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.192502737045288,
      "rewards/margins": 0.5517352819442749,
      "rewards/rejected": -1.744238257408142,
      "step": 180
    },
    {
      "epoch": 0.3314435237679895,
      "grad_norm": 12.055419698459572,
      "learning_rate": 1.692949933183416e-07,
      "logits/chosen": -2.556756019592285,
      "logits/rejected": -2.691668748855591,
      "logps/chosen": -500.4410095214844,
      "logps/rejected": -543.4578857421875,
      "loss": 0.5649,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -1.0238206386566162,
      "rewards/margins": 0.6150747537612915,
      "rewards/rejected": -1.6388953924179077,
      "step": 190
    },
    {
      "epoch": 0.34888791975577843,
      "grad_norm": 18.710020531498376,
      "learning_rate": 1.64770687063059e-07,
      "logits/chosen": -2.658360242843628,
      "logits/rejected": -2.7501015663146973,
      "logps/chosen": -490.89263916015625,
      "logps/rejected": -532.71142578125,
      "loss": 0.5447,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.049081563949585,
      "rewards/margins": 0.5787625312805176,
      "rewards/rejected": -1.627844214439392,
      "step": 200
    },
    {
      "epoch": 0.34888791975577843,
      "eval_logits/chosen": -2.5731849670410156,
      "eval_logits/rejected": -2.631882905960083,
      "eval_logps/chosen": -513.11767578125,
      "eval_logps/rejected": -572.9752197265625,
      "eval_loss": 0.524207353591919,
      "eval_rewards/accuracies": 0.7289156913757324,
      "eval_rewards/chosen": -1.3235652446746826,
      "eval_rewards/margins": 0.7643768787384033,
      "eval_rewards/rejected": -2.087942123413086,
      "eval_runtime": 115.5218,
      "eval_samples_per_second": 22.879,
      "eval_steps_per_second": 0.718,
      "step": 200
    },
    {
      "epoch": 0.3663323157435674,
      "grad_norm": 14.782374340335828,
      "learning_rate": 1.6000542959774935e-07,
      "logits/chosen": -2.4421656131744385,
      "logits/rejected": -2.49908709526062,
      "logps/chosen": -515.1295166015625,
      "logps/rejected": -585.6087036132812,
      "loss": 0.5478,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -1.2250968217849731,
      "rewards/margins": 0.6607337594032288,
      "rewards/rejected": -1.8858305215835571,
      "step": 210
    },
    {
      "epoch": 0.3837767117313563,
      "grad_norm": 16.586610273944082,
      "learning_rate": 1.550169479931667e-07,
      "logits/chosen": -2.4158313274383545,
      "logits/rejected": -2.621553897857666,
      "logps/chosen": -513.0745849609375,
      "logps/rejected": -560.3120727539062,
      "loss": 0.5455,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -1.242750883102417,
      "rewards/margins": 0.648566722869873,
      "rewards/rejected": -1.89131760597229,
      "step": 220
    },
    {
      "epoch": 0.4012211077191452,
      "grad_norm": 17.097117314278307,
      "learning_rate": 1.498237997285247e-07,
      "logits/chosen": -2.6082608699798584,
      "logits/rejected": -2.528676748275757,
      "logps/chosen": -483.05859375,
      "logps/rejected": -599.164794921875,
      "loss": 0.5383,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -1.363515019416809,
      "rewards/margins": 0.8434240221977234,
      "rewards/rejected": -2.2069389820098877,
      "step": 230
    },
    {
      "epoch": 0.4186655037069342,
      "grad_norm": 13.962480844795232,
      "learning_rate": 1.4444530365645478e-07,
      "logits/chosen": -2.5863888263702393,
      "logits/rejected": -2.6373305320739746,
      "logps/chosen": -480.4344787597656,
      "logps/rejected": -544.6641235351562,
      "loss": 0.5372,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -1.089019536972046,
      "rewards/margins": 0.6869910955429077,
      "rewards/rejected": -1.7760107517242432,
      "step": 240
    },
    {
      "epoch": 0.4361098996947231,
      "grad_norm": 17.51761421234639,
      "learning_rate": 1.389014681356059e-07,
      "logits/chosen": -2.37886905670166,
      "logits/rejected": -2.5640721321105957,
      "logps/chosen": -522.6568603515625,
      "logps/rejected": -547.7442626953125,
      "loss": 0.5414,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -1.2930206060409546,
      "rewards/margins": 0.7195638418197632,
      "rewards/rejected": -2.012584686279297,
      "step": 250
    },
    {
      "epoch": 0.453554295682512,
      "grad_norm": 12.985606778494319,
      "learning_rate": 1.3321291659823587e-07,
      "logits/chosen": -2.4836788177490234,
      "logits/rejected": -2.5293915271759033,
      "logps/chosen": -514.5162353515625,
      "logps/rejected": -592.8468627929688,
      "loss": 0.5395,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -1.1346120834350586,
      "rewards/margins": 0.8249943852424622,
      "rewards/rejected": -1.959606409072876,
      "step": 260
    },
    {
      "epoch": 0.4709986916703009,
      "grad_norm": 13.86783402737886,
      "learning_rate": 1.2740081082968898e-07,
      "logits/chosen": -2.4625072479248047,
      "logits/rejected": -2.4806463718414307,
      "logps/chosen": -491.41607666015625,
      "logps/rejected": -535.6668701171875,
      "loss": 0.5356,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -1.145365595817566,
      "rewards/margins": 0.6424422264099121,
      "rewards/rejected": -1.787807822227478,
      "step": 270
    },
    {
      "epoch": 0.48844308765808986,
      "grad_norm": 17.896568729720236,
      "learning_rate": 1.2148677224516458e-07,
      "logits/chosen": -2.426962375640869,
      "logits/rejected": -2.6068427562713623,
      "logps/chosen": -513.377197265625,
      "logps/rejected": -570.4599609375,
      "loss": 0.5408,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.4353644847869873,
      "rewards/margins": 0.6926987767219543,
      "rewards/rejected": -2.128063201904297,
      "step": 280
    },
    {
      "epoch": 0.5058874836458788,
      "grad_norm": 12.317018338997508,
      "learning_rate": 1.1549280145663242e-07,
      "logits/chosen": -2.395157814025879,
      "logits/rejected": -2.5112946033477783,
      "logps/chosen": -489.17291259765625,
      "logps/rejected": -561.0538940429688,
      "loss": 0.5286,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -1.206729531288147,
      "rewards/margins": 0.7861859798431396,
      "rewards/rejected": -1.9929155111312866,
      "step": 290
    },
    {
      "epoch": 0.5233318796336677,
      "grad_norm": 18.19143218783634,
      "learning_rate": 1.0944119642911108e-07,
      "logits/chosen": -2.4938693046569824,
      "logits/rejected": -2.5243020057678223,
      "logps/chosen": -499.1390686035156,
      "logps/rejected": -593.7322998046875,
      "loss": 0.5173,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -1.3326854705810547,
      "rewards/margins": 0.7864323854446411,
      "rewards/rejected": -2.1191182136535645,
      "step": 300
    },
    {
      "epoch": 0.5233318796336677,
      "eval_logits/chosen": -2.5555672645568848,
      "eval_logits/rejected": -2.614039659500122,
      "eval_logps/chosen": -549.0404052734375,
      "eval_logps/rejected": -632.2808837890625,
      "eval_loss": 0.5003006458282471,
      "eval_rewards/accuracies": 0.7259036302566528,
      "eval_rewards/chosen": -1.682791829109192,
      "eval_rewards/margins": 0.998206377029419,
      "eval_rewards/rejected": -2.6809980869293213,
      "eval_runtime": 115.4293,
      "eval_samples_per_second": 22.897,
      "eval_steps_per_second": 0.719,
      "step": 300
    },
    {
      "epoch": 0.5407762756214566,
      "grad_norm": 12.275061942289778,
      "learning_rate": 1.0335446953077364e-07,
      "logits/chosen": -2.607405424118042,
      "logits/rejected": -2.575361728668213,
      "logps/chosen": -478.61065673828125,
      "logps/rejected": -597.8718872070312,
      "loss": 0.5464,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.3932697772979736,
      "rewards/margins": 0.8137472867965698,
      "rewards/rejected": -2.207017421722412,
      "step": 310
    },
    {
      "epoch": 0.5582206716092455,
      "grad_norm": 15.17777342496625,
      "learning_rate": 9.725526378545951e-08,
      "logits/chosen": -2.447172164916992,
      "logits/rejected": -2.569131374359131,
      "logps/chosen": -548.3880615234375,
      "logps/rejected": -593.2091064453125,
      "loss": 0.5209,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -1.2796152830123901,
      "rewards/margins": 0.6179531812667847,
      "rewards/rejected": -1.8975684642791748,
      "step": 320
    },
    {
      "epoch": 0.5756650675970345,
      "grad_norm": 18.51656864343838,
      "learning_rate": 9.116626863913827e-08,
      "logits/chosen": -2.427771806716919,
      "logits/rejected": -2.4281983375549316,
      "logps/chosen": -509.34130859375,
      "logps/rejected": -607.3682861328125,
      "loss": 0.535,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.440760612487793,
      "rewards/margins": 0.8197328448295593,
      "rewards/rejected": -2.260493755340576,
      "step": 330
    },
    {
      "epoch": 0.5931094635848234,
      "grad_norm": 15.020047645081487,
      "learning_rate": 8.511013555368081e-08,
      "logits/chosen": -2.5503029823303223,
      "logits/rejected": -2.5775723457336426,
      "logps/chosen": -479.23614501953125,
      "logps/rejected": -573.2176513671875,
      "loss": 0.533,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -1.3124325275421143,
      "rewards/margins": 0.8196013569831848,
      "rewards/rejected": -2.1320338249206543,
      "step": 340
    },
    {
      "epoch": 0.6105538595726123,
      "grad_norm": 18.51590550961604,
      "learning_rate": 7.910939374193313e-08,
      "logits/chosen": -2.548419952392578,
      "logits/rejected": -2.51499605178833,
      "logps/chosen": -466.15008544921875,
      "logps/rejected": -586.0462646484375,
      "loss": 0.5176,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -1.3407198190689087,
      "rewards/margins": 0.9716306924819946,
      "rewards/rejected": -2.3123507499694824,
      "step": 350
    },
    {
      "epoch": 0.6279982555604012,
      "grad_norm": 15.425853441749453,
      "learning_rate": 7.31863663575649e-08,
      "logits/chosen": -2.620063543319702,
      "logits/rejected": -2.692065715789795,
      "logps/chosen": -528.7435913085938,
      "logps/rejected": -594.0607299804688,
      "loss": 0.5143,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -1.457281231880188,
      "rewards/margins": 0.8597862124443054,
      "rewards/rejected": -2.3170676231384277,
      "step": 360
    },
    {
      "epoch": 0.6454426515481901,
      "grad_norm": 22.465982391087056,
      "learning_rate": 6.736308745147167e-08,
      "logits/chosen": -2.482309103012085,
      "logits/rejected": -2.63562273979187,
      "logps/chosen": -524.6629638671875,
      "logps/rejected": -600.9884033203125,
      "loss": 0.5375,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -1.3087918758392334,
      "rewards/margins": 0.8371159434318542,
      "rewards/rejected": -2.1459078788757324,
      "step": 370
    },
    {
      "epoch": 0.662887047535979,
      "grad_norm": 16.00565580949092,
      "learning_rate": 6.166122000365834e-08,
      "logits/chosen": -2.5903308391571045,
      "logits/rejected": -2.7240214347839355,
      "logps/chosen": -514.4639282226562,
      "logps/rejected": -595.83154296875,
      "loss": 0.5013,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -1.2856967449188232,
      "rewards/margins": 0.8659652471542358,
      "rewards/rejected": -2.1516618728637695,
      "step": 380
    },
    {
      "epoch": 0.680331443523768,
      "grad_norm": 13.854127143964314,
      "learning_rate": 5.610197533553057e-08,
      "logits/chosen": -2.4627175331115723,
      "logits/rejected": -2.506941795349121,
      "logps/chosen": -571.271484375,
      "logps/rejected": -634.978271484375,
      "loss": 0.5238,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.5051517486572266,
      "rewards/margins": 0.8115229606628418,
      "rewards/rejected": -2.3166747093200684,
      "step": 390
    },
    {
      "epoch": 0.6977758395115569,
      "grad_norm": 14.662169677675843,
      "learning_rate": 5.0706034202386236e-08,
      "logits/chosen": -2.586434841156006,
      "logits/rejected": -2.635715961456299,
      "logps/chosen": -530.5022583007812,
      "logps/rejected": -592.48681640625,
      "loss": 0.5144,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.4007527828216553,
      "rewards/margins": 0.9174124002456665,
      "rewards/rejected": -2.3181653022766113,
      "step": 400
    },
    {
      "epoch": 0.6977758395115569,
      "eval_logits/chosen": -2.636509895324707,
      "eval_logits/rejected": -2.7027053833007812,
      "eval_logps/chosen": -551.8306274414062,
      "eval_logps/rejected": -645.5278930664062,
      "eval_loss": 0.4851160943508148,
      "eval_rewards/accuracies": 0.7319276928901672,
      "eval_rewards/chosen": -1.7106943130493164,
      "eval_rewards/margins": 1.1027746200561523,
      "eval_rewards/rejected": -2.8134689331054688,
      "eval_runtime": 115.3965,
      "eval_samples_per_second": 22.904,
      "eval_steps_per_second": 0.719,
      "step": 400
    },
    {
      "epoch": 0.7152202354993459,
      "grad_norm": 15.725842795128791,
      "learning_rate": 4.5493469859647183e-08,
      "logits/chosen": -2.5586659908294678,
      "logits/rejected": -2.570362091064453,
      "logps/chosen": -540.7760009765625,
      "logps/rejected": -640.4522705078125,
      "loss": 0.5189,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -1.5797544717788696,
      "rewards/margins": 0.9039770364761353,
      "rewards/rejected": -2.483731269836426,
      "step": 410
    },
    {
      "epoch": 0.7326646314871348,
      "grad_norm": 16.117914868185306,
      "learning_rate": 4.048367338903067e-08,
      "logits/chosen": -2.653681516647339,
      "logits/rejected": -2.6537811756134033,
      "logps/chosen": -497.29254150390625,
      "logps/rejected": -593.9075927734375,
      "loss": 0.5409,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -1.431205153465271,
      "rewards/margins": 0.8377032279968262,
      "rewards/rejected": -2.2689082622528076,
      "step": 420
    },
    {
      "epoch": 0.7501090274749237,
      "grad_norm": 17.441237136205846,
      "learning_rate": 3.569528156245196e-08,
      "logits/chosen": -2.567112684249878,
      "logits/rejected": -2.5458970069885254,
      "logps/chosen": -497.869140625,
      "logps/rejected": -593.7197875976562,
      "loss": 0.5176,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -1.315579891204834,
      "rewards/margins": 0.8977547883987427,
      "rewards/rejected": -2.213334798812866,
      "step": 430
    },
    {
      "epoch": 0.7675534234627126,
      "grad_norm": 15.596090656894537,
      "learning_rate": 3.1146107512008505e-08,
      "logits/chosen": -2.474587917327881,
      "logits/rejected": -2.5896928310394287,
      "logps/chosen": -536.7967529296875,
      "logps/rejected": -597.7022705078125,
      "loss": 0.5158,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.424076795578003,
      "rewards/margins": 0.8131541013717651,
      "rewards/rejected": -2.2372307777404785,
      "step": 440
    },
    {
      "epoch": 0.7849978194505015,
      "grad_norm": 16.944860501421836,
      "learning_rate": 2.6853074463958614e-08,
      "logits/chosen": -2.64913010597229,
      "logits/rejected": -2.6915199756622314,
      "logps/chosen": -557.8106079101562,
      "logps/rejected": -629.3753662109375,
      "loss": 0.5297,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -1.6522800922393799,
      "rewards/margins": 1.0145955085754395,
      "rewards/rejected": -2.6668756008148193,
      "step": 450
    },
    {
      "epoch": 0.8024422154382904,
      "grad_norm": 16.014043499865636,
      "learning_rate": 2.283215278320839e-08,
      "logits/chosen": -2.3578901290893555,
      "logits/rejected": -2.526782989501953,
      "logps/chosen": -549.39306640625,
      "logps/rejected": -602.7447509765625,
      "loss": 0.5112,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -1.3445526361465454,
      "rewards/margins": 0.8740399479866028,
      "rewards/rejected": -2.218592405319214,
      "step": 460
    },
    {
      "epoch": 0.8198866114260793,
      "grad_norm": 16.802924841873157,
      "learning_rate": 1.9098300562505266e-08,
      "logits/chosen": -2.4426217079162598,
      "logits/rejected": -2.56174898147583,
      "logps/chosen": -512.5400390625,
      "logps/rejected": -600.3233642578125,
      "loss": 0.5097,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -1.3759263753890991,
      "rewards/margins": 0.9335910081863403,
      "rewards/rejected": -2.3095173835754395,
      "step": 470
    },
    {
      "epoch": 0.8373310074138683,
      "grad_norm": 21.49561746024287,
      "learning_rate": 1.5665407977350388e-08,
      "logits/chosen": -2.4791998863220215,
      "logits/rejected": -2.603175640106201,
      "logps/chosen": -527.2852783203125,
      "logps/rejected": -596.7613525390625,
      "loss": 0.4766,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.4009100198745728,
      "rewards/margins": 0.7795251607894897,
      "rewards/rejected": -2.1804349422454834,
      "step": 480
    },
    {
      "epoch": 0.8547754034016573,
      "grad_norm": 16.63581899808769,
      "learning_rate": 1.2546245613633688e-08,
      "logits/chosen": -2.5286920070648193,
      "logits/rejected": -2.70131254196167,
      "logps/chosen": -574.4547119140625,
      "logps/rejected": -626.7061767578125,
      "loss": 0.5147,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.7204509973526,
      "rewards/margins": 0.9493522644042969,
      "rewards/rejected": -2.6698031425476074,
      "step": 490
    },
    {
      "epoch": 0.8722197993894462,
      "grad_norm": 19.877142016669573,
      "learning_rate": 9.752416960215437e-09,
      "logits/chosen": -2.546855926513672,
      "logits/rejected": -2.697226047515869,
      "logps/chosen": -550.6837158203125,
      "logps/rejected": -651.0521850585938,
      "loss": 0.5162,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -1.7263736724853516,
      "rewards/margins": 0.9590435028076172,
      "rewards/rejected": -2.6854171752929688,
      "step": 500
    },
    {
      "epoch": 0.8722197993894462,
      "eval_logits/chosen": -2.581234931945801,
      "eval_logits/rejected": -2.644242525100708,
      "eval_logps/chosen": -551.6072387695312,
      "eval_logps/rejected": -648.58154296875,
      "eval_loss": 0.4797753691673279,
      "eval_rewards/accuracies": 0.7259036302566528,
      "eval_rewards/chosen": -1.7084604501724243,
      "eval_rewards/margins": 1.135544776916504,
      "eval_rewards/rejected": -2.8440048694610596,
      "eval_runtime": 115.2944,
      "eval_samples_per_second": 22.924,
      "eval_steps_per_second": 0.72,
      "step": 500
    },
    {
      "epoch": 0.8896641953772351,
      "grad_norm": 15.90276225770206,
      "learning_rate": 7.294315243185578e-09,
      "logits/chosen": -2.5750200748443604,
      "logits/rejected": -2.5045828819274902,
      "logps/chosen": -503.51995849609375,
      "logps/rejected": -624.311279296875,
      "loss": 0.4957,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -1.5083858966827393,
      "rewards/margins": 0.9982180595397949,
      "rewards/rejected": -2.506603956222534,
      "step": 510
    },
    {
      "epoch": 0.907108591365024,
      "grad_norm": 13.947597611873226,
      "learning_rate": 5.18108476238015e-09,
      "logits/chosen": -2.6210732460021973,
      "logits/rejected": -2.634568452835083,
      "logps/chosen": -557.1134643554688,
      "logps/rejected": -666.3057861328125,
      "loss": 0.5147,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -1.5826480388641357,
      "rewards/margins": 0.9864925146102905,
      "rewards/rejected": -2.569140672683716,
      "step": 520
    },
    {
      "epoch": 0.9245529873528129,
      "grad_norm": 15.796674513066053,
      "learning_rate": 3.4205868739851316e-09,
      "logits/chosen": -2.608445167541504,
      "logits/rejected": -2.5723965167999268,
      "logps/chosen": -519.9942626953125,
      "logps/rejected": -620.4813842773438,
      "loss": 0.5135,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.6632301807403564,
      "rewards/margins": 0.8547458648681641,
      "rewards/rejected": -2.5179758071899414,
      "step": 530
    },
    {
      "epoch": 0.9419973833406018,
      "grad_norm": 19.347436900380448,
      "learning_rate": 2.0193707457752727e-09,
      "logits/chosen": -2.589115858078003,
      "logits/rejected": -2.6705126762390137,
      "logps/chosen": -543.7213134765625,
      "logps/rejected": -621.6940307617188,
      "loss": 0.5146,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -1.5609385967254639,
      "rewards/margins": 0.875388503074646,
      "rewards/rejected": -2.4363272190093994,
      "step": 540
    },
    {
      "epoch": 0.9594417793283908,
      "grad_norm": 17.51566703914577,
      "learning_rate": 9.826489937796556e-10,
      "logits/chosen": -2.6343085765838623,
      "logits/rejected": -2.683014392852783,
      "logps/chosen": -527.5286254882812,
      "logps/rejected": -622.0753173828125,
      "loss": 0.5309,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -1.4900976419448853,
      "rewards/margins": 0.976272702217102,
      "rewards/rejected": -2.466370105743408,
      "step": 550
    },
    {
      "epoch": 0.9768861753161797,
      "grad_norm": 23.275500057845633,
      "learning_rate": 3.142782910077968e-10,
      "logits/chosen": -2.542066812515259,
      "logits/rejected": -2.601771831512451,
      "logps/chosen": -528.61328125,
      "logps/rejected": -610.769287109375,
      "loss": 0.5156,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.7321722507476807,
      "rewards/margins": 0.8784612417221069,
      "rewards/rejected": -2.610633373260498,
      "step": 560
    },
    {
      "epoch": 0.9943305713039686,
      "grad_norm": 18.734942592809794,
      "learning_rate": 1.674502037277703e-11,
      "logits/chosen": -2.5349037647247314,
      "logits/rejected": -2.5301239490509033,
      "logps/chosen": -525.6041259765625,
      "logps/rejected": -626.7215576171875,
      "loss": 0.5109,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -1.4237686395645142,
      "rewards/margins": 0.9176391363143921,
      "rewards/rejected": -2.3414077758789062,
      "step": 570
    },
    {
      "epoch": 0.9995638901003053,
      "step": 573,
      "total_flos": 0.0,
      "train_loss": 0.5544013454860002,
      "train_runtime": 8395.7281,
      "train_samples_per_second": 8.737,
      "train_steps_per_second": 0.068
    }
  ],
  "logging_steps": 10,
  "max_steps": 573,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}