{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 385,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1.282051282051282e-07,
      "logits/chosen": -1.7278180122375488,
      "logits/rejected": -1.7377450466156006,
      "logps/chosen": -29.553977966308594,
      "logps/rejected": -42.813133239746094,
      "loss": 0.5,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.03,
      "learning_rate": 1.282051282051282e-06,
      "logits/chosen": -1.8667322397232056,
      "logits/rejected": -1.8710299730300903,
      "logps/chosen": -36.98433303833008,
      "logps/rejected": -33.65640640258789,
      "loss": 0.4948,
      "rewards/accuracies": 0.5277777910232544,
      "rewards/chosen": 0.015585740096867085,
      "rewards/margins": 0.034019216895103455,
      "rewards/rejected": -0.018433474004268646,
      "step": 10
    },
    {
      "epoch": 0.05,
      "learning_rate": 2.564102564102564e-06,
      "logits/chosen": -1.9972655773162842,
      "logits/rejected": -1.9999065399169922,
      "logps/chosen": -29.6416072845459,
      "logps/rejected": -29.046981811523438,
      "loss": 0.5013,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.00041136163054034114,
      "rewards/margins": -0.011113673448562622,
      "rewards/rejected": 0.011525033973157406,
      "step": 20
    },
    {
      "epoch": 0.08,
      "learning_rate": 3.846153846153847e-06,
      "logits/chosen": -1.9205191135406494,
      "logits/rejected": -1.9178133010864258,
      "logps/chosen": -31.385412216186523,
      "logps/rejected": -33.23365020751953,
      "loss": 0.4942,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.02147640287876129,
      "rewards/margins": 0.03211076930165291,
      "rewards/rejected": -0.010634368285536766,
      "step": 30
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.999896948438434e-06,
      "logits/chosen": -2.017784833908081,
      "logits/rejected": -2.009059429168701,
      "logps/chosen": -32.547088623046875,
      "logps/rejected": -32.473243713378906,
      "loss": 0.5024,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": 0.020800117403268814,
      "rewards/margins": -0.006797379348427057,
      "rewards/rejected": 0.027597496286034584,
      "step": 40
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.987541037542187e-06,
      "logits/chosen": -1.863996148109436,
      "logits/rejected": -1.8532311916351318,
      "logps/chosen": -33.490657806396484,
      "logps/rejected": -35.415367126464844,
      "loss": 0.4957,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.046776313334703445,
      "rewards/margins": 0.02089320495724678,
      "rewards/rejected": 0.025883108377456665,
      "step": 50
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.954691471941119e-06,
      "logits/chosen": -1.9439821243286133,
      "logits/rejected": -1.9459091424942017,
      "logps/chosen": -32.50756072998047,
      "logps/rejected": -33.15720748901367,
      "loss": 0.489,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.06483479589223862,
      "rewards/margins": 0.05491489917039871,
      "rewards/rejected": 0.009919902309775352,
      "step": 60
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.901618883413549e-06,
      "logits/chosen": -2.075462579727173,
      "logits/rejected": -2.0804481506347656,
      "logps/chosen": -33.86588668823242,
      "logps/rejected": -36.501365661621094,
      "loss": 0.491,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.08581922948360443,
      "rewards/margins": 0.038563162088394165,
      "rewards/rejected": 0.04725607484579086,
      "step": 70
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.828760511501322e-06,
      "logits/chosen": -1.9374949932098389,
      "logits/rejected": -1.9405946731567383,
      "logps/chosen": -34.200557708740234,
      "logps/rejected": -34.543209075927734,
      "loss": 0.4719,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.15907494723796844,
      "rewards/margins": 0.1246359720826149,
      "rewards/rejected": 0.03443896025419235,
      "step": 80
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.7367166013034295e-06,
      "logits/chosen": -1.9453551769256592,
      "logits/rejected": -1.9498746395111084,
      "logps/chosen": -32.2825927734375,
      "logps/rejected": -32.25676727294922,
      "loss": 0.4858,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.12937995791435242,
      "rewards/margins": 0.05788033455610275,
      "rewards/rejected": 0.07149962335824966,
      "step": 90
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.626245458345211e-06,
      "logits/chosen": -2.04329776763916,
      "logits/rejected": -2.0412919521331787,
      "logps/chosen": -32.015525817871094,
      "logps/rejected": -31.169103622436523,
      "loss": 0.4799,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.15771625936031342,
      "rewards/margins": 0.08974156528711319,
      "rewards/rejected": 0.06797470152378082,
      "step": 100
    },
    {
      "epoch": 0.26,
      "eval_logits/chosen": -2.238145351409912,
      "eval_logits/rejected": -2.2333030700683594,
      "eval_logps/chosen": -33.86069107055664,
      "eval_logps/rejected": -37.356990814208984,
      "eval_loss": 0.49784979224205017,
      "eval_rewards/accuracies": 0.5340532064437866,
      "eval_rewards/chosen": 0.12170296907424927,
      "eval_rewards/margins": 0.009964452125132084,
      "eval_rewards/rejected": 0.11173851788043976,
      "eval_runtime": 145.9039,
      "eval_samples_per_second": 2.351,
      "eval_steps_per_second": 0.295,
      "step": 100
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.498257201263691e-06,
      "logits/chosen": -1.9996016025543213,
      "logits/rejected": -1.9972642660140991,
      "logps/chosen": -32.95330810546875,
      "logps/rejected": -33.88172912597656,
      "loss": 0.4742,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.20335713028907776,
      "rewards/margins": 0.10000836849212646,
      "rewards/rejected": 0.10334876924753189,
      "step": 110
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.353806263777678e-06,
      "logits/chosen": -2.0096044540405273,
      "logits/rejected": -2.0012660026550293,
      "logps/chosen": -32.153419494628906,
      "logps/rejected": -31.961782455444336,
      "loss": 0.4848,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.20403961837291718,
      "rewards/margins": 0.06423385441303253,
      "rewards/rejected": 0.13980577886104584,
      "step": 120
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.1940827077152755e-06,
      "logits/chosen": -2.038412570953369,
      "logits/rejected": -2.0304617881774902,
      "logps/chosen": -30.165945053100586,
      "logps/rejected": -31.880346298217773,
      "loss": 0.4787,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.21683910489082336,
      "rewards/margins": 0.0959777683019638,
      "rewards/rejected": 0.12086131423711777,
      "step": 130
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.0204024186666215e-06,
      "logits/chosen": -1.9683482646942139,
      "logits/rejected": -1.9784959554672241,
      "logps/chosen": -31.041671752929688,
      "logps/rejected": -32.388893127441406,
      "loss": 0.4621,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.264592707157135,
      "rewards/margins": 0.16240297257900238,
      "rewards/rejected": 0.10218973457813263,
      "step": 140
    },
    {
      "epoch": 0.39,
      "learning_rate": 3.834196265035119e-06,
      "logits/chosen": -1.8799880743026733,
      "logits/rejected": -1.8811382055282593,
      "logps/chosen": -33.686256408691406,
      "logps/rejected": -34.564491271972656,
      "loss": 0.4503,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.3545047640800476,
      "rewards/margins": 0.21957096457481384,
      "rewards/rejected": 0.134933739900589,
      "step": 150
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.636998309800573e-06,
      "logits/chosen": -1.931627869606018,
      "logits/rejected": -1.9282808303833008,
      "logps/chosen": -35.77287292480469,
      "logps/rejected": -32.49742889404297,
      "loss": 0.4707,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.27916350960731506,
      "rewards/margins": 0.12143567949533463,
      "rewards/rejected": 0.15772780776023865,
      "step": 160
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.4304331721118078e-06,
      "logits/chosen": -2.0332443714141846,
      "logits/rejected": -2.0259475708007812,
      "logps/chosen": -33.251670837402344,
      "logps/rejected": -31.17826271057129,
      "loss": 0.439,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.382335364818573,
      "rewards/margins": 0.2608257234096527,
      "rewards/rejected": 0.1215096265077591,
      "step": 170
    },
    {
      "epoch": 0.47,
      "learning_rate": 3.2162026428305436e-06,
      "logits/chosen": -2.040196657180786,
      "logits/rejected": -2.045389413833618,
      "logps/chosen": -31.95781898498535,
      "logps/rejected": -32.18687057495117,
      "loss": 0.4516,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.4108788073062897,
      "rewards/margins": 0.20208871364593506,
      "rewards/rejected": 0.20879006385803223,
      "step": 180
    },
    {
      "epoch": 0.49,
      "learning_rate": 2.996071664294641e-06,
      "logits/chosen": -2.040775775909424,
      "logits/rejected": -2.038043737411499,
      "logps/chosen": -31.056228637695312,
      "logps/rejected": -31.10147476196289,
      "loss": 0.4647,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.30290743708610535,
      "rewards/margins": 0.1511872559785843,
      "rewards/rejected": 0.15172019600868225,
      "step": 190
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.7718537898066833e-06,
      "logits/chosen": -1.9112863540649414,
      "logits/rejected": -1.9159523248672485,
      "logps/chosen": -31.08163070678711,
      "logps/rejected": -32.598487854003906,
      "loss": 0.4453,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.3672560155391693,
      "rewards/margins": 0.2325470894575119,
      "rewards/rejected": 0.1347089260816574,
      "step": 200
    },
    {
      "epoch": 0.52,
      "eval_logits/chosen": -2.234708309173584,
      "eval_logits/rejected": -2.229902505874634,
      "eval_logps/chosen": -33.76850891113281,
      "eval_logps/rejected": -37.29365921020508,
      "eval_loss": 0.49275800585746765,
      "eval_rewards/accuracies": 0.5369601845741272,
      "eval_rewards/chosen": 0.18622951209545135,
      "eval_rewards/margins": 0.030155813321471214,
      "eval_rewards/rejected": 0.1560736745595932,
      "eval_runtime": 145.5287,
      "eval_samples_per_second": 2.357,
      "eval_steps_per_second": 0.295,
      "step": 200
    },
    {
      "epoch": 0.55,
      "learning_rate": 2.5453962426402006e-06,
      "logits/chosen": -2.022939443588257,
      "logits/rejected": -2.033546209335327,
      "logps/chosen": -31.5457706451416,
      "logps/rejected": -33.67595672607422,
      "loss": 0.4628,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.2978750169277191,
      "rewards/margins": 0.16715146601200104,
      "rewards/rejected": 0.13072356581687927,
      "step": 210
    },
    {
      "epoch": 0.57,
      "learning_rate": 2.3185646976551794e-06,
      "logits/chosen": -1.9158331155776978,
      "logits/rejected": -1.9305419921875,
      "logps/chosen": -29.57406997680664,
      "logps/rejected": -31.42013168334961,
      "loss": 0.4363,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.37622833251953125,
      "rewards/margins": 0.2795966863632202,
      "rewards/rejected": 0.09663165360689163,
      "step": 220
    },
    {
      "epoch": 0.6,
      "learning_rate": 2.0932279108998323e-06,
      "logits/chosen": -1.9733517169952393,
      "logits/rejected": -1.9773433208465576,
      "logps/chosen": -32.83649444580078,
      "logps/rejected": -31.40326499938965,
      "loss": 0.4311,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.40668267011642456,
      "rewards/margins": 0.304456502199173,
      "rewards/rejected": 0.10222617536783218,
      "step": 230
    },
    {
      "epoch": 0.62,
      "learning_rate": 1.8712423238279358e-06,
      "logits/chosen": -1.9713729619979858,
      "logits/rejected": -1.9496545791625977,
      "logps/chosen": -33.5602912902832,
      "logps/rejected": -34.85026550292969,
      "loss": 0.4227,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 0.41541916131973267,
      "rewards/margins": 0.3450462222099304,
      "rewards/rejected": 0.07037289440631866,
      "step": 240
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.6544367689701824e-06,
      "logits/chosen": -2.012058734893799,
      "logits/rejected": -2.0087602138519287,
      "logps/chosen": -32.46921157836914,
      "logps/rejected": -36.012245178222656,
      "loss": 0.4613,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.31910061836242676,
      "rewards/margins": 0.17022350430488586,
      "rewards/rejected": 0.1488770991563797,
      "step": 250
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.4445974030621963e-06,
      "logits/chosen": -1.878584861755371,
      "logits/rejected": -1.876153588294983,
      "logps/chosen": -33.71690368652344,
      "logps/rejected": -35.26210403442383,
      "loss": 0.4631,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.33004888892173767,
      "rewards/margins": 0.1602855622768402,
      "rewards/rejected": 0.16976332664489746,
      "step": 260
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.243452991757889e-06,
      "logits/chosen": -1.863705039024353,
      "logits/rejected": -1.8611875772476196,
      "logps/chosen": -33.94118881225586,
      "logps/rejected": -31.579448699951172,
      "loss": 0.4582,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.3194878101348877,
      "rewards/margins": 0.18517015874385834,
      "rewards/rejected": 0.13431766629219055,
      "step": 270
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.0526606671603523e-06,
      "logits/chosen": -1.96773362159729,
      "logits/rejected": -1.957313895225525,
      "logps/chosen": -34.72475814819336,
      "logps/rejected": -31.634963989257812,
      "loss": 0.4297,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 0.4423200190067291,
      "rewards/margins": 0.298100084066391,
      "rewards/rejected": 0.14421990513801575,
      "step": 280
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.737922755071455e-07,
      "logits/chosen": -2.0632526874542236,
      "logits/rejected": -2.0484511852264404,
      "logps/chosen": -30.412399291992188,
      "logps/rejected": -32.339237213134766,
      "loss": 0.4682,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.3582096993923187,
      "rewards/margins": 0.13939058780670166,
      "rewards/rejected": 0.21881911158561707,
      "step": 290
    },
    {
      "epoch": 0.78,
      "learning_rate": 7.08321427484816e-07,
      "logits/chosen": -1.9345197677612305,
      "logits/rejected": -1.9320701360702515,
      "logps/chosen": -32.11776351928711,
      "logps/rejected": -30.692516326904297,
      "loss": 0.3947,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.5665432810783386,
      "rewards/margins": 0.4795381426811218,
      "rewards/rejected": 0.08700509369373322,
      "step": 300
    },
    {
      "epoch": 0.78,
      "eval_logits/chosen": -2.2321932315826416,
      "eval_logits/rejected": -2.227381944656372,
      "eval_logps/chosen": -33.75507736206055,
      "eval_logps/rejected": -37.28935623168945,
      "eval_loss": 0.49099090695381165,
      "eval_rewards/accuracies": 0.5564784407615662,
      "eval_rewards/chosen": 0.1956319361925125,
      "eval_rewards/margins": 0.03654861077666283,
      "eval_rewards/rejected": 0.15908333659172058,
      "eval_runtime": 145.678,
      "eval_samples_per_second": 2.355,
      "eval_steps_per_second": 0.295,
      "step": 300
    },
    {
      "epoch": 0.81,
      "learning_rate": 5.576113578589035e-07,
      "logits/chosen": -1.918630838394165,
      "logits/rejected": -1.9154895544052124,
      "logps/chosen": -31.033864974975586,
      "logps/rejected": -33.57470703125,
      "loss": 0.4357,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.3986373543739319,
      "rewards/margins": 0.28142648935317993,
      "rewards/rejected": 0.11721093952655792,
      "step": 310
    },
    {
      "epoch": 0.83,
      "learning_rate": 4.229036944380913e-07,
      "logits/chosen": -1.9683278799057007,
      "logits/rejected": -1.9561859369277954,
      "logps/chosen": -34.02568817138672,
      "logps/rejected": -33.45148849487305,
      "loss": 0.4237,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.39926695823669434,
      "rewards/margins": 0.3381231129169464,
      "rewards/rejected": 0.06114383786916733,
      "step": 320
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.053082288996112e-07,
      "logits/chosen": -2.0041966438293457,
      "logits/rejected": -2.0028293132781982,
      "logps/chosen": -32.882896423339844,
      "logps/rejected": -32.24889373779297,
      "loss": 0.4411,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.4220260679721832,
      "rewards/margins": 0.26054397225379944,
      "rewards/rejected": 0.16148212552070618,
      "step": 330
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.0579377374915805e-07,
      "logits/chosen": -2.0908923149108887,
      "logits/rejected": -2.0752511024475098,
      "logps/chosen": -33.49097442626953,
      "logps/rejected": -32.81463623046875,
      "loss": 0.4381,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 0.4818909764289856,
      "rewards/margins": 0.26951199769973755,
      "rewards/rejected": 0.21237893402576447,
      "step": 340
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.2518018074041684e-07,
      "logits/chosen": -1.9633783102035522,
      "logits/rejected": -1.9625694751739502,
      "logps/chosen": -32.55985641479492,
      "logps/rejected": -32.24340057373047,
      "loss": 0.4211,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.5055657029151917,
      "rewards/margins": 0.3503406047821045,
      "rewards/rejected": 0.15522508323192596,
      "step": 350
    },
    {
      "epoch": 0.94,
      "learning_rate": 6.41315865106129e-08,
      "logits/chosen": -1.9187549352645874,
      "logits/rejected": -1.9290361404418945,
      "logps/chosen": -31.61210060119629,
      "logps/rejected": -35.0199089050293,
      "loss": 0.4489,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.41481319069862366,
      "rewards/margins": 0.22661848366260529,
      "rewards/rejected": 0.18819470703601837,
      "step": 360
    },
    {
      "epoch": 0.96,
      "learning_rate": 2.3150941078050325e-08,
      "logits/chosen": -2.0583438873291016,
      "logits/rejected": -2.051846742630005,
      "logps/chosen": -33.05150604248047,
      "logps/rejected": -28.9710750579834,
      "loss": 0.4429,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 0.42032259702682495,
      "rewards/margins": 0.2415778934955597,
      "rewards/rejected": 0.17874471843242645,
      "step": 370
    },
    {
      "epoch": 0.99,
      "learning_rate": 2.575864278703266e-09,
      "logits/chosen": -1.9180545806884766,
      "logits/rejected": -1.92022705078125,
      "logps/chosen": -33.62676239013672,
      "logps/rejected": -30.719894409179688,
      "loss": 0.4281,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.4286006987094879,
      "rewards/margins": 0.3216874301433563,
      "rewards/rejected": 0.10691330581903458,
      "step": 380
    },
    {
      "epoch": 1.0,
      "step": 385,
      "total_flos": 0.0,
      "train_loss": 0.45851393303313814,
      "train_runtime": 3252.0771,
      "train_samples_per_second": 0.947,
      "train_steps_per_second": 0.118
    }
  ],
  "logging_steps": 10,
  "max_steps": 385,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}