{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 385,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1.282051282051282e-08,
      "logits/chosen": -1.7278180122375488,
      "logits/rejected": -1.7377450466156006,
      "logps/chosen": -29.553977966308594,
      "logps/rejected": -42.813133239746094,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.03,
      "learning_rate": 1.2820512820512818e-07,
      "logits/chosen": -1.8660534620285034,
      "logits/rejected": -1.870363712310791,
      "logps/chosen": -37.0097770690918,
      "logps/rejected": -33.656063079833984,
      "loss": 0.6866,
      "rewards/accuracies": 0.4861111044883728,
      "rewards/chosen": -0.0025405611377209425,
      "rewards/margins": 0.018248552456498146,
      "rewards/rejected": -0.020789114758372307,
      "step": 10
    },
    {
      "epoch": 0.05,
      "learning_rate": 2.5641025641025636e-07,
      "logits/chosen": -1.9975675344467163,
      "logits/rejected": -2.0001988410949707,
      "logps/chosen": -29.643625259399414,
      "logps/rejected": -29.03875732421875,
      "loss": 0.7064,
      "rewards/accuracies": 0.4000000059604645,
      "rewards/chosen": -0.0011415861081331968,
      "rewards/margins": -0.02089018002152443,
      "rewards/rejected": 0.019748592749238014,
      "step": 20
    },
    {
      "epoch": 0.08,
      "learning_rate": 3.8461538461538463e-07,
      "logits/chosen": -1.919878363609314,
      "logits/rejected": -1.9171901941299438,
      "logps/chosen": -31.411758422851562,
      "logps/rejected": -33.21895980834961,
      "loss": 0.6945,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": 0.0034693297930061817,
      "rewards/margins": 0.003873053938150406,
      "rewards/rejected": -0.0004037246108055115,
      "step": 30
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.999896948438433e-07,
      "logits/chosen": -2.0166752338409424,
      "logits/rejected": -2.007917881011963,
      "logps/chosen": -32.59661102294922,
      "logps/rejected": -32.506187438964844,
      "loss": 0.7064,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.01584548130631447,
      "rewards/margins": -0.02103354223072529,
      "rewards/rejected": 0.005188059527426958,
      "step": 40
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.987541037542186e-07,
      "logits/chosen": -1.8648042678833008,
      "logits/rejected": -1.8540267944335938,
      "logps/chosen": -33.56422805786133,
      "logps/rejected": -35.431602478027344,
      "loss": 0.7067,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.005400418769568205,
      "rewards/margins": -0.02199491299688816,
      "rewards/rejected": 0.01659449376165867,
      "step": 50
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.954691471941118e-07,
      "logits/chosen": -1.9453144073486328,
      "logits/rejected": -1.9472593069076538,
      "logps/chosen": -32.59089279174805,
      "logps/rejected": -33.192405700683594,
      "loss": 0.6844,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 0.007432769052684307,
      "rewards/margins": 0.024254899471998215,
      "rewards/rejected": -0.016822131350636482,
      "step": 60
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.901618883413548e-07,
      "logits/chosen": -2.079728364944458,
      "logits/rejected": -2.0847129821777344,
      "logps/chosen": -33.983558654785156,
      "logps/rejected": -36.584938049316406,
      "loss": 0.688,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.003943340387195349,
      "rewards/margins": 0.016796352341771126,
      "rewards/rejected": -0.01285301148891449,
      "step": 70
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.828760511501322e-07,
      "logits/chosen": -1.9422305822372437,
      "logits/rejected": -1.9453967809677124,
      "logps/chosen": -34.4206657409668,
      "logps/rejected": -34.566123962402344,
      "loss": 0.7053,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 0.005717824678868055,
      "rewards/margins": -0.015306837856769562,
      "rewards/rejected": 0.021024659276008606,
      "step": 80
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.736716601303429e-07,
      "logits/chosen": -1.950453519821167,
      "logits/rejected": -1.9549684524536133,
      "logps/chosen": -32.457984924316406,
      "logps/rejected": -32.36473083496094,
      "loss": 0.6897,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.007545561995357275,
      "rewards/margins": 0.012203911319375038,
      "rewards/rejected": -0.004658351186662912,
      "step": 90
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.62624545834521e-07,
      "logits/chosen": -2.048896312713623,
      "logits/rejected": -2.0468926429748535,
      "logps/chosen": -32.23190689086914,
      "logps/rejected": -31.26666831970215,
      "loss": 0.6927,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.007141563110053539,
      "rewards/margins": 0.007507171481847763,
      "rewards/rejected": -0.0003656085464172065,
      "step": 100
    },
    {
      "epoch": 0.26,
      "eval_logits/chosen": -2.2437734603881836,
      "eval_logits/rejected": -2.2388932704925537,
      "eval_logps/chosen": -34.01585006713867,
      "eval_logps/rejected": -37.50562286376953,
      "eval_loss": 0.6927134394645691,
      "eval_rewards/accuracies": 0.5186877250671387,
      "eval_rewards/chosen": 0.014961617067456245,
      "eval_rewards/margins": 0.006164299789816141,
      "eval_rewards/rejected": 0.008797316811978817,
      "eval_runtime": 146.2315,
      "eval_samples_per_second": 2.346,
      "eval_steps_per_second": 0.294,
      "step": 100
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.4982572012636904e-07,
      "logits/chosen": -2.005382537841797,
      "logits/rejected": -2.002962589263916,
      "logps/chosen": -33.231327056884766,
      "logps/rejected": -34.03110885620117,
      "loss": 0.6912,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.009991822764277458,
      "rewards/margins": 0.01138134766370058,
      "rewards/rejected": -0.0013895228039473295,
      "step": 110
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.353806263777677e-07,
      "logits/chosen": -2.01668643951416,
      "logits/rejected": -2.0083160400390625,
      "logps/chosen": -32.46741485595703,
      "logps/rejected": -32.169979095458984,
      "loss": 0.7019,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.018008466809988022,
      "rewards/margins": -0.011230994947254658,
      "rewards/rejected": -0.006777471862733364,
      "step": 120
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.194082707715275e-07,
      "logits/chosen": -2.046079158782959,
      "logits/rejected": -2.038038969039917,
      "logps/chosen": -30.489391326904297,
      "logps/rejected": -32.04558563232422,
      "loss": 0.7042,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.010940661653876305,
      "rewards/margins": -0.01687880977988243,
      "rewards/rejected": 0.005938149057328701,
      "step": 130
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.020402418666621e-07,
      "logits/chosen": -1.9770376682281494,
      "logits/rejected": -1.9873006343841553,
      "logps/chosen": -31.392288208007812,
      "logps/rejected": -32.56155014038086,
      "loss": 0.6749,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.021898187696933746,
      "rewards/margins": 0.043236203491687775,
      "rewards/rejected": -0.02133801393210888,
      "step": 140
    },
    {
      "epoch": 0.39,
      "learning_rate": 3.8341962650351185e-07,
      "logits/chosen": -1.8905258178710938,
      "logits/rejected": -1.891603708267212,
      "logps/chosen": -34.185401916503906,
      "logps/rejected": -34.75876998901367,
      "loss": 0.6933,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": 0.005832195747643709,
      "rewards/margins": 0.007043843157589436,
      "rewards/rejected": -0.0012116472935304046,
      "step": 150
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.636998309800572e-07,
      "logits/chosen": -1.942525863647461,
      "logits/rejected": -1.9390523433685303,
      "logps/chosen": -36.15251922607422,
      "logps/rejected": -32.721561431884766,
      "loss": 0.6891,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.01532670110464096,
      "rewards/margins": 0.014371859841048717,
      "rewards/rejected": 0.0009548395755700767,
      "step": 160
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.430433172111807e-07,
      "logits/chosen": -2.042137622833252,
      "logits/rejected": -2.0347514152526855,
      "logps/chosen": -33.78362274169922,
      "logps/rejected": -31.34653091430664,
      "loss": 0.6922,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.011391163803637028,
      "rewards/margins": 0.007135935127735138,
      "rewards/rejected": 0.004255227744579315,
      "step": 170
    },
    {
      "epoch": 0.47,
      "learning_rate": 3.216202642830543e-07,
      "logits/chosen": -2.047314405441284,
      "logits/rejected": -2.052593231201172,
      "logps/chosen": -32.52313995361328,
      "logps/rejected": -32.499427795410156,
      "loss": 0.6811,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.01731465384364128,
      "rewards/margins": 0.028742337599396706,
      "rewards/rejected": -0.011427680030465126,
      "step": 180
    },
    {
      "epoch": 0.49,
      "learning_rate": 2.9960716642946403e-07,
      "logits/chosen": -2.048703908920288,
      "logits/rejected": -2.0459342002868652,
      "logps/chosen": -31.495092391967773,
      "logps/rejected": -31.32244300842285,
      "loss": 0.6967,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.004913434386253357,
      "rewards/margins": -0.0015291940653696656,
      "rewards/rejected": -0.0033842422999441624,
      "step": 190
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.771853789806683e-07,
      "logits/chosen": -1.9186630249023438,
      "logits/rejected": -1.9233324527740479,
      "logps/chosen": -31.604812622070312,
      "logps/rejected": -32.791358947753906,
      "loss": 0.6949,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": 0.0011774369049817324,
      "rewards/margins": 0.0015200242633000016,
      "rewards/rejected": -0.00034258683444932103,
      "step": 200
    },
    {
      "epoch": 0.52,
      "eval_logits/chosen": -2.2436025142669678,
      "eval_logits/rejected": -2.2387301921844482,
      "eval_logps/chosen": -34.01983642578125,
      "eval_logps/rejected": -37.50978469848633,
      "eval_loss": 0.6928001642227173,
      "eval_rewards/accuracies": 0.5274086594581604,
      "eval_rewards/chosen": 0.011776229366660118,
      "eval_rewards/margins": 0.006309796124696732,
      "eval_rewards/rejected": 0.0054664346389472485,
      "eval_runtime": 145.9171,
      "eval_samples_per_second": 2.351,
      "eval_steps_per_second": 0.295,
      "step": 200
    },
    {
      "epoch": 0.55,
      "learning_rate": 2.5453962426402e-07,
      "logits/chosen": -2.0316667556762695,
      "logits/rejected": -2.0423641204833984,
      "logps/chosen": -31.9429874420166,
      "logps/rejected": -33.863956451416016,
      "loss": 0.6846,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.022652912884950638,
      "rewards/margins": 0.023658016696572304,
      "rewards/rejected": -0.0010051012504845858,
      "step": 210
    },
    {
      "epoch": 0.57,
      "learning_rate": 2.318564697655179e-07,
      "logits/chosen": -1.9254566431045532,
      "logits/rejected": -1.9402967691421509,
      "logps/chosen": -30.087352752685547,
      "logps/rejected": -31.55807113647461,
      "loss": 0.6858,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.019350551068782806,
      "rewards/margins": 0.019265536218881607,
      "rewards/rejected": 8.501634147251025e-05,
      "step": 220
    },
    {
      "epoch": 0.6,
      "learning_rate": 2.093227910899832e-07,
      "logits/chosen": -1.9833059310913086,
      "logits/rejected": -1.9872699975967407,
      "logps/chosen": -33.40017318725586,
      "logps/rejected": -31.5333309173584,
      "loss": 0.6963,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 0.013836635276675224,
      "rewards/margins": 0.0010608620941638947,
      "rewards/rejected": 0.012775774113833904,
      "step": 230
    },
    {
      "epoch": 0.62,
      "learning_rate": 1.8712423238279356e-07,
      "logits/chosen": -1.983289122581482,
      "logits/rejected": -1.9613111019134521,
      "logps/chosen": -34.167720794677734,
      "logps/rejected": -34.96076965332031,
      "loss": 0.6978,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.011179247871041298,
      "rewards/margins": -0.0031959381885826588,
      "rewards/rejected": -0.007983307354152203,
      "step": 240
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.654436768970182e-07,
      "logits/chosen": -2.0248379707336426,
      "logits/rejected": -2.0215275287628174,
      "logps/chosen": -32.91853713989258,
      "logps/rejected": -36.22992706298828,
      "loss": 0.6918,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.005225626286119223,
      "rewards/margins": 0.009226549416780472,
      "rewards/rejected": -0.004000924527645111,
      "step": 250
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.444597403062196e-07,
      "logits/chosen": -1.8912242650985718,
      "logits/rejected": -1.8887859582901,
      "logps/chosen": -34.17466735839844,
      "logps/rejected": -35.498863220214844,
      "loss": 0.6925,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.01098463125526905,
      "rewards/margins": 0.006378169171512127,
      "rewards/rejected": 0.004606460686773062,
      "step": 260
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.2434529917578887e-07,
      "logits/chosen": -1.8761816024780273,
      "logits/rejected": -1.8736416101455688,
      "logps/chosen": -34.40642166137695,
      "logps/rejected": -31.732311248779297,
      "loss": 0.7147,
      "rewards/accuracies": 0.36250001192092896,
      "rewards/chosen": -0.007056808564811945,
      "rewards/margins": -0.03827430680394173,
      "rewards/rejected": 0.03121749497950077,
      "step": 270
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.0526606671603521e-07,
      "logits/chosen": -1.9794687032699585,
      "logits/rejected": -1.9688432216644287,
      "logps/chosen": -35.32493591308594,
      "logps/rejected": -31.86197853088379,
      "loss": 0.6757,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.025365391746163368,
      "rewards/margins": 0.04215434193611145,
      "rewards/rejected": -0.016788948327302933,
      "step": 280
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.737922755071453e-08,
      "logits/chosen": -2.0760245323181152,
      "logits/rejected": -2.0610060691833496,
      "logps/chosen": -30.925411224365234,
      "logps/rejected": -32.648170471191406,
      "loss": 0.6978,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.0010284179588779807,
      "rewards/margins": -0.003961210139095783,
      "rewards/rejected": 0.0029327913653105497,
      "step": 290
    },
    {
      "epoch": 0.78,
      "learning_rate": 7.08321427484816e-08,
      "logits/chosen": -1.946812629699707,
      "logits/rejected": -1.9442851543426514,
      "logps/chosen": -32.90645217895508,
      "logps/rejected": -30.836864471435547,
      "loss": 0.6798,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.016527386382222176,
      "rewards/margins": 0.032571423798799515,
      "rewards/rejected": -0.016044041141867638,
      "step": 300
    },
    {
      "epoch": 0.78,
      "eval_logits/chosen": -2.243879556655884,
      "eval_logits/rejected": -2.239006996154785,
      "eval_logps/chosen": -34.026798248291016,
      "eval_logps/rejected": -37.50396728515625,
      "eval_loss": 0.6981266140937805,
      "eval_rewards/accuracies": 0.49833887815475464,
      "eval_rewards/chosen": 0.006204271223396063,
      "eval_rewards/margins": -0.003920448012650013,
      "eval_rewards/rejected": 0.010124719701707363,
      "eval_runtime": 145.9481,
      "eval_samples_per_second": 2.35,
      "eval_steps_per_second": 0.295,
      "step": 300
    },
    {
      "epoch": 0.81,
      "learning_rate": 5.576113578589034e-08,
      "logits/chosen": -1.9286400079727173,
      "logits/rejected": -1.92538583278656,
      "logps/chosen": -31.571802139282227,
      "logps/rejected": -33.7106819152832,
      "loss": 0.6961,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.025238346308469772,
      "rewards/margins": 5.8975441788788885e-05,
      "rewards/rejected": 0.025179371237754822,
      "step": 310
    },
    {
      "epoch": 0.83,
      "learning_rate": 4.229036944380912e-08,
      "logits/chosen": -1.9804341793060303,
      "logits/rejected": -1.9681293964385986,
      "logps/chosen": -34.57010269165039,
      "logps/rejected": -33.552330017089844,
      "loss": 0.6803,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.020780155435204506,
      "rewards/margins": 0.031573377549648285,
      "rewards/rejected": -0.01079322025179863,
      "step": 320
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.053082288996112e-08,
      "logits/chosen": -2.0159571170806885,
      "logits/rejected": -2.0144896507263184,
      "logps/chosen": -33.452049255371094,
      "logps/rejected": -32.46733856201172,
      "loss": 0.687,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.026994183659553528,
      "rewards/margins": 0.017200354486703873,
      "rewards/rejected": 0.009793824516236782,
      "step": 330
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.05793773749158e-08,
      "logits/chosen": -2.103339433670044,
      "logits/rejected": -2.0875279903411865,
      "logps/chosen": -34.17384719848633,
      "logps/rejected": -33.08417510986328,
      "loss": 0.7089,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 0.004437476396560669,
      "rewards/margins": -0.02264832705259323,
      "rewards/rejected": 0.02708580158650875,
      "step": 340
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.251801807404168e-08,
      "logits/chosen": -1.974784255027771,
      "logits/rejected": -1.9738404750823975,
      "logps/chosen": -33.24315643310547,
      "logps/rejected": -32.44605255126953,
      "loss": 0.6895,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 0.031151747331023216,
      "rewards/margins": 0.01587487757205963,
      "rewards/rejected": 0.015276871621608734,
      "step": 350
    },
    {
      "epoch": 0.94,
      "learning_rate": 6.41315865106129e-09,
      "logits/chosen": -1.9305660724639893,
      "logits/rejected": -1.940930724143982,
      "logps/chosen": -32.193580627441406,
      "logps/rejected": -35.30970764160156,
      "loss": 0.6828,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.008886909112334251,
      "rewards/margins": 0.025648051872849464,
      "rewards/rejected": -0.016761142760515213,
      "step": 360
    },
    {
      "epoch": 0.96,
      "learning_rate": 2.3150941078050324e-09,
      "logits/chosen": -2.069566249847412,
      "logits/rejected": -2.0630078315734863,
      "logps/chosen": -33.643856048583984,
      "logps/rejected": -29.229293823242188,
      "loss": 0.6922,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.00649127596989274,
      "rewards/margins": 0.008784732781350613,
      "rewards/rejected": -0.0022934596054255962,
      "step": 370
    },
    {
      "epoch": 0.99,
      "learning_rate": 2.575864278703266e-10,
      "logits/chosen": -1.929059386253357,
      "logits/rejected": -1.931227445602417,
      "logps/chosen": -34.235416412353516,
      "logps/rejected": -30.882593154907227,
      "loss": 0.6913,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.0029094829224050045,
      "rewards/margins": 0.010880164802074432,
      "rewards/rejected": -0.00797068141400814,
      "step": 380
    },
    {
      "epoch": 1.0,
      "step": 385,
      "total_flos": 0.0,
      "train_loss": 0.6928803004227675,
      "train_runtime": 3255.7403,
      "train_samples_per_second": 0.946,
      "train_steps_per_second": 0.118
    }
  ],
  "logging_steps": 10,
  "max_steps": 385,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}