{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 385,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1.282051282051282e-08,
      "logits/chosen": -1.7278180122375488,
      "logits/rejected": -1.7377450466156006,
      "logps/chosen": -29.553977966308594,
      "logps/rejected": -42.813133239746094,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.03,
      "learning_rate": 1.2820512820512818e-07,
      "logits/chosen": -1.865976333618164,
      "logits/rejected": -1.8702969551086426,
      "logps/chosen": -36.97197723388672,
      "logps/rejected": -33.66468811035156,
      "loss": 0.6739,
      "rewards/accuracies": 0.6388888955116272,
      "rewards/chosen": 0.020770318806171417,
      "rewards/margins": 0.041539546102285385,
      "rewards/rejected": -0.020769227296113968,
      "step": 10
    },
    {
      "epoch": 0.05,
      "learning_rate": 2.5641025641025636e-07,
      "logits/chosen": -1.9976377487182617,
      "logits/rejected": -2.0002894401550293,
      "logps/chosen": -29.63763427734375,
      "logps/rejected": -29.050308227539062,
      "loss": 0.6974,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": 0.0027363565750420094,
      "rewards/margins": -0.005146201699972153,
      "rewards/rejected": 0.007882559671998024,
      "step": 20
    },
    {
      "epoch": 0.08,
      "learning_rate": 3.8461538461538463e-07,
      "logits/chosen": -1.9201209545135498,
      "logits/rejected": -1.917419195175171,
      "logps/chosen": -31.423137664794922,
      "logps/rejected": -33.23155975341797,
      "loss": 0.6936,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.004226746968924999,
      "rewards/margins": 0.0036326400004327297,
      "rewards/rejected": -0.007859388366341591,
      "step": 30
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.999896948438433e-07,
      "logits/chosen": -2.0166988372802734,
      "logits/rejected": -2.007939100265503,
      "logps/chosen": -32.58073806762695,
      "logps/rejected": -32.50107192993164,
      "loss": 0.6996,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.0023586261086165905,
      "rewards/margins": -0.009320550598204136,
      "rewards/rejected": 0.006961924023926258,
      "step": 40
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.987541037542186e-07,
      "logits/chosen": -1.8645102977752686,
      "logits/rejected": -1.8537269830703735,
      "logps/chosen": -33.561344146728516,
      "logps/rejected": -35.44156265258789,
      "loss": 0.6992,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.0023178723640739918,
      "rewards/margins": -0.00878816656768322,
      "rewards/rejected": 0.006470293737947941,
      "step": 50
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.954691471941118e-07,
      "logits/chosen": -1.9450910091400146,
      "logits/rejected": -1.947035551071167,
      "logps/chosen": -32.59477233886719,
      "logps/rejected": -33.191925048828125,
      "loss": 0.6868,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.003247396554797888,
      "rewards/margins": 0.015577316284179688,
      "rewards/rejected": -0.012329919263720512,
      "step": 60
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.901618883413548e-07,
      "logits/chosen": -2.0797348022460938,
      "logits/rejected": -2.0847277641296387,
      "logps/chosen": -34.00436782836914,
      "logps/rejected": -36.588375091552734,
      "loss": 0.6939,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.009527785703539848,
      "rewards/margins": 0.002171074505895376,
      "rewards/rejected": -0.011698859743773937,
      "step": 70
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.828760511501322e-07,
      "logits/chosen": -1.9422187805175781,
      "logits/rejected": -1.945369005203247,
      "logps/chosen": -34.411216735839844,
      "logps/rejected": -34.58810806274414,
      "loss": 0.6913,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": 0.009955625049769878,
      "rewards/margins": 0.007375092711299658,
      "rewards/rejected": 0.002580532105639577,
      "step": 80
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.736716601303429e-07,
      "logits/chosen": -1.9506874084472656,
      "logits/rejected": -1.9551897048950195,
      "logps/chosen": -32.461769104003906,
      "logps/rejected": -32.3617057800293,
      "loss": 0.6923,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.0033923503942787647,
      "rewards/margins": 0.005069470964372158,
      "rewards/rejected": -0.0016771212685853243,
      "step": 90
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.62624545834521e-07,
      "logits/chosen": -2.049078941345215,
      "logits/rejected": -2.0470805168151855,
      "logps/chosen": -32.23583984375,
      "logps/rejected": -31.266429901123047,
      "loss": 0.6933,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": 0.0029988770838826895,
      "rewards/margins": 0.0031290887854993343,
      "rewards/rejected": -0.0001302130549447611,
      "step": 100
    },
    {
      "epoch": 0.26,
      "eval_logits/chosen": -2.2439465522766113,
      "eval_logits/rejected": -2.239065408706665,
      "eval_logps/chosen": -34.00984573364258,
      "eval_logps/rejected": -37.50405502319336,
      "eval_loss": 0.6912049055099487,
      "eval_rewards/accuracies": 0.4987541437149048,
      "eval_rewards/chosen": 0.014824562706053257,
      "eval_rewards/margins": 0.007284608669579029,
      "eval_rewards/rejected": 0.007539951708167791,
      "eval_runtime": 146.2285,
      "eval_samples_per_second": 2.346,
      "eval_steps_per_second": 0.294,
      "step": 100
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.4982572012636904e-07,
      "logits/chosen": -2.005307912826538,
      "logits/rejected": -2.0029025077819824,
      "logps/chosen": -33.245155334472656,
      "logps/rejected": -34.01426696777344,
      "loss": 0.6998,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.000803128641564399,
      "rewards/margins": -0.009863952174782753,
      "rewards/rejected": 0.009060824289917946,
      "step": 110
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.353806263777677e-07,
      "logits/chosen": -2.0167527198791504,
      "logits/rejected": -2.008373737335205,
      "logps/chosen": -32.43388748168945,
      "logps/rejected": -32.184364318847656,
      "loss": 0.6844,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.006612470839172602,
      "rewards/margins": 0.020328599959611893,
      "rewards/rejected": -0.013716126792132854,
      "step": 120
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.194082707715275e-07,
      "logits/chosen": -2.0465807914733887,
      "logits/rejected": -2.03853178024292,
      "logps/chosen": -30.49270248413086,
      "logps/rejected": -32.03563690185547,
      "loss": 0.7048,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.01019179355353117,
      "rewards/margins": -0.020615343004465103,
      "rewards/rejected": 0.010423547588288784,
      "step": 130
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.020402418666621e-07,
      "logits/chosen": -1.9769595861434937,
      "logits/rejected": -1.98722243309021,
      "logps/chosen": -31.402965545654297,
      "logps/rejected": -32.55664825439453,
      "loss": 0.6836,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.010017503052949905,
      "rewards/margins": 0.02307732030749321,
      "rewards/rejected": -0.013059815391898155,
      "step": 140
    },
    {
      "epoch": 0.39,
      "learning_rate": 3.8341962650351185e-07,
      "logits/chosen": -1.8905092477798462,
      "logits/rejected": -1.8915998935699463,
      "logps/chosen": -34.19499588012695,
      "logps/rejected": -34.76214599609375,
      "loss": 0.6941,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.0013823460321873426,
      "rewards/margins": 0.0015530474483966827,
      "rewards/rejected": -0.00293539697304368,
      "step": 150
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.636998309800572e-07,
      "logits/chosen": -1.9429140090942383,
      "logits/rejected": -1.939447045326233,
      "logps/chosen": -36.15991973876953,
      "logps/rejected": -32.71558380126953,
      "loss": 0.6935,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.007058164570480585,
      "rewards/margins": 0.0027567949146032333,
      "rewards/rejected": 0.004301370121538639,
      "step": 160
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.430433172111807e-07,
      "logits/chosen": -2.042171001434326,
      "logits/rejected": -2.0347886085510254,
      "logps/chosen": -33.77357864379883,
      "logps/rejected": -31.36397361755371,
      "loss": 0.684,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.014572665095329285,
      "rewards/margins": 0.021845515817403793,
      "rewards/rejected": -0.007272848393768072,
      "step": 170
    },
    {
      "epoch": 0.47,
      "learning_rate": 3.216202642830543e-07,
      "logits/chosen": -2.0476887226104736,
      "logits/rejected": -2.05295729637146,
      "logps/chosen": -32.52228546142578,
      "logps/rejected": -32.505516052246094,
      "loss": 0.6821,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": 0.013503499329090118,
      "rewards/margins": 0.025730689987540245,
      "rewards/rejected": -0.012227190658450127,
      "step": 180
    },
    {
      "epoch": 0.49,
      "learning_rate": 2.9960716642946403e-07,
      "logits/chosen": -2.0486130714416504,
      "logits/rejected": -2.0458409786224365,
      "logps/chosen": -31.47336196899414,
      "logps/rejected": -31.337549209594727,
      "loss": 0.6856,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.009355323389172554,
      "rewards/margins": 0.020955625921487808,
      "rewards/rejected": -0.011600303463637829,
      "step": 190
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.771853789806683e-07,
      "logits/chosen": -1.9189808368682861,
      "logits/rejected": -1.9236522912979126,
      "logps/chosen": -31.58243179321289,
      "logps/rejected": -32.80672073364258,
      "loss": 0.6827,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.014310021884739399,
      "rewards/margins": 0.02378040924668312,
      "rewards/rejected": -0.009470385499298573,
      "step": 200
    },
    {
      "epoch": 0.52,
      "eval_logits/chosen": -2.243415355682373,
      "eval_logits/rejected": -2.238534688949585,
      "eval_logps/chosen": -34.02439880371094,
      "eval_logps/rejected": -37.504756927490234,
      "eval_loss": 0.6955486536026001,
      "eval_rewards/accuracies": 0.48131227493286133,
      "eval_rewards/chosen": 0.006092504132539034,
      "eval_rewards/margins": -0.0010250143241137266,
      "eval_rewards/rejected": 0.007117518223822117,
      "eval_runtime": 145.9398,
      "eval_samples_per_second": 2.35,
      "eval_steps_per_second": 0.295,
      "step": 200
    },
    {
      "epoch": 0.55,
      "learning_rate": 2.5453962426402e-07,
      "logits/chosen": -2.0316381454467773,
      "logits/rejected": -2.0423309803009033,
      "logps/chosen": -31.946680068969727,
      "logps/rejected": -33.87198257446289,
      "loss": 0.6848,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.014773803763091564,
      "rewards/margins": 0.02034572884440422,
      "rewards/rejected": -0.005571924149990082,
      "step": 210
    },
    {
      "epoch": 0.57,
      "learning_rate": 2.318564697655179e-07,
      "logits/chosen": -1.9256389141082764,
      "logits/rejected": -1.9404897689819336,
      "logps/chosen": -30.095138549804688,
      "logps/rejected": -31.5872802734375,
      "loss": 0.6813,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.009841076098382473,
      "rewards/margins": 0.027301892638206482,
      "rewards/rejected": -0.017460813745856285,
      "step": 220
    },
    {
      "epoch": 0.6,
      "learning_rate": 2.093227910899832e-07,
      "logits/chosen": -1.983253836631775,
      "logits/rejected": -1.9872106313705444,
      "logps/chosen": -33.377899169921875,
      "logps/rejected": -31.550067901611328,
      "loss": 0.6835,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.02374371513724327,
      "rewards/margins": 0.024203147739171982,
      "rewards/rejected": -0.00045942998258396983,
      "step": 230
    },
    {
      "epoch": 0.62,
      "learning_rate": 1.8712423238279356e-07,
      "logits/chosen": -1.9831098318099976,
      "logits/rejected": -1.9611488580703735,
      "logps/chosen": -34.15457534790039,
      "logps/rejected": -34.96303176879883,
      "loss": 0.6918,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.0004945084219798446,
      "rewards/margins": 0.006849849130958319,
      "rewards/rejected": -0.007344359997659922,
      "step": 240
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.654436768970182e-07,
      "logits/chosen": -2.0243515968322754,
      "logits/rejected": -2.021042585372925,
      "logps/chosen": -32.92682647705078,
      "logps/rejected": -36.226043701171875,
      "loss": 0.6951,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.001055869972333312,
      "rewards/margins": -0.00038459646748378873,
      "rewards/rejected": -0.000671274377964437,
      "step": 250
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.444597403062196e-07,
      "logits/chosen": -1.8913053274154663,
      "logits/rejected": -1.8888483047485352,
      "logps/chosen": -34.18505096435547,
      "logps/rejected": -35.491554260253906,
      "loss": 0.6986,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": 0.002010275376960635,
      "rewards/margins": -0.005831328686326742,
      "rewards/rejected": 0.007841606624424458,
      "step": 260
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.2434529917578887e-07,
      "logits/chosen": -1.8760089874267578,
      "logits/rejected": -1.8734655380249023,
      "logps/chosen": -34.394832611083984,
      "logps/rejected": -31.769023895263672,
      "loss": 0.6946,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.0016577050555497408,
      "rewards/margins": 0.00027335790218785405,
      "rewards/rejected": 0.001384348259307444,
      "step": 270
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.0526606671603521e-07,
      "logits/chosen": -1.9796526432037354,
      "logits/rejected": -1.9690182209014893,
      "logps/chosen": -35.34165954589844,
      "logps/rejected": -31.8492488861084,
      "loss": 0.6882,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 0.008989132940769196,
      "rewards/margins": 0.013942519202828407,
      "rewards/rejected": -0.004953385330736637,
      "step": 280
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.737922755071453e-08,
      "logits/chosen": -2.075491428375244,
      "logits/rejected": -2.060464859008789,
      "logps/chosen": -30.913305282592773,
      "logps/rejected": -32.65747833251953,
      "loss": 0.6898,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.006493172142654657,
      "rewards/margins": 0.009881803765892982,
      "rewards/rejected": -0.003388631623238325,
      "step": 290
    },
    {
      "epoch": 0.78,
      "learning_rate": 7.08321427484816e-08,
      "logits/chosen": -1.946946144104004,
      "logits/rejected": -1.9443925619125366,
      "logps/chosen": -32.91460037231445,
      "logps/rejected": -30.81583595275879,
      "loss": 0.6911,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 0.00750743318349123,
      "rewards/margins": 0.00692352931946516,
      "rewards/rejected": 0.0005839023506268859,
      "step": 300
    },
    {
      "epoch": 0.78,
      "eval_logits/chosen": -2.244297504425049,
      "eval_logits/rejected": -2.2394087314605713,
      "eval_logps/chosen": -34.02434539794922,
      "eval_logps/rejected": -37.515743255615234,
      "eval_loss": 0.6924626231193542,
      "eval_rewards/accuracies": 0.5456810593605042,
      "eval_rewards/chosen": 0.006125003099441528,
      "eval_rewards/margins": 0.00560029735788703,
      "eval_rewards/rejected": 0.0005247062654234469,
      "eval_runtime": 145.9317,
      "eval_samples_per_second": 2.35,
      "eval_steps_per_second": 0.295,
      "step": 300
    },
    {
      "epoch": 0.81,
      "learning_rate": 5.576113578589034e-08,
      "logits/chosen": -1.9294414520263672,
      "logits/rejected": -1.9261829853057861,
      "logps/chosen": -31.56203269958496,
      "logps/rejected": -33.72837448120117,
      "loss": 0.6871,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.02479010447859764,
      "rewards/margins": 0.01652434654533863,
      "rewards/rejected": 0.008265760727226734,
      "step": 310
    },
    {
      "epoch": 0.83,
      "learning_rate": 4.229036944380912e-08,
      "logits/chosen": -1.980583906173706,
      "logits/rejected": -1.9682567119598389,
      "logps/chosen": -34.58124542236328,
      "logps/rejected": -33.5312614440918,
      "loss": 0.6926,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 0.00889885425567627,
      "rewards/margins": 0.004354512318968773,
      "rewards/rejected": 0.004544342402368784,
      "step": 320
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.053082288996112e-08,
      "logits/chosen": -2.0161619186401367,
      "logits/rejected": -2.014702558517456,
      "logps/chosen": -33.476417541503906,
      "logps/rejected": -32.486053466796875,
      "loss": 0.6904,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.005623627454042435,
      "rewards/margins": 0.009506640955805779,
      "rewards/rejected": -0.0038830172270536423,
      "step": 330
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.05793773749158e-08,
      "logits/chosen": -2.103116512298584,
      "logits/rejected": -2.087329149246216,
      "logps/chosen": -34.173370361328125,
      "logps/rejected": -33.09913635253906,
      "loss": 0.699,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 0.0036146938800811768,
      "rewards/margins": -0.007723378483206034,
      "rewards/rejected": 0.011338073760271072,
      "step": 340
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.251801807404168e-08,
      "logits/chosen": -1.9745566844940186,
      "logits/rejected": -1.9736087322235107,
      "logps/chosen": -33.24656295776367,
      "logps/rejected": -32.44744110107422,
      "loss": 0.6899,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": 0.021319344639778137,
      "rewards/margins": 0.01069412101060152,
      "rewards/rejected": 0.010625220835208893,
      "step": 350
    },
    {
      "epoch": 0.94,
      "learning_rate": 6.41315865106129e-09,
      "logits/chosen": -1.9309154748916626,
      "logits/rejected": -1.9412847757339478,
      "logps/chosen": -32.214351654052734,
      "logps/rejected": -35.293235778808594,
      "loss": 0.6968,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.005797215737402439,
      "rewards/margins": -0.003113214625045657,
      "rewards/rejected": -0.0026840015780180693,
      "step": 360
    },
    {
      "epoch": 0.96,
      "learning_rate": 2.3150941078050324e-09,
      "logits/chosen": -2.0695526599884033,
      "logits/rejected": -2.0629897117614746,
      "logps/chosen": -33.649295806884766,
      "logps/rejected": -29.209609985351562,
      "loss": 0.6991,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": 0.0016033422434702516,
      "rewards/margins": -0.008483831770718098,
      "rewards/rejected": 0.01008717343211174,
      "step": 370
    },
    {
      "epoch": 0.99,
      "learning_rate": 2.575864278703266e-10,
      "logits/chosen": -1.9294023513793945,
      "logits/rejected": -1.9315516948699951,
      "logps/chosen": -34.24349594116211,
      "logps/rejected": -30.889087677001953,
      "loss": 0.6916,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.0026698322035372257,
      "rewards/margins": 0.007205922156572342,
      "rewards/rejected": -0.009875754825770855,
      "step": 380
    },
    {
      "epoch": 1.0,
      "step": 385,
      "total_flos": 0.0,
      "train_loss": 0.6911570282725544,
      "train_runtime": 3257.344,
      "train_samples_per_second": 0.945,
      "train_steps_per_second": 0.118
    }
  ],
  "logging_steps": 10,
  "max_steps": 385,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}