{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 385,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1.282051282051282e-08,
      "logits/chosen": -1.7278180122375488,
      "logits/rejected": -1.7377450466156006,
      "logps/chosen": -29.553977966308594,
      "logps/rejected": -42.813133239746094,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.03,
      "learning_rate": 1.2820512820512818e-07,
      "logits/chosen": -1.866059422492981,
      "logits/rejected": -1.870389699935913,
      "logps/chosen": -36.98250198364258,
      "logps/rejected": -33.64270782470703,
      "loss": 0.6804,
      "rewards/accuracies": 0.4027777910232544,
      "rewards/chosen": 0.021689780056476593,
      "rewards/margins": 0.033059459179639816,
      "rewards/rejected": -0.011369682848453522,
      "step": 10
    },
    {
      "epoch": 0.05,
      "learning_rate": 2.5641025641025636e-07,
      "logits/chosen": -1.9973828792572021,
      "logits/rejected": -2.0000317096710205,
      "logps/chosen": -29.631702423095703,
      "logps/rejected": -29.05877113342285,
      "loss": 0.6937,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.009445475414395332,
      "rewards/margins": 0.005240145605057478,
      "rewards/rejected": 0.004205327946692705,
      "step": 20
    },
    {
      "epoch": 0.08,
      "learning_rate": 3.8461538461538463e-07,
      "logits/chosen": -1.9198739528656006,
      "logits/rejected": -1.9171788692474365,
      "logps/chosen": -31.4038028717041,
      "logps/rejected": -33.20899963378906,
      "loss": 0.696,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.01106296293437481,
      "rewards/margins": 0.0025471807457506657,
      "rewards/rejected": 0.008515783585608006,
      "step": 30
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.999896948438433e-07,
      "logits/chosen": -2.0166726112365723,
      "logits/rejected": -2.0079195499420166,
      "logps/chosen": -32.57840347290039,
      "logps/rejected": -32.493370056152344,
      "loss": 0.7066,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.00143451988697052,
      "rewards/margins": -0.018809262663125992,
      "rewards/rejected": 0.017374742776155472,
      "step": 40
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.987541037542186e-07,
      "logits/chosen": -1.8648544549942017,
      "logits/rejected": -1.8540741205215454,
      "logps/chosen": -33.54566955566406,
      "logps/rejected": -35.43162155151367,
      "loss": 0.7004,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": 0.01062955055385828,
      "rewards/margins": -0.00802132673561573,
      "rewards/rejected": 0.018650878220796585,
      "step": 50
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.954691471941118e-07,
      "logits/chosen": -1.9450620412826538,
      "logits/rejected": -1.947021245956421,
      "logps/chosen": -32.5836296081543,
      "logps/rejected": -33.20051193237305,
      "loss": 0.6761,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.014901289716362953,
      "rewards/margins": 0.04112350940704346,
      "rewards/rejected": -0.026222219690680504,
      "step": 60
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.901618883413548e-07,
      "logits/chosen": -2.079589366912842,
      "logits/rejected": -2.0845787525177,
      "logps/chosen": -33.9754524230957,
      "logps/rejected": -36.5793342590332,
      "loss": 0.6876,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.01173111516982317,
      "rewards/margins": 0.021146392449736595,
      "rewards/rejected": -0.00941527634859085,
      "step": 70
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.828760511501322e-07,
      "logits/chosen": -1.9424660205841064,
      "logits/rejected": -1.945634126663208,
      "logps/chosen": -34.394561767578125,
      "logps/rejected": -34.57395553588867,
      "loss": 0.6921,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.029921507462859154,
      "rewards/margins": 0.013314949348568916,
      "rewards/rejected": 0.016606558114290237,
      "step": 80
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.736716601303429e-07,
      "logits/chosen": -1.950564980506897,
      "logits/rejected": -1.955082893371582,
      "logps/chosen": -32.44951248168945,
      "logps/rejected": -32.353668212890625,
      "loss": 0.6909,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.0161209125071764,
      "rewards/margins": 0.011405264027416706,
      "rewards/rejected": 0.004715651273727417,
      "step": 90
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.62624545834521e-07,
      "logits/chosen": -2.0489234924316406,
      "logits/rejected": -2.046917200088501,
      "logps/chosen": -32.236671447753906,
      "logps/rejected": -31.263763427734375,
      "loss": 0.6951,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.0037471160758286715,
      "rewards/margins": 0.0015437646070495248,
      "rewards/rejected": 0.002203352050855756,
      "step": 100
    },
    {
      "epoch": 0.26,
      "eval_logits/chosen": -2.243756055831909,
      "eval_logits/rejected": -2.238872766494751,
      "eval_logps/chosen": -34.025177001953125,
      "eval_logps/rejected": -37.50759506225586,
      "eval_loss": 0.696557343006134,
      "eval_rewards/accuracies": 0.49543190002441406,
      "eval_rewards/chosen": 0.008434689603745937,
      "eval_rewards/margins": 0.0003138432221021503,
      "eval_rewards/rejected": 0.008120844140648842,
      "eval_runtime": 146.3373,
      "eval_samples_per_second": 2.344,
      "eval_steps_per_second": 0.294,
      "step": 100
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.4982572012636904e-07,
      "logits/chosen": -2.0052237510681152,
      "logits/rejected": -2.002803325653076,
      "logps/chosen": -33.24525833129883,
      "logps/rejected": -34.01074981689453,
      "loss": 0.7064,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.0013012334238737822,
      "rewards/margins": -0.018059223890304565,
      "rewards/rejected": 0.016757991164922714,
      "step": 110
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.353806263777677e-07,
      "logits/chosen": -2.016326665878296,
      "logits/rejected": -2.007951498031616,
      "logps/chosen": -32.478302001953125,
      "logps/rejected": -32.186073303222656,
      "loss": 0.7012,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.030056962743401527,
      "rewards/margins": -0.007947373203933239,
      "rewards/rejected": -0.022109590470790863,
      "step": 120
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.194082707715275e-07,
      "logits/chosen": -2.045950412750244,
      "logits/rejected": -2.037904739379883,
      "logps/chosen": -30.491313934326172,
      "logps/rejected": -32.0540771484375,
      "loss": 0.7025,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.01403898186981678,
      "rewards/margins": -0.013075167313218117,
      "rewards/rejected": -0.0009638145565986633,
      "step": 130
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.020402418666621e-07,
      "logits/chosen": -1.9765291213989258,
      "logits/rejected": -1.986802101135254,
      "logps/chosen": -31.39646339416504,
      "logps/rejected": -32.522178649902344,
      "loss": 0.6923,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.02087983302772045,
      "rewards/margins": 0.00944516807794571,
      "rewards/rejected": 0.011434664018452168,
      "step": 140
    },
    {
      "epoch": 0.39,
      "learning_rate": 3.8341962650351185e-07,
      "logits/chosen": -1.8905407190322876,
      "logits/rejected": -1.8916336297988892,
      "logps/chosen": -34.17607116699219,
      "logps/rejected": -34.76817321777344,
      "loss": 0.6851,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": 0.01495879702270031,
      "rewards/margins": 0.024784717708826065,
      "rewards/rejected": -0.00982591975480318,
      "step": 150
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.636998309800572e-07,
      "logits/chosen": -1.942565679550171,
      "logits/rejected": -1.9390919208526611,
      "logps/chosen": -36.1614875793457,
      "logps/rejected": -32.72848129272461,
      "loss": 0.6907,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 0.00917207170277834,
      "rewards/margins": 0.01433003693819046,
      "rewards/rejected": -0.0051579661667346954,
      "step": 160
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.430433172111807e-07,
      "logits/chosen": -2.0418710708618164,
      "logits/rejected": -2.034475564956665,
      "logps/chosen": -33.76544952392578,
      "logps/rejected": -31.358556747436523,
      "loss": 0.6791,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.02917439118027687,
      "rewards/margins": 0.03521214798092842,
      "rewards/rejected": -0.0060377540066838264,
      "step": 170
    },
    {
      "epoch": 0.47,
      "learning_rate": 3.216202642830543e-07,
      "logits/chosen": -2.0476231575012207,
      "logits/rejected": -2.0529019832611084,
      "logps/chosen": -32.533023834228516,
      "logps/rejected": -32.506805419921875,
      "loss": 0.6814,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.010587882250547409,
      "rewards/margins": 0.03008626028895378,
      "rewards/rejected": -0.019498378038406372,
      "step": 180
    },
    {
      "epoch": 0.49,
      "learning_rate": 2.9960716642946403e-07,
      "logits/chosen": -2.048576593399048,
      "logits/rejected": -2.0457961559295654,
      "logps/chosen": -31.470510482788086,
      "logps/rejected": -31.322284698486328,
      "loss": 0.687,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.016599375754594803,
      "rewards/margins": 0.020261235535144806,
      "rewards/rejected": -0.0036618602462112904,
      "step": 190
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.771853789806683e-07,
      "logits/chosen": -1.9184268712997437,
      "logits/rejected": -1.9230947494506836,
      "logps/chosen": -31.57404136657715,
      "logps/rejected": -32.775550842285156,
      "loss": 0.6891,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": 0.02901501953601837,
      "rewards/margins": 0.015173261985182762,
      "rewards/rejected": 0.013841753825545311,
      "step": 200
    },
    {
      "epoch": 0.52,
      "eval_logits/chosen": -2.243248462677002,
      "eval_logits/rejected": -2.238374948501587,
      "eval_logps/chosen": -34.02431869506836,
      "eval_logps/rejected": -37.511497497558594,
      "eval_loss": 0.6946919560432434,
      "eval_rewards/accuracies": 0.49335551261901855,
      "eval_rewards/chosen": 0.009210066869854927,
      "eval_rewards/margins": 0.0045979218557477,
      "eval_rewards/rejected": 0.004612144082784653,
      "eval_runtime": 146.0514,
      "eval_samples_per_second": 2.348,
      "eval_steps_per_second": 0.294,
      "step": 200
    },
    {
      "epoch": 0.55,
      "learning_rate": 2.5453962426402e-07,
      "logits/chosen": -2.0314748287200928,
      "logits/rejected": -2.0421440601348877,
      "logps/chosen": -31.954639434814453,
      "logps/rejected": -33.87318801879883,
      "loss": 0.6864,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.014999927952885628,
      "rewards/margins": 0.024442464113235474,
      "rewards/rejected": -0.009442536160349846,
      "step": 210
    },
    {
      "epoch": 0.57,
      "learning_rate": 2.318564697655179e-07,
      "logits/chosen": -1.9249544143676758,
      "logits/rejected": -1.9398053884506226,
      "logps/chosen": -30.072036743164062,
      "logps/rejected": -31.55165672302246,
      "loss": 0.6827,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.035550959408283234,
      "rewards/margins": 0.029681822285056114,
      "rewards/rejected": 0.005869132932275534,
      "step": 220
    },
    {
      "epoch": 0.6,
      "learning_rate": 2.093227910899832e-07,
      "logits/chosen": -1.9830297231674194,
      "logits/rejected": -1.9869968891143799,
      "logps/chosen": -33.3900146484375,
      "logps/rejected": -31.545156478881836,
      "loss": 0.6872,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.024708259850740433,
      "rewards/margins": 0.02097567543387413,
      "rewards/rejected": 0.00373258744366467,
      "step": 230
    },
    {
      "epoch": 0.62,
      "learning_rate": 1.8712423238279356e-07,
      "logits/chosen": -1.9825174808502197,
      "logits/rejected": -1.9605509042739868,
      "logps/chosen": -34.16549301147461,
      "logps/rejected": -34.945499420166016,
      "loss": 0.7065,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.010572168976068497,
      "rewards/margins": -0.015337007120251656,
      "rewards/rejected": 0.004764837212860584,
      "step": 240
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.654436768970182e-07,
      "logits/chosen": -2.0242011547088623,
      "logits/rejected": -2.02089262008667,
      "logps/chosen": -32.91257858276367,
      "logps/rejected": -36.22357177734375,
      "loss": 0.6913,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.01124012004584074,
      "rewards/margins": 0.010020162910223007,
      "rewards/rejected": 0.0012199539924040437,
      "step": 250
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.444597403062196e-07,
      "logits/chosen": -1.8910300731658936,
      "logits/rejected": -1.888593316078186,
      "logps/chosen": -34.191619873046875,
      "logps/rejected": -35.50851058959961,
      "loss": 0.6975,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.0028982784133404493,
      "rewards/margins": 0.0006028197822161019,
      "rewards/rejected": -0.003501094877719879,
      "step": 260
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.2434529917578887e-07,
      "logits/chosen": -1.8757693767547607,
      "logits/rejected": -1.8732143640518188,
      "logps/chosen": -34.38648223876953,
      "logps/rejected": -31.753692626953125,
      "loss": 0.6995,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.010007266886532307,
      "rewards/margins": -0.005866709630936384,
      "rewards/rejected": 0.015873977914452553,
      "step": 270
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.0526606671603521e-07,
      "logits/chosen": -1.9795001745224,
      "logits/rejected": -1.9688692092895508,
      "logps/chosen": -35.32746887207031,
      "logps/rejected": -31.84292221069336,
      "loss": 0.6843,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.026254409924149513,
      "rewards/margins": 0.027990642935037613,
      "rewards/rejected": -0.001736226724460721,
      "step": 280
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.737922755071453e-08,
      "logits/chosen": -2.0753533840179443,
      "logits/rejected": -2.0603318214416504,
      "logps/chosen": -30.902263641357422,
      "logps/rejected": -32.65242385864258,
      "loss": 0.6872,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.019674303010106087,
      "rewards/margins": 0.020201902836561203,
      "rewards/rejected": -0.0005276011070236564,
      "step": 290
    },
    {
      "epoch": 0.78,
      "learning_rate": 7.08321427484816e-08,
      "logits/chosen": -1.946671724319458,
      "logits/rejected": -1.9441486597061157,
      "logps/chosen": -32.906158447265625,
      "logps/rejected": -30.805999755859375,
      "loss": 0.693,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": 0.018858108669519424,
      "rewards/margins": 0.009128611534833908,
      "rewards/rejected": 0.009729497134685516,
      "step": 300
    },
    {
      "epoch": 0.78,
      "eval_logits/chosen": -2.243694305419922,
      "eval_logits/rejected": -2.2388274669647217,
      "eval_logps/chosen": -34.017173767089844,
      "eval_logps/rejected": -37.497615814208984,
      "eval_loss": 0.6981561183929443,
      "eval_rewards/accuracies": 0.49833887815475464,
      "eval_rewards/chosen": 0.015641551464796066,
      "eval_rewards/margins": -0.0014640000881627202,
      "eval_rewards/rejected": 0.017105549573898315,
      "eval_runtime": 146.0253,
      "eval_samples_per_second": 2.349,
      "eval_steps_per_second": 0.294,
      "step": 300
    },
    {
      "epoch": 0.81,
      "learning_rate": 5.576113578589034e-08,
      "logits/chosen": -1.9287757873535156,
      "logits/rejected": -1.9255115985870361,
      "logps/chosen": -31.573471069335938,
      "logps/rejected": -33.71125030517578,
      "loss": 0.698,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.026891669258475304,
      "rewards/margins": -0.000918733305297792,
      "rewards/rejected": 0.02781040407717228,
      "step": 310
    },
    {
      "epoch": 0.83,
      "learning_rate": 4.229036944380912e-08,
      "logits/chosen": -1.9807313680648804,
      "logits/rejected": -1.968425989151001,
      "logps/chosen": -34.54930877685547,
      "logps/rejected": -33.57306671142578,
      "loss": 0.6615,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.04208679869771004,
      "rewards/margins": 0.07289497554302216,
      "rewards/rejected": -0.030808180570602417,
      "step": 320
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.053082288996112e-08,
      "logits/chosen": -2.0161099433898926,
      "logits/rejected": -2.0146496295928955,
      "logps/chosen": -33.46127700805664,
      "logps/rejected": -32.4744873046875,
      "loss": 0.6883,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.022065162658691406,
      "rewards/margins": 0.01747960038483143,
      "rewards/rejected": 0.004585559479892254,
      "step": 330
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.05793773749158e-08,
      "logits/chosen": -2.102802276611328,
      "logits/rejected": -2.0869994163513184,
      "logps/chosen": -34.16718292236328,
      "logps/rejected": -33.08827590942383,
      "loss": 0.706,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": 0.010987209156155586,
      "rewards/margins": -0.01579815149307251,
      "rewards/rejected": 0.026785362511873245,
      "step": 340
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.251801807404168e-08,
      "logits/chosen": -1.97471022605896,
      "logits/rejected": -1.9737637042999268,
      "logps/chosen": -33.25645065307617,
      "logps/rejected": -32.46539306640625,
      "loss": 0.6861,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.023080622777342796,
      "rewards/margins": 0.023302335292100906,
      "rewards/rejected": -0.00022171511955093592,
      "step": 350
    },
    {
      "epoch": 0.94,
      "learning_rate": 6.41315865106129e-09,
      "logits/chosen": -1.9304895401000977,
      "logits/rejected": -1.9408460855484009,
      "logps/chosen": -32.22315979003906,
      "logps/rejected": -35.28199005126953,
      "loss": 0.7085,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.01662321388721466,
      "rewards/margins": -0.022716889157891273,
      "rewards/rejected": 0.006093672942370176,
      "step": 360
    },
    {
      "epoch": 0.96,
      "learning_rate": 2.3150941078050324e-09,
      "logits/chosen": -2.0692858695983887,
      "logits/rejected": -2.0627408027648926,
      "logps/chosen": -33.64545440673828,
      "logps/rejected": -29.203441619873047,
      "loss": 0.704,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": 0.00586346909403801,
      "rewards/margins": -0.014821496792137623,
      "rewards/rejected": 0.020684964954853058,
      "step": 370
    },
    {
      "epoch": 0.99,
      "learning_rate": 2.575864278703266e-10,
      "logits/chosen": -1.928911805152893,
      "logits/rejected": -1.9310725927352905,
      "logps/chosen": -34.24489212036133,
      "logps/rejected": -30.92240333557129,
      "loss": 0.6763,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.005258283112198114,
      "rewards/margins": 0.03954260051250458,
      "rewards/rejected": -0.044800881296396255,
      "step": 380
    },
    {
      "epoch": 1.0,
      "step": 385,
      "total_flos": 0.0,
      "train_loss": 0.6916276120520257,
      "train_runtime": 3257.6062,
      "train_samples_per_second": 0.945,
      "train_steps_per_second": 0.118
    }
  ],
  "logging_steps": 10,
  "max_steps": 385,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}