{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 385,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1.282051282051282e-07,
      "logits/chosen": -1.7278180122375488,
      "logits/rejected": -1.7377450466156006,
      "logps/chosen": -29.553977966308594,
      "logps/rejected": -42.813133239746094,
      "loss": 0.3906,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.03,
      "learning_rate": 1.282051282051282e-06,
      "logits/chosen": -1.8662821054458618,
      "logits/rejected": -1.8706018924713135,
      "logps/chosen": -36.98260498046875,
      "logps/rejected": -33.66376876831055,
      "loss": 0.3501,
      "rewards/accuracies": 0.5694444179534912,
      "rewards/chosen": 0.01919599249958992,
      "rewards/margins": 0.04614981636404991,
      "rewards/rejected": -0.02695382386445999,
      "step": 10
    },
    {
      "epoch": 0.05,
      "learning_rate": 2.564102564102564e-06,
      "logits/chosen": -1.9981708526611328,
      "logits/rejected": -2.000824451446533,
      "logps/chosen": -29.6453914642334,
      "logps/rejected": -29.056461334228516,
      "loss": 0.4314,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.0025594178587198257,
      "rewards/margins": -0.008148794062435627,
      "rewards/rejected": 0.005589376203715801,
      "step": 20
    },
    {
      "epoch": 0.08,
      "learning_rate": 3.846153846153847e-06,
      "logits/chosen": -1.9205211400985718,
      "logits/rejected": -1.917824149131775,
      "logps/chosen": -31.402332305908203,
      "logps/rejected": -33.20569610595703,
      "loss": 0.4366,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.011007689870893955,
      "rewards/margins": 0.0007972270250320435,
      "rewards/rejected": 0.010210464708507061,
      "step": 30
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.999896948438434e-06,
      "logits/chosen": -2.0170958042144775,
      "logits/rejected": -2.0083703994750977,
      "logps/chosen": -32.547943115234375,
      "logps/rejected": -32.49810791015625,
      "loss": 0.4286,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.023093106225132942,
      "rewards/margins": 0.011441526934504509,
      "rewards/rejected": 0.011651577427983284,
      "step": 40
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.987541037542187e-06,
      "logits/chosen": -1.8613898754119873,
      "logits/rejected": -1.8506320714950562,
      "logps/chosen": -33.56399154663086,
      "logps/rejected": -35.46310043334961,
      "loss": 0.4498,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.005208463408052921,
      "rewards/margins": 0.0033974028192460537,
      "rewards/rejected": -0.008605867624282837,
      "step": 50
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.954691471941119e-06,
      "logits/chosen": -1.9389715194702148,
      "logits/rejected": -1.9409143924713135,
      "logps/chosen": -32.57710266113281,
      "logps/rejected": -33.21430206298828,
      "loss": 0.3914,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.018466468900442123,
      "rewards/margins": 0.05280427262187004,
      "rewards/rejected": -0.03433779999613762,
      "step": 60
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.901618883413549e-06,
      "logits/chosen": -2.0723679065704346,
      "logits/rejected": -2.0773472785949707,
      "logps/chosen": -33.946128845214844,
      "logps/rejected": -36.60929489135742,
      "loss": 0.4672,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.033884815871715546,
      "rewards/margins": 0.0662197396159172,
      "rewards/rejected": -0.03233493119478226,
      "step": 70
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.828760511501322e-06,
      "logits/chosen": -1.9366763830184937,
      "logits/rejected": -1.9398372173309326,
      "logps/chosen": -34.32460403442383,
      "logps/rejected": -34.59550094604492,
      "loss": 0.3932,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.0825641006231308,
      "rewards/margins": 0.08503931760787964,
      "rewards/rejected": -0.002475212560966611,
      "step": 80
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.7367166013034295e-06,
      "logits/chosen": -1.9469963312149048,
      "logits/rejected": -1.9515202045440674,
      "logps/chosen": -32.401084899902344,
      "logps/rejected": -32.32483673095703,
      "loss": 0.4402,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.05306895822286606,
      "rewards/margins": 0.02581069990992546,
      "rewards/rejected": 0.027258265763521194,
      "step": 90
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.626245458345211e-06,
      "logits/chosen": -2.0446877479553223,
      "logits/rejected": -2.0427000522613525,
      "logps/chosen": -32.168331146240234,
      "logps/rejected": -31.263940811157227,
      "loss": 0.3817,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.05800473690032959,
      "rewards/margins": 0.056188035756349564,
      "rewards/rejected": 0.0018166989320889115,
      "step": 100
    },
    {
      "epoch": 0.26,
      "eval_logits/chosen": -2.2377800941467285,
      "eval_logits/rejected": -2.2329328060150146,
      "eval_logps/chosen": -33.97116470336914,
      "eval_logps/rejected": -37.47990036010742,
      "eval_loss": 0.42868927121162415,
      "eval_rewards/accuracies": 0.5577242374420166,
      "eval_rewards/chosen": 0.05071057379245758,
      "eval_rewards/margins": 0.021336428821086884,
      "eval_rewards/rejected": 0.029374146834015846,
      "eval_runtime": 146.0732,
      "eval_samples_per_second": 2.348,
      "eval_steps_per_second": 0.294,
      "step": 100
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.498257201263691e-06,
      "logits/chosen": -1.9993603229522705,
      "logits/rejected": -1.996971845626831,
      "logps/chosen": -33.112144470214844,
      "logps/rejected": -33.986610412597656,
      "loss": 0.5297,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.105336032807827,
      "rewards/margins": 0.07112538814544678,
      "rewards/rejected": 0.03421063348650932,
      "step": 110
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.353806263777678e-06,
      "logits/chosen": -2.009766101837158,
      "logits/rejected": -2.0014333724975586,
      "logps/chosen": -32.351280212402344,
      "logps/rejected": -32.099830627441406,
      "loss": 0.5288,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.07490243762731552,
      "rewards/margins": 0.025563379749655724,
      "rewards/rejected": 0.04933905601501465,
      "step": 120
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.1940827077152755e-06,
      "logits/chosen": -2.0372567176818848,
      "logits/rejected": -2.029240369796753,
      "logps/chosen": -30.34758949279785,
      "logps/rejected": -32.03752899169922,
      "loss": 0.4171,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.10250232368707657,
      "rewards/margins": 0.09011942148208618,
      "rewards/rejected": 0.01238289289176464,
      "step": 130
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.0204024186666215e-06,
      "logits/chosen": -1.966684341430664,
      "logits/rejected": -1.9769500494003296,
      "logps/chosen": -31.219501495361328,
      "logps/rejected": -32.558082580566406,
      "loss": 0.4241,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.1601306051015854,
      "rewards/margins": 0.178691104054451,
      "rewards/rejected": -0.018560513854026794,
      "step": 140
    },
    {
      "epoch": 0.39,
      "learning_rate": 3.834196265035119e-06,
      "logits/chosen": -1.8799800872802734,
      "logits/rejected": -1.8811286687850952,
      "logps/chosen": -34.00492858886719,
      "logps/rejected": -34.78579330444336,
      "loss": 0.3742,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.15021029114723206,
      "rewards/margins": 0.17304366827011108,
      "rewards/rejected": -0.02283337712287903,
      "step": 150
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.636998309800573e-06,
      "logits/chosen": -1.932443618774414,
      "logits/rejected": -1.9290469884872437,
      "logps/chosen": -36.061256408691406,
      "logps/rejected": -32.7225456237793,
      "loss": 0.377,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.0883389413356781,
      "rewards/margins": 0.0881747230887413,
      "rewards/rejected": 0.00016421750478912145,
      "step": 160
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.4304331721118078e-06,
      "logits/chosen": -2.0334668159484863,
      "logits/rejected": -2.026076078414917,
      "logps/chosen": -33.546730041503906,
      "logps/rejected": -31.355152130126953,
      "loss": 0.3457,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.2009061872959137,
      "rewards/margins": 0.2035447061061859,
      "rewards/rejected": -0.0026385621167719364,
      "step": 170
    },
    {
      "epoch": 0.47,
      "learning_rate": 3.2162026428305436e-06,
      "logits/chosen": -2.0394930839538574,
      "logits/rejected": -2.044752359390259,
      "logps/chosen": -32.33013153076172,
      "logps/rejected": -32.470909118652344,
      "loss": 0.2919,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.17172439396381378,
      "rewards/margins": 0.1603413224220276,
      "rewards/rejected": 0.01138305850327015,
      "step": 180
    },
    {
      "epoch": 0.49,
      "learning_rate": 2.996071664294641e-06,
      "logits/chosen": -2.0403592586517334,
      "logits/rejected": -2.0375704765319824,
      "logps/chosen": -31.314708709716797,
      "logps/rejected": -31.29607582092285,
      "loss": 0.4191,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.13939552009105682,
      "rewards/margins": 0.12168798595666885,
      "rewards/rejected": 0.017707547172904015,
      "step": 190
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.7718537898066833e-06,
      "logits/chosen": -1.9107002019882202,
      "logits/rejected": -1.915356993675232,
      "logps/chosen": -31.39776611328125,
      "logps/rejected": -32.7841682434082,
      "loss": 0.3991,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.1668148785829544,
      "rewards/margins": 0.16140125691890717,
      "rewards/rejected": 0.005413623061031103,
      "step": 200
    },
    {
      "epoch": 0.52,
      "eval_logits/chosen": -2.234880208969116,
      "eval_logits/rejected": -2.2300188541412354,
      "eval_logps/chosen": -34.03581619262695,
      "eval_logps/rejected": -37.54121017456055,
      "eval_loss": 0.4536176025867462,
      "eval_rewards/accuracies": 0.5373754501342773,
      "eval_rewards/chosen": -0.00101160176564008,
      "eval_rewards/margins": 0.01865854486823082,
      "eval_rewards/rejected": -0.019670147448778152,
      "eval_runtime": 145.4825,
      "eval_samples_per_second": 2.358,
      "eval_steps_per_second": 0.296,
      "step": 200
    },
    {
      "epoch": 0.55,
      "learning_rate": 2.5453962426402006e-06,
      "logits/chosen": -2.022460460662842,
      "logits/rejected": -2.03314471244812,
      "logps/chosen": -31.764179229736328,
      "logps/rejected": -33.924598693847656,
      "loss": 0.2827,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 0.1656998097896576,
      "rewards/margins": 0.21521887183189392,
      "rewards/rejected": -0.049519073218107224,
      "step": 210
    },
    {
      "epoch": 0.57,
      "learning_rate": 2.3185646976551794e-06,
      "logits/chosen": -1.9155023097991943,
      "logits/rejected": -1.9303117990493774,
      "logps/chosen": -29.97748374938965,
      "logps/rejected": -31.563989639282227,
      "loss": 0.3625,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.10724345594644547,
      "rewards/margins": 0.11189230531454086,
      "rewards/rejected": -0.004648865200579166,
      "step": 220
    },
    {
      "epoch": 0.6,
      "learning_rate": 2.0932279108998323e-06,
      "logits/chosen": -1.9727697372436523,
      "logits/rejected": -1.9767513275146484,
      "logps/chosen": -33.23460006713867,
      "logps/rejected": -31.581985473632812,
      "loss": 0.3234,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.1462947130203247,
      "rewards/margins": 0.17244157195091248,
      "rewards/rejected": -0.026146870106458664,
      "step": 230
    },
    {
      "epoch": 0.62,
      "learning_rate": 1.8712423238279358e-06,
      "logits/chosen": -1.971836805343628,
      "logits/rejected": -1.9498882293701172,
      "logps/chosen": -33.9459228515625,
      "logps/rejected": -35.02843475341797,
      "loss": 0.3771,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.1662607342004776,
      "rewards/margins": 0.2283717393875122,
      "rewards/rejected": -0.0621110200881958,
      "step": 240
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.6544367689701824e-06,
      "logits/chosen": -2.0139522552490234,
      "logits/rejected": -2.010632038116455,
      "logps/chosen": -32.76514434814453,
      "logps/rejected": -36.231590270996094,
      "loss": 0.3601,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.1279398649930954,
      "rewards/margins": 0.1332733929157257,
      "rewards/rejected": -0.005333536770194769,
      "step": 250
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.4445974030621963e-06,
      "logits/chosen": -1.8817164897918701,
      "logits/rejected": -1.8792623281478882,
      "logps/chosen": -34.03675842285156,
      "logps/rejected": -35.52077102661133,
      "loss": 0.3873,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.12131254374980927,
      "rewards/margins": 0.13423141837120056,
      "rewards/rejected": -0.012918862514197826,
      "step": 260
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.243452991757889e-06,
      "logits/chosen": -1.8678665161132812,
      "logits/rejected": -1.8653860092163086,
      "logps/chosen": -34.24772262573242,
      "logps/rejected": -31.75213050842285,
      "loss": 0.3738,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.11989933252334595,
      "rewards/margins": 0.1045403853058815,
      "rewards/rejected": 0.015358942560851574,
      "step": 270
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.0526606671603523e-06,
      "logits/chosen": -1.9715077877044678,
      "logits/rejected": -1.9609073400497437,
      "logps/chosen": -35.06243133544922,
      "logps/rejected": -31.837047576904297,
      "loss": 0.2652,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.2353697121143341,
      "rewards/margins": 0.2322143018245697,
      "rewards/rejected": 0.003155359299853444,
      "step": 280
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.737922755071455e-07,
      "logits/chosen": -2.0671467781066895,
      "logits/rejected": -2.052150011062622,
      "logps/chosen": -30.7277774810791,
      "logps/rejected": -32.63950729370117,
      "loss": 0.3926,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.15707775950431824,
      "rewards/margins": 0.14721640944480896,
      "rewards/rejected": 0.009861335158348083,
      "step": 290
    },
    {
      "epoch": 0.78,
      "learning_rate": 7.08321427484816e-07,
      "logits/chosen": -1.9383538961410522,
      "logits/rejected": -1.935782790184021,
      "logps/chosen": -32.601158142089844,
      "logps/rejected": -30.8317813873291,
      "loss": 0.2816,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 0.26075875759124756,
      "rewards/margins": 0.2727365493774414,
      "rewards/rejected": -0.011977789923548698,
      "step": 300
    },
    {
      "epoch": 0.78,
      "eval_logits/chosen": -2.2351884841918945,
      "eval_logits/rejected": -2.230333089828491,
      "eval_logps/chosen": -34.01213836669922,
      "eval_logps/rejected": -37.532142639160156,
      "eval_loss": 0.4463596045970917,
      "eval_rewards/accuracies": 0.5427741408348083,
      "eval_rewards/chosen": 0.017930733039975166,
      "eval_rewards/margins": 0.03035038523375988,
      "eval_rewards/rejected": -0.012419654987752438,
      "eval_runtime": 145.7115,
      "eval_samples_per_second": 2.354,
      "eval_steps_per_second": 0.295,
      "step": 300
    },
    {
      "epoch": 0.81,
      "learning_rate": 5.576113578589035e-07,
      "logits/chosen": -1.9204727411270142,
      "logits/rejected": -1.9172074794769287,
      "logps/chosen": -31.360204696655273,
      "logps/rejected": -33.759212493896484,
      "loss": 0.3011,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 0.19451384246349335,
      "rewards/margins": 0.20816302299499512,
      "rewards/rejected": -0.01364919263869524,
      "step": 310
    },
    {
      "epoch": 0.83,
      "learning_rate": 4.229036944380913e-07,
      "logits/chosen": -1.9719558954238892,
      "logits/rejected": -1.959651231765747,
      "logps/chosen": -34.44158935546875,
      "logps/rejected": -33.596771240234375,
      "loss": 0.2835,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.12358621507883072,
      "rewards/margins": 0.1699295938014984,
      "rewards/rejected": -0.0463433675467968,
      "step": 320
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.053082288996112e-07,
      "logits/chosen": -2.0068559646606445,
      "logits/rejected": -2.00539231300354,
      "logps/chosen": -33.331153869628906,
      "logps/rejected": -32.49522018432617,
      "loss": 0.3707,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.12371524423360825,
      "rewards/margins": 0.13622507452964783,
      "rewards/rejected": -0.012509837746620178,
      "step": 330
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.0579377374915805e-07,
      "logits/chosen": -2.0939385890960693,
      "logits/rejected": -2.0781521797180176,
      "logps/chosen": -33.88405227661133,
      "logps/rejected": -33.090179443359375,
      "loss": 0.36,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.23627004027366638,
      "rewards/margins": 0.21398480236530304,
      "rewards/rejected": 0.02228522300720215,
      "step": 340
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.2518018074041684e-07,
      "logits/chosen": -1.9664535522460938,
      "logits/rejected": -1.9655389785766602,
      "logps/chosen": -32.96236038208008,
      "logps/rejected": -32.478477478027344,
      "loss": 0.3,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.2557886242866516,
      "rewards/margins": 0.2664529085159302,
      "rewards/rejected": -0.0106642572209239,
      "step": 350
    },
    {
      "epoch": 0.94,
      "learning_rate": 6.41315865106129e-08,
      "logits/chosen": -1.921805739402771,
      "logits/rejected": -1.9321292638778687,
      "logps/chosen": -32.022247314453125,
      "logps/rejected": -35.31328582763672,
      "loss": 0.3671,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.14595167338848114,
      "rewards/margins": 0.16557307541370392,
      "rewards/rejected": -0.01962139829993248,
      "step": 360
    },
    {
      "epoch": 0.96,
      "learning_rate": 2.3150941078050325e-08,
      "logits/chosen": -2.060645580291748,
      "logits/rejected": -2.0541434288024902,
      "logps/chosen": -33.4498405456543,
      "logps/rejected": -29.211456298828125,
      "loss": 0.3226,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.16169998049736023,
      "rewards/margins": 0.1497238427400589,
      "rewards/rejected": 0.011976108886301517,
      "step": 370
    },
    {
      "epoch": 0.99,
      "learning_rate": 2.575864278703266e-09,
      "logits/chosen": -1.920902967453003,
      "logits/rejected": -1.9230811595916748,
      "logps/chosen": -33.97188186645508,
      "logps/rejected": -30.862768173217773,
      "loss": 0.3117,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.21373608708381653,
      "rewards/margins": 0.20584869384765625,
      "rewards/rejected": 0.00788736529648304,
      "step": 380
    },
    {
      "epoch": 1.0,
      "step": 385,
      "total_flos": 0.0,
      "train_loss": 0.3756082092012678,
      "train_runtime": 3251.4223,
      "train_samples_per_second": 0.947,
      "train_steps_per_second": 0.118
    }
  ],
  "logging_steps": 10,
  "max_steps": 385,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}