{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 385,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 396.0,
      "learning_rate": 1.282051282051282e-07,
      "logits/chosen": -2.7358343601226807,
      "logits/rejected": -2.7480404376983643,
      "logps/chosen": -27.35565757751465,
      "logps/rejected": -21.06114387512207,
      "loss": 6.25,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.03,
      "grad_norm": 672.0,
      "learning_rate": 1.282051282051282e-06,
      "logits/chosen": -3.0095510482788086,
      "logits/rejected": -2.9984350204467773,
      "logps/chosen": -33.197166442871094,
      "logps/rejected": -31.98773193359375,
      "loss": 6.2548,
      "rewards/accuracies": 0.4861111044883728,
      "rewards/chosen": -0.0032450438011437654,
      "rewards/margins": 0.0015198996989056468,
      "rewards/rejected": -0.004764944314956665,
      "step": 10
    },
    {
      "epoch": 0.05,
      "grad_norm": 480.0,
      "learning_rate": 2.564102564102564e-06,
      "logits/chosen": -2.8992972373962402,
      "logits/rejected": -2.8939919471740723,
      "logps/chosen": -32.48074722290039,
      "logps/rejected": -28.958911895751953,
      "loss": 6.4479,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.0011298510944470763,
      "rewards/margins": -0.005721274763345718,
      "rewards/rejected": 0.004591422621160746,
      "step": 20
    },
    {
      "epoch": 0.08,
      "grad_norm": 426.0,
      "learning_rate": 3.846153846153847e-06,
      "logits/chosen": -3.097346067428589,
      "logits/rejected": -3.1091365814208984,
      "logps/chosen": -32.795387268066406,
      "logps/rejected": -30.13690185546875,
      "loss": 6.087,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.02266053855419159,
      "rewards/margins": 0.01083013229072094,
      "rewards/rejected": 0.011830407194793224,
      "step": 30
    },
    {
      "epoch": 0.1,
      "grad_norm": 376.0,
      "learning_rate": 4.999896948438434e-06,
      "logits/chosen": -2.8621602058410645,
      "logits/rejected": -2.852827310562134,
      "logps/chosen": -31.5306396484375,
      "logps/rejected": -32.392234802246094,
      "loss": 5.4681,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.05993650108575821,
      "rewards/margins": 0.06689226627349854,
      "rewards/rejected": -0.006955766584724188,
      "step": 40
    },
    {
      "epoch": 0.13,
      "grad_norm": 330.0,
      "learning_rate": 4.987541037542187e-06,
      "logits/chosen": -2.881068468093872,
      "logits/rejected": -2.8789007663726807,
      "logps/chosen": -29.444555282592773,
      "logps/rejected": -30.105321884155273,
      "loss": 5.8178,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.05912144109606743,
      "rewards/margins": 0.06239712983369827,
      "rewards/rejected": -0.003275694791227579,
      "step": 50
    },
    {
      "epoch": 0.16,
      "grad_norm": 378.0,
      "learning_rate": 4.954691471941119e-06,
      "logits/chosen": -2.9106826782226562,
      "logits/rejected": -2.9123446941375732,
      "logps/chosen": -29.89569091796875,
      "logps/rejected": -28.091384887695312,
      "loss": 4.9965,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.046815626323223114,
      "rewards/margins": 0.07395622134208679,
      "rewards/rejected": -0.027140596881508827,
      "step": 60
    },
    {
      "epoch": 0.18,
      "grad_norm": 1256.0,
      "learning_rate": 4.901618883413549e-06,
      "logits/chosen": -2.9927854537963867,
      "logits/rejected": -2.9993896484375,
      "logps/chosen": -29.258081436157227,
      "logps/rejected": -31.0142822265625,
      "loss": 6.6005,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.004577371757477522,
      "rewards/margins": 0.029073869809508324,
      "rewards/rejected": -0.024496497586369514,
      "step": 70
    },
    {
      "epoch": 0.21,
      "grad_norm": 552.0,
      "learning_rate": 4.828760511501322e-06,
      "logits/chosen": -2.8086979389190674,
      "logits/rejected": -2.823762893676758,
      "logps/chosen": -29.380992889404297,
      "logps/rejected": -29.88627052307129,
      "loss": 5.3018,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.031947556883096695,
      "rewards/margins": 0.07434491813182831,
      "rewards/rejected": -0.04239736869931221,
      "step": 80
    },
    {
      "epoch": 0.23,
      "grad_norm": 424.0,
      "learning_rate": 4.7367166013034295e-06,
      "logits/chosen": -2.8993382453918457,
      "logits/rejected": -2.8812999725341797,
      "logps/chosen": -32.65332794189453,
      "logps/rejected": -30.278905868530273,
      "loss": 9.3008,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.03662032634019852,
      "rewards/margins": 0.08807238936424255,
      "rewards/rejected": -0.05145206302404404,
      "step": 90
    },
    {
      "epoch": 0.26,
      "grad_norm": 332.0,
      "learning_rate": 4.626245458345211e-06,
      "logits/chosen": -3.0058953762054443,
      "logits/rejected": -3.0061850547790527,
      "logps/chosen": -31.760330200195312,
      "logps/rejected": -30.88376808166504,
      "loss": 5.3752,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.05930906534194946,
      "rewards/margins": 0.07282786071300507,
      "rewards/rejected": -0.013518787920475006,
      "step": 100
    },
    {
      "epoch": 0.26,
      "eval_logits/chosen": -2.8137402534484863,
      "eval_logits/rejected": -2.810950994491577,
      "eval_logps/chosen": -31.279376983642578,
      "eval_logps/rejected": -34.8190803527832,
      "eval_loss": 6.049151420593262,
      "eval_rewards/accuracies": 0.5892857313156128,
      "eval_rewards/chosen": 0.0006145605584606528,
      "eval_rewards/margins": 0.024798991158604622,
      "eval_rewards/rejected": -0.02418443188071251,
      "eval_runtime": 113.3821,
      "eval_samples_per_second": 3.025,
      "eval_steps_per_second": 0.379,
      "step": 100
    },
    {
      "epoch": 0.29,
      "grad_norm": 524.0,
      "learning_rate": 4.498257201263691e-06,
      "logits/chosen": -2.961501359939575,
      "logits/rejected": -2.9370064735412598,
      "logps/chosen": -31.725719451904297,
      "logps/rejected": -31.284133911132812,
      "loss": 5.3812,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.09821529686450958,
      "rewards/margins": 0.12327277660369873,
      "rewards/rejected": -0.02505749836564064,
      "step": 110
    },
    {
      "epoch": 0.31,
      "grad_norm": 380.0,
      "learning_rate": 4.353806263777678e-06,
      "logits/chosen": -3.0424726009368896,
      "logits/rejected": -3.0713469982147217,
      "logps/chosen": -28.6945858001709,
      "logps/rejected": -34.26778793334961,
      "loss": 5.044,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.09386318176984787,
      "rewards/margins": 0.11791019141674042,
      "rewards/rejected": -0.024047017097473145,
      "step": 120
    },
    {
      "epoch": 0.34,
      "grad_norm": 294.0,
      "learning_rate": 4.1940827077152755e-06,
      "logits/chosen": -2.7436156272888184,
      "logits/rejected": -2.7392959594726562,
      "logps/chosen": -28.56683349609375,
      "logps/rejected": -30.19887924194336,
      "loss": 5.5435,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.08156983554363251,
      "rewards/margins": 0.0945839211344719,
      "rewards/rejected": -0.01301408838480711,
      "step": 130
    },
    {
      "epoch": 0.36,
      "grad_norm": 238.0,
      "learning_rate": 4.0204024186666215e-06,
      "logits/chosen": -3.0176939964294434,
      "logits/rejected": -3.01535964012146,
      "logps/chosen": -27.101856231689453,
      "logps/rejected": -31.852075576782227,
      "loss": 5.3282,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.07687394320964813,
      "rewards/margins": 0.11593522876501083,
      "rewards/rejected": -0.039061289280653,
      "step": 140
    },
    {
      "epoch": 0.39,
      "grad_norm": 366.0,
      "learning_rate": 3.834196265035119e-06,
      "logits/chosen": -2.813143014907837,
      "logits/rejected": -2.8078806400299072,
      "logps/chosen": -27.3418025970459,
      "logps/rejected": -31.519561767578125,
      "loss": 4.5462,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.09674083441495895,
      "rewards/margins": 0.15592411160469055,
      "rewards/rejected": -0.05918328836560249,
      "step": 150
    },
    {
      "epoch": 0.42,
      "grad_norm": 414.0,
      "learning_rate": 3.636998309800573e-06,
      "logits/chosen": -3.1300997734069824,
      "logits/rejected": -3.1127312183380127,
      "logps/chosen": -31.788288116455078,
      "logps/rejected": -29.369943618774414,
      "loss": 4.1399,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.1406870186328888,
      "rewards/margins": 0.2187211960554123,
      "rewards/rejected": -0.0780341774225235,
      "step": 160
    },
    {
      "epoch": 0.44,
      "grad_norm": 316.0,
      "learning_rate": 3.4304331721118078e-06,
      "logits/chosen": -2.9419102668762207,
      "logits/rejected": -2.949079751968384,
      "logps/chosen": -29.38619613647461,
      "logps/rejected": -31.473434448242188,
      "loss": 4.3154,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.10168395191431046,
      "rewards/margins": 0.1764744520187378,
      "rewards/rejected": -0.07479049265384674,
      "step": 170
    },
    {
      "epoch": 0.47,
      "grad_norm": 330.0,
      "learning_rate": 3.2162026428305436e-06,
      "logits/chosen": -2.790714740753174,
      "logits/rejected": -2.7880821228027344,
      "logps/chosen": -29.101242065429688,
      "logps/rejected": -30.093902587890625,
      "loss": 4.5857,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.11372152715921402,
      "rewards/margins": 0.17886772751808167,
      "rewards/rejected": -0.06514620780944824,
      "step": 180
    },
    {
      "epoch": 0.49,
      "grad_norm": 241.0,
      "learning_rate": 2.996071664294641e-06,
      "logits/chosen": -2.9045286178588867,
      "logits/rejected": -2.901243209838867,
      "logps/chosen": -29.739131927490234,
      "logps/rejected": -28.617401123046875,
      "loss": 6.9402,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.0895165354013443,
      "rewards/margins": 0.14149072766304016,
      "rewards/rejected": -0.051974184811115265,
      "step": 190
    },
    {
      "epoch": 0.52,
      "grad_norm": 163.0,
      "learning_rate": 2.7718537898066833e-06,
      "logits/chosen": -2.9731786251068115,
      "logits/rejected": -2.9609808921813965,
      "logps/chosen": -32.76665496826172,
      "logps/rejected": -30.380229949951172,
      "loss": 5.6214,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.18647293746471405,
      "rewards/margins": 0.17570139467716217,
      "rewards/rejected": 0.010771543718874454,
      "step": 200
    },
    {
      "epoch": 0.52,
      "eval_logits/chosen": -2.8129608631134033,
      "eval_logits/rejected": -2.810295343399048,
      "eval_logps/chosen": -31.312314987182617,
      "eval_logps/rejected": -34.8574333190918,
      "eval_loss": 5.954337120056152,
      "eval_rewards/accuracies": 0.5685215592384338,
      "eval_rewards/chosen": -0.005972325801849365,
      "eval_rewards/margins": 0.02588305063545704,
      "eval_rewards/rejected": -0.031855374574661255,
      "eval_runtime": 113.1346,
      "eval_samples_per_second": 3.032,
      "eval_steps_per_second": 0.38,
      "step": 200
    },
    {
      "epoch": 0.55,
      "grad_norm": 346.0,
      "learning_rate": 2.5453962426402006e-06,
      "logits/chosen": -2.9092488288879395,
      "logits/rejected": -2.9100584983825684,
      "logps/chosen": -32.28118133544922,
      "logps/rejected": -34.049217224121094,
      "loss": 4.2159,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.15044590830802917,
      "rewards/margins": 0.18799623847007751,
      "rewards/rejected": -0.03755030035972595,
      "step": 210
    },
    {
      "epoch": 0.57,
      "grad_norm": 296.0,
      "learning_rate": 2.3185646976551794e-06,
      "logits/chosen": -2.886615037918091,
      "logits/rejected": -2.902318000793457,
      "logps/chosen": -29.267192840576172,
      "logps/rejected": -28.660858154296875,
      "loss": 4.7348,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.18216967582702637,
      "rewards/margins": 0.20868726074695587,
      "rewards/rejected": -0.026517590507864952,
      "step": 220
    },
    {
      "epoch": 0.6,
      "grad_norm": 278.0,
      "learning_rate": 2.0932279108998323e-06,
      "logits/chosen": -2.9349474906921387,
      "logits/rejected": -2.9388556480407715,
      "logps/chosen": -30.56326675415039,
      "logps/rejected": -31.848459243774414,
      "loss": 5.1251,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.11771988868713379,
      "rewards/margins": 0.13578179478645325,
      "rewards/rejected": -0.01806190237402916,
      "step": 230
    },
    {
      "epoch": 0.62,
      "grad_norm": 294.0,
      "learning_rate": 1.8712423238279358e-06,
      "logits/chosen": -2.9876904487609863,
      "logits/rejected": -2.994788408279419,
      "logps/chosen": -30.00099754333496,
      "logps/rejected": -30.282485961914062,
      "loss": 3.5274,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": 0.18078531324863434,
      "rewards/margins": 0.19023428857326508,
      "rewards/rejected": -0.009448973461985588,
      "step": 240
    },
    {
      "epoch": 0.65,
      "grad_norm": 448.0,
      "learning_rate": 1.6544367689701824e-06,
      "logits/chosen": -2.818467617034912,
      "logits/rejected": -2.8090109825134277,
      "logps/chosen": -26.312854766845703,
      "logps/rejected": -29.42915916442871,
      "loss": 5.123,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.12827233970165253,
      "rewards/margins": 0.10777638852596283,
      "rewards/rejected": 0.020495977252721786,
      "step": 250
    },
    {
      "epoch": 0.68,
      "grad_norm": 250.0,
      "learning_rate": 1.4445974030621963e-06,
      "logits/chosen": -2.800107002258301,
      "logits/rejected": -2.8200669288635254,
      "logps/chosen": -28.879364013671875,
      "logps/rejected": -34.295921325683594,
      "loss": 3.958,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.19011935591697693,
      "rewards/margins": 0.21448710560798645,
      "rewards/rejected": -0.024367772042751312,
      "step": 260
    },
    {
      "epoch": 0.7,
      "grad_norm": 326.0,
      "learning_rate": 1.243452991757889e-06,
      "logits/chosen": -2.942564010620117,
      "logits/rejected": -2.9483418464660645,
      "logps/chosen": -29.95509910583496,
      "logps/rejected": -30.009634017944336,
      "loss": 3.7773,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.16525179147720337,
      "rewards/margins": 0.19412627816200256,
      "rewards/rejected": -0.028874486684799194,
      "step": 270
    },
    {
      "epoch": 0.73,
      "grad_norm": 214.0,
      "learning_rate": 1.0526606671603523e-06,
      "logits/chosen": -2.957038402557373,
      "logits/rejected": -2.9439315795898438,
      "logps/chosen": -30.10283851623535,
      "logps/rejected": -28.607885360717773,
      "loss": 6.1162,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.12376952171325684,
      "rewards/margins": 0.11907708644866943,
      "rewards/rejected": 0.004692415706813335,
      "step": 280
    },
    {
      "epoch": 0.75,
      "grad_norm": 292.0,
      "learning_rate": 8.737922755071455e-07,
      "logits/chosen": -2.884387969970703,
      "logits/rejected": -2.8665497303009033,
      "logps/chosen": -31.607385635375977,
      "logps/rejected": -30.795541763305664,
      "loss": 3.528,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.2057834416627884,
      "rewards/margins": 0.26023000478744507,
      "rewards/rejected": -0.05444660037755966,
      "step": 290
    },
    {
      "epoch": 0.78,
      "grad_norm": 428.0,
      "learning_rate": 7.08321427484816e-07,
      "logits/chosen": -2.887512445449829,
      "logits/rejected": -2.8846189975738525,
      "logps/chosen": -31.37001609802246,
      "logps/rejected": -27.58599281311035,
      "loss": 4.7461,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.17052757740020752,
      "rewards/margins": 0.16700942814350128,
      "rewards/rejected": 0.003518153680488467,
      "step": 300
    },
    {
      "epoch": 0.78,
      "eval_logits/chosen": -2.8048255443573,
      "eval_logits/rejected": -2.8025119304656982,
      "eval_logps/chosen": -31.280437469482422,
      "eval_logps/rejected": -34.820987701416016,
      "eval_loss": 5.971487522125244,
      "eval_rewards/accuracies": 0.5747508406639099,
      "eval_rewards/chosen": 0.00040244098636321723,
      "eval_rewards/margins": 0.024968715384602547,
      "eval_rewards/rejected": -0.024566275998950005,
      "eval_runtime": 112.898,
      "eval_samples_per_second": 3.038,
      "eval_steps_per_second": 0.381,
      "step": 300
    },
    {
      "epoch": 0.81,
      "grad_norm": 408.0,
      "learning_rate": 5.576113578589035e-07,
      "logits/chosen": -2.767286777496338,
      "logits/rejected": -2.7845776081085205,
      "logps/chosen": -28.39835548400879,
      "logps/rejected": -30.915149688720703,
      "loss": 4.0113,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": 0.1662132441997528,
      "rewards/margins": 0.20284172892570496,
      "rewards/rejected": -0.03662847727537155,
      "step": 310
    },
    {
      "epoch": 0.83,
      "grad_norm": 186.0,
      "learning_rate": 4.229036944380913e-07,
      "logits/chosen": -3.013503074645996,
      "logits/rejected": -2.9991822242736816,
      "logps/chosen": -29.416873931884766,
      "logps/rejected": -28.512454986572266,
      "loss": 3.4113,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": 0.1589430421590805,
      "rewards/margins": 0.19767849147319794,
      "rewards/rejected": -0.03873545676469803,
      "step": 320
    },
    {
      "epoch": 0.86,
      "grad_norm": 159.0,
      "learning_rate": 3.053082288996112e-07,
      "logits/chosen": -2.928607702255249,
      "logits/rejected": -2.911008358001709,
      "logps/chosen": -27.41733741760254,
      "logps/rejected": -30.696624755859375,
      "loss": 3.3907,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": 0.188947394490242,
      "rewards/margins": 0.24994292855262756,
      "rewards/rejected": -0.060995571315288544,
      "step": 330
    },
    {
      "epoch": 0.88,
      "grad_norm": 342.0,
      "learning_rate": 2.0579377374915805e-07,
      "logits/chosen": -3.143974542617798,
      "logits/rejected": -3.1502108573913574,
      "logps/chosen": -30.86354637145996,
      "logps/rejected": -33.04155349731445,
      "loss": 4.0138,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.15513095259666443,
      "rewards/margins": 0.22424118220806122,
      "rewards/rejected": -0.06911022216081619,
      "step": 340
    },
    {
      "epoch": 0.91,
      "grad_norm": 200.0,
      "learning_rate": 1.2518018074041684e-07,
      "logits/chosen": -3.021571159362793,
      "logits/rejected": -3.025432586669922,
      "logps/chosen": -29.817678451538086,
      "logps/rejected": -31.675588607788086,
      "loss": 4.188,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.19784057140350342,
      "rewards/margins": 0.23029597103595734,
      "rewards/rejected": -0.03245542570948601,
      "step": 350
    },
    {
      "epoch": 0.94,
      "grad_norm": 286.0,
      "learning_rate": 6.41315865106129e-08,
      "logits/chosen": -2.86122727394104,
      "logits/rejected": -2.8630313873291016,
      "logps/chosen": -27.54501724243164,
      "logps/rejected": -29.912155151367188,
      "loss": 3.5611,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.2209421843290329,
      "rewards/margins": 0.23718467354774475,
      "rewards/rejected": -0.01624247059226036,
      "step": 360
    },
    {
      "epoch": 0.96,
      "grad_norm": 324.0,
      "learning_rate": 2.3150941078050325e-08,
      "logits/chosen": -2.935516357421875,
      "logits/rejected": -2.9337944984436035,
      "logps/chosen": -29.81747055053711,
      "logps/rejected": -32.104270935058594,
      "loss": 3.9741,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.1938357651233673,
      "rewards/margins": 0.2290809601545334,
      "rewards/rejected": -0.03524521738290787,
      "step": 370
    },
    {
      "epoch": 0.99,
      "grad_norm": 222.0,
      "learning_rate": 2.575864278703266e-09,
      "logits/chosen": -2.8935000896453857,
      "logits/rejected": -2.8769774436950684,
      "logps/chosen": -28.101531982421875,
      "logps/rejected": -28.089923858642578,
      "loss": 3.3899,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": 0.1687012016773224,
      "rewards/margins": 0.20876184105873108,
      "rewards/rejected": -0.0400606133043766,
      "step": 380
    },
    {
      "epoch": 1.0,
      "step": 385,
      "total_flos": 0.0,
      "train_loss": 4.931960544338474,
      "train_runtime": 2724.0851,
      "train_samples_per_second": 1.13,
      "train_steps_per_second": 0.141
    }
  ],
  "logging_steps": 10,
  "max_steps": 385,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}