{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 385,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 23.75,
      "learning_rate": 1.282051282051282e-07,
      "logits/chosen": -2.7358343601226807,
      "logits/rejected": -2.7480404376983643,
      "logps/chosen": -27.35565757751465,
      "logps/rejected": -21.06114387512207,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.03,
      "grad_norm": 41.0,
      "learning_rate": 1.282051282051282e-06,
      "logits/chosen": -3.0097458362579346,
      "logits/rejected": -2.9984540939331055,
      "logps/chosen": -33.19679260253906,
      "logps/rejected": -31.95996856689453,
      "loss": 0.7015,
      "rewards/accuracies": 0.4305555522441864,
      "rewards/chosen": -0.009508713148534298,
      "rewards/margins": -0.011873150244355202,
      "rewards/rejected": 0.002364437561482191,
      "step": 10
    },
    {
      "epoch": 0.05,
      "grad_norm": 28.875,
      "learning_rate": 2.564102564102564e-06,
      "logits/chosen": -2.899174213409424,
      "logits/rejected": -2.894191026687622,
      "logps/chosen": -32.46300506591797,
      "logps/rejected": -28.93667984008789,
      "loss": 0.7056,
      "rewards/accuracies": 0.4000000059604645,
      "rewards/chosen": 0.007253733463585377,
      "rewards/margins": -0.019858861342072487,
      "rewards/rejected": 0.02711259201169014,
      "step": 20
    },
    {
      "epoch": 0.08,
      "grad_norm": 25.625,
      "learning_rate": 3.846153846153847e-06,
      "logits/chosen": -3.0968523025512695,
      "logits/rejected": -3.1085915565490723,
      "logps/chosen": -32.78853988647461,
      "logps/rejected": -30.1524715423584,
      "loss": 0.6746,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.07208743691444397,
      "rewards/margins": 0.04593749716877937,
      "rewards/rejected": 0.026149939745664597,
      "step": 30
    },
    {
      "epoch": 0.1,
      "grad_norm": 22.5,
      "learning_rate": 4.999896948438434e-06,
      "logits/chosen": -2.8626997470855713,
      "logits/rejected": -2.853968858718872,
      "logps/chosen": -31.534814834594727,
      "logps/rejected": -32.39773941040039,
      "loss": 0.6281,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.17730504274368286,
      "rewards/margins": 0.20147554576396942,
      "rewards/rejected": -0.024170514196157455,
      "step": 40
    },
    {
      "epoch": 0.13,
      "grad_norm": 15.6875,
      "learning_rate": 4.987541037542187e-06,
      "logits/chosen": -2.8814034461975098,
      "logits/rejected": -2.8791728019714355,
      "logps/chosen": -29.38239097595215,
      "logps/rejected": -30.105327606201172,
      "loss": 0.6362,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.21466302871704102,
      "rewards/margins": 0.2244940996170044,
      "rewards/rejected": -0.00983109325170517,
      "step": 50
    },
    {
      "epoch": 0.16,
      "grad_norm": 23.625,
      "learning_rate": 4.954691471941119e-06,
      "logits/chosen": -2.910447835922241,
      "logits/rejected": -2.9117722511291504,
      "logps/chosen": -29.861053466796875,
      "logps/rejected": -28.043041229248047,
      "loss": 0.6186,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.16123077273368835,
      "rewards/margins": 0.21364574134349823,
      "rewards/rejected": -0.05241497606039047,
      "step": 60
    },
    {
      "epoch": 0.18,
      "grad_norm": 45.0,
      "learning_rate": 4.901618883413549e-06,
      "logits/chosen": -2.9926552772521973,
      "logits/rejected": -2.99902606010437,
      "logps/chosen": -29.191287994384766,
      "logps/rejected": -30.99324607849121,
      "loss": 0.6821,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.05380876734852791,
      "rewards/margins": 0.11467660963535309,
      "rewards/rejected": -0.06086784601211548,
      "step": 70
    },
    {
      "epoch": 0.21,
      "grad_norm": 33.5,
      "learning_rate": 4.828760511501322e-06,
      "logits/chosen": -2.809256076812744,
      "logits/rejected": -2.823986530303955,
      "logps/chosen": -29.3474063873291,
      "logps/rejected": -29.863327026367188,
      "loss": 0.6239,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.11599240452051163,
      "rewards/margins": 0.22941946983337402,
      "rewards/rejected": -0.1134270578622818,
      "step": 80
    },
    {
      "epoch": 0.23,
      "grad_norm": 21.375,
      "learning_rate": 4.7367166013034295e-06,
      "logits/chosen": -2.898897171020508,
      "logits/rejected": -2.881232261657715,
      "logps/chosen": -32.679298400878906,
      "logps/rejected": -30.30511474609375,
      "loss": 0.6893,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.09428185969591141,
      "rewards/margins": 0.26436275243759155,
      "rewards/rejected": -0.17008085548877716,
      "step": 90
    },
    {
      "epoch": 0.26,
      "grad_norm": 20.25,
      "learning_rate": 4.626245458345211e-06,
      "logits/chosen": -3.005113124847412,
      "logits/rejected": -3.004727840423584,
      "logps/chosen": -31.76202392578125,
      "logps/rejected": -30.88519859313965,
      "loss": 0.6371,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.17691317200660706,
      "rewards/margins": 0.21832820773124695,
      "rewards/rejected": -0.04141503572463989,
      "step": 100
    },
    {
      "epoch": 0.26,
      "eval_logits/chosen": -2.812013864517212,
      "eval_logits/rejected": -2.809272050857544,
      "eval_logps/chosen": -31.26671600341797,
      "eval_logps/rejected": -34.82568359375,
      "eval_loss": 0.6738281846046448,
      "eval_rewards/accuracies": 0.6013289093971252,
      "eval_rewards/chosen": 0.009443400427699089,
      "eval_rewards/margins": 0.08595836907625198,
      "eval_rewards/rejected": -0.07651496678590775,
      "eval_runtime": 113.3302,
      "eval_samples_per_second": 3.027,
      "eval_steps_per_second": 0.379,
      "step": 100
    },
    {
      "epoch": 0.29,
      "grad_norm": 31.625,
      "learning_rate": 4.498257201263691e-06,
      "logits/chosen": -2.960212469100952,
      "logits/rejected": -2.936410665512085,
      "logps/chosen": -31.749141693115234,
      "logps/rejected": -31.339487075805664,
      "loss": 0.5919,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.28059250116348267,
      "rewards/margins": 0.38897648453712463,
      "rewards/rejected": -0.10838399827480316,
      "step": 110
    },
    {
      "epoch": 0.31,
      "grad_norm": 22.5,
      "learning_rate": 4.353806263777678e-06,
      "logits/chosen": -3.0437393188476562,
      "logits/rejected": -3.072226047515869,
      "logps/chosen": -28.659826278686523,
      "logps/rejected": -34.33867263793945,
      "loss": 0.5712,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.30244481563568115,
      "rewards/margins": 0.41711607575416565,
      "rewards/rejected": -0.1146712526679039,
      "step": 120
    },
    {
      "epoch": 0.34,
      "grad_norm": 18.25,
      "learning_rate": 4.1940827077152755e-06,
      "logits/chosen": -2.74613881111145,
      "logits/rejected": -2.7419228553771973,
      "logps/chosen": -28.56654930114746,
      "logps/rejected": -30.318378448486328,
      "loss": 0.602,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.244877889752388,
      "rewards/margins": 0.3556198477745056,
      "rewards/rejected": -0.11074197292327881,
      "step": 130
    },
    {
      "epoch": 0.36,
      "grad_norm": 16.0,
      "learning_rate": 4.0204024186666215e-06,
      "logits/chosen": -3.0208590030670166,
      "logits/rejected": -3.0186707973480225,
      "logps/chosen": -27.12565040588379,
      "logps/rejected": -31.909475326538086,
      "loss": 0.614,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.2163436859846115,
      "rewards/margins": 0.3679680824279785,
      "rewards/rejected": -0.1516243815422058,
      "step": 140
    },
    {
      "epoch": 0.39,
      "grad_norm": 18.125,
      "learning_rate": 3.834196265035119e-06,
      "logits/chosen": -2.8165249824523926,
      "logits/rejected": -2.8110954761505127,
      "logps/chosen": -27.234394073486328,
      "logps/rejected": -31.60061264038086,
      "loss": 0.5206,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.35466745495796204,
      "rewards/margins": 0.5808485746383667,
      "rewards/rejected": -0.22618110477924347,
      "step": 150
    },
    {
      "epoch": 0.42,
      "grad_norm": 25.375,
      "learning_rate": 3.636998309800573e-06,
      "logits/chosen": -3.1346049308776855,
      "logits/rejected": -3.116758108139038,
      "logps/chosen": -31.697032928466797,
      "logps/rejected": -29.48983383178711,
      "loss": 0.4703,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.4768128991127014,
      "rewards/margins": 0.7828513979911804,
      "rewards/rejected": -0.306038498878479,
      "step": 160
    },
    {
      "epoch": 0.44,
      "grad_norm": 20.25,
      "learning_rate": 3.4304331721118078e-06,
      "logits/chosen": -2.946504831314087,
      "logits/rejected": -2.9540839195251465,
      "logps/chosen": -29.325267791748047,
      "logps/rejected": -31.663928985595703,
      "loss": 0.506,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.3416058421134949,
      "rewards/margins": 0.6802752017974854,
      "rewards/rejected": -0.3386693596839905,
      "step": 170
    },
    {
      "epoch": 0.47,
      "grad_norm": 22.0,
      "learning_rate": 3.2162026428305436e-06,
      "logits/chosen": -2.7982583045959473,
      "logits/rejected": -2.796060085296631,
      "logps/chosen": -29.182764053344727,
      "logps/rejected": -30.200658798217773,
      "loss": 0.534,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.29225003719329834,
      "rewards/margins": 0.5517427325248718,
      "rewards/rejected": -0.2594926953315735,
      "step": 180
    },
    {
      "epoch": 0.49,
      "grad_norm": 14.1875,
      "learning_rate": 2.996071664294641e-06,
      "logits/chosen": -2.9119534492492676,
      "logits/rejected": -2.908388137817383,
      "logps/chosen": -29.725021362304688,
      "logps/rejected": -28.668323516845703,
      "loss": 0.6396,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.27701830863952637,
      "rewards/margins": 0.4634927213191986,
      "rewards/rejected": -0.18647442758083344,
      "step": 190
    },
    {
      "epoch": 0.52,
      "grad_norm": 11.5625,
      "learning_rate": 2.7718537898066833e-06,
      "logits/chosen": -2.98229718208313,
      "logits/rejected": -2.9696133136749268,
      "logps/chosen": -32.741214752197266,
      "logps/rejected": -30.54966163635254,
      "loss": 0.5701,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.5746826529502869,
      "rewards/margins": 0.6440282464027405,
      "rewards/rejected": -0.06934557855129242,
      "step": 200
    },
    {
      "epoch": 0.52,
      "eval_logits/chosen": -2.822352170944214,
      "eval_logits/rejected": -2.819713592529297,
      "eval_logps/chosen": -31.407958984375,
      "eval_logps/rejected": -34.99641036987305,
      "eval_loss": 0.6704452037811279,
      "eval_rewards/accuracies": 0.5776578187942505,
      "eval_rewards/chosen": -0.07530408352613449,
      "eval_rewards/margins": 0.10365016013383865,
      "eval_rewards/rejected": -0.17895422875881195,
      "eval_runtime": 113.0717,
      "eval_samples_per_second": 3.033,
      "eval_steps_per_second": 0.38,
      "step": 200
    },
    {
      "epoch": 0.55,
      "grad_norm": 20.5,
      "learning_rate": 2.5453962426402006e-06,
      "logits/chosen": -2.919523000717163,
      "logits/rejected": -2.9201595783233643,
      "logps/chosen": -32.2874870300293,
      "logps/rejected": -34.245338439941406,
      "loss": 0.5124,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 0.44755497574806213,
      "rewards/margins": 0.6778786778450012,
      "rewards/rejected": -0.23032374680042267,
      "step": 210
    },
    {
      "epoch": 0.57,
      "grad_norm": 16.125,
      "learning_rate": 2.3185646976551794e-06,
      "logits/chosen": -2.900707960128784,
      "logits/rejected": -2.915796995162964,
      "logps/chosen": -29.271808624267578,
      "logps/rejected": -28.87980079650879,
      "loss": 0.5181,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 0.5437403321266174,
      "rewards/margins": 0.7546579241752625,
      "rewards/rejected": -0.21091759204864502,
      "step": 220
    },
    {
      "epoch": 0.6,
      "grad_norm": 18.875,
      "learning_rate": 2.0932279108998323e-06,
      "logits/chosen": -2.9470958709716797,
      "logits/rejected": -2.951080560684204,
      "logps/chosen": -30.639541625976562,
      "logps/rejected": -31.99834632873535,
      "loss": 0.596,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.30739614367485046,
      "rewards/margins": 0.4515131115913391,
      "rewards/rejected": -0.14411698281764984,
      "step": 230
    },
    {
      "epoch": 0.62,
      "grad_norm": 18.0,
      "learning_rate": 1.8712423238279358e-06,
      "logits/chosen": -3.0017409324645996,
      "logits/rejected": -3.0088515281677246,
      "logps/chosen": -29.93971824645996,
      "logps/rejected": -30.499004364013672,
      "loss": 0.4599,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.5791231393814087,
      "rewards/margins": 0.7373798489570618,
      "rewards/rejected": -0.1582566350698471,
      "step": 240
    },
    {
      "epoch": 0.65,
      "grad_norm": 24.0,
      "learning_rate": 1.6544367689701824e-06,
      "logits/chosen": -2.832463502883911,
      "logits/rejected": -2.8226616382598877,
      "logps/chosen": -26.2772159576416,
      "logps/rejected": -29.6693115234375,
      "loss": 0.5588,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.40620192885398865,
      "rewards/margins": 0.4888034462928772,
      "rewards/rejected": -0.08260142803192139,
      "step": 250
    },
    {
      "epoch": 0.68,
      "grad_norm": 13.375,
      "learning_rate": 1.4445974030621963e-06,
      "logits/chosen": -2.8149280548095703,
      "logits/rejected": -2.8346195220947266,
      "logps/chosen": -28.79793357849121,
      "logps/rejected": -34.521610260009766,
      "loss": 0.489,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.6192152500152588,
      "rewards/margins": 0.8277270197868347,
      "rewards/rejected": -0.20851179957389832,
      "step": 260
    },
    {
      "epoch": 0.7,
      "grad_norm": 20.5,
      "learning_rate": 1.243452991757889e-06,
      "logits/chosen": -2.9565839767456055,
      "logits/rejected": -2.962963819503784,
      "logps/chosen": -30.051294326782227,
      "logps/rejected": -30.362972259521484,
      "loss": 0.4648,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.4380358159542084,
      "rewards/margins": 0.7366625070571899,
      "rewards/rejected": -0.2986266314983368,
      "step": 270
    },
    {
      "epoch": 0.73,
      "grad_norm": 13.25,
      "learning_rate": 1.0526606671603523e-06,
      "logits/chosen": -2.9723668098449707,
      "logits/rejected": -2.958524227142334,
      "logps/chosen": -30.074031829833984,
      "logps/rejected": -28.827747344970703,
      "loss": 0.6158,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.38859033584594727,
      "rewards/margins": 0.5064281225204468,
      "rewards/rejected": -0.11783774942159653,
      "step": 280
    },
    {
      "epoch": 0.75,
      "grad_norm": 17.875,
      "learning_rate": 8.737922755071455e-07,
      "logits/chosen": -2.899937391281128,
      "logits/rejected": -2.882223129272461,
      "logps/chosen": -31.454309463500977,
      "logps/rejected": -31.11252212524414,
      "loss": 0.4135,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": 0.7091949582099915,
      "rewards/margins": 1.062723994255066,
      "rewards/rejected": -0.35352909564971924,
      "step": 290
    },
    {
      "epoch": 0.78,
      "grad_norm": 23.25,
      "learning_rate": 7.08321427484816e-07,
      "logits/chosen": -2.9015772342681885,
      "logits/rejected": -2.8981502056121826,
      "logps/chosen": -31.222009658813477,
      "logps/rejected": -27.84316062927246,
      "loss": 0.5462,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.6003884077072144,
      "rewards/margins": 0.744132936000824,
      "rewards/rejected": -0.14374449849128723,
      "step": 300
    },
    {
      "epoch": 0.78,
      "eval_logits/chosen": -2.819934844970703,
      "eval_logits/rejected": -2.817535877227783,
      "eval_logps/chosen": -31.41133689880371,
      "eval_logps/rejected": -34.98634338378906,
      "eval_loss": 0.6756160259246826,
      "eval_rewards/accuracies": 0.5568937063217163,
      "eval_rewards/chosen": -0.0773315504193306,
      "eval_rewards/margins": 0.09558116644620895,
      "eval_rewards/rejected": -0.17291273176670074,
      "eval_runtime": 113.0471,
      "eval_samples_per_second": 3.034,
      "eval_steps_per_second": 0.38,
      "step": 300
    },
    {
      "epoch": 0.81,
      "grad_norm": 20.0,
      "learning_rate": 5.576113578589035e-07,
      "logits/chosen": -2.782235622406006,
      "logits/rejected": -2.799635410308838,
      "logps/chosen": -28.45285987854004,
      "logps/rejected": -31.2011775970459,
      "loss": 0.4761,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": 0.46593648195266724,
      "rewards/margins": 0.7474375367164612,
      "rewards/rejected": -0.28150105476379395,
      "step": 310
    },
    {
      "epoch": 0.83,
      "grad_norm": 13.9375,
      "learning_rate": 4.229036944380913e-07,
      "logits/chosen": -3.0274462699890137,
      "logits/rejected": -3.0134167671203613,
      "logps/chosen": -29.269433975219727,
      "logps/rejected": -28.76506996154785,
      "loss": 0.4419,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.5652929544448853,
      "rewards/margins": 0.8330700993537903,
      "rewards/rejected": -0.2677772045135498,
      "step": 320
    },
    {
      "epoch": 0.86,
      "grad_norm": 9.375,
      "learning_rate": 3.053082288996112e-07,
      "logits/chosen": -2.941959857940674,
      "logits/rejected": -2.924685001373291,
      "logps/chosen": -27.270111083984375,
      "logps/rejected": -31.031490325927734,
      "loss": 0.4054,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.655176043510437,
      "rewards/margins": 1.039083480834961,
      "rewards/rejected": -0.3839074671268463,
      "step": 330
    },
    {
      "epoch": 0.88,
      "grad_norm": 20.625,
      "learning_rate": 2.0579377374915805e-07,
      "logits/chosen": -3.1593995094299316,
      "logits/rejected": -3.166159152984619,
      "logps/chosen": -30.800434112548828,
      "logps/rejected": -33.33651351928711,
      "loss": 0.451,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.5032647848129272,
      "rewards/margins": 0.8875719904899597,
      "rewards/rejected": -0.38430729508399963,
      "step": 340
    },
    {
      "epoch": 0.91,
      "grad_norm": 13.75,
      "learning_rate": 1.2518018074041684e-07,
      "logits/chosen": -3.035806655883789,
      "logits/rejected": -3.039640426635742,
      "logps/chosen": -29.766101837158203,
      "logps/rejected": -31.866451263427734,
      "loss": 0.5007,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.6244680881500244,
      "rewards/margins": 0.8363536596298218,
      "rewards/rejected": -0.21188569068908691,
      "step": 350
    },
    {
      "epoch": 0.94,
      "grad_norm": 16.625,
      "learning_rate": 6.41315865106129e-08,
      "logits/chosen": -2.875148296356201,
      "logits/rejected": -2.8767988681793213,
      "logps/chosen": -27.400989532470703,
      "logps/rejected": -30.16061019897461,
      "loss": 0.4355,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.7492430806159973,
      "rewards/margins": 0.9470453262329102,
      "rewards/rejected": -0.1978023201227188,
      "step": 360
    },
    {
      "epoch": 0.96,
      "grad_norm": 27.375,
      "learning_rate": 2.3150941078050325e-08,
      "logits/chosen": -2.9514451026916504,
      "logits/rejected": -2.949070930480957,
      "logps/chosen": -29.79317855834961,
      "logps/rejected": -32.29186248779297,
      "loss": 0.4813,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.5960837602615356,
      "rewards/margins": 0.8143755197525024,
      "rewards/rejected": -0.2182917296886444,
      "step": 370
    },
    {
      "epoch": 0.99,
      "grad_norm": 14.875,
      "learning_rate": 2.575864278703266e-09,
      "logits/chosen": -2.907784938812256,
      "logits/rejected": -2.891355037689209,
      "logps/chosen": -28.129526138305664,
      "logps/rejected": -28.36810874938965,
      "loss": 0.4625,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": 0.4893053472042084,
      "rewards/margins": 0.7763983011245728,
      "rewards/rejected": -0.2870928943157196,
      "step": 380
    },
    {
      "epoch": 1.0,
      "step": 385,
      "total_flos": 0.0,
      "train_loss": 0.5523774078914098,
      "train_runtime": 2722.392,
      "train_samples_per_second": 1.131,
      "train_steps_per_second": 0.141
    }
  ],
  "logging_steps": 10,
  "max_steps": 385,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}