{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 385,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1.282051282051282e-07,
      "logits/chosen": -1.7278180122375488,
      "logits/rejected": -1.7377450466156006,
      "logps/chosen": -29.553977966308594,
      "logps/rejected": -42.813133239746094,
      "loss": 0.5,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.03,
      "learning_rate": 1.282051282051282e-06,
      "logits/chosen": -1.8665987253189087,
      "logits/rejected": -1.8709272146224976,
      "logps/chosen": -36.985595703125,
      "logps/rejected": -33.68160629272461,
      "loss": 0.4886,
      "rewards/accuracies": 0.5694444179534912,
      "rewards/chosen": 0.018904482945799828,
      "rewards/margins": 0.06528304517269135,
      "rewards/rejected": -0.04637856408953667,
      "step": 10
    },
    {
      "epoch": 0.05,
      "learning_rate": 2.564102564102564e-06,
      "logits/chosen": -1.997780203819275,
      "logits/rejected": -2.000434398651123,
      "logps/chosen": -29.643661499023438,
      "logps/rejected": -29.043325424194336,
      "loss": 0.5031,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.001316396868787706,
      "rewards/margins": -0.019422104582190514,
      "rewards/rejected": 0.018105709925293922,
      "step": 20
    },
    {
      "epoch": 0.08,
      "learning_rate": 3.846153846153847e-06,
      "logits/chosen": -1.9207446575164795,
      "logits/rejected": -1.918060064315796,
      "logps/chosen": -31.41064453125,
      "logps/rejected": -33.227088928222656,
      "loss": 0.4976,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.004905471112579107,
      "rewards/margins": 0.012669263407588005,
      "rewards/rejected": -0.007763790898025036,
      "step": 30
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.999896948438434e-06,
      "logits/chosen": -2.017446517944336,
      "logits/rejected": -2.0087125301361084,
      "logps/chosen": -32.553016662597656,
      "logps/rejected": -32.50551986694336,
      "loss": 0.4977,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.021415216848254204,
      "rewards/margins": 0.014982220716774464,
      "rewards/rejected": 0.006432999856770039,
      "step": 40
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.987541037542187e-06,
      "logits/chosen": -1.8627235889434814,
      "logits/rejected": -1.851959228515625,
      "logps/chosen": -33.5064697265625,
      "logps/rejected": -35.43267059326172,
      "loss": 0.4951,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.0459100641310215,
      "rewards/margins": 0.02820250764489174,
      "rewards/rejected": 0.017707552760839462,
      "step": 50
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.954691471941119e-06,
      "logits/chosen": -1.9425691366195679,
      "logits/rejected": -1.94449782371521,
      "logps/chosen": -32.46650695800781,
      "logps/rejected": -33.15652847290039,
      "loss": 0.4765,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.12030963599681854,
      "rewards/margins": 0.10694190114736557,
      "rewards/rejected": 0.013367725536227226,
      "step": 60
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.901618883413549e-06,
      "logits/chosen": -2.073408842086792,
      "logits/rejected": -2.078367233276367,
      "logps/chosen": -33.917694091796875,
      "logps/rejected": -36.547218322753906,
      "loss": 0.4901,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.06371410191059113,
      "rewards/margins": 0.04422418028116226,
      "rewards/rejected": 0.019489921629428864,
      "step": 70
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.828760511501322e-06,
      "logits/chosen": -1.9349607229232788,
      "logits/rejected": -1.9380786418914795,
      "logps/chosen": -34.223785400390625,
      "logps/rejected": -34.53069305419922,
      "loss": 0.4713,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.1836203634738922,
      "rewards/margins": 0.12807974219322205,
      "rewards/rejected": 0.05554063245654106,
      "step": 80
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.7367166013034295e-06,
      "logits/chosen": -1.9439691305160522,
      "logits/rejected": -1.9484784603118896,
      "logps/chosen": -32.27050018310547,
      "logps/rejected": -32.26476287841797,
      "loss": 0.4779,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.1772255003452301,
      "rewards/margins": 0.09250012785196304,
      "rewards/rejected": 0.08472537249326706,
      "step": 90
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.626245458345211e-06,
      "logits/chosen": -2.0411603450775146,
      "logits/rejected": -2.039163112640381,
      "logps/chosen": -31.98573875427246,
      "logps/rejected": -31.193227767944336,
      "loss": 0.4639,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.22958631813526154,
      "rewards/margins": 0.16390272974967957,
      "rewards/rejected": 0.06568360328674316,
      "step": 100
    },
    {
      "epoch": 0.26,
      "eval_logits/chosen": -2.235391855239868,
      "eval_logits/rejected": -2.2305493354797363,
      "eval_logps/chosen": -33.869815826416016,
      "eval_logps/rejected": -37.382774353027344,
      "eval_loss": 0.4939241409301758,
      "eval_rewards/accuracies": 0.5627076625823975,
      "eval_rewards/chosen": 0.1482628434896469,
      "eval_rewards/margins": 0.02780282311141491,
      "eval_rewards/rejected": 0.12046003341674805,
      "eval_runtime": 145.9747,
      "eval_samples_per_second": 2.35,
      "eval_steps_per_second": 0.295,
      "step": 100
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.498257201263691e-06,
      "logits/chosen": -1.997287392616272,
      "logits/rejected": -1.9949369430541992,
      "logps/chosen": -32.96843719482422,
      "logps/rejected": -33.866310119628906,
      "loss": 0.4739,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.24784216284751892,
      "rewards/margins": 0.10108550637960434,
      "rewards/rejected": 0.14675670862197876,
      "step": 110
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.353806263777678e-06,
      "logits/chosen": -2.008091688156128,
      "logits/rejected": -1.9997599124908447,
      "logps/chosen": -32.20352554321289,
      "logps/rejected": -31.995223999023438,
      "loss": 0.485,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.2172430008649826,
      "rewards/margins": 0.06759083271026611,
      "rewards/rejected": 0.1496521681547165,
      "step": 120
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.1940827077152755e-06,
      "logits/chosen": -2.035614490509033,
      "logits/rejected": -2.027682304382324,
      "logps/chosen": -30.1588077545166,
      "logps/rejected": -31.886260986328125,
      "loss": 0.4717,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.2852162718772888,
      "rewards/margins": 0.1351451873779297,
      "rewards/rejected": 0.15007111430168152,
      "step": 130
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.0204024186666215e-06,
      "logits/chosen": -1.965490698814392,
      "logits/rejected": -1.9756921529769897,
      "logps/chosen": -31.065088272094727,
      "logps/rejected": -32.42934036254883,
      "loss": 0.4482,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.3191176950931549,
      "rewards/margins": 0.22413134574890137,
      "rewards/rejected": 0.09498633444309235,
      "step": 140
    },
    {
      "epoch": 0.39,
      "learning_rate": 3.834196265035119e-06,
      "logits/chosen": -1.8782259225845337,
      "logits/rejected": -1.8793823719024658,
      "logps/chosen": -33.68832778930664,
      "logps/rejected": -34.58278274536133,
      "loss": 0.4367,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.45392999053001404,
      "rewards/margins": 0.2969031035900116,
      "rewards/rejected": 0.15702682733535767,
      "step": 150
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.636998309800573e-06,
      "logits/chosen": -1.9295704364776611,
      "logits/rejected": -1.9261808395385742,
      "logps/chosen": -35.74212646484375,
      "logps/rejected": -32.51028060913086,
      "loss": 0.4538,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.38659390807151794,
      "rewards/margins": 0.1953679323196411,
      "rewards/rejected": 0.1912260353565216,
      "step": 160
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.4304331721118078e-06,
      "logits/chosen": -2.031176805496216,
      "logits/rejected": -2.023855686187744,
      "logps/chosen": -33.24225616455078,
      "logps/rejected": -31.193195343017578,
      "loss": 0.42,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.5000473260879517,
      "rewards/margins": 0.3572581112384796,
      "rewards/rejected": 0.1427893042564392,
      "step": 170
    },
    {
      "epoch": 0.47,
      "learning_rate": 3.2162026428305436e-06,
      "logits/chosen": -2.038222074508667,
      "logits/rejected": -2.0434067249298096,
      "logps/chosen": -31.95560646057129,
      "logps/rejected": -32.17836380004883,
      "loss": 0.4404,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.5302629470825195,
      "rewards/margins": 0.2541634440422058,
      "rewards/rejected": 0.2760995924472809,
      "step": 180
    },
    {
      "epoch": 0.49,
      "learning_rate": 2.996071664294641e-06,
      "logits/chosen": -2.0387401580810547,
      "logits/rejected": -2.036006450653076,
      "logps/chosen": -31.0674991607666,
      "logps/rejected": -31.083877563476562,
      "loss": 0.4607,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.3793107569217682,
      "rewards/margins": 0.16841106116771698,
      "rewards/rejected": 0.21089968085289001,
      "step": 190
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.7718537898066833e-06,
      "logits/chosen": -1.9085681438446045,
      "logits/rejected": -1.9132543802261353,
      "logps/chosen": -31.083459854125977,
      "logps/rejected": -32.602638244628906,
      "loss": 0.4308,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.4705420434474945,
      "rewards/margins": 0.301074743270874,
      "rewards/rejected": 0.16946731507778168,
      "step": 200
    },
    {
      "epoch": 0.52,
      "eval_logits/chosen": -2.2338287830352783,
      "eval_logits/rejected": -2.229020357131958,
      "eval_logps/chosen": -33.7449951171875,
      "eval_logps/rejected": -37.27743911743164,
      "eval_loss": 0.48942965269088745,
      "eval_rewards/accuracies": 0.5544019937515259,
      "eval_rewards/chosen": 0.26059985160827637,
      "eval_rewards/margins": 0.0453372597694397,
      "eval_rewards/rejected": 0.21526260673999786,
      "eval_runtime": 145.8953,
      "eval_samples_per_second": 2.351,
      "eval_steps_per_second": 0.295,
      "step": 200
    },
    {
      "epoch": 0.55,
      "learning_rate": 2.5453962426402006e-06,
      "logits/chosen": -2.0216596126556396,
      "logits/rejected": -2.032275915145874,
      "logps/chosen": -31.500268936157227,
      "logps/rejected": -33.663352966308594,
      "loss": 0.4458,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.42393389344215393,
      "rewards/margins": 0.2445230931043625,
      "rewards/rejected": 0.17941072583198547,
      "step": 210
    },
    {
      "epoch": 0.57,
      "learning_rate": 2.3185646976551794e-06,
      "logits/chosen": -1.9136396646499634,
      "logits/rejected": -1.928344964981079,
      "logps/chosen": -29.588964462280273,
      "logps/rejected": -31.396224975585938,
      "loss": 0.4269,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.4703185558319092,
      "rewards/margins": 0.3245617151260376,
      "rewards/rejected": 0.1457568258047104,
      "step": 220
    },
    {
      "epoch": 0.6,
      "learning_rate": 2.0932279108998323e-06,
      "logits/chosen": -1.970298171043396,
      "logits/rejected": -1.974283218383789,
      "logps/chosen": -32.81959915161133,
      "logps/rejected": -31.408565521240234,
      "loss": 0.4109,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.5380831956863403,
      "rewards/margins": 0.4114208221435547,
      "rewards/rejected": 0.12666237354278564,
      "step": 230
    },
    {
      "epoch": 0.62,
      "learning_rate": 1.8712423238279358e-06,
      "logits/chosen": -1.9695065021514893,
      "logits/rejected": -1.9477574825286865,
      "logps/chosen": -33.58247756958008,
      "logps/rejected": -34.828121185302734,
      "loss": 0.4129,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.5141419172286987,
      "rewards/margins": 0.403735876083374,
      "rewards/rejected": 0.11040612310171127,
      "step": 240
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.6544367689701824e-06,
      "logits/chosen": -2.0098202228546143,
      "logits/rejected": -2.0065340995788574,
      "logps/chosen": -32.43529510498047,
      "logps/rejected": -35.97461700439453,
      "loss": 0.4514,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.44079580903053284,
      "rewards/margins": 0.2155180424451828,
      "rewards/rejected": 0.22527781128883362,
      "step": 250
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.4445974030621963e-06,
      "logits/chosen": -1.8770506381988525,
      "logits/rejected": -1.8746120929718018,
      "logps/chosen": -33.7199821472168,
      "logps/rejected": -35.28092575073242,
      "loss": 0.4498,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.42157459259033203,
      "rewards/margins": 0.2202514111995697,
      "rewards/rejected": 0.2013232260942459,
      "step": 260
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.243452991757889e-06,
      "logits/chosen": -1.8618510961532593,
      "logits/rejected": -1.8593294620513916,
      "logps/chosen": -33.92017364501953,
      "logps/rejected": -31.6002197265625,
      "loss": 0.4397,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.42968273162841797,
      "rewards/margins": 0.27568089962005615,
      "rewards/rejected": 0.15400180220603943,
      "step": 270
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.0526606671603523e-06,
      "logits/chosen": -1.9657011032104492,
      "logits/rejected": -1.9552650451660156,
      "logps/chosen": -34.72232437133789,
      "logps/rejected": -31.632369995117188,
      "loss": 0.4114,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.5708868503570557,
      "rewards/margins": 0.3831265866756439,
      "rewards/rejected": 0.18776027858257294,
      "step": 280
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.737922755071455e-07,
      "logits/chosen": -2.0614376068115234,
      "logits/rejected": -2.046600341796875,
      "logps/chosen": -30.400625228881836,
      "logps/rejected": -32.34136199951172,
      "loss": 0.456,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.4711507260799408,
      "rewards/margins": 0.19172403216362,
      "rewards/rejected": 0.2794266939163208,
      "step": 290
    },
    {
      "epoch": 0.78,
      "learning_rate": 7.08321427484816e-07,
      "logits/chosen": -1.9332094192504883,
      "logits/rejected": -1.9307467937469482,
      "logps/chosen": -32.10976028442383,
      "logps/rejected": -30.661523818969727,
      "loss": 0.374,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.7356175184249878,
      "rewards/margins": 0.5958597660064697,
      "rewards/rejected": 0.13975778222084045,
      "step": 300
    },
    {
      "epoch": 0.78,
      "eval_logits/chosen": -2.2307660579681396,
      "eval_logits/rejected": -2.22594952583313,
      "eval_logps/chosen": -33.74896240234375,
      "eval_logps/rejected": -37.275413513183594,
      "eval_loss": 0.49038100242614746,
      "eval_rewards/accuracies": 0.5220099687576294,
      "eval_rewards/chosen": 0.2570302486419678,
      "eval_rewards/margins": 0.039943769574165344,
      "eval_rewards/rejected": 0.21708647906780243,
      "eval_runtime": 145.8077,
      "eval_samples_per_second": 2.352,
      "eval_steps_per_second": 0.295,
      "step": 300
    },
    {
      "epoch": 0.81,
      "learning_rate": 5.576113578589035e-07,
      "logits/chosen": -1.9164836406707764,
      "logits/rejected": -1.913336992263794,
      "logps/chosen": -31.014041900634766,
      "logps/rejected": -33.548377990722656,
      "loss": 0.4218,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.5303782224655151,
      "rewards/margins": 0.3559793531894684,
      "rewards/rejected": 0.1743989735841751,
      "step": 310
    },
    {
      "epoch": 0.83,
      "learning_rate": 4.229036944380913e-07,
      "logits/chosen": -1.967760682106018,
      "logits/rejected": -1.955615758895874,
      "logps/chosen": -34.05602264404297,
      "logps/rejected": -33.42683410644531,
      "loss": 0.4147,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.48604661226272583,
      "rewards/margins": 0.385240375995636,
      "rewards/rejected": 0.10080619156360626,
      "step": 320
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.053082288996112e-07,
      "logits/chosen": -2.003138780593872,
      "logits/rejected": -2.001786470413208,
      "logps/chosen": -32.86919403076172,
      "logps/rejected": -32.247493743896484,
      "loss": 0.4259,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.5549365878105164,
      "rewards/margins": 0.3460560441017151,
      "rewards/rejected": 0.20888061821460724,
      "step": 330
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.0579377374915805e-07,
      "logits/chosen": -2.0895984172821045,
      "logits/rejected": -2.073963165283203,
      "logps/chosen": -33.46659469604492,
      "logps/rejected": -32.82307815551758,
      "loss": 0.4171,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.6415189504623413,
      "rewards/margins": 0.37605759501457214,
      "rewards/rejected": 0.26546135544776917,
      "step": 340
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.2518018074041684e-07,
      "logits/chosen": -1.9623206853866577,
      "logits/rejected": -1.9614824056625366,
      "logps/chosen": -32.549312591552734,
      "logps/rejected": -32.24496078491211,
      "loss": 0.4,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.6595046520233154,
      "rewards/margins": 0.46133819222450256,
      "rewards/rejected": 0.19816650450229645,
      "step": 350
    },
    {
      "epoch": 0.94,
      "learning_rate": 6.41315865106129e-08,
      "logits/chosen": -1.9189163446426392,
      "logits/rejected": -1.9292027950286865,
      "logps/chosen": -31.60186767578125,
      "logps/rejected": -34.987525939941406,
      "loss": 0.4401,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.5425392985343933,
      "rewards/margins": 0.27143171429634094,
      "rewards/rejected": 0.27110758423805237,
      "step": 360
    },
    {
      "epoch": 0.96,
      "learning_rate": 2.3150941078050325e-08,
      "logits/chosen": -2.0577945709228516,
      "logits/rejected": -2.051274061203003,
      "logps/chosen": -33.05121612548828,
      "logps/rejected": -28.990320205688477,
      "loss": 0.4233,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.5406805276870728,
      "rewards/margins": 0.3281847834587097,
      "rewards/rejected": 0.21249575912952423,
      "step": 370
    },
    {
      "epoch": 0.99,
      "learning_rate": 2.575864278703266e-09,
      "logits/chosen": -1.917284607887268,
      "logits/rejected": -1.9194421768188477,
      "logps/chosen": -33.59749984741211,
      "logps/rejected": -30.708057403564453,
      "loss": 0.4061,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.5773938298225403,
      "rewards/margins": 0.429278701543808,
      "rewards/rejected": 0.14811506867408752,
      "step": 380
    },
    {
      "epoch": 1.0,
      "step": 385,
      "total_flos": 0.0,
      "train_loss": 0.447351616079157,
      "train_runtime": 3253.4458,
      "train_samples_per_second": 0.946,
      "train_steps_per_second": 0.118
    }
  ],
  "logging_steps": 10,
  "max_steps": 385,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}