{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 385,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 39.5,
      "learning_rate": 1.282051282051282e-07,
      "logits/chosen": -2.7358343601226807,
      "logits/rejected": -2.7480404376983643,
      "logps/chosen": -27.35565757751465,
      "logps/rejected": -21.06114387512207,
      "loss": 1.0,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.03,
      "grad_norm": 67.5,
      "learning_rate": 1.282051282051282e-06,
      "logits/chosen": -3.0102524757385254,
      "logits/rejected": -2.9992215633392334,
      "logps/chosen": -33.198204040527344,
      "logps/rejected": -31.97161865234375,
      "loss": 1.0048,
      "rewards/accuracies": 0.3888888955116272,
      "rewards/chosen": -0.008629633113741875,
      "rewards/margins": -0.00477217324078083,
      "rewards/rejected": -0.0038574603386223316,
      "step": 10
    },
    {
      "epoch": 0.05,
      "grad_norm": 45.5,
      "learning_rate": 2.564102564102564e-06,
      "logits/chosen": -2.8995842933654785,
      "logits/rejected": -2.8944945335388184,
      "logps/chosen": -32.47186279296875,
      "logps/rejected": -28.95808982849121,
      "loss": 1.0103,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": 0.0016174853080883622,
      "rewards/margins": -0.01026993989944458,
      "rewards/rejected": 0.011887425556778908,
      "step": 20
    },
    {
      "epoch": 0.08,
      "grad_norm": 44.25,
      "learning_rate": 3.846153846153847e-06,
      "logits/chosen": -3.0970377922058105,
      "logits/rejected": -3.1085267066955566,
      "logps/chosen": -32.78118133544922,
      "logps/rejected": -30.125417709350586,
      "loss": 0.9716,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.06375427544116974,
      "rewards/margins": 0.02843797765672207,
      "rewards/rejected": 0.03531629964709282,
      "step": 30
    },
    {
      "epoch": 0.1,
      "grad_norm": 36.0,
      "learning_rate": 4.999896948438434e-06,
      "logits/chosen": -2.8625752925872803,
      "logits/rejected": -2.853350877761841,
      "logps/chosen": -31.52760887145996,
      "logps/rejected": -32.38980484008789,
      "loss": 0.8635,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.15135782957077026,
      "rewards/margins": 0.16753093898296356,
      "rewards/rejected": -0.016173111274838448,
      "step": 40
    },
    {
      "epoch": 0.13,
      "grad_norm": 28.0,
      "learning_rate": 4.987541037542187e-06,
      "logits/chosen": -2.880800724029541,
      "logits/rejected": -2.8784618377685547,
      "logps/chosen": -29.383716583251953,
      "logps/rejected": -30.082469940185547,
      "loss": 0.8801,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.1782219111919403,
      "rewards/margins": 0.17498424649238586,
      "rewards/rejected": 0.00323770334944129,
      "step": 50
    },
    {
      "epoch": 0.16,
      "grad_norm": 41.5,
      "learning_rate": 4.954691471941119e-06,
      "logits/chosen": -2.9095096588134766,
      "logits/rejected": -2.9110829830169678,
      "logps/chosen": -29.854755401611328,
      "logps/rejected": -28.0576114654541,
      "loss": 0.8396,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.13750770688056946,
      "rewards/margins": 0.18847230076789856,
      "rewards/rejected": -0.0509646013379097,
      "step": 60
    },
    {
      "epoch": 0.18,
      "grad_norm": 54.75,
      "learning_rate": 4.901618883413549e-06,
      "logits/chosen": -2.9918501377105713,
      "logits/rejected": -2.998004913330078,
      "logps/chosen": -29.19577407836914,
      "logps/rejected": -31.001514434814453,
      "loss": 0.9064,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.042597077786922455,
      "rewards/margins": 0.0974539965391159,
      "rewards/rejected": -0.05485691875219345,
      "step": 70
    },
    {
      "epoch": 0.21,
      "grad_norm": 47.25,
      "learning_rate": 4.828760511501322e-06,
      "logits/chosen": -2.8070733547210693,
      "logits/rejected": -2.8222880363464355,
      "logps/chosen": -29.365554809570312,
      "logps/rejected": -29.91384506225586,
      "loss": 0.8182,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.08758720755577087,
      "rewards/margins": 0.20736832916736603,
      "rewards/rejected": -0.11978113651275635,
      "step": 80
    },
    {
      "epoch": 0.23,
      "grad_norm": 35.25,
      "learning_rate": 4.7367166013034295e-06,
      "logits/chosen": -2.8982958793640137,
      "logits/rejected": -2.880619525909424,
      "logps/chosen": -32.76674270629883,
      "logps/rejected": -30.40105628967285,
      "loss": 0.8554,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.03484407439827919,
      "rewards/margins": 0.2245481014251709,
      "rewards/rejected": -0.1897040456533432,
      "step": 90
    },
    {
      "epoch": 0.26,
      "grad_norm": 34.5,
      "learning_rate": 4.626245458345211e-06,
      "logits/chosen": -3.005671501159668,
      "logits/rejected": -3.0055477619171143,
      "logps/chosen": -31.862071990966797,
      "logps/rejected": -30.99982261657715,
      "loss": 0.841,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.09740196168422699,
      "rewards/margins": 0.18922677636146545,
      "rewards/rejected": -0.09182481467723846,
      "step": 100
    },
    {
      "epoch": 0.26,
      "eval_logits/chosen": -2.812790870666504,
      "eval_logits/rejected": -2.8098013401031494,
      "eval_logps/chosen": -31.30615234375,
      "eval_logps/rejected": -34.89762496948242,
      "eval_loss": 0.9188198447227478,
      "eval_rewards/accuracies": 0.610049843788147,
      "eval_rewards/chosen": -0.01184946671128273,
      "eval_rewards/margins": 0.08788519352674484,
      "eval_rewards/rejected": -0.09973466396331787,
      "eval_runtime": 113.3423,
      "eval_samples_per_second": 3.026,
      "eval_steps_per_second": 0.379,
      "step": 100
    },
    {
      "epoch": 0.29,
      "grad_norm": 52.75,
      "learning_rate": 4.498257201263691e-06,
      "logits/chosen": -2.962007999420166,
      "logits/rejected": -2.937967538833618,
      "logps/chosen": -31.860488891601562,
      "logps/rejected": -31.523914337158203,
      "loss": 0.724,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.17815347015857697,
      "rewards/margins": 0.3606840968132019,
      "rewards/rejected": -0.18253062665462494,
      "step": 110
    },
    {
      "epoch": 0.31,
      "grad_norm": 44.5,
      "learning_rate": 4.353806263777678e-06,
      "logits/chosen": -3.046175241470337,
      "logits/rejected": -3.074599027633667,
      "logps/chosen": -28.74391746520996,
      "logps/rejected": -34.43209457397461,
      "loss": 0.7306,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.2099936455488205,
      "rewards/margins": 0.3522658348083496,
      "rewards/rejected": -0.14227214455604553,
      "step": 120
    },
    {
      "epoch": 0.34,
      "grad_norm": 33.75,
      "learning_rate": 4.1940827077152755e-06,
      "logits/chosen": -2.750256061553955,
      "logits/rejected": -2.7464325428009033,
      "logps/chosen": -28.576923370361328,
      "logps/rejected": -30.405838012695312,
      "loss": 0.7558,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.19887787103652954,
      "rewards/margins": 0.3348928391933441,
      "rewards/rejected": -0.13601499795913696,
      "step": 130
    },
    {
      "epoch": 0.36,
      "grad_norm": 29.625,
      "learning_rate": 4.0204024186666215e-06,
      "logits/chosen": -3.026742458343506,
      "logits/rejected": -3.02447247505188,
      "logps/chosen": -27.27823829650879,
      "logps/rejected": -32.02156066894531,
      "loss": 0.805,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.10399170964956284,
      "rewards/margins": 0.28638824820518494,
      "rewards/rejected": -0.1823965609073639,
      "step": 140
    },
    {
      "epoch": 0.39,
      "grad_norm": 34.25,
      "learning_rate": 3.834196265035119e-06,
      "logits/chosen": -2.8219058513641357,
      "logits/rejected": -2.816573143005371,
      "logps/chosen": -27.304168701171875,
      "logps/rejected": -31.6512451171875,
      "loss": 0.6938,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.26066717505455017,
      "rewards/margins": 0.4744674563407898,
      "rewards/rejected": -0.21380028128623962,
      "step": 150
    },
    {
      "epoch": 0.42,
      "grad_norm": 47.0,
      "learning_rate": 3.636998309800573e-06,
      "logits/chosen": -3.1381523609161377,
      "logits/rejected": -3.121229648590088,
      "logps/chosen": -31.75846290588379,
      "logps/rejected": -29.593923568725586,
      "loss": 0.5556,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.36662742495536804,
      "rewards/margins": 0.6737016439437866,
      "rewards/rejected": -0.30707424879074097,
      "step": 160
    },
    {
      "epoch": 0.44,
      "grad_norm": 41.75,
      "learning_rate": 3.4304331721118078e-06,
      "logits/chosen": -2.9501750469207764,
      "logits/rejected": -2.957343578338623,
      "logps/chosen": -29.471771240234375,
      "logps/rejected": -31.817256927490234,
      "loss": 0.6244,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.21142145991325378,
      "rewards/margins": 0.570308268070221,
      "rewards/rejected": -0.35888683795928955,
      "step": 170
    },
    {
      "epoch": 0.47,
      "grad_norm": 37.25,
      "learning_rate": 3.2162026428305436e-06,
      "logits/chosen": -2.802713394165039,
      "logits/rejected": -2.8004953861236572,
      "logps/chosen": -29.250396728515625,
      "logps/rejected": -30.246570587158203,
      "loss": 0.6894,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.20972609519958496,
      "rewards/margins": 0.44892430305480957,
      "rewards/rejected": -0.2391982078552246,
      "step": 180
    },
    {
      "epoch": 0.49,
      "grad_norm": 26.25,
      "learning_rate": 2.996071664294641e-06,
      "logits/chosen": -2.9141011238098145,
      "logits/rejected": -2.9099700450897217,
      "logps/chosen": -29.81064796447754,
      "logps/rejected": -28.773019790649414,
      "loss": 0.7547,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.18803250789642334,
      "rewards/margins": 0.3957784175872803,
      "rewards/rejected": -0.20774583518505096,
      "step": 190
    },
    {
      "epoch": 0.52,
      "grad_norm": 27.625,
      "learning_rate": 2.7718537898066833e-06,
      "logits/chosen": -2.9808688163757324,
      "logits/rejected": -2.9687676429748535,
      "logps/chosen": -32.98746871948242,
      "logps/rejected": -30.666748046875,
      "loss": 0.7163,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": 0.3557746112346649,
      "rewards/margins": 0.4721054136753082,
      "rewards/rejected": -0.11633072048425674,
      "step": 200
    },
    {
      "epoch": 0.52,
      "eval_logits/chosen": -2.819563388824463,
      "eval_logits/rejected": -2.817270278930664,
      "eval_logps/chosen": -31.472963333129883,
      "eval_logps/rejected": -35.05440902709961,
      "eval_loss": 0.9220677018165588,
      "eval_rewards/accuracies": 0.5514950156211853,
      "eval_rewards/chosen": -0.09525591880083084,
      "eval_rewards/margins": 0.0828702449798584,
      "eval_rewards/rejected": -0.17812614142894745,
      "eval_runtime": 113.0963,
      "eval_samples_per_second": 3.033,
      "eval_steps_per_second": 0.38,
      "step": 200
    },
    {
      "epoch": 0.55,
      "grad_norm": 37.0,
      "learning_rate": 2.5453962426402006e-06,
      "logits/chosen": -2.9180774688720703,
      "logits/rejected": -2.91807222366333,
      "logps/chosen": -32.39319610595703,
      "logps/rejected": -34.32632064819336,
      "loss": 0.6144,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.3201075494289398,
      "rewards/margins": 0.552533745765686,
      "rewards/rejected": -0.23242616653442383,
      "step": 210
    },
    {
      "epoch": 0.57,
      "grad_norm": 30.25,
      "learning_rate": 2.3185646976551794e-06,
      "logits/chosen": -2.8982133865356445,
      "logits/rejected": -2.9134175777435303,
      "logps/chosen": -29.346981048583984,
      "logps/rejected": -28.9754581451416,
      "loss": 0.6305,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.415529727935791,
      "rewards/margins": 0.6391232013702393,
      "rewards/rejected": -0.22359342873096466,
      "step": 220
    },
    {
      "epoch": 0.6,
      "grad_norm": 35.0,
      "learning_rate": 2.0932279108998323e-06,
      "logits/chosen": -2.9441702365875244,
      "logits/rejected": -2.948596477508545,
      "logps/chosen": -30.654144287109375,
      "logps/rejected": -32.061561584472656,
      "loss": 0.7505,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": 0.2488628625869751,
      "rewards/margins": 0.4005666673183441,
      "rewards/rejected": -0.15170380473136902,
      "step": 230
    },
    {
      "epoch": 0.62,
      "grad_norm": 41.5,
      "learning_rate": 1.8712423238279358e-06,
      "logits/chosen": -2.9988656044006348,
      "logits/rejected": -3.006716251373291,
      "logps/chosen": -30.050405502319336,
      "logps/rejected": -30.539688110351562,
      "loss": 0.5427,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.42725998163223267,
      "rewards/margins": 0.5794823169708252,
      "rewards/rejected": -0.1522223949432373,
      "step": 240
    },
    {
      "epoch": 0.65,
      "grad_norm": 39.75,
      "learning_rate": 1.6544367689701824e-06,
      "logits/chosen": -2.8314945697784424,
      "logits/rejected": -2.8215582370758057,
      "logps/chosen": -26.297893524169922,
      "logps/rejected": -29.683481216430664,
      "loss": 0.737,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": 0.32816022634506226,
      "rewards/margins": 0.4040789008140564,
      "rewards/rejected": -0.07591867446899414,
      "step": 250
    },
    {
      "epoch": 0.68,
      "grad_norm": 24.875,
      "learning_rate": 1.4445974030621963e-06,
      "logits/chosen": -2.8124470710754395,
      "logits/rejected": -2.83256196975708,
      "logps/chosen": -28.941558837890625,
      "logps/rejected": -34.62192916870117,
      "loss": 0.5683,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.44420257210731506,
      "rewards/margins": 0.6681241989135742,
      "rewards/rejected": -0.22392161190509796,
      "step": 260
    },
    {
      "epoch": 0.7,
      "grad_norm": 44.25,
      "learning_rate": 1.243452991757889e-06,
      "logits/chosen": -2.954413890838623,
      "logits/rejected": -2.9607253074645996,
      "logps/chosen": -30.094287872314453,
      "logps/rejected": -30.396358489990234,
      "loss": 0.5618,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.3435356020927429,
      "rewards/margins": 0.6090848445892334,
      "rewards/rejected": -0.2655491232872009,
      "step": 270
    },
    {
      "epoch": 0.73,
      "grad_norm": 30.75,
      "learning_rate": 1.0526606671603523e-06,
      "logits/chosen": -2.9698939323425293,
      "logits/rejected": -2.956036329269409,
      "logps/chosen": -30.114521026611328,
      "logps/rejected": -28.823684692382812,
      "loss": 0.7337,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.30358150601387024,
      "rewards/margins": 0.399749219417572,
      "rewards/rejected": -0.09616778045892715,
      "step": 280
    },
    {
      "epoch": 0.75,
      "grad_norm": 29.375,
      "learning_rate": 8.737922755071455e-07,
      "logits/chosen": -2.8976500034332275,
      "logits/rejected": -2.8795270919799805,
      "logps/chosen": -31.536123275756836,
      "logps/rejected": -31.159521102905273,
      "loss": 0.4628,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.5500896573066711,
      "rewards/margins": 0.8681961297988892,
      "rewards/rejected": -0.31810641288757324,
      "step": 290
    },
    {
      "epoch": 0.78,
      "grad_norm": 30.25,
      "learning_rate": 7.08321427484816e-07,
      "logits/chosen": -2.89839506149292,
      "logits/rejected": -2.8952107429504395,
      "logps/chosen": -31.317790985107422,
      "logps/rejected": -27.9440860748291,
      "loss": 0.6439,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": 0.45242971181869507,
      "rewards/margins": 0.6226793527603149,
      "rewards/rejected": -0.17024962604045868,
      "step": 300
    },
    {
      "epoch": 0.78,
      "eval_logits/chosen": -2.8154501914978027,
      "eval_logits/rejected": -2.8133084774017334,
      "eval_logps/chosen": -31.455341339111328,
      "eval_logps/rejected": -35.035438537597656,
      "eval_loss": 0.9215981364250183,
      "eval_rewards/accuracies": 0.5573089718818665,
      "eval_rewards/chosen": -0.0864456370472908,
      "eval_rewards/margins": 0.08219624310731888,
      "eval_rewards/rejected": -0.16864188015460968,
      "eval_runtime": 113.1056,
      "eval_samples_per_second": 3.033,
      "eval_steps_per_second": 0.38,
      "step": 300
    },
    {
      "epoch": 0.81,
      "grad_norm": 39.5,
      "learning_rate": 5.576113578589035e-07,
      "logits/chosen": -2.779205560684204,
      "logits/rejected": -2.7966835498809814,
      "logps/chosen": -28.479604721069336,
      "logps/rejected": -31.288341522216797,
      "loss": 0.5317,
      "rewards/accuracies": 0.875,
      "rewards/chosen": 0.37490805983543396,
      "rewards/margins": 0.6530753374099731,
      "rewards/rejected": -0.2781672477722168,
      "step": 310
    },
    {
      "epoch": 0.83,
      "grad_norm": 32.25,
      "learning_rate": 4.229036944380913e-07,
      "logits/chosen": -3.0243513584136963,
      "logits/rejected": -3.0100343227386475,
      "logps/chosen": -29.37557601928711,
      "logps/rejected": -28.844478607177734,
      "loss": 0.5359,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.41800540685653687,
      "rewards/margins": 0.6808558106422424,
      "rewards/rejected": -0.262850284576416,
      "step": 320
    },
    {
      "epoch": 0.86,
      "grad_norm": 22.875,
      "learning_rate": 3.053082288996112e-07,
      "logits/chosen": -2.9381539821624756,
      "logits/rejected": -2.920642852783203,
      "logps/chosen": -27.252010345458984,
      "logps/rejected": -31.086780548095703,
      "loss": 0.4505,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.5550299882888794,
      "rewards/margins": 0.9025982022285461,
      "rewards/rejected": -0.3475683331489563,
      "step": 330
    },
    {
      "epoch": 0.88,
      "grad_norm": 42.75,
      "learning_rate": 2.0579377374915805e-07,
      "logits/chosen": -3.1556878089904785,
      "logits/rejected": -3.1616902351379395,
      "logps/chosen": -30.897449493408203,
      "logps/rejected": -33.362003326416016,
      "loss": 0.532,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.3708776533603668,
      "rewards/margins": 0.7038747072219849,
      "rewards/rejected": -0.33299699425697327,
      "step": 340
    },
    {
      "epoch": 0.91,
      "grad_norm": 36.25,
      "learning_rate": 1.2518018074041684e-07,
      "logits/chosen": -3.032553195953369,
      "logits/rejected": -3.0359413623809814,
      "logps/chosen": -29.842823028564453,
      "logps/rejected": -31.924610137939453,
      "loss": 0.5238,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.4820283055305481,
      "rewards/margins": 0.6876784563064575,
      "rewards/rejected": -0.2056502103805542,
      "step": 350
    },
    {
      "epoch": 0.94,
      "grad_norm": 36.25,
      "learning_rate": 6.41315865106129e-08,
      "logits/chosen": -2.871373414993286,
      "logits/rejected": -2.8733484745025635,
      "logps/chosen": -27.545650482177734,
      "logps/rejected": -30.173965454101562,
      "loss": 0.5016,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.5520386099815369,
      "rewards/margins": 0.7235507965087891,
      "rewards/rejected": -0.1715122014284134,
      "step": 360
    },
    {
      "epoch": 0.96,
      "grad_norm": 50.75,
      "learning_rate": 2.3150941078050325e-08,
      "logits/chosen": -2.948784589767456,
      "logits/rejected": -2.9460253715515137,
      "logps/chosen": -29.852313995361328,
      "logps/rejected": -32.32441329956055,
      "loss": 0.5601,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.46716976165771484,
      "rewards/margins": 0.6653568148612976,
      "rewards/rejected": -0.19818702340126038,
      "step": 370
    },
    {
      "epoch": 0.99,
      "grad_norm": 37.5,
      "learning_rate": 2.575864278703266e-09,
      "logits/chosen": -2.9041707515716553,
      "logits/rejected": -2.886718273162842,
      "logps/chosen": -28.160015106201172,
      "logps/rejected": -28.389995574951172,
      "loss": 0.5382,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.3925102651119232,
      "rewards/margins": 0.6426987648010254,
      "rewards/rejected": -0.25018855929374695,
      "step": 380
    },
    {
      "epoch": 1.0,
      "step": 385,
      "total_flos": 0.0,
      "train_loss": 0.694122297732861,
      "train_runtime": 2717.9333,
      "train_samples_per_second": 1.133,
      "train_steps_per_second": 0.142
    }
  ],
  "logging_steps": 10,
  "max_steps": 385,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}