{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.971563981042654,
"eval_steps": 128,
"global_step": 104,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.018957345971563982,
"grad_norm": 157.75602672892865,
"learning_rate": 4.545454545454545e-08,
"logits/chosen": 117.53560638427734,
"logits/rejected": 126.8960952758789,
"logps/chosen": -335.40118408203125,
"logps/rejected": -439.16552734375,
"loss": 0.8464,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.1895734597156398,
"grad_norm": 159.84694184422614,
"learning_rate": 4.545454545454545e-07,
"logits/chosen": 135.0513153076172,
"logits/rejected": 138.41073608398438,
"logps/chosen": -395.8233642578125,
"logps/rejected": -439.4460144042969,
"loss": 0.7992,
"rewards/accuracies": 0.4375,
"rewards/chosen": 0.008658057078719139,
"rewards/margins": 0.014583365991711617,
"rewards/rejected": -0.005925314500927925,
"step": 10
},
{
"epoch": 0.3791469194312796,
"grad_norm": 152.55623162543827,
"learning_rate": 4.885348141000122e-07,
"logits/chosen": 121.6577377319336,
"logits/rejected": 125.38299560546875,
"logps/chosen": -372.36334228515625,
"logps/rejected": -426.1581115722656,
"loss": 0.6773,
"rewards/accuracies": 0.59375,
"rewards/chosen": 0.0024319454096257687,
"rewards/margins": 0.3107669949531555,
"rewards/rejected": -0.30833500623703003,
"step": 20
},
{
"epoch": 0.5687203791469194,
"grad_norm": 119.76688287434446,
"learning_rate": 4.5025027361734613e-07,
"logits/chosen": 142.59500122070312,
"logits/rejected": 136.1927947998047,
"logps/chosen": -427.85626220703125,
"logps/rejected": -474.54498291015625,
"loss": 0.5982,
"rewards/accuracies": 0.71875,
"rewards/chosen": -1.7695486545562744,
"rewards/margins": 0.9575872421264648,
"rewards/rejected": -2.7271358966827393,
"step": 30
},
{
"epoch": 0.7582938388625592,
"grad_norm": 101.19518327182783,
"learning_rate": 3.893311157806091e-07,
"logits/chosen": 125.58316802978516,
"logits/rejected": 114.24357604980469,
"logps/chosen": -413.34722900390625,
"logps/rejected": -439.4224548339844,
"loss": 0.557,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -2.9576516151428223,
"rewards/margins": 1.1197086572647095,
"rewards/rejected": -4.077361106872559,
"step": 40
},
{
"epoch": 0.9478672985781991,
"grad_norm": 124.4654073535278,
"learning_rate": 3.126631330646801e-07,
"logits/chosen": 140.7157440185547,
"logits/rejected": 144.85781860351562,
"logps/chosen": -476.4051208496094,
"logps/rejected": -560.07568359375,
"loss": 0.5143,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -2.863062620162964,
"rewards/margins": 1.3244019746780396,
"rewards/rejected": -4.187464237213135,
"step": 50
},
{
"epoch": 1.1374407582938388,
"grad_norm": 60.3803202767405,
"learning_rate": 2.2891223348923882e-07,
"logits/chosen": 133.24928283691406,
"logits/rejected": 136.88035583496094,
"logps/chosen": -462.84130859375,
"logps/rejected": -546.5650024414062,
"loss": 0.3397,
"rewards/accuracies": 0.875,
"rewards/chosen": -3.2417819499969482,
"rewards/margins": 2.2681899070739746,
"rewards/rejected": -5.509971618652344,
"step": 60
},
{
"epoch": 1.3270142180094786,
"grad_norm": 48.59955026130948,
"learning_rate": 1.4754491880085317e-07,
"logits/chosen": 126.54573822021484,
"logits/rejected": 128.51889038085938,
"logps/chosen": -433.62042236328125,
"logps/rejected": -533.0380859375,
"loss": 0.2374,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -3.3238282203674316,
"rewards/margins": 2.6523845195770264,
"rewards/rejected": -5.976212978363037,
"step": 70
},
{
"epoch": 1.5165876777251186,
"grad_norm": 54.174748869518474,
"learning_rate": 7.775827023107834e-08,
"logits/chosen": 112.75434875488281,
"logits/rejected": 129.4608154296875,
"logps/chosen": -436.5595703125,
"logps/rejected": -553.3507080078125,
"loss": 0.2095,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -4.145426273345947,
"rewards/margins": 2.8602230548858643,
"rewards/rejected": -7.005648612976074,
"step": 80
},
{
"epoch": 1.7061611374407581,
"grad_norm": 54.737453014104844,
"learning_rate": 2.7440387297912122e-08,
"logits/chosen": 112.3515396118164,
"logits/rejected": 125.18294525146484,
"logps/chosen": -466.541259765625,
"logps/rejected": -578.0580444335938,
"loss": 0.2032,
"rewards/accuracies": 0.918749988079071,
"rewards/chosen": -4.005508899688721,
"rewards/margins": 3.0133039951324463,
"rewards/rejected": -7.018812656402588,
"step": 90
},
{
"epoch": 1.8957345971563981,
"grad_norm": 52.477271491484125,
"learning_rate": 2.27878296044029e-09,
"logits/chosen": 118.53382873535156,
"logits/rejected": 118.45182800292969,
"logps/chosen": -454.86248779296875,
"logps/rejected": -543.6682739257812,
"loss": 0.1998,
"rewards/accuracies": 0.9312499761581421,
"rewards/chosen": -3.6443698406219482,
"rewards/margins": 2.700843095779419,
"rewards/rejected": -6.345212936401367,
"step": 100
},
{
"epoch": 1.971563981042654,
"step": 104,
"total_flos": 0.0,
"train_loss": 0.4319001900462004,
"train_runtime": 2177.8844,
"train_samples_per_second": 6.199,
"train_steps_per_second": 0.048
}
],
"logging_steps": 10,
"max_steps": 104,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}