hugodk-sch committed on
Commit
4470de4
1 Parent(s): 93f15d0

Model save

Files changed (5)
  1. README.md +13 -16
  2. adapter_model.safetensors +1 -1
  3. all_results.json +3 -16
  4. train_results.json +3 -3
  5. trainer_state.json +374 -374
README.md CHANGED
@@ -1,13 +1,10 @@
1
  ---
2
  library_name: peft
3
  tags:
4
- - alignment-handbook
5
  - trl
6
  - dpo
7
  - generated_from_trainer
8
  base_model: NbAiLab/nb-gpt-j-6B-v2
9
- datasets:
10
- - hugodk-sch/aftonposten_title_prefs
11
  model-index:
12
  - name: aftonposten-6b-align-scan
13
  results: []
@@ -18,17 +15,17 @@ should probably proofread and complete it, then remove this comment. -->
18
 
19
  # aftonposten-6b-align-scan
20
 
21
- This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
22
  It achieves the following results on the evaluation set:
23
- - Loss: 0.3797
24
- - Rewards/chosen: 0.0228
25
- - Rewards/rejected: 0.0164
26
- - Rewards/accuracies: 0.5336
27
- - Rewards/margins: 0.0064
28
- - Logps/rejected: -37.4984
29
- - Logps/chosen: -34.0092
30
- - Logits/rejected: -2.2334
31
- - Logits/chosen: -2.2383
32
 
33
  ## Model description
34
 
@@ -63,9 +60,9 @@ The following hyperparameters were used during training:
63
 
64
  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
65
  |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
66
- | 0.3038 | 0.26 | 100 | 0.3512 | 0.0196 | 0.0046 | 0.5424 | 0.0150 | -37.5115 | -34.0128 | -2.2324 | -2.2372 |
67
- | 0.3157 | 0.52 | 200 | 0.3716 | 0.0148 | -0.0016 | 0.5245 | 0.0164 | -37.5184 | -34.0181 | -2.2322 | -2.2371 |
68
- | 0.2156 | 0.78 | 300 | 0.3845 | 0.0182 | 0.0177 | 0.4934 | 0.0005 | -37.4970 | -34.0143 | -2.2316 | -2.2364 |
69
 
70
 
71
  ### Framework versions
 
1
  ---
2
  library_name: peft
3
  tags:
 
4
  - trl
5
  - dpo
6
  - generated_from_trainer
7
  base_model: NbAiLab/nb-gpt-j-6B-v2
8
  model-index:
9
  - name: aftonposten-6b-align-scan
10
  results: []
 
15
 
16
  # aftonposten-6b-align-scan
17
 
18
+ This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
19
  It achieves the following results on the evaluation set:
20
+ - Loss: 0.9713
21
+ - Rewards/chosen: -0.0468
22
+ - Rewards/rejected: -0.0779
23
+ - Rewards/accuracies: 0.5282
24
+ - Rewards/margins: 0.0310
25
+ - Logps/rejected: -37.6032
26
+ - Logps/chosen: -34.0866
27
+ - Logits/rejected: -2.2201
28
+ - Logits/chosen: -2.2249
29
 
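These reward metrics follow the usual TRL DPO convention, where each reward is the beta-scaled gap between policy and reference log-probabilities. A minimal sketch of that relationship (an assumption about the training setup, not something recorded in this card; `beta=0.1` is only an illustrative placeholder):

```python
def dpo_reward_stats(policy_chosen_logps, policy_rejected_logps,
                     ref_chosen_logps, ref_rejected_logps, beta=0.1):
    # Rewards: beta-scaled log-prob gap between the trained policy and the
    # frozen reference model (standard TRL DPO convention, assumed here).
    rewards_chosen = beta * (policy_chosen_logps - ref_chosen_logps)
    rewards_rejected = beta * (policy_rejected_logps - ref_rejected_logps)
    margins = rewards_chosen - rewards_rejected                # Rewards/margins
    accuracies = (rewards_chosen > rewards_rejected).float()   # Rewards/accuracies
    return (rewards_chosen.mean(), rewards_rejected.mean(),
            margins.mean(), accuracies.mean())
```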
30
  ## Model description
31
 
 
60
 
61
  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
62
  |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
63
+ | 0.8913 | 0.26 | 100 | 0.9845 | -0.0055 | -0.0215 | 0.5195 | 0.0159 | -37.5405 | -34.0407 | -2.2273 | -2.2322 |
64
+ | 0.7293 | 0.52 | 200 | 0.9602 | -0.0172 | -0.0580 | 0.5714 | 0.0408 | -37.5811 | -34.0537 | -2.2238 | -2.2286 |
65
+ | 0.6144 | 0.78 | 300 | 0.9713 | -0.0468 | -0.0779 | 0.5282 | 0.0310 | -37.6032 | -34.0866 | -2.2201 | -2.2249 |
66
 
67
 
68
  ### Framework versions
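
A minimal usage sketch, assuming the standard `peft`/`transformers` loading path and that the adapter is published under the model name from this card (the repo id below is illustrative, not confirmed by this commit):

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "NbAiLab/nb-gpt-j-6B-v2"                    # base model named in the card
adapter_id = "hugodk-sch/aftonposten-6b-align-scan"   # assumed adapter repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(base, adapter_id)   # loads adapter_model.safetensors

inputs = tokenizer("Skriv en overskrift:", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```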
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:26f5600378d488770611e839c766c54ea5801ea2b013f52735dae26844dc127e
3
  size 176183216
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:561acdc4b0687e023339344ce10f08031f158883ab4007975eb3440d742cb9de
3
  size 176183216
all_results.json CHANGED
@@ -1,21 +1,8 @@
1
  {
2
  "epoch": 1.0,
3
- "eval_logits/chosen": -2.238292694091797,
4
- "eval_logits/rejected": -2.2334160804748535,
5
- "eval_logps/chosen": -34.009239196777344,
6
- "eval_logps/rejected": -37.498390197753906,
7
- "eval_loss": 0.3797042965888977,
8
- "eval_rewards/accuracies": 0.5336378812789917,
9
- "eval_rewards/chosen": 0.02277994342148304,
10
- "eval_rewards/margins": 0.006376237142831087,
11
- "eval_rewards/rejected": 0.01640370488166809,
12
- "eval_runtime": 145.6289,
13
- "eval_samples": 343,
14
- "eval_samples_per_second": 2.355,
15
- "eval_steps_per_second": 0.295,
16
- "train_loss": 0.31703355428460356,
17
- "train_runtime": 3251.5033,
18
  "train_samples": 3079,
19
- "train_samples_per_second": 0.947,
20
  "train_steps_per_second": 0.118
21
  }
 
1
  {
2
  "epoch": 1.0,
3
+ "train_loss": 0.8125879040012112,
4
+ "train_runtime": 3249.3137,
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  "train_samples": 3079,
6
+ "train_samples_per_second": 0.948,
7
  "train_steps_per_second": 0.118
8
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
  "epoch": 1.0,
3
- "train_loss": 0.31703355428460356,
4
- "train_runtime": 3251.5033,
5
  "train_samples": 3079,
6
- "train_samples_per_second": 0.947,
7
  "train_steps_per_second": 0.118
8
  }
 
1
  {
2
  "epoch": 1.0,
3
+ "train_loss": 0.8125879040012112,
4
+ "train_runtime": 3249.3137,
5
  "train_samples": 3079,
6
+ "train_samples_per_second": 0.948,
7
  "train_steps_per_second": 0.118
8
  }
trainer_state.json CHANGED
@@ -15,7 +15,7 @@
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
- "loss": 0.3086,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
@@ -25,155 +25,155 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
- "logits/chosen": -1.8665881156921387,
29
- "logits/rejected": -1.8709055185317993,
30
- "logps/chosen": -36.99662399291992,
31
- "logps/rejected": -33.65571594238281,
32
- "loss": 0.3052,
33
- "rewards/accuracies": 0.5416666865348816,
34
- "rewards/chosen": 0.008980684913694859,
35
- "rewards/margins": 0.032059140503406525,
36
- "rewards/rejected": -0.02307845838367939,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
- "logits/chosen": -1.9977455139160156,
43
- "logits/rejected": -2.000382423400879,
44
- "logps/chosen": -29.642925262451172,
45
- "logps/rejected": -29.056737899780273,
46
- "loss": 0.3525,
47
- "rewards/accuracies": 0.42500001192092896,
48
- "rewards/chosen": -0.0006573178106918931,
49
- "rewards/margins": -0.00669272243976593,
50
- "rewards/rejected": 0.006035405211150646,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
- "logits/chosen": -1.9204607009887695,
57
- "logits/rejected": -1.9177772998809814,
58
- "logps/chosen": -31.42336654663086,
59
- "logps/rejected": -33.22785568237305,
60
- "loss": 0.3603,
61
- "rewards/accuracies": 0.48750001192092896,
62
- "rewards/chosen": -0.006546213291585445,
63
- "rewards/margins": 0.0019128695130348206,
64
- "rewards/rejected": -0.00845908559858799,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
- "logits/chosen": -2.0172362327575684,
71
- "logits/rejected": -2.008507251739502,
72
- "logps/chosen": -32.56964874267578,
73
- "logps/rejected": -32.50572967529297,
74
- "loss": 0.3558,
75
- "rewards/accuracies": 0.5,
76
- "rewards/chosen": 0.006439635064452887,
77
- "rewards/margins": 0.00019515231542754918,
78
- "rewards/rejected": 0.006244482938200235,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
- "logits/chosen": -1.8618619441986084,
85
- "logits/rejected": -1.8510783910751343,
86
- "logps/chosen": -33.56026077270508,
87
- "logps/rejected": -35.45254898071289,
88
- "loss": 0.3691,
89
- "rewards/accuracies": 0.4000000059604645,
90
- "rewards/chosen": -0.0025013976264744997,
91
- "rewards/margins": -0.002315213903784752,
92
- "rewards/rejected": -0.00018618404283188283,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
- "logits/chosen": -1.9393202066421509,
99
- "logits/rejected": -1.9412600994110107,
100
- "logps/chosen": -32.57838439941406,
101
- "logps/rejected": -33.215576171875,
102
- "loss": 0.3063,
103
- "rewards/accuracies": 0.612500011920929,
104
- "rewards/chosen": 0.01962057128548622,
105
- "rewards/margins": 0.05940054729580879,
106
- "rewards/rejected": -0.03977997973561287,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
- "logits/chosen": -2.0718436241149902,
113
- "logits/rejected": -2.0768017768859863,
114
- "logps/chosen": -33.97806167602539,
115
- "logps/rejected": -36.63082504272461,
116
- "loss": 0.4257,
117
- "rewards/accuracies": 0.5,
118
- "rewards/chosen": 0.00938049890100956,
119
- "rewards/margins": 0.06513925641775131,
120
- "rewards/rejected": -0.0557587556540966,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
- "logits/chosen": -1.9351739883422852,
127
- "logits/rejected": -1.9383188486099243,
128
- "logps/chosen": -34.33073043823242,
129
- "logps/rejected": -34.61904525756836,
130
- "loss": 0.2902,
131
- "rewards/accuracies": 0.625,
132
- "rewards/chosen": 0.08736880123615265,
133
- "rewards/margins": 0.11134655773639679,
134
- "rewards/rejected": -0.023977745324373245,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
- "logits/chosen": -1.944392204284668,
141
- "logits/rejected": -1.9489190578460693,
142
- "logps/chosen": -32.419586181640625,
143
- "logps/rejected": -32.3698844909668,
144
- "loss": 0.3611,
145
  "rewards/accuracies": 0.574999988079071,
146
- "rewards/chosen": 0.043051257729530334,
147
- "rewards/margins": 0.052930813282728195,
148
- "rewards/rejected": -0.009879561141133308,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
- "logits/chosen": -2.0419769287109375,
155
- "logits/rejected": -2.0399627685546875,
156
- "logps/chosen": -32.174407958984375,
157
- "logps/rejected": -31.26608657836914,
158
- "loss": 0.3038,
159
- "rewards/accuracies": 0.5874999761581421,
160
- "rewards/chosen": 0.059785228222608566,
161
- "rewards/margins": 0.05967242643237114,
162
- "rewards/rejected": 0.00011279433965682983,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.2372143268585205,
168
- "eval_logits/rejected": -2.2323503494262695,
169
- "eval_logps/chosen": -34.01276779174805,
170
- "eval_logps/rejected": -37.51152420043945,
171
- "eval_loss": 0.3511974811553955,
172
- "eval_rewards/accuracies": 0.5423588156700134,
173
- "eval_rewards/chosen": 0.01960929110646248,
174
- "eval_rewards/margins": 0.01502405758947134,
175
- "eval_rewards/rejected": 0.004585230257362127,
176
- "eval_runtime": 145.9032,
177
  "eval_samples_per_second": 2.351,
178
  "eval_steps_per_second": 0.295,
179
  "step": 100
@@ -181,434 +181,434 @@
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
- "logits/chosen": -1.998891830444336,
185
- "logits/rejected": -1.996492624282837,
186
- "logps/chosen": -33.14598083496094,
187
- "logps/rejected": -34.020729064941406,
188
- "loss": 0.4689,
189
- "rewards/accuracies": 0.6000000238418579,
190
- "rewards/chosen": 0.08805312216281891,
191
- "rewards/margins": 0.08027410507202148,
192
- "rewards/rejected": 0.007779018487781286,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
- "logits/chosen": -2.010932445526123,
199
- "logits/rejected": -2.0025696754455566,
200
- "logps/chosen": -32.37172317504883,
201
- "logps/rejected": -32.118797302246094,
202
- "loss": 0.4465,
203
  "rewards/accuracies": 0.574999988079071,
204
- "rewards/chosen": 0.06586603820323944,
205
- "rewards/margins": 0.02742874063551426,
206
- "rewards/rejected": 0.03843729570508003,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
- "logits/chosen": -2.0387539863586426,
213
- "logits/rejected": -2.030724287033081,
214
- "logps/chosen": -30.41655921936035,
215
- "logps/rejected": -32.060333251953125,
216
- "loss": 0.3844,
217
- "rewards/accuracies": 0.5375000238418579,
218
- "rewards/chosen": 0.05324209854006767,
219
- "rewards/margins": 0.05983499437570572,
220
- "rewards/rejected": -0.00659290561452508,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
- "logits/chosen": -1.967858076095581,
227
- "logits/rejected": -1.9781148433685303,
228
- "logps/chosen": -31.223413467407227,
229
- "logps/rejected": -32.55517578125,
230
- "loss": 0.3904,
231
- "rewards/accuracies": 0.6875,
232
- "rewards/chosen": 0.1766217201948166,
233
- "rewards/margins": 0.1948881596326828,
234
- "rewards/rejected": -0.01826643943786621,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
- "logits/chosen": -1.880910873413086,
241
- "logits/rejected": -1.8820507526397705,
242
- "logps/chosen": -34.01464080810547,
243
- "logps/rejected": -34.783546447753906,
244
- "loss": 0.3871,
245
- "rewards/accuracies": 0.612500011920929,
246
- "rewards/chosen": 0.160243421792984,
247
- "rewards/margins": 0.1839032918214798,
248
- "rewards/rejected": -0.023659853264689445,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
- "logits/chosen": -1.933895468711853,
255
- "logits/rejected": -1.9304730892181396,
256
- "logps/chosen": -36.02853775024414,
257
- "logps/rejected": -32.699058532714844,
258
- "loss": 0.2729,
259
  "rewards/accuracies": 0.637499988079071,
260
- "rewards/chosen": 0.1288261115550995,
261
- "rewards/margins": 0.10750452429056168,
262
- "rewards/rejected": 0.021321602165699005,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
- "logits/chosen": -2.0341715812683105,
269
- "logits/rejected": -2.0267820358276367,
270
- "logps/chosen": -33.55347442626953,
271
- "logps/rejected": -31.3526554107666,
272
- "loss": 0.294,
273
- "rewards/accuracies": 0.6625000238418579,
274
- "rewards/chosen": 0.21994857490062714,
275
- "rewards/margins": 0.22067300975322723,
276
- "rewards/rejected": -0.0007244400912895799,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
- "logits/chosen": -2.0400891304016113,
283
- "logits/rejected": -2.045360565185547,
284
- "logps/chosen": -32.370338439941406,
285
- "logps/rejected": -32.4719123840332,
286
- "loss": 0.2829,
287
- "rewards/accuracies": 0.6625000238418579,
288
- "rewards/chosen": 0.15700635313987732,
289
- "rewards/margins": 0.145101398229599,
290
- "rewards/rejected": 0.011904975399374962,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
- "logits/chosen": -2.041393280029297,
297
- "logits/rejected": -2.038623809814453,
298
- "logps/chosen": -31.328174591064453,
299
- "logps/rejected": -31.316492080688477,
300
- "loss": 0.3044,
301
- "rewards/accuracies": 0.699999988079071,
302
- "rewards/chosen": 0.14470075070858002,
303
- "rewards/margins": 0.1431477963924408,
304
- "rewards/rejected": 0.0015529401134699583,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
- "logits/chosen": -1.9122215509414673,
311
- "logits/rejected": -1.9168663024902344,
312
- "logps/chosen": -31.424020767211914,
313
- "logps/rejected": -32.784080505371094,
314
- "loss": 0.3157,
315
- "rewards/accuracies": 0.699999988079071,
316
- "rewards/chosen": 0.16403506696224213,
317
- "rewards/margins": 0.1578713059425354,
318
- "rewards/rejected": 0.0061637843027710915,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.237051010131836,
324
- "eval_logits/rejected": -2.2321863174438477,
325
- "eval_logps/chosen": -34.018070220947266,
326
- "eval_logps/rejected": -37.51838684082031,
327
- "eval_loss": 0.3716273605823517,
328
- "eval_rewards/accuracies": 0.5245016813278198,
329
- "eval_rewards/chosen": 0.014834923669695854,
330
- "eval_rewards/margins": 0.016425320878624916,
331
- "eval_rewards/rejected": -0.0015903981402516365,
332
- "eval_runtime": 145.5151,
333
  "eval_samples_per_second": 2.357,
334
- "eval_steps_per_second": 0.296,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
- "logits/chosen": -2.023789882659912,
341
- "logits/rejected": -2.034484386444092,
342
- "logps/chosen": -31.767370223999023,
343
- "logps/rejected": -33.890621185302734,
344
- "loss": 0.275,
345
  "rewards/accuracies": 0.675000011920929,
346
- "rewards/chosen": 0.18354059755802155,
347
- "rewards/margins": 0.20867136120796204,
348
- "rewards/rejected": -0.025130782276391983,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
- "logits/chosen": -1.916685700416565,
355
- "logits/rejected": -1.9314892292022705,
356
- "logps/chosen": -29.956628799438477,
357
- "logps/rejected": -31.564035415649414,
358
- "loss": 0.2958,
359
- "rewards/accuracies": 0.6625000238418579,
360
- "rewards/chosen": 0.13942097127437592,
361
- "rewards/margins": 0.14469322562217712,
362
- "rewards/rejected": -0.005272268317639828,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
- "logits/chosen": -1.9737945795059204,
369
- "logits/rejected": -1.9777710437774658,
370
- "logps/chosen": -33.19129180908203,
371
- "logps/rejected": -31.5566463470459,
372
- "loss": 0.287,
373
- "rewards/accuracies": 0.6625000238418579,
374
- "rewards/chosen": 0.20355579257011414,
375
- "rewards/margins": 0.21016716957092285,
376
- "rewards/rejected": -0.0066113718785345554,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
- "logits/chosen": -1.9743419885635376,
383
- "logits/rejected": -1.9523779153823853,
384
- "logps/chosen": -33.9401969909668,
385
- "logps/rejected": -35.008758544921875,
386
- "loss": 0.3185,
387
- "rewards/accuracies": 0.6625000238418579,
388
- "rewards/chosen": 0.1921955645084381,
389
- "rewards/margins": 0.24436470866203308,
390
- "rewards/rejected": -0.052169155329465866,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
- "logits/chosen": -2.0161709785461426,
397
- "logits/rejected": -2.0128414630889893,
398
- "logps/chosen": -32.74829864501953,
399
- "logps/rejected": -36.236392974853516,
400
- "loss": 0.2847,
401
- "rewards/accuracies": 0.7250000238418579,
402
- "rewards/chosen": 0.15909257531166077,
403
- "rewards/margins": 0.1694144755601883,
404
- "rewards/rejected": -0.010321905836462975,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
- "logits/chosen": -1.8847742080688477,
411
- "logits/rejected": -1.8823268413543701,
412
- "logps/chosen": -34.01182174682617,
413
- "logps/rejected": -35.481346130371094,
414
- "loss": 0.3073,
415
- "rewards/accuracies": 0.612500011920929,
416
- "rewards/chosen": 0.1589193046092987,
417
- "rewards/margins": 0.13797220587730408,
418
- "rewards/rejected": 0.020947108045220375,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
- "logits/chosen": -1.8693536520004272,
425
- "logits/rejected": -1.866838812828064,
426
- "logps/chosen": -34.20549392700195,
427
- "logps/rejected": -31.726673126220703,
428
- "loss": 0.2733,
429
- "rewards/accuracies": 0.6875,
430
- "rewards/chosen": 0.17289286851882935,
431
- "rewards/margins": 0.1327010840177536,
432
- "rewards/rejected": 0.040191780775785446,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
- "logits/chosen": -1.9730831384658813,
439
- "logits/rejected": -1.962480902671814,
440
- "logps/chosen": -35.06049728393555,
441
- "logps/rejected": -31.792781829833984,
442
- "loss": 0.2523,
443
- "rewards/accuracies": 0.75,
444
- "rewards/chosen": 0.2665289342403412,
445
- "rewards/margins": 0.22314274311065674,
446
- "rewards/rejected": 0.04338619112968445,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
- "logits/chosen": -2.0680813789367676,
453
- "logits/rejected": -2.053079128265381,
454
- "logps/chosen": -30.738479614257812,
455
- "logps/rejected": -32.61243438720703,
456
- "loss": 0.3194,
457
- "rewards/accuracies": 0.6000000238418579,
458
- "rewards/chosen": 0.16707859933376312,
459
- "rewards/margins": 0.13161785900592804,
460
- "rewards/rejected": 0.035460732877254486,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
- "logits/chosen": -1.9398882389068604,
467
- "logits/rejected": -1.9373395442962646,
468
- "logps/chosen": -32.6181640625,
469
- "logps/rejected": -30.843700408935547,
470
- "loss": 0.2156,
471
- "rewards/accuracies": 0.75,
472
- "rewards/chosen": 0.2780519425868988,
473
- "rewards/margins": 0.3022567331790924,
474
- "rewards/rejected": -0.024204757064580917,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.2364187240600586,
480
- "eval_logits/rejected": -2.2315518856048584,
481
- "eval_logps/chosen": -34.01428985595703,
482
- "eval_logps/rejected": -37.496952056884766,
483
- "eval_loss": 0.38450533151626587,
484
- "eval_rewards/accuracies": 0.49335551261901855,
485
- "eval_rewards/chosen": 0.018236981704831123,
486
- "eval_rewards/margins": 0.0005384809919632971,
487
- "eval_rewards/rejected": 0.01769850216805935,
488
- "eval_runtime": 145.7485,
489
- "eval_samples_per_second": 2.353,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
- "logits/chosen": -1.922579050064087,
497
- "logits/rejected": -1.9193273782730103,
498
- "logps/chosen": -31.345911026000977,
499
- "logps/rejected": -33.72126007080078,
500
- "loss": 0.2917,
501
- "rewards/accuracies": 0.699999988079071,
502
- "rewards/chosen": 0.23169513046741486,
503
- "rewards/margins": 0.21289470791816711,
504
- "rewards/rejected": 0.018800420686602592,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
- "logits/chosen": -1.9754743576049805,
511
- "logits/rejected": -1.9631853103637695,
512
- "logps/chosen": -34.408077239990234,
513
- "logps/rejected": -33.58232879638672,
514
- "loss": 0.2394,
515
- "rewards/accuracies": 0.675000011920929,
516
- "rewards/chosen": 0.1691979616880417,
517
- "rewards/margins": 0.20833876729011536,
518
- "rewards/rejected": -0.03914082050323486,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
- "logits/chosen": -2.0105607509613037,
525
- "logits/rejected": -2.009115219116211,
526
- "logps/chosen": -33.31591033935547,
527
- "logps/rejected": -32.47368621826172,
528
- "loss": 0.2769,
529
- "rewards/accuracies": 0.6499999761581421,
530
- "rewards/chosen": 0.15289874374866486,
531
- "rewards/margins": 0.14759239554405212,
532
- "rewards/rejected": 0.005306343547999859,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
- "logits/chosen": -2.096872091293335,
539
- "logits/rejected": -2.0811073780059814,
540
- "logps/chosen": -33.87510681152344,
541
- "logps/rejected": -33.06427764892578,
542
- "loss": 0.2792,
543
- "rewards/accuracies": 0.737500011920929,
544
- "rewards/chosen": 0.27385497093200684,
545
- "rewards/margins": 0.2254684418439865,
546
- "rewards/rejected": 0.04838654398918152,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
- "logits/chosen": -1.969496488571167,
553
- "logits/rejected": -1.9685735702514648,
554
- "logps/chosen": -32.98945999145508,
555
- "logps/rejected": -32.4643440246582,
556
- "loss": 0.2958,
557
- "rewards/accuracies": 0.737500011920929,
558
- "rewards/chosen": 0.26337358355522156,
559
- "rewards/margins": 0.26264840364456177,
560
- "rewards/rejected": 0.0007252089562825859,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
- "logits/chosen": -1.9255645275115967,
567
- "logits/rejected": -1.9359004497528076,
568
- "logps/chosen": -32.013362884521484,
569
- "logps/rejected": -35.26326370239258,
570
- "loss": 0.3425,
571
- "rewards/accuracies": 0.637499988079071,
572
- "rewards/chosen": 0.1721937507390976,
573
- "rewards/margins": 0.14924712479114532,
574
- "rewards/rejected": 0.022946633398532867,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
- "logits/chosen": -2.0643956661224365,
581
- "logits/rejected": -2.057886838912964,
582
- "logps/chosen": -33.48772430419922,
583
- "logps/rejected": -29.191638946533203,
584
- "loss": 0.298,
585
- "rewards/accuracies": 0.625,
586
- "rewards/chosen": 0.14782151579856873,
587
- "rewards/margins": 0.11651048809289932,
588
- "rewards/rejected": 0.03131101652979851,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
- "logits/chosen": -1.9235725402832031,
595
- "logits/rejected": -1.9257465600967407,
596
- "logps/chosen": -33.965919494628906,
597
- "logps/rejected": -30.839218139648438,
598
- "loss": 0.2616,
599
- "rewards/accuracies": 0.699999988079071,
600
- "rewards/chosen": 0.24581687152385712,
601
- "rewards/margins": 0.2157471626996994,
602
- "rewards/rejected": 0.03006969951093197,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 0.31703355428460356,
610
- "train_runtime": 3251.5033,
611
- "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],
 
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
+ "loss": 1.0,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
+ "logits/chosen": -1.8667490482330322,
29
+ "logits/rejected": -1.8710733652114868,
30
+ "logps/chosen": -36.97007369995117,
31
+ "logps/rejected": -33.66944885253906,
32
+ "loss": 0.9317,
33
+ "rewards/accuracies": 0.5694444179534912,
34
+ "rewards/chosen": 0.03287407010793686,
35
+ "rewards/margins": 0.06830974668264389,
36
+ "rewards/rejected": -0.03543568402528763,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
+ "logits/chosen": -1.9981460571289062,
43
+ "logits/rejected": -2.000789165496826,
44
+ "logps/chosen": -29.641231536865234,
45
+ "logps/rejected": -29.06744384765625,
46
+ "loss": 0.9955,
47
+ "rewards/accuracies": 0.44999998807907104,
48
+ "rewards/chosen": 0.0008672710391692817,
49
+ "rewards/margins": 0.004467610269784927,
50
+ "rewards/rejected": -0.0036003391724079847,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
+ "logits/chosen": -1.920600175857544,
57
+ "logits/rejected": -1.917925238609314,
58
+ "logps/chosen": -31.395061492919922,
59
+ "logps/rejected": -33.240909576416016,
60
+ "loss": 0.9609,
61
+ "rewards/accuracies": 0.6000000238418579,
62
+ "rewards/chosen": 0.01893135905265808,
63
+ "rewards/margins": 0.03913776949048042,
64
+ "rewards/rejected": -0.020206410437822342,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
+ "logits/chosen": -2.017815113067627,
71
+ "logits/rejected": -2.0090720653533936,
72
+ "logps/chosen": -32.5806884765625,
73
+ "logps/rejected": -32.515098571777344,
74
+ "loss": 1.0013,
75
+ "rewards/accuracies": 0.44999998807907104,
76
+ "rewards/chosen": -0.003494268748909235,
77
+ "rewards/margins": -0.0013132141903042793,
78
+ "rewards/rejected": -0.002181055024266243,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
+ "logits/chosen": -1.8630876541137695,
85
+ "logits/rejected": -1.8523353338241577,
86
+ "logps/chosen": -33.549766540527344,
87
+ "logps/rejected": -35.46318435668945,
88
+ "loss": 0.9833,
89
+ "rewards/accuracies": 0.550000011920929,
90
+ "rewards/chosen": 0.006940312683582306,
91
+ "rewards/margins": 0.016694897785782814,
92
+ "rewards/rejected": -0.009754580445587635,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
+ "logits/chosen": -1.9419746398925781,
99
+ "logits/rejected": -1.943914771080017,
100
+ "logps/chosen": -32.527896881103516,
101
+ "logps/rejected": -33.21547317504883,
102
+ "loss": 0.9153,
103
+ "rewards/accuracies": 0.625,
104
+ "rewards/chosen": 0.06505907326936722,
105
+ "rewards/margins": 0.10474522411823273,
106
+ "rewards/rejected": -0.03968615084886551,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
+ "logits/chosen": -2.07257080078125,
113
+ "logits/rejected": -2.0775399208068848,
114
+ "logps/chosen": -34.00202560424805,
115
+ "logps/rejected": -36.622886657714844,
116
+ "loss": 0.9636,
117
+ "rewards/accuracies": 0.512499988079071,
118
+ "rewards/chosen": -0.012188142165541649,
119
+ "rewards/margins": 0.03642461448907852,
120
+ "rewards/rejected": -0.04861275106668472,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
+ "logits/chosen": -1.9333629608154297,
127
+ "logits/rejected": -1.9364970922470093,
128
+ "logps/chosen": -34.302101135253906,
129
+ "logps/rejected": -34.63160705566406,
130
+ "loss": 0.862,
131
+ "rewards/accuracies": 0.5625,
132
+ "rewards/chosen": 0.1131378561258316,
133
+ "rewards/margins": 0.1484164148569107,
134
+ "rewards/rejected": -0.0352785661816597,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
+ "logits/chosen": -1.9408857822418213,
141
+ "logits/rejected": -1.945412039756775,
142
+ "logps/chosen": -32.36528015136719,
143
+ "logps/rejected": -32.34526824951172,
144
+ "loss": 0.9225,
145
  "rewards/accuracies": 0.574999988079071,
146
+ "rewards/chosen": 0.09192506223917007,
147
+ "rewards/margins": 0.07964853197336197,
148
+ "rewards/rejected": 0.012276534922420979,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
+ "logits/chosen": -2.037550210952759,
155
+ "logits/rejected": -2.0355725288391113,
156
+ "logps/chosen": -32.142730712890625,
157
+ "logps/rejected": -31.29366683959961,
158
+ "loss": 0.8913,
159
+ "rewards/accuracies": 0.574999988079071,
160
+ "rewards/chosen": 0.08829358220100403,
161
+ "rewards/margins": 0.1130049005150795,
162
+ "rewards/rejected": -0.02471131458878517,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.232161283493042,
168
+ "eval_logits/rejected": -2.2273108959198,
169
+ "eval_logps/chosen": -34.040714263916016,
170
+ "eval_logps/rejected": -37.54047775268555,
171
+ "eval_loss": 0.9844526052474976,
172
+ "eval_rewards/accuracies": 0.5195183157920837,
173
+ "eval_rewards/chosen": -0.005542654078453779,
174
+ "eval_rewards/margins": 0.015924591571092606,
175
+ "eval_rewards/rejected": -0.021467244252562523,
176
+ "eval_runtime": 145.9018,
177
  "eval_samples_per_second": 2.351,
178
  "eval_steps_per_second": 0.295,
179
  "step": 100
 
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
+ "logits/chosen": -1.9926517009735107,
185
+ "logits/rejected": -1.9902753829956055,
186
+ "logps/chosen": -33.12412643432617,
187
+ "logps/rejected": -34.011417388916016,
188
+ "loss": 0.9361,
189
+ "rewards/accuracies": 0.625,
190
+ "rewards/chosen": 0.10772128403186798,
191
+ "rewards/margins": 0.09156213700771332,
192
+ "rewards/rejected": 0.016159160062670708,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
+ "logits/chosen": -2.00441312789917,
199
+ "logits/rejected": -1.996093988418579,
200
+ "logps/chosen": -32.33955383300781,
201
+ "logps/rejected": -32.13432312011719,
202
+ "loss": 0.9401,
203
  "rewards/accuracies": 0.574999988079071,
204
+ "rewards/chosen": 0.09481850266456604,
205
+ "rewards/margins": 0.07035262137651443,
206
+ "rewards/rejected": 0.024465877562761307,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
+ "logits/chosen": -2.0318965911865234,
213
+ "logits/rejected": -2.023927688598633,
214
+ "logps/chosen": -30.336984634399414,
215
+ "logps/rejected": -32.0634765625,
216
+ "loss": 0.9061,
217
+ "rewards/accuracies": 0.550000011920929,
218
+ "rewards/chosen": 0.12486012279987335,
219
+ "rewards/margins": 0.1342838853597641,
220
+ "rewards/rejected": -0.009423775598406792,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
+ "logits/chosen": -1.9620994329452515,
227
+ "logits/rejected": -1.9723354578018188,
228
+ "logps/chosen": -31.222240447998047,
229
+ "logps/rejected": -32.57916259765625,
230
+ "loss": 0.795,
231
+ "rewards/accuracies": 0.6499999761581421,
232
+ "rewards/chosen": 0.17767605185508728,
233
+ "rewards/margins": 0.21753115952014923,
234
+ "rewards/rejected": -0.03985511139035225,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
+ "logits/chosen": -1.8727748394012451,
241
+ "logits/rejected": -1.87395441532135,
242
+ "logps/chosen": -33.931861877441406,
243
+ "logps/rejected": -34.79869842529297,
244
+ "loss": 0.7946,
245
+ "rewards/accuracies": 0.6000000238418579,
246
+ "rewards/chosen": 0.23474939167499542,
247
+ "rewards/margins": 0.272051066160202,
248
+ "rewards/rejected": -0.03730170056223869,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
+ "logits/chosen": -1.9248745441436768,
255
+ "logits/rejected": -1.9214649200439453,
256
+ "logps/chosen": -36.014469146728516,
257
+ "logps/rejected": -32.73783493041992,
258
+ "loss": 0.8532,
259
  "rewards/accuracies": 0.637499988079071,
260
+ "rewards/chosen": 0.14148668944835663,
261
+ "rewards/margins": 0.15506146848201752,
262
+ "rewards/rejected": -0.013574766926467419,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
+ "logits/chosen": -2.025555372238159,
269
+ "logits/rejected": -2.01819109916687,
270
+ "logps/chosen": -33.50218200683594,
271
+ "logps/rejected": -31.41971206665039,
272
+ "loss": 0.7292,
273
+ "rewards/accuracies": 0.699999988079071,
274
+ "rewards/chosen": 0.26611366868019104,
275
+ "rewards/margins": 0.3271873891353607,
276
+ "rewards/rejected": -0.06107370927929878,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
+ "logits/chosen": -2.0320167541503906,
283
+ "logits/rejected": -2.037261486053467,
284
+ "logps/chosen": -32.24850845336914,
285
+ "logps/rejected": -32.45344924926758,
286
+ "loss": 0.7865,
287
+ "rewards/accuracies": 0.637499988079071,
288
+ "rewards/chosen": 0.2666531801223755,
289
+ "rewards/margins": 0.23813048005104065,
290
+ "rewards/rejected": 0.028522688895463943,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
+ "logits/chosen": -2.032525062561035,
297
+ "logits/rejected": -2.0297436714172363,
298
+ "logps/chosen": -31.313217163085938,
299
+ "logps/rejected": -31.349472045898438,
300
+ "loss": 0.8387,
301
+ "rewards/accuracies": 0.637499988079071,
302
+ "rewards/chosen": 0.1581628918647766,
303
+ "rewards/margins": 0.18629543483257294,
304
+ "rewards/rejected": -0.02813255414366722,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
+ "logits/chosen": -1.902632713317871,
311
+ "logits/rejected": -1.907284140586853,
312
+ "logps/chosen": -31.320043563842773,
313
+ "logps/rejected": -32.85698699951172,
314
+ "loss": 0.7293,
315
+ "rewards/accuracies": 0.7124999761581421,
316
+ "rewards/chosen": 0.2576131224632263,
317
+ "rewards/margins": 0.31706100702285767,
318
+ "rewards/rejected": -0.05944784730672836,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.228637933731079,
324
+ "eval_logits/rejected": -2.2238004207611084,
325
+ "eval_logps/chosen": -34.053680419921875,
326
+ "eval_logps/rejected": -37.581058502197266,
327
+ "eval_loss": 0.9601577520370483,
328
+ "eval_rewards/accuracies": 0.5714285373687744,
329
+ "eval_rewards/chosen": -0.01721162348985672,
330
+ "eval_rewards/margins": 0.040783192962408066,
331
+ "eval_rewards/rejected": -0.057994820177555084,
332
+ "eval_runtime": 145.5388,
333
  "eval_samples_per_second": 2.357,
334
+ "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
+ "logits/chosen": -2.0149245262145996,
341
+ "logits/rejected": -2.025560140609741,
342
+ "logps/chosen": -31.77438735961914,
343
+ "logps/rejected": -33.95419692993164,
344
+ "loss": 0.7666,
345
  "rewards/accuracies": 0.675000011920929,
346
+ "rewards/chosen": 0.17722666263580322,
347
+ "rewards/margins": 0.25957340002059937,
348
+ "rewards/rejected": -0.08234670013189316,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
+ "logits/chosen": -1.906951904296875,
355
+ "logits/rejected": -1.9217418432235718,
356
+ "logps/chosen": -29.83829116821289,
357
+ "logps/rejected": -31.636096954345703,
358
+ "loss": 0.7204,
359
+ "rewards/accuracies": 0.737500011920929,
360
+ "rewards/chosen": 0.2459266185760498,
361
+ "rewards/margins": 0.3160557448863983,
362
+ "rewards/rejected": -0.07012919336557388,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
+ "logits/chosen": -1.9629713296890259,
369
+ "logits/rejected": -1.9669532775878906,
370
+ "logps/chosen": -33.124656677246094,
371
+ "logps/rejected": -31.630443572998047,
372
+ "loss": 0.7348,
373
+ "rewards/accuracies": 0.699999988079071,
374
+ "rewards/chosen": 0.2635299265384674,
375
+ "rewards/margins": 0.3365571200847626,
376
+ "rewards/rejected": -0.07302714884281158,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
+ "logits/chosen": -1.9614177942276,
383
+ "logits/rejected": -1.9395818710327148,
384
+ "logps/chosen": -33.87095260620117,
385
+ "logps/rejected": -35.10104751586914,
386
+ "loss": 0.6865,
387
+ "rewards/accuracies": 0.7250000238418579,
388
+ "rewards/chosen": 0.254514217376709,
389
+ "rewards/margins": 0.38974156975746155,
390
+ "rewards/rejected": -0.13522735238075256,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
+ "logits/chosen": -2.0029492378234863,
397
+ "logits/rejected": -1.999629020690918,
398
+ "logps/chosen": -32.730865478515625,
399
+ "logps/rejected": -36.28009796142578,
400
+ "loss": 0.8055,
401
+ "rewards/accuracies": 0.6625000238418579,
402
+ "rewards/chosen": 0.17478153109550476,
403
+ "rewards/margins": 0.2244400531053543,
404
+ "rewards/rejected": -0.04965851828455925,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
+ "logits/chosen": -1.8703190088272095,
411
+ "logits/rejected": -1.8679043054580688,
412
+ "logps/chosen": -33.98231887817383,
413
+ "logps/rejected": -35.54644775390625,
414
+ "loss": 0.8042,
415
+ "rewards/accuracies": 0.7250000238418579,
416
+ "rewards/chosen": 0.18547315895557404,
417
+ "rewards/margins": 0.22311437129974365,
418
+ "rewards/rejected": -0.03764120861887932,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
+ "logits/chosen": -1.8551464080810547,
425
+ "logits/rejected": -1.852746605873108,
426
+ "logps/chosen": -34.20850372314453,
427
+ "logps/rejected": -31.803356170654297,
428
+ "loss": 0.8096,
429
+ "rewards/accuracies": 0.675000011920929,
430
+ "rewards/chosen": 0.170187309384346,
431
+ "rewards/margins": 0.19900819659233093,
432
+ "rewards/rejected": -0.02882089652121067,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
+ "logits/chosen": -1.9582526683807373,
439
+ "logits/rejected": -1.947749376296997,
440
+ "logps/chosen": -35.0114631652832,
441
+ "logps/rejected": -31.88564682006836,
442
+ "loss": 0.692,
443
+ "rewards/accuracies": 0.737500011920929,
444
+ "rewards/chosen": 0.3106640875339508,
445
+ "rewards/margins": 0.3508565425872803,
446
+ "rewards/rejected": -0.040192440152168274,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
+ "logits/chosen": -2.053699493408203,
453
+ "logits/rejected": -2.038789749145508,
454
+ "logps/chosen": -30.727243423461914,
455
+ "logps/rejected": -32.641685485839844,
456
+ "loss": 0.8827,
457
+ "rewards/accuracies": 0.574999988079071,
458
+ "rewards/chosen": 0.17719359695911407,
459
+ "rewards/margins": 0.16805905103683472,
460
+ "rewards/rejected": 0.009134533815085888,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
+ "logits/chosen": -1.9243850708007812,
467
+ "logits/rejected": -1.9218356609344482,
468
+ "logps/chosen": -32.43050003051758,
469
+ "logps/rejected": -30.8950138092041,
470
+ "loss": 0.6144,
471
+ "rewards/accuracies": 0.7749999761581421,
472
+ "rewards/chosen": 0.446951299905777,
473
+ "rewards/margins": 0.5173346400260925,
474
+ "rewards/rejected": -0.07038338482379913,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.2248916625976562,
480
+ "eval_logits/rejected": -2.2200686931610107,
481
+ "eval_logps/chosen": -34.08660125732422,
482
+ "eval_logps/rejected": -37.603153228759766,
483
+ "eval_loss": 0.9712583422660828,
484
+ "eval_rewards/accuracies": 0.5282392501831055,
485
+ "eval_rewards/chosen": -0.046843186020851135,
486
+ "eval_rewards/margins": 0.031036507338285446,
487
+ "eval_rewards/rejected": -0.07787969708442688,
488
+ "eval_runtime": 145.8399,
489
+ "eval_samples_per_second": 2.352,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
+ "logits/chosen": -1.9094560146331787,
497
+ "logits/rejected": -1.9062097072601318,
498
+ "logps/chosen": -31.33370018005371,
499
+ "logps/rejected": -33.83475112915039,
500
+ "loss": 0.728,
501
+ "rewards/accuracies": 0.737500011920929,
502
+ "rewards/chosen": 0.24268333613872528,
503
+ "rewards/margins": 0.32601919770240784,
504
+ "rewards/rejected": -0.08333584666252136,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
+ "logits/chosen": -1.9593706130981445,
511
+ "logits/rejected": -1.9471466541290283,
512
+ "logps/chosen": -34.3392333984375,
513
+ "logps/rejected": -33.68544387817383,
514
+ "loss": 0.6954,
515
+ "rewards/accuracies": 0.6875,
516
+ "rewards/chosen": 0.2311573475599289,
517
+ "rewards/margins": 0.36310091614723206,
518
+ "rewards/rejected": -0.13194358348846436,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
+ "logits/chosen": -1.9940307140350342,
525
+ "logits/rejected": -1.9925845861434937,
526
+ "logps/chosen": -33.20854568481445,
527
+ "logps/rejected": -32.552764892578125,
528
+ "loss": 0.7359,
529
+ "rewards/accuracies": 0.762499988079071,
530
+ "rewards/chosen": 0.24952416121959686,
531
+ "rewards/margins": 0.31538745760917664,
532
+ "rewards/rejected": -0.06586329638957977,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
+ "logits/chosen": -2.080763578414917,
539
+ "logits/rejected": -2.065063953399658,
540
+ "logps/chosen": -33.82006072998047,
541
+ "logps/rejected": -33.105167388916016,
542
+ "loss": 0.7407,
543
+ "rewards/accuracies": 0.6875,
544
+ "rewards/chosen": 0.32339948415756226,
545
+ "rewards/margins": 0.31181785464286804,
546
+ "rewards/rejected": 0.011581619270145893,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
+ "logits/chosen": -1.9535402059555054,
553
+ "logits/rejected": -1.9526821374893188,
554
+ "logps/chosen": -32.8734130859375,
555
+ "logps/rejected": -32.565185546875,
556
+ "loss": 0.645,
557
+ "rewards/accuracies": 0.7124999761581421,
558
+ "rewards/chosen": 0.36781299114227295,
559
+ "rewards/margins": 0.4578477442264557,
560
+ "rewards/rejected": -0.09003473073244095,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
+ "logits/chosen": -1.909014344215393,
567
+ "logits/rejected": -1.919298768043518,
568
+ "logps/chosen": -31.879894256591797,
569
+ "logps/rejected": -35.34550857543945,
570
+ "loss": 0.6942,
571
+ "rewards/accuracies": 0.7250000238418579,
572
+ "rewards/chosen": 0.29231759905815125,
573
+ "rewards/margins": 0.34339430928230286,
574
+ "rewards/rejected": -0.051076728850603104,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
+ "logits/chosen": -2.0480055809020996,
581
+ "logits/rejected": -2.0415189266204834,
582
+ "logps/chosen": -33.37665939331055,
583
+ "logps/rejected": -29.24251937866211,
584
+ "loss": 0.7583,
585
+ "rewards/accuracies": 0.7124999761581421,
586
+ "rewards/chosen": 0.24777868390083313,
587
+ "rewards/margins": 0.262265145778656,
588
+ "rewards/rejected": -0.014486486092209816,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
+ "logits/chosen": -1.9075886011123657,
595
+ "logits/rejected": -1.9097877740859985,
596
+ "logps/chosen": -33.8558464050293,
597
+ "logps/rejected": -30.982568740844727,
598
+ "loss": 0.6567,
599
+ "rewards/accuracies": 0.737500011920929,
600
+ "rewards/chosen": 0.3448841869831085,
601
+ "rewards/margins": 0.44383174180984497,
602
+ "rewards/rejected": -0.09894753992557526,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.8125879040012112,
610
+ "train_runtime": 3249.3137,
611
+ "train_samples_per_second": 0.948,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],