hugodk-sch committed
Commit 1950075
1 Parent(s): 31c48fa

Model save

Files changed (5)
  1. README.md +13 -16
  2. adapter_model.safetensors +1 -1
  3. all_results.json +2 -15
  4. train_results.json +2 -2
  5. trainer_state.json +374 -374
README.md CHANGED
@@ -1,13 +1,10 @@
 ---
 library_name: peft
 tags:
- - alignment-handbook
 - trl
 - dpo
 - generated_from_trainer
 base_model: NbAiLab/nb-gpt-j-6B-v2
- datasets:
- - hugodk-sch/aftonposten_title_prefs
 model-index:
 - name: aftonposten-6b-align-scan
   results: []
@@ -18,17 +15,17 @@ should probably proofread and complete it, then remove this comment. -->
 
 # aftonposten-6b-align-scan
 
- This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
+ This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
 It achieves the following results on the evaluation set:
- - Loss: 0.4934
- - Rewards/chosen: 0.2139
- - Rewards/rejected: 0.1872
- - Rewards/accuracies: 0.5457
- - Rewards/margins: 0.0267
- - Logps/rejected: -37.2826
- - Logps/chosen: -33.7672
- - Logits/rejected: -2.2262
- - Logits/chosen: -2.2310
+ - Loss: 0.4464
+ - Rewards/chosen: 0.0179
+ - Rewards/rejected: -0.0124
+ - Rewards/accuracies: 0.5428
+ - Rewards/margins: 0.0304
+ - Logps/rejected: -37.5321
+ - Logps/chosen: -34.0121
+ - Logits/rejected: -2.2303
+ - Logits/chosen: -2.2352
 
 ## Model description
 
@@ -63,9 +60,9 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
 |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
- | 0.4749 | 0.26 | 100 | 0.4963 | 0.1467 | 0.1303 | 0.5336 | 0.0164 | -37.3537 | -33.8512 | -2.2327 | -2.2375 |
- | 0.4376 | 0.52 | 200 | 0.4956 | 0.1959 | 0.1769 | 0.5486 | 0.0191 | -37.2955 | -33.7896 | -2.2291 | -2.2339 |
- | 0.3835 | 0.78 | 300 | 0.4950 | 0.2045 | 0.1836 | 0.5245 | 0.0210 | -37.2872 | -33.7789 | -2.2264 | -2.2312 |
+ | 0.3817 | 0.26 | 100 | 0.4287 | 0.0507 | 0.0294 | 0.5577 | 0.0213 | -37.4799 | -33.9712 | -2.2329 | -2.2378 |
+ | 0.3991 | 0.52 | 200 | 0.4536 | -0.0010 | -0.0197 | 0.5374 | 0.0187 | -37.5412 | -34.0358 | -2.2300 | -2.2349 |
+ | 0.2816 | 0.78 | 300 | 0.4464 | 0.0179 | -0.0124 | 0.5428 | 0.0304 | -37.5321 | -34.0121 | -2.2303 | -2.2352 |
 
 
 ### Framework versions
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:6d5d4c9c7297ee45694cd2242c380ff348f1878684b2ee08ac91a39f76a9730d
+ oid sha256:9323562170691969f33b21814d35e8b1eb7c2c35b3293abb03e7e74ca2e2815d
 size 176183216
all_results.json CHANGED
@@ -1,20 +1,7 @@
 {
   "epoch": 1.0,
- "eval_logits/chosen": -2.2309982776641846,
- "eval_logits/rejected": -2.2261931896209717,
- "eval_logps/chosen": -33.767173767089844,
- "eval_logps/rejected": -37.282623291015625,
- "eval_loss": 0.49339157342910767,
- "eval_rewards/accuracies": 0.5456810593605042,
- "eval_rewards/chosen": 0.21390400826931,
- "eval_rewards/margins": 0.026708098128437996,
- "eval_rewards/rejected": 0.18719588220119476,
- "eval_runtime": 145.5786,
- "eval_samples": 343,
- "eval_samples_per_second": 2.356,
- "eval_steps_per_second": 0.295,
- "train_loss": 0.4538224170734356,
- "train_runtime": 3252.427,
+ "train_loss": 0.3756082092012678,
+ "train_runtime": 3251.4223,
   "train_samples": 3079,
   "train_samples_per_second": 0.947,
   "train_steps_per_second": 0.118
train_results.json CHANGED
@@ -1,7 +1,7 @@
 {
   "epoch": 1.0,
- "train_loss": 0.4538224170734356,
- "train_runtime": 3252.427,
+ "train_loss": 0.3756082092012678,
+ "train_runtime": 3251.4223,
   "train_samples": 3079,
   "train_samples_per_second": 0.947,
   "train_steps_per_second": 0.118
trainer_state.json CHANGED
@@ -15,7 +15,7 @@
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
- "loss": 0.5,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
@@ -25,589 +25,589 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
- "logits/chosen": -1.866452693939209,
29
- "logits/rejected": -1.8707809448242188,
30
- "logps/chosen": -36.98366165161133,
31
- "logps/rejected": -33.666290283203125,
32
- "loss": 0.4914,
33
- "rewards/accuracies": 0.5555555820465088,
34
- "rewards/chosen": 0.01834939420223236,
35
- "rewards/margins": 0.04732292518019676,
36
- "rewards/rejected": -0.02897353284060955,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
- "logits/chosen": -1.9971332550048828,
43
- "logits/rejected": -1.999770164489746,
44
- "logps/chosen": -29.6362247467041,
45
- "logps/rejected": -29.044448852539062,
46
- "loss": 0.5009,
47
  "rewards/accuracies": 0.48750001192092896,
48
- "rewards/chosen": 0.004776014480739832,
49
- "rewards/margins": -0.01042011845856905,
50
- "rewards/rejected": 0.015196132473647594,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
- "logits/chosen": -1.9208379983901978,
57
- "logits/rejected": -1.9181442260742188,
58
- "logps/chosen": -31.425155639648438,
59
- "logps/rejected": -33.237945556640625,
60
- "loss": 0.4997,
61
- "rewards/accuracies": 0.512499988079071,
62
- "rewards/chosen": -0.007249836809933186,
63
- "rewards/margins": 0.008341209962964058,
64
- "rewards/rejected": -0.015591045841574669,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
- "logits/chosen": -2.0180625915527344,
71
- "logits/rejected": -2.009308338165283,
72
- "logps/chosen": -32.559749603271484,
73
- "logps/rejected": -32.524681091308594,
74
- "loss": 0.4962,
75
- "rewards/accuracies": 0.550000011920929,
76
- "rewards/chosen": 0.013646525330841541,
77
- "rewards/margins": 0.023250887170433998,
78
- "rewards/rejected": -0.009604359045624733,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
- "logits/chosen": -1.8639278411865234,
85
- "logits/rejected": -1.8531490564346313,
86
- "logps/chosen": -33.53788375854492,
87
- "logps/rejected": -35.42852020263672,
88
- "loss": 0.5015,
89
- "rewards/accuracies": 0.48750001192092896,
90
- "rewards/chosen": 0.015677569434046745,
91
- "rewards/margins": -0.0033827773295342922,
92
- "rewards/rejected": 0.019060343503952026,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
- "logits/chosen": -1.9435577392578125,
99
- "logits/rejected": -1.9454940557479858,
100
- "logps/chosen": -32.49355697631836,
101
- "logps/rejected": -33.1856575012207,
102
- "loss": 0.4817,
103
- "rewards/accuracies": 0.637499988079071,
104
- "rewards/chosen": 0.08530057966709137,
105
- "rewards/margins": 0.09672373533248901,
106
- "rewards/rejected": -0.011423162184655666,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
- "logits/chosen": -2.075368881225586,
113
- "logits/rejected": -2.0803427696228027,
114
- "logps/chosen": -33.89010238647461,
115
- "logps/rejected": -36.549957275390625,
116
- "loss": 0.4868,
117
- "rewards/accuracies": 0.5874999761581421,
118
- "rewards/chosen": 0.07870620489120483,
119
- "rewards/margins": 0.06357409805059433,
120
- "rewards/rejected": 0.015132094733417034,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
- "logits/chosen": -1.9372116327285767,
127
- "logits/rejected": -1.9403302669525146,
128
- "logps/chosen": -34.205604553222656,
129
- "logps/rejected": -34.49314880371094,
130
- "loss": 0.4788,
131
- "rewards/accuracies": 0.512499988079071,
132
- "rewards/chosen": 0.17776472866535187,
133
- "rewards/margins": 0.09835849702358246,
134
- "rewards/rejected": 0.0794062465429306,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
- "logits/chosen": -1.945416808128357,
141
- "logits/rejected": -1.9499365091323853,
142
- "logps/chosen": -32.26136016845703,
143
- "logps/rejected": -32.26020050048828,
144
- "loss": 0.4788,
145
- "rewards/accuracies": 0.5874999761581421,
146
- "rewards/chosen": 0.16484788060188293,
147
- "rewards/margins": 0.08588258177042007,
148
- "rewards/rejected": 0.07896529138088226,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
- "logits/chosen": -2.043586254119873,
155
- "logits/rejected": -2.0416104793548584,
156
- "logps/chosen": -31.968154907226562,
157
- "logps/rejected": -31.14252281188965,
158
- "loss": 0.4749,
159
- "rewards/accuracies": 0.625,
160
- "rewards/chosen": 0.21814338862895966,
161
- "rewards/margins": 0.11919467151165009,
162
- "rewards/rejected": 0.09894871711730957,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.2375268936157227,
168
- "eval_logits/rejected": -2.2326817512512207,
169
- "eval_logps/chosen": -33.851219177246094,
170
- "eval_logps/rejected": -37.3537483215332,
171
- "eval_loss": 0.49629145860671997,
172
- "eval_rewards/accuracies": 0.5336378812789917,
173
- "eval_rewards/chosen": 0.14666710793972015,
174
- "eval_rewards/margins": 0.016369162127375603,
175
- "eval_rewards/rejected": 0.1302979290485382,
176
- "eval_runtime": 145.7938,
177
- "eval_samples_per_second": 2.353,
178
- "eval_steps_per_second": 0.295,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
- "logits/chosen": -1.998853087425232,
185
- "logits/rejected": -1.9964749813079834,
186
- "logps/chosen": -32.937965393066406,
187
- "logps/rejected": -33.846004486083984,
188
- "loss": 0.474,
189
- "rewards/accuracies": 0.637499988079071,
190
- "rewards/chosen": 0.2446821928024292,
191
- "rewards/margins": 0.09798828512430191,
192
- "rewards/rejected": 0.1466939002275467,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
- "logits/chosen": -2.0093610286712646,
199
- "logits/rejected": -2.0010437965393066,
200
- "logps/chosen": -32.1511116027832,
201
- "logps/rejected": -31.939533233642578,
202
- "loss": 0.4873,
203
- "rewards/accuracies": 0.612500011920929,
204
- "rewards/chosen": 0.2350343018770218,
205
- "rewards/margins": 0.057456426322460175,
206
- "rewards/rejected": 0.17757786810398102,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
- "logits/chosen": -2.0374419689178467,
213
- "logits/rejected": -2.029500961303711,
214
- "logps/chosen": -30.157424926757812,
215
- "logps/rejected": -31.90500831604004,
216
- "loss": 0.4716,
217
- "rewards/accuracies": 0.625,
218
- "rewards/chosen": 0.2546332776546478,
219
- "rewards/margins": 0.1362351030111313,
220
- "rewards/rejected": 0.11839816719293594,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
- "logits/chosen": -1.9675471782684326,
227
- "logits/rejected": -1.9777450561523438,
228
- "logps/chosen": -31.059162139892578,
229
- "logps/rejected": -32.384300231933594,
230
- "loss": 0.4616,
231
- "rewards/accuracies": 0.612500011920929,
232
- "rewards/chosen": 0.28839823603630066,
233
- "rewards/margins": 0.16793587803840637,
234
- "rewards/rejected": 0.12046238034963608,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
- "logits/chosen": -1.8793624639511108,
241
- "logits/rejected": -1.8805418014526367,
242
- "logps/chosen": -33.646915435791016,
243
- "logps/rejected": -34.56272888183594,
244
- "loss": 0.4385,
245
  "rewards/accuracies": 0.625,
246
- "rewards/chosen": 0.4366222321987152,
247
- "rewards/margins": 0.2809991240501404,
248
- "rewards/rejected": 0.15562310814857483,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
- "logits/chosen": -1.9307949542999268,
255
- "logits/rejected": -1.9274256229400635,
256
- "logps/chosen": -35.765926361083984,
257
- "logps/rejected": -32.48193359375,
258
- "loss": 0.4685,
259
- "rewards/accuracies": 0.637499988079071,
260
- "rewards/chosen": 0.32460084557533264,
261
- "rewards/margins": 0.13194666802883148,
262
- "rewards/rejected": 0.19265416264533997,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
- "logits/chosen": -2.0318870544433594,
269
- "logits/rejected": -2.0245723724365234,
270
- "logps/chosen": -33.25593566894531,
271
- "logps/rejected": -31.189266204833984,
272
- "loss": 0.4315,
273
  "rewards/accuracies": 0.6875,
274
- "rewards/chosen": 0.4335424304008484,
275
- "rewards/margins": 0.3034728467464447,
276
- "rewards/rejected": 0.1300695389509201,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
- "logits/chosen": -2.0387840270996094,
283
- "logits/rejected": -2.0439517498016357,
284
- "logps/chosen": -31.977941513061523,
285
- "logps/rejected": -32.183441162109375,
286
- "loss": 0.4499,
287
- "rewards/accuracies": 0.625,
288
- "rewards/chosen": 0.4534761905670166,
289
- "rewards/margins": 0.2121172845363617,
290
- "rewards/rejected": 0.2413589060306549,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
- "logits/chosen": -2.039411783218384,
297
- "logits/rejected": -2.036684513092041,
298
- "logps/chosen": -31.094594955444336,
299
- "logps/rejected": -31.10666275024414,
300
- "loss": 0.4653,
301
- "rewards/accuracies": 0.625,
302
- "rewards/chosen": 0.3154846429824829,
303
- "rewards/margins": 0.1462438404560089,
304
- "rewards/rejected": 0.169240802526474,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
- "logits/chosen": -1.909691572189331,
311
- "logits/rejected": -1.914358139038086,
312
- "logps/chosen": -31.08693504333496,
313
- "logps/rejected": -32.61079406738281,
314
- "loss": 0.4376,
315
  "rewards/accuracies": 0.6875,
316
- "rewards/chosen": 0.41547876596450806,
317
- "rewards/margins": 0.27136775851249695,
318
- "rewards/rejected": 0.14411096274852753,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.233895778656006,
324
- "eval_logits/rejected": -2.2290823459625244,
325
- "eval_logps/chosen": -33.78962326049805,
326
- "eval_logps/rejected": -37.29551696777344,
327
- "eval_loss": 0.4955826997756958,
328
- "eval_rewards/accuracies": 0.5485880374908447,
329
- "eval_rewards/chosen": 0.19594170153141022,
330
- "eval_rewards/margins": 0.019058095291256905,
331
- "eval_rewards/rejected": 0.17688362300395966,
332
- "eval_runtime": 145.7262,
333
- "eval_samples_per_second": 2.354,
334
- "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
- "logits/chosen": -2.0220179557800293,
341
- "logits/rejected": -2.0326101779937744,
342
- "logps/chosen": -31.51509666442871,
343
- "logps/rejected": -33.655662536621094,
344
- "loss": 0.4554,
345
- "rewards/accuracies": 0.637499988079071,
346
- "rewards/chosen": 0.36496439576148987,
347
- "rewards/margins": 0.19933317601680756,
348
- "rewards/rejected": 0.16563120484352112,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
- "logits/chosen": -1.9142780303955078,
355
- "logits/rejected": -1.9289791584014893,
356
- "logps/chosen": -29.601001739501953,
357
- "logps/rejected": -31.418704986572266,
358
- "loss": 0.4339,
359
- "rewards/accuracies": 0.762499988079071,
360
- "rewards/chosen": 0.4084321856498718,
361
- "rewards/margins": 0.2968555688858032,
362
- "rewards/rejected": 0.11157669872045517,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
- "logits/chosen": -1.9722967147827148,
369
- "logits/rejected": -1.9762967824935913,
370
- "logps/chosen": -32.825157165527344,
371
- "logps/rejected": -31.445331573486328,
372
- "loss": 0.4142,
373
- "rewards/accuracies": 0.699999988079071,
374
- "rewards/chosen": 0.47385087609291077,
375
- "rewards/margins": 0.39067354798316956,
376
- "rewards/rejected": 0.0831773579120636,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
- "logits/chosen": -1.9708006381988525,
383
- "logits/rejected": -1.9490963220596313,
384
- "logps/chosen": -33.599205017089844,
385
- "logps/rejected": -34.856910705566406,
386
- "loss": 0.4183,
387
- "rewards/accuracies": 0.6875,
388
- "rewards/chosen": 0.4436335563659668,
389
- "rewards/margins": 0.36852845549583435,
390
- "rewards/rejected": 0.07510510087013245,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
- "logits/chosen": -2.011307954788208,
397
- "logits/rejected": -2.008012533187866,
398
- "logps/chosen": -32.494956970214844,
399
- "logps/rejected": -35.98812484741211,
400
- "loss": 0.4641,
401
- "rewards/accuracies": 0.5625,
402
- "rewards/chosen": 0.3440879285335541,
403
- "rewards/margins": 0.15464913845062256,
404
- "rewards/rejected": 0.18943879008293152,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
- "logits/chosen": -1.8780018091201782,
411
- "logits/rejected": -1.8755817413330078,
412
- "logps/chosen": -33.72126388549805,
413
- "logps/rejected": -35.290069580078125,
414
- "loss": 0.4544,
415
- "rewards/accuracies": 0.675000011920929,
416
- "rewards/chosen": 0.37370795011520386,
417
- "rewards/margins": 0.20206721127033234,
418
- "rewards/rejected": 0.17164072394371033,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
- "logits/chosen": -1.8628206253051758,
425
- "logits/rejected": -1.8603187799453735,
426
- "logps/chosen": -33.929344177246094,
427
- "logps/rejected": -31.61127281188965,
428
- "loss": 0.4451,
429
- "rewards/accuracies": 0.637499988079071,
430
- "rewards/chosen": 0.3746057152748108,
431
- "rewards/margins": 0.246560737490654,
432
- "rewards/rejected": 0.128044992685318,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
- "logits/chosen": -1.9666433334350586,
439
- "logits/rejected": -1.9561984539031982,
440
- "logps/chosen": -34.74022674560547,
441
- "logps/rejected": -31.632495880126953,
442
- "loss": 0.4239,
443
- "rewards/accuracies": 0.7124999761581421,
444
- "rewards/chosen": 0.4931296706199646,
445
- "rewards/margins": 0.3263343572616577,
446
- "rewards/rejected": 0.16679534316062927,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
- "logits/chosen": -2.0619094371795654,
453
- "logits/rejected": -2.0470869541168213,
454
- "logps/chosen": -30.43317222595215,
455
- "logps/rejected": -32.3420524597168,
456
- "loss": 0.4679,
457
- "rewards/accuracies": 0.5249999761581421,
458
- "rewards/chosen": 0.39276057481765747,
459
- "rewards/margins": 0.1449340283870697,
460
- "rewards/rejected": 0.24782654643058777,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
- "logits/chosen": -1.9332126379013062,
467
- "logits/rejected": -1.9307750463485718,
468
- "logps/chosen": -32.100502014160156,
469
- "logps/rejected": -30.6780948638916,
470
- "loss": 0.3835,
471
  "rewards/accuracies": 0.75,
472
- "rewards/chosen": 0.6612862348556519,
473
- "rewards/margins": 0.5503143072128296,
474
- "rewards/rejected": 0.1109718531370163,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.231203556060791,
480
- "eval_logits/rejected": -2.226395606994629,
481
- "eval_logps/chosen": -33.778892517089844,
482
- "eval_logps/rejected": -37.28717803955078,
483
- "eval_loss": 0.4949820339679718,
484
- "eval_rewards/accuracies": 0.5245016813278198,
485
- "eval_rewards/chosen": 0.204526886343956,
486
- "eval_rewards/margins": 0.020971858873963356,
487
- "eval_rewards/rejected": 0.1835550218820572,
488
- "eval_runtime": 145.6189,
489
- "eval_samples_per_second": 2.355,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
- "logits/chosen": -1.9173246622085571,
497
- "logits/rejected": -1.9141733646392822,
498
- "logps/chosen": -31.059444427490234,
499
- "logps/rejected": -33.56504440307617,
500
- "loss": 0.4356,
501
- "rewards/accuracies": 0.7124999761581421,
502
- "rewards/chosen": 0.4351249635219574,
503
- "rewards/margins": 0.293440043926239,
504
- "rewards/rejected": 0.141684889793396,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
- "logits/chosen": -1.9676717519760132,
511
- "logits/rejected": -1.9555410146713257,
512
- "logps/chosen": -34.03219985961914,
513
- "logps/rejected": -33.442317962646484,
514
- "loss": 0.417,
515
- "rewards/accuracies": 0.7124999761581421,
516
- "rewards/chosen": 0.4510994851589203,
517
- "rewards/margins": 0.37388378381729126,
518
- "rewards/rejected": 0.07721573859453201,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
- "logits/chosen": -2.002480983734131,
525
- "logits/rejected": -2.0011117458343506,
526
- "logps/chosen": -32.882102966308594,
527
- "logps/rejected": -32.251502990722656,
528
- "loss": 0.4339,
529
  "rewards/accuracies": 0.6499999761581421,
530
- "rewards/chosen": 0.4829506278038025,
531
- "rewards/margins": 0.30048781633377075,
532
- "rewards/rejected": 0.18246281147003174,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
- "logits/chosen": -2.0899081230163574,
539
- "logits/rejected": -2.0742757320404053,
540
- "logps/chosen": -33.487709045410156,
541
- "logps/rejected": -32.8193359375,
542
- "loss": 0.4286,
543
- "rewards/accuracies": 0.675000011920929,
544
- "rewards/chosen": 0.5533460974693298,
545
- "rewards/margins": 0.3143841624259949,
546
- "rewards/rejected": 0.23896190524101257,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
- "logits/chosen": -1.9617509841918945,
553
- "logits/rejected": -1.9609159231185913,
554
- "logps/chosen": -32.60249710083008,
555
- "logps/rejected": -32.25555419921875,
556
- "loss": 0.418,
557
- "rewards/accuracies": 0.6875,
558
- "rewards/chosen": 0.5436802506446838,
559
- "rewards/margins": 0.37600547075271606,
560
- "rewards/rejected": 0.16767482459545135,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
- "logits/chosen": -1.9185073375701904,
567
- "logits/rejected": -1.9287922382354736,
568
- "logps/chosen": -31.57277488708496,
569
- "logps/rejected": -35.039085388183594,
570
- "loss": 0.4304,
571
- "rewards/accuracies": 0.675000011920929,
572
- "rewards/chosen": 0.505532443523407,
573
- "rewards/margins": 0.305793821811676,
574
- "rewards/rejected": 0.19973860681056976,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
- "logits/chosen": -2.0573890209198,
581
- "logits/rejected": -2.0508790016174316,
582
- "logps/chosen": -33.04835891723633,
583
- "logps/rejected": -28.99324607849121,
584
- "loss": 0.4308,
585
- "rewards/accuracies": 0.7124999761581421,
586
- "rewards/chosen": 0.4828890860080719,
587
- "rewards/margins": 0.2963466942310333,
588
- "rewards/rejected": 0.18654237687587738,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
- "logits/chosen": -1.9167007207870483,
595
- "logits/rejected": -1.9188674688339233,
596
- "logps/chosen": -33.65839385986328,
597
- "logps/rejected": -30.719829559326172,
598
- "loss": 0.4247,
599
- "rewards/accuracies": 0.699999988079071,
600
- "rewards/chosen": 0.46452397108078003,
601
- "rewards/margins": 0.34228652715682983,
602
- "rewards/rejected": 0.12223746627569199,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 0.4538224170734356,
610
- "train_runtime": 3252.427,
611
  "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }
 
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
+ "loss": 0.3906,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
+ "logits/chosen": -1.8662821054458618,
29
+ "logits/rejected": -1.8706018924713135,
30
+ "logps/chosen": -36.98260498046875,
31
+ "logps/rejected": -33.66376876831055,
32
+ "loss": 0.3501,
33
+ "rewards/accuracies": 0.5694444179534912,
34
+ "rewards/chosen": 0.01919599249958992,
35
+ "rewards/margins": 0.04614981636404991,
36
+ "rewards/rejected": -0.02695382386445999,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
+ "logits/chosen": -1.9981708526611328,
43
+ "logits/rejected": -2.000824451446533,
44
+ "logps/chosen": -29.6453914642334,
45
+ "logps/rejected": -29.056461334228516,
46
+ "loss": 0.4314,
47
  "rewards/accuracies": 0.48750001192092896,
48
+ "rewards/chosen": -0.0025594178587198257,
49
+ "rewards/margins": -0.008148794062435627,
50
+ "rewards/rejected": 0.005589376203715801,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
+ "logits/chosen": -1.9205211400985718,
57
+ "logits/rejected": -1.917824149131775,
58
+ "logps/chosen": -31.402332305908203,
59
+ "logps/rejected": -33.20569610595703,
60
+ "loss": 0.4366,
61
+ "rewards/accuracies": 0.574999988079071,
62
+ "rewards/chosen": 0.011007689870893955,
63
+ "rewards/margins": 0.0007972270250320435,
64
+ "rewards/rejected": 0.010210464708507061,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
+ "logits/chosen": -2.0170958042144775,
71
+ "logits/rejected": -2.0083703994750977,
72
+ "logps/chosen": -32.547943115234375,
73
+ "logps/rejected": -32.49810791015625,
74
+ "loss": 0.4286,
75
+ "rewards/accuracies": 0.5375000238418579,
76
+ "rewards/chosen": 0.023093106225132942,
77
+ "rewards/margins": 0.011441526934504509,
78
+ "rewards/rejected": 0.011651577427983284,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
+ "logits/chosen": -1.8613898754119873,
85
+ "logits/rejected": -1.8506320714950562,
86
+ "logps/chosen": -33.56399154663086,
87
+ "logps/rejected": -35.46310043334961,
88
+ "loss": 0.4498,
89
+ "rewards/accuracies": 0.4625000059604645,
90
+ "rewards/chosen": -0.005208463408052921,
91
+ "rewards/margins": 0.0033974028192460537,
92
+ "rewards/rejected": -0.008605867624282837,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
+ "logits/chosen": -1.9389715194702148,
99
+ "logits/rejected": -1.9409143924713135,
100
+ "logps/chosen": -32.57710266113281,
101
+ "logps/rejected": -33.21430206298828,
102
+ "loss": 0.3914,
103
+ "rewards/accuracies": 0.612500011920929,
104
+ "rewards/chosen": 0.018466468900442123,
105
+ "rewards/margins": 0.05280427262187004,
106
+ "rewards/rejected": -0.03433779999613762,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
+ "logits/chosen": -2.0723679065704346,
113
+ "logits/rejected": -2.0773472785949707,
114
+ "logps/chosen": -33.946128845214844,
115
+ "logps/rejected": -36.60929489135742,
116
+ "loss": 0.4672,
117
+ "rewards/accuracies": 0.5,
118
+ "rewards/chosen": 0.033884815871715546,
119
+ "rewards/margins": 0.0662197396159172,
120
+ "rewards/rejected": -0.03233493119478226,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
+ "logits/chosen": -1.9366763830184937,
127
+ "logits/rejected": -1.9398372173309326,
128
+ "logps/chosen": -34.32460403442383,
129
+ "logps/rejected": -34.59550094604492,
130
+ "loss": 0.3932,
131
+ "rewards/accuracies": 0.5625,
132
+ "rewards/chosen": 0.0825641006231308,
133
+ "rewards/margins": 0.08503931760787964,
134
+ "rewards/rejected": -0.002475212560966611,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
+ "logits/chosen": -1.9469963312149048,
141
+ "logits/rejected": -1.9515202045440674,
142
+ "logps/chosen": -32.401084899902344,
143
+ "logps/rejected": -32.32483673095703,
144
+ "loss": 0.4402,
145
+ "rewards/accuracies": 0.574999988079071,
146
+ "rewards/chosen": 0.05306895822286606,
147
+ "rewards/margins": 0.02581069990992546,
148
+ "rewards/rejected": 0.027258265763521194,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
+ "logits/chosen": -2.0446877479553223,
155
+ "logits/rejected": -2.0427000522613525,
156
+ "logps/chosen": -32.168331146240234,
157
+ "logps/rejected": -31.263940811157227,
158
+ "loss": 0.3817,
159
+ "rewards/accuracies": 0.5375000238418579,
160
+ "rewards/chosen": 0.05800473690032959,
161
+ "rewards/margins": 0.056188035756349564,
162
+ "rewards/rejected": 0.0018166989320889115,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.2377800941467285,
168
+ "eval_logits/rejected": -2.2329328060150146,
169
+ "eval_logps/chosen": -33.97116470336914,
170
+ "eval_logps/rejected": -37.47990036010742,
171
+ "eval_loss": 0.42868927121162415,
172
+ "eval_rewards/accuracies": 0.5577242374420166,
173
+ "eval_rewards/chosen": 0.05071057379245758,
174
+ "eval_rewards/margins": 0.021336428821086884,
175
+ "eval_rewards/rejected": 0.029374146834015846,
176
+ "eval_runtime": 146.0732,
177
+ "eval_samples_per_second": 2.348,
178
+ "eval_steps_per_second": 0.294,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
+ "logits/chosen": -1.9993603229522705,
185
+ "logits/rejected": -1.996971845626831,
186
+ "logps/chosen": -33.112144470214844,
187
+ "logps/rejected": -33.986610412597656,
188
+ "loss": 0.5297,
189
+ "rewards/accuracies": 0.574999988079071,
190
+ "rewards/chosen": 0.105336032807827,
191
+ "rewards/margins": 0.07112538814544678,
192
+ "rewards/rejected": 0.03421063348650932,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
+ "logits/chosen": -2.009766101837158,
199
+ "logits/rejected": -2.0014333724975586,
200
+ "logps/chosen": -32.351280212402344,
201
+ "logps/rejected": -32.099830627441406,
202
+ "loss": 0.5288,
203
+ "rewards/accuracies": 0.550000011920929,
204
+ "rewards/chosen": 0.07490243762731552,
205
+ "rewards/margins": 0.025563379749655724,
206
+ "rewards/rejected": 0.04933905601501465,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
+ "logits/chosen": -2.0372567176818848,
213
+ "logits/rejected": -2.029240369796753,
214
+ "logps/chosen": -30.34758949279785,
215
+ "logps/rejected": -32.03752899169922,
216
+ "loss": 0.4171,
217
+ "rewards/accuracies": 0.550000011920929,
218
+ "rewards/chosen": 0.10250232368707657,
219
+ "rewards/margins": 0.09011942148208618,
220
+ "rewards/rejected": 0.01238289289176464,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
+ "logits/chosen": -1.966684341430664,
227
+ "logits/rejected": -1.9769500494003296,
228
+ "logps/chosen": -31.219501495361328,
229
+ "logps/rejected": -32.558082580566406,
230
+ "loss": 0.4241,
231
+ "rewards/accuracies": 0.675000011920929,
232
+ "rewards/chosen": 0.1601306051015854,
233
+ "rewards/margins": 0.178691104054451,
234
+ "rewards/rejected": -0.018560513854026794,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
+ "logits/chosen": -1.8799800872802734,
241
+ "logits/rejected": -1.8811286687850952,
242
+ "logps/chosen": -34.00492858886719,
243
+ "logps/rejected": -34.78579330444336,
244
+ "loss": 0.3742,
245
  "rewards/accuracies": 0.625,
246
+ "rewards/chosen": 0.15021029114723206,
247
+ "rewards/margins": 0.17304366827011108,
248
+ "rewards/rejected": -0.02283337712287903,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
+ "logits/chosen": -1.932443618774414,
255
+ "logits/rejected": -1.9290469884872437,
256
+ "logps/chosen": -36.061256408691406,
257
+ "logps/rejected": -32.7225456237793,
258
+ "loss": 0.377,
259
+ "rewards/accuracies": 0.625,
260
+ "rewards/chosen": 0.0883389413356781,
261
+ "rewards/margins": 0.0881747230887413,
262
+ "rewards/rejected": 0.00016421750478912145,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
+ "logits/chosen": -2.0334668159484863,
269
+ "logits/rejected": -2.026076078414917,
270
+ "logps/chosen": -33.546730041503906,
271
+ "logps/rejected": -31.355152130126953,
272
+ "loss": 0.3457,
273
  "rewards/accuracies": 0.6875,
274
+ "rewards/chosen": 0.2009061872959137,
275
+ "rewards/margins": 0.2035447061061859,
276
+ "rewards/rejected": -0.0026385621167719364,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
+ "logits/chosen": -2.0394930839538574,
283
+ "logits/rejected": -2.044752359390259,
284
+ "logps/chosen": -32.33013153076172,
285
+ "logps/rejected": -32.470909118652344,
286
+ "loss": 0.2919,
287
+ "rewards/accuracies": 0.6875,
288
+ "rewards/chosen": 0.17172439396381378,
289
+ "rewards/margins": 0.1603413224220276,
290
+ "rewards/rejected": 0.01138305850327015,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
+ "logits/chosen": -2.0403592586517334,
297
+ "logits/rejected": -2.0375704765319824,
298
+ "logps/chosen": -31.314708709716797,
299
+ "logps/rejected": -31.29607582092285,
300
+ "loss": 0.4191,
301
+ "rewards/accuracies": 0.612500011920929,
302
+ "rewards/chosen": 0.13939552009105682,
303
+ "rewards/margins": 0.12168798595666885,
304
+ "rewards/rejected": 0.017707547172904015,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
+ "logits/chosen": -1.9107002019882202,
311
+ "logits/rejected": -1.915356993675232,
312
+ "logps/chosen": -31.39776611328125,
313
+ "logps/rejected": -32.7841682434082,
314
+ "loss": 0.3991,
315
  "rewards/accuracies": 0.6875,
316
+ "rewards/chosen": 0.1668148785829544,
317
+ "rewards/margins": 0.16140125691890717,
318
+ "rewards/rejected": 0.005413623061031103,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.234880208969116,
324
+ "eval_logits/rejected": -2.2300188541412354,
325
+ "eval_logps/chosen": -34.03581619262695,
326
+ "eval_logps/rejected": -37.54121017456055,
327
+ "eval_loss": 0.4536176025867462,
328
+ "eval_rewards/accuracies": 0.5373754501342773,
329
+ "eval_rewards/chosen": -0.00101160176564008,
330
+ "eval_rewards/margins": 0.01865854486823082,
331
+ "eval_rewards/rejected": -0.019670147448778152,
332
+ "eval_runtime": 145.4825,
333
+ "eval_samples_per_second": 2.358,
334
+ "eval_steps_per_second": 0.296,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
+ "logits/chosen": -2.022460460662842,
341
+ "logits/rejected": -2.03314471244812,
342
+ "logps/chosen": -31.764179229736328,
343
+ "logps/rejected": -33.924598693847656,
344
+ "loss": 0.2827,
345
+ "rewards/accuracies": 0.75,
346
+ "rewards/chosen": 0.1656998097896576,
347
+ "rewards/margins": 0.21521887183189392,
348
+ "rewards/rejected": -0.049519073218107224,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
+ "logits/chosen": -1.9155023097991943,
355
+ "logits/rejected": -1.9303117990493774,
356
+ "logps/chosen": -29.97748374938965,
357
+ "logps/rejected": -31.563989639282227,
358
+ "loss": 0.3625,
359
+ "rewards/accuracies": 0.6499999761581421,
360
+ "rewards/chosen": 0.10724345594644547,
361
+ "rewards/margins": 0.11189230531454086,
362
+ "rewards/rejected": -0.004648865200579166,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
+ "logits/chosen": -1.9727697372436523,
369
+ "logits/rejected": -1.9767513275146484,
370
+ "logps/chosen": -33.23460006713867,
371
+ "logps/rejected": -31.581985473632812,
372
+ "loss": 0.3234,
373
+ "rewards/accuracies": 0.7250000238418579,
374
+ "rewards/chosen": 0.1462947130203247,
375
+ "rewards/margins": 0.17244157195091248,
376
+ "rewards/rejected": -0.026146870106458664,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
+ "logits/chosen": -1.971836805343628,
383
+ "logits/rejected": -1.9498882293701172,
384
+ "logps/chosen": -33.9459228515625,
385
+ "logps/rejected": -35.02843475341797,
386
+ "loss": 0.3771,
387
+ "rewards/accuracies": 0.699999988079071,
388
+ "rewards/chosen": 0.1662607342004776,
389
+ "rewards/margins": 0.2283717393875122,
390
+ "rewards/rejected": -0.0621110200881958,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
+ "logits/chosen": -2.0139522552490234,
397
+ "logits/rejected": -2.010632038116455,
398
+ "logps/chosen": -32.76514434814453,
399
+ "logps/rejected": -36.231590270996094,
400
+ "loss": 0.3601,
401
+ "rewards/accuracies": 0.637499988079071,
402
+ "rewards/chosen": 0.1279398649930954,
403
+ "rewards/margins": 0.1332733929157257,
404
+ "rewards/rejected": -0.005333536770194769,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
+ "logits/chosen": -1.8817164897918701,
411
+ "logits/rejected": -1.8792623281478882,
412
+ "logps/chosen": -34.03675842285156,
413
+ "logps/rejected": -35.52077102661133,
414
+ "loss": 0.3873,
415
+ "rewards/accuracies": 0.699999988079071,
416
+ "rewards/chosen": 0.12131254374980927,
417
+ "rewards/margins": 0.13423141837120056,
418
+ "rewards/rejected": -0.012918862514197826,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
+ "logits/chosen": -1.8678665161132812,
425
+ "logits/rejected": -1.8653860092163086,
426
+ "logps/chosen": -34.24772262573242,
427
+ "logps/rejected": -31.75213050842285,
428
+ "loss": 0.3738,
429
+ "rewards/accuracies": 0.6499999761581421,
430
+ "rewards/chosen": 0.11989933252334595,
431
+ "rewards/margins": 0.1045403853058815,
432
+ "rewards/rejected": 0.015358942560851574,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
+ "logits/chosen": -1.9715077877044678,
439
+ "logits/rejected": -1.9609073400497437,
440
+ "logps/chosen": -35.06243133544922,
441
+ "logps/rejected": -31.837047576904297,
442
+ "loss": 0.2652,
443
+ "rewards/accuracies": 0.7749999761581421,
444
+ "rewards/chosen": 0.2353697121143341,
445
+ "rewards/margins": 0.2322143018245697,
446
+ "rewards/rejected": 0.003155359299853444,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
+ "logits/chosen": -2.0671467781066895,
453
+ "logits/rejected": -2.052150011062622,
454
+ "logps/chosen": -30.7277774810791,
455
+ "logps/rejected": -32.63950729370117,
456
+ "loss": 0.3926,
457
+ "rewards/accuracies": 0.625,
458
+ "rewards/chosen": 0.15707775950431824,
459
+ "rewards/margins": 0.14721640944480896,
460
+ "rewards/rejected": 0.009861335158348083,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
+ "logits/chosen": -1.9383538961410522,
467
+ "logits/rejected": -1.935782790184021,
468
+ "logps/chosen": -32.601158142089844,
469
+ "logps/rejected": -30.8317813873291,
470
+ "loss": 0.2816,
471
  "rewards/accuracies": 0.75,
472
+ "rewards/chosen": 0.26075875759124756,
473
+ "rewards/margins": 0.2727365493774414,
474
+ "rewards/rejected": -0.011977789923548698,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.2351884841918945,
480
+ "eval_logits/rejected": -2.230333089828491,
481
+ "eval_logps/chosen": -34.01213836669922,
482
+ "eval_logps/rejected": -37.532142639160156,
483
+ "eval_loss": 0.4463596045970917,
484
+ "eval_rewards/accuracies": 0.5427741408348083,
485
+ "eval_rewards/chosen": 0.017930733039975166,
486
+ "eval_rewards/margins": 0.03035038523375988,
487
+ "eval_rewards/rejected": -0.012419654987752438,
488
+ "eval_runtime": 145.7115,
489
+ "eval_samples_per_second": 2.354,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
+ "logits/chosen": -1.9204727411270142,
497
+ "logits/rejected": -1.9172074794769287,
498
+ "logps/chosen": -31.360204696655273,
499
+ "logps/rejected": -33.759212493896484,
500
+ "loss": 0.3011,
501
+ "rewards/accuracies": 0.75,
502
+ "rewards/chosen": 0.19451384246349335,
503
+ "rewards/margins": 0.20816302299499512,
504
+ "rewards/rejected": -0.01364919263869524,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
+ "logits/chosen": -1.9719558954238892,
511
+ "logits/rejected": -1.959651231765747,
512
+ "logps/chosen": -34.44158935546875,
513
+ "logps/rejected": -33.596771240234375,
514
+ "loss": 0.2835,
515
+ "rewards/accuracies": 0.675000011920929,
516
+ "rewards/chosen": 0.12358621507883072,
517
+ "rewards/margins": 0.1699295938014984,
518
+ "rewards/rejected": -0.0463433675467968,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
+ "logits/chosen": -2.0068559646606445,
525
+ "logits/rejected": -2.00539231300354,
526
+ "logps/chosen": -33.331153869628906,
527
+ "logps/rejected": -32.49522018432617,
528
+ "loss": 0.3707,
529
  "rewards/accuracies": 0.6499999761581421,
530
+ "rewards/chosen": 0.12371524423360825,
531
+ "rewards/margins": 0.13622507452964783,
532
+ "rewards/rejected": -0.012509837746620178,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
+ "logits/chosen": -2.0939385890960693,
539
+ "logits/rejected": -2.0781521797180176,
540
+ "logps/chosen": -33.88405227661133,
541
+ "logps/rejected": -33.090179443359375,
542
+ "loss": 0.36,
543
+ "rewards/accuracies": 0.699999988079071,
544
+ "rewards/chosen": 0.23627004027366638,
545
+ "rewards/margins": 0.21398480236530304,
546
+ "rewards/rejected": 0.02228522300720215,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
+ "logits/chosen": -1.9664535522460938,
553
+ "logits/rejected": -1.9655389785766602,
554
+ "logps/chosen": -32.96236038208008,
555
+ "logps/rejected": -32.478477478027344,
556
+ "loss": 0.3,
557
+ "rewards/accuracies": 0.737500011920929,
558
+ "rewards/chosen": 0.2557886242866516,
559
+ "rewards/margins": 0.2664529085159302,
560
+ "rewards/rejected": -0.0106642572209239,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
+ "logits/chosen": -1.921805739402771,
567
+ "logits/rejected": -1.9321292638778687,
568
+ "logps/chosen": -32.022247314453125,
569
+ "logps/rejected": -35.31328582763672,
570
+ "loss": 0.3671,
571
+ "rewards/accuracies": 0.7124999761581421,
572
+ "rewards/chosen": 0.14595167338848114,
573
+ "rewards/margins": 0.16557307541370392,
574
+ "rewards/rejected": -0.01962139829993248,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
+ "logits/chosen": -2.060645580291748,
581
+ "logits/rejected": -2.0541434288024902,
582
+ "logps/chosen": -33.4498405456543,
583
+ "logps/rejected": -29.211456298828125,
584
+ "loss": 0.3226,
585
+ "rewards/accuracies": 0.6875,
586
+ "rewards/chosen": 0.16169998049736023,
587
+ "rewards/margins": 0.1497238427400589,
588
+ "rewards/rejected": 0.011976108886301517,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
+ "logits/chosen": -1.920902967453003,
595
+ "logits/rejected": -1.9230811595916748,
596
+ "logps/chosen": -33.97188186645508,
597
+ "logps/rejected": -30.862768173217773,
598
+ "loss": 0.3117,
599
+ "rewards/accuracies": 0.7124999761581421,
600
+ "rewards/chosen": 0.21373608708381653,
601
+ "rewards/margins": 0.20584869384765625,
602
+ "rewards/rejected": 0.00788736529648304,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.3756082092012678,
610
+ "train_runtime": 3251.4223,
611
  "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }
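
The model card updated above describes a PEFT adapter trained with TRL's DPO trainer on top of NbAiLab/nb-gpt-j-6B-v2, and this commit replaces the adapter weights in adapter_model.safetensors. As a minimal sketch of how such an adapter is typically loaded for inference with transformers and peft (the adapter repo id below is assumed from the model card's name field and is not part of this commit; adjust dtype and device to your hardware):

```python
# Minimal sketch: load the DPO-tuned PEFT adapter on top of its base model.
# Assumption: the adapter lives at "hugodk-sch/aftonposten-6b-align-scan"
# (inferred from the model card); replace with the actual repo id.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE_ID = "NbAiLab/nb-gpt-j-6B-v2"                   # base model named in the card
ADAPTER_ID = "hugodk-sch/aftonposten-6b-align-scan"  # assumed adapter repo id

tokenizer = AutoTokenizer.from_pretrained(BASE_ID)
base = AutoModelForCausalLM.from_pretrained(BASE_ID, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(base, ADAPTER_ID)  # loads adapter_model.safetensors
model.eval()

inputs = tokenizer("Oslo er", return_tensors="pt")
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=30, do_sample=False)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

If a standalone checkpoint is preferred over loading base plus adapter at runtime, the adapter can also be folded into the base weights with peft's merge_and_unload() before saving.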