hugodk-sch committed

Commit 191c1a5
1 parent: d514169

Model save

Files changed (5)
  1. README.md +13 -16
  2. adapter_model.safetensors +1 -1
  3. all_results.json +2 -15
  4. train_results.json +2 -2
  5. trainer_state.json +372 -372
README.md CHANGED
@@ -1,13 +1,10 @@
 ---
 library_name: peft
 tags:
-- alignment-handbook
 - trl
 - dpo
 - generated_from_trainer
 base_model: NbAiLab/nb-gpt-j-6B-v2
-datasets:
-- hugodk-sch/aftonposten_title_prefs
 model-index:
 - name: aftonposten-6b-align-scan
   results: []
@@ -18,17 +15,17 @@ should probably proofread and complete it, then remove this comment. -->
 
 # aftonposten-6b-align-scan
 
-This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
+This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.6928
-- Rewards/chosen: 0.0020
-- Rewards/rejected: 0.0008
-- Rewards/accuracies: 0.5100
-- Rewards/margins: 0.0011
-- Logps/rejected: -37.5125
-- Logps/chosen: -34.0248
-- Logits/rejected: -2.2389
-- Logits/chosen: -2.2438
+- Loss: 0.6949
+- Rewards/chosen: -0.0012
+- Rewards/rejected: 0.0013
+- Rewards/accuracies: 0.5162
+- Rewards/margins: -0.0026
+- Logps/rejected: -37.5122
+- Logps/chosen: -34.0387
+- Logits/rejected: -2.2399
+- Logits/chosen: -2.2448
 
 ## Model description
 
@@ -63,9 +60,9 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
 |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
-| 0.6918 | 0.26 | 100 | 0.6936 | -0.0007 | -0.0002 | 0.5079 | -0.0006 | -37.5174 | -34.0382 | -2.2392 | -2.2441 |
-| 0.6908 | 0.52 | 200 | 0.6932 | 0.0034 | 0.0031 | 0.5112 | 0.0002 | -37.5009 | -34.0177 | -2.2392 | -2.2440 |
-| 0.6895 | 0.78 | 300 | 0.6937 | 0.0015 | 0.0022 | 0.5050 | -0.0007 | -37.5058 | -34.0273 | -2.2390 | -2.2438 |
+| 0.6886 | 0.26 | 100 | 0.6950 | 0.0021 | 0.0049 | 0.4817 | -0.0028 | -37.5001 | -34.0275 | -2.2393 | -2.2442 |
+| 0.6909 | 0.52 | 200 | 0.6952 | 0.0005 | 0.0038 | 0.5191 | -0.0033 | -37.5039 | -34.0328 | -2.2391 | -2.2439 |
+| 0.6869 | 0.78 | 300 | 0.6949 | -0.0012 | 0.0013 | 0.5162 | -0.0026 | -37.5122 | -34.0387 | -2.2399 | -2.2448 |
 
 
 ### Framework versions
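The card above names the base model and the peft library but still ships no usage example. Below is a minimal sketch of how such a PEFT adapter is typically loaded for inference; the adapter repo id `hugodk-sch/aftonposten-6b-align-scan` is inferred from the commit author and model name, not stated anywhere in this diff.

```python
# Hypothetical usage sketch (not part of this commit). Only the base model id
# NbAiLab/nb-gpt-j-6B-v2 comes from the card; the adapter repo id is assumed.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "NbAiLab/nb-gpt-j-6B-v2"
adapter_id = "hugodk-sch/aftonposten-6b-align-scan"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base_model, adapter_id)  # attaches adapter_model.safetensors

inputs = tokenizer("Overskrift: ", return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=30)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```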
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a430fc5bf78793bfed9f2f01cf0c273a5b0f4e1551593252cae780aeb18a1984
+oid sha256:a1c41272bfc20de1787ca81e4ccbee9baeb8b772de90ef90ec9ab66107b6cf9f
 size 176183216
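adapter_model.safetensors is tracked with Git LFS, so the diff only touches the pointer file: `oid sha256:` is the digest of the actual weights and `size` is their length in bytes; here the digest changes while the size stays 176,183,216 bytes. A small sketch, assuming the real file has been downloaded locally, of checking it against the new pointer:

```python
# Sketch: verify a locally downloaded adapter_model.safetensors against the LFS pointer.
# The local path is an assumption; the digest and size are the new values from this commit.
import hashlib
from pathlib import Path

path = Path("adapter_model.safetensors")  # assumed local download location
expected_oid = "a1c41272bfc20de1787ca81e4ccbee9baeb8b772de90ef90ec9ab66107b6cf9f"
expected_size = 176183216

data = path.read_bytes()
assert len(data) == expected_size, "size does not match the LFS pointer"
assert hashlib.sha256(data).hexdigest() == expected_oid, "sha256 does not match the LFS pointer"
print("adapter file matches the LFS pointer")
```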
all_results.json CHANGED
@@ -1,20 +1,7 @@
 {
     "epoch": 1.0,
-    "eval_logits/chosen": -2.243804693222046,
-    "eval_logits/rejected": -2.238922119140625,
-    "eval_logps/chosen": -34.02475357055664,
-    "eval_logps/rejected": -37.51246643066406,
-    "eval_loss": 0.6927995681762695,
-    "eval_rewards/accuracies": 0.5099667906761169,
-    "eval_rewards/chosen": 0.0019583876710385084,
-    "eval_rewards/margins": 0.0011271554976701736,
-    "eval_rewards/rejected": 0.0008312325226143003,
-    "eval_runtime": 145.6648,
-    "eval_samples": 343,
-    "eval_samples_per_second": 2.355,
-    "eval_steps_per_second": 0.295,
-    "train_loss": 0.6919509800997647,
-    "train_runtime": 3253.2141,
+    "train_loss": 0.691356255791404,
+    "train_runtime": 3254.7157,
     "train_samples": 3079,
     "train_samples_per_second": 0.946,
     "train_steps_per_second": 0.118
train_results.json CHANGED
@@ -1,7 +1,7 @@
 {
     "epoch": 1.0,
-    "train_loss": 0.6919509800997647,
-    "train_runtime": 3253.2141,
+    "train_loss": 0.691356255791404,
+    "train_runtime": 3254.7157,
     "train_samples": 3079,
     "train_samples_per_second": 0.946,
     "train_steps_per_second": 0.118
trainer_state.json CHANGED
@@ -25,589 +25,589 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.2820512820512818e-07,
28
- "logits/chosen": -1.8659842014312744,
29
- "logits/rejected": -1.8702926635742188,
30
- "logps/chosen": -36.97752380371094,
31
- "logps/rejected": -33.65974426269531,
32
- "loss": 0.6875,
33
- "rewards/accuracies": 0.5277777910232544,
34
- "rewards/chosen": 0.005815046839416027,
35
- "rewards/margins": 0.011749334633350372,
36
- "rewards/rejected": -0.005934285931289196,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.5641025641025636e-07,
42
- "logits/chosen": -1.997287392616272,
43
- "logits/rejected": -1.9999300241470337,
44
- "logps/chosen": -29.62233543395996,
45
- "logps/rejected": -29.058269500732422,
46
- "loss": 0.6918,
47
- "rewards/accuracies": 0.550000011920929,
48
- "rewards/chosen": 0.003971663769334555,
49
- "rewards/margins": 0.002936883131042123,
50
- "rewards/rejected": 0.0010347809875383973,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.8461538461538463e-07,
56
- "logits/chosen": -1.9199504852294922,
57
- "logits/rejected": -1.917252779006958,
58
- "logps/chosen": -31.414203643798828,
59
- "logps/rejected": -33.21183395385742,
60
- "loss": 0.6938,
61
- "rewards/accuracies": 0.5,
62
- "rewards/chosen": 0.0003786637098528445,
63
- "rewards/margins": -0.000946315354667604,
64
- "rewards/rejected": 0.001324978657066822,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438433e-07,
70
- "logits/chosen": -2.0170738697052,
71
- "logits/rejected": -2.008314847946167,
72
- "logps/chosen": -32.582008361816406,
73
- "logps/rejected": -32.49999237060547,
74
- "loss": 0.6951,
75
- "rewards/accuracies": 0.4625000059604645,
76
- "rewards/chosen": -0.0010401441249996424,
77
- "rewards/margins": -0.003575131995603442,
78
- "rewards/rejected": 0.0025349881034344435,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542186e-07,
84
- "logits/chosen": -1.8644828796386719,
85
- "logits/rejected": -1.8536993265151978,
86
- "logps/chosen": -33.572994232177734,
87
- "logps/rejected": -35.421836853027344,
88
- "loss": 0.6979,
89
- "rewards/accuracies": 0.4000000059604645,
90
- "rewards/chosen": -0.0031035186257213354,
91
- "rewards/margins": -0.009205068461596966,
92
- "rewards/rejected": 0.006101551000028849,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941118e-07,
98
- "logits/chosen": -1.9452167749404907,
99
- "logits/rejected": -1.9471642971038818,
100
- "logps/chosen": -32.57027816772461,
101
- "logps/rejected": -33.18465805053711,
102
- "loss": 0.689,
103
- "rewards/accuracies": 0.550000011920929,
104
- "rewards/chosen": 0.0059812054969370365,
105
- "rewards/margins": 0.008636695332825184,
106
- "rewards/rejected": -0.0026554896030575037,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413548e-07,
112
- "logits/chosen": -2.0795772075653076,
113
- "logits/rejected": -2.0845541954040527,
114
- "logps/chosen": -33.98872375488281,
115
- "logps/rejected": -36.58330535888672,
116
- "loss": 0.692,
117
- "rewards/accuracies": 0.4749999940395355,
118
- "rewards/chosen": -4.708058986579999e-05,
119
- "rewards/margins": 0.002839430468156934,
120
- "rewards/rejected": -0.002886510454118252,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-07,
126
- "logits/chosen": -1.942291498184204,
127
- "logits/rejected": -1.945452094078064,
128
- "logps/chosen": -34.40483474731445,
129
- "logps/rejected": -34.58934020996094,
130
- "loss": 0.6914,
131
- "rewards/accuracies": 0.5375000238418579,
132
- "rewards/chosen": 0.004595070146024227,
133
- "rewards/margins": 0.003981433808803558,
134
- "rewards/rejected": 0.0006136370939202607,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.736716601303429e-07,
140
- "logits/chosen": -1.950508713722229,
141
- "logits/rejected": -1.9550243616104126,
142
- "logps/chosen": -32.44390106201172,
143
- "logps/rejected": -32.375,
144
- "loss": 0.6893,
145
- "rewards/accuracies": 0.625,
146
- "rewards/chosen": 0.004703683778643608,
147
- "rewards/margins": 0.007921957410871983,
148
- "rewards/rejected": -0.003218273166567087,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.62624545834521e-07,
154
- "logits/chosen": -2.048755645751953,
155
- "logits/rejected": -2.046764850616455,
156
- "logps/chosen": -32.22840118408203,
157
- "logps/rejected": -31.269317626953125,
158
- "loss": 0.6918,
159
- "rewards/accuracies": 0.512499988079071,
160
- "rewards/chosen": 0.0024873518850654364,
161
- "rewards/margins": 0.0031086006201803684,
162
- "rewards/rejected": -0.0006212486186996102,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.2440829277038574,
168
- "eval_logits/rejected": -2.2392072677612305,
169
- "eval_logps/chosen": -34.038169860839844,
170
- "eval_logps/rejected": -37.51740264892578,
171
- "eval_loss": 0.6936388611793518,
172
- "eval_rewards/accuracies": 0.5078904032707214,
173
- "eval_rewards/chosen": -0.0007231775089167058,
174
- "eval_rewards/margins": -0.0005666659562848508,
175
- "eval_rewards/rejected": -0.00015651114517822862,
176
- "eval_runtime": 146.0083,
177
  "eval_samples_per_second": 2.349,
178
- "eval_steps_per_second": 0.295,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.4982572012636904e-07,
184
- "logits/chosen": -2.0054333209991455,
185
- "logits/rejected": -2.0030055046081543,
186
- "logps/chosen": -33.228519439697266,
187
- "logps/rejected": -34.01723098754883,
188
- "loss": 0.693,
189
- "rewards/accuracies": 0.5,
190
- "rewards/chosen": 0.003059562761336565,
191
- "rewards/margins": 0.0006311066681519151,
192
- "rewards/rejected": 0.002428455278277397,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777677e-07,
198
- "logits/chosen": -2.0166068077087402,
199
- "logits/rejected": -2.0082287788391113,
200
- "logps/chosen": -32.45368194580078,
201
- "logps/rejected": -32.192543029785156,
202
- "loss": 0.6911,
203
- "rewards/accuracies": 0.574999988079071,
204
- "rewards/chosen": -0.0017549397889524698,
205
- "rewards/margins": 0.004452199675142765,
206
- "rewards/rejected": -0.006207138299942017,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.194082707715275e-07,
212
- "logits/chosen": -2.0461902618408203,
213
- "logits/rejected": -2.038154125213623,
214
- "logps/chosen": -30.499649047851562,
215
- "logps/rejected": -32.04579544067383,
216
- "loss": 0.6965,
217
- "rewards/accuracies": 0.48750001192092896,
218
- "rewards/chosen": -0.004786232020705938,
219
- "rewards/margins": -0.00622799526900053,
220
- "rewards/rejected": 0.0014417637139558792,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.020402418666621e-07,
226
- "logits/chosen": -1.976766586303711,
227
- "logits/rejected": -1.9870338439941406,
228
- "logps/chosen": -31.403955459594727,
229
- "logps/rejected": -32.556556701660156,
230
- "loss": 0.6896,
231
- "rewards/accuracies": 0.5874999761581421,
232
- "rewards/chosen": 0.0031408462673425674,
233
- "rewards/margins": 0.007476066239178181,
234
- "rewards/rejected": -0.004335219506174326,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.8341962650351185e-07,
240
- "logits/chosen": -1.8904304504394531,
241
- "logits/rejected": -1.8915287256240845,
242
- "logps/chosen": -34.17884063720703,
243
- "logps/rejected": -34.76021957397461,
244
- "loss": 0.6917,
245
- "rewards/accuracies": 0.5249999761581421,
246
- "rewards/chosen": 0.0027701070066541433,
247
- "rewards/margins": 0.0033626866061240435,
248
- "rewards/rejected": -0.0005925801815465093,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800572e-07,
254
- "logits/chosen": -1.942710518836975,
255
- "logits/rejected": -1.939239263534546,
256
- "logps/chosen": -36.136741638183594,
257
- "logps/rejected": -32.73341751098633,
258
- "loss": 0.6888,
259
- "rewards/accuracies": 0.5874999761581421,
260
- "rewards/chosen": 0.006986373569816351,
261
- "rewards/margins": 0.009119498543441296,
262
- "rewards/rejected": -0.0021331266034394503,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.430433172111807e-07,
268
- "logits/chosen": -2.0422637462615967,
269
- "logits/rejected": -2.0348782539367676,
270
- "logps/chosen": -33.789520263671875,
271
- "logps/rejected": -31.35744285583496,
272
- "loss": 0.6919,
273
  "rewards/accuracies": 0.5249999761581421,
274
- "rewards/chosen": 0.001668277895078063,
275
- "rewards/margins": 0.002786512253805995,
276
- "rewards/rejected": -0.001118234358727932,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.216202642830543e-07,
282
- "logits/chosen": -2.0476248264312744,
283
- "logits/rejected": -2.052889347076416,
284
- "logps/chosen": -32.531455993652344,
285
- "logps/rejected": -32.49399185180664,
286
- "loss": 0.6911,
287
- "rewards/accuracies": 0.5,
288
- "rewards/chosen": 0.0026666896883398294,
289
- "rewards/margins": 0.0044365981593728065,
290
- "rewards/rejected": -0.0017699094023555517,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.9960716642946403e-07,
296
- "logits/chosen": -2.048638105392456,
297
- "logits/rejected": -2.0458552837371826,
298
- "logps/chosen": -31.49679946899414,
299
- "logps/rejected": -31.326080322265625,
300
- "loss": 0.6933,
301
- "rewards/accuracies": 0.550000011920929,
302
- "rewards/chosen": -0.0015687488485127687,
303
- "rewards/margins": 4.425132829055656e-06,
304
- "rewards/rejected": -0.0015731739113107324,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.771853789806683e-07,
310
- "logits/chosen": -1.9184449911117554,
311
- "logits/rejected": -1.9231094121932983,
312
- "logps/chosen": -31.588008880615234,
313
- "logps/rejected": -32.797332763671875,
314
- "loss": 0.6908,
315
- "rewards/accuracies": 0.550000011920929,
316
- "rewards/chosen": 0.0036545295733958483,
317
- "rewards/margins": 0.0049341581761837006,
318
- "rewards/rejected": -0.001279628137126565,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.2440476417541504,
324
- "eval_logits/rejected": -2.239173650741577,
325
- "eval_logps/chosen": -34.017669677734375,
326
- "eval_logps/rejected": -37.50092697143555,
327
- "eval_loss": 0.6932498216629028,
328
- "eval_rewards/accuracies": 0.5112126469612122,
329
- "eval_rewards/chosen": 0.003376541193574667,
330
- "eval_rewards/margins": 0.00023789344413671643,
331
- "eval_rewards/rejected": 0.0031386471819132566,
332
- "eval_runtime": 145.8713,
333
- "eval_samples_per_second": 2.351,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402e-07,
340
- "logits/chosen": -2.0318806171417236,
341
- "logits/rejected": -2.0425620079040527,
342
- "logps/chosen": -31.950469970703125,
343
- "logps/rejected": -33.88362503051758,
344
- "loss": 0.6892,
345
- "rewards/accuracies": 0.574999988079071,
346
- "rewards/chosen": 0.004167118109762669,
347
- "rewards/margins": 0.008351770229637623,
348
- "rewards/rejected": -0.00418465118855238,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.318564697655179e-07,
354
- "logits/chosen": -1.9253804683685303,
355
- "logits/rejected": -1.9402259588241577,
356
- "logps/chosen": -30.09176254272461,
357
- "logps/rejected": -31.568744659423828,
358
- "loss": 0.6903,
359
- "rewards/accuracies": 0.5625,
360
- "rewards/chosen": 0.003955656662583351,
361
- "rewards/margins": 0.006068618968129158,
362
- "rewards/rejected": -0.002112963469699025,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.093227910899832e-07,
368
- "logits/chosen": -1.983178734779358,
369
- "logits/rejected": -1.987159013748169,
370
- "logps/chosen": -33.3923454284668,
371
- "logps/rejected": -31.549015045166016,
372
- "loss": 0.6909,
373
- "rewards/accuracies": 0.574999988079071,
374
- "rewards/chosen": 0.00502450205385685,
375
- "rewards/margins": 0.004966902546584606,
376
- "rewards/rejected": 5.75995072722435e-05,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279356e-07,
382
- "logits/chosen": -1.982914686203003,
383
- "logits/rejected": -1.960942268371582,
384
- "logps/chosen": -34.15560531616211,
385
- "logps/rejected": -34.942718505859375,
386
- "loss": 0.6943,
387
  "rewards/accuracies": 0.5,
388
- "rewards/chosen": -0.0003707931318785995,
389
- "rewards/margins": -0.001986075658351183,
390
- "rewards/rejected": 0.0016152828466147184,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.654436768970182e-07,
396
- "logits/chosen": -2.024036407470703,
397
- "logits/rejected": -2.0207295417785645,
398
- "logps/chosen": -32.916961669921875,
399
- "logps/rejected": -36.2316780090332,
400
- "loss": 0.6919,
401
- "rewards/accuracies": 0.48750001192092896,
402
- "rewards/chosen": 0.0016211355105042458,
403
- "rewards/margins": 0.00297213951125741,
404
- "rewards/rejected": -0.0013510034186765552,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.444597403062196e-07,
410
- "logits/chosen": -1.891046166419983,
411
- "logits/rejected": -1.888594627380371,
412
- "logps/chosen": -34.18373489379883,
413
- "logps/rejected": -35.52981948852539,
414
- "loss": 0.6904,
415
  "rewards/accuracies": 0.5874999761581421,
416
- "rewards/chosen": 0.0009327814914286137,
417
- "rewards/margins": 0.005972124636173248,
418
- "rewards/rejected": -0.005039343610405922,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.2434529917578887e-07,
424
- "logits/chosen": -1.876119613647461,
425
- "logits/rejected": -1.8735746145248413,
426
- "logps/chosen": -34.3980827331543,
427
- "logps/rejected": -31.744619369506836,
428
- "loss": 0.696,
429
- "rewards/accuracies": 0.4625000059604645,
430
- "rewards/chosen": -9.752502955961972e-05,
431
- "rewards/margins": -0.0054392144083976746,
432
- "rewards/rejected": 0.0053416891023516655,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603521e-07,
438
- "logits/chosen": -1.9795544147491455,
439
- "logits/rejected": -1.9689334630966187,
440
- "logps/chosen": -35.3333854675293,
441
- "logps/rejected": -31.85133934020996,
442
- "loss": 0.69,
443
- "rewards/accuracies": 0.550000011920929,
444
- "rewards/chosen": 0.004650552291423082,
445
- "rewards/margins": 0.006719636730849743,
446
- "rewards/rejected": -0.002069085370749235,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071453e-08,
452
- "logits/chosen": -2.075632333755493,
453
- "logits/rejected": -2.0606019496917725,
454
- "logps/chosen": -30.9345645904541,
455
- "logps/rejected": -32.642818450927734,
456
- "loss": 0.6952,
457
- "rewards/accuracies": 0.4375,
458
- "rewards/chosen": -0.0020882338285446167,
459
- "rewards/margins": -0.0038910664152354,
460
- "rewards/rejected": 0.0018028330523520708,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-08,
466
- "logits/chosen": -1.9465770721435547,
467
- "logits/rejected": -1.9440364837646484,
468
- "logps/chosen": -32.8905143737793,
469
- "logps/rejected": -30.818241119384766,
470
- "loss": 0.6895,
471
- "rewards/accuracies": 0.574999988079071,
472
- "rewards/chosen": 0.007319308817386627,
473
- "rewards/margins": 0.007605412509292364,
474
- "rewards/rejected": -0.00028610360459424555,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.2438313961029053,
480
- "eval_logits/rejected": -2.2389516830444336,
481
- "eval_logps/chosen": -34.027259826660156,
482
- "eval_logps/rejected": -37.505775451660156,
483
- "eval_loss": 0.6937180161476135,
484
- "eval_rewards/accuracies": 0.5049834251403809,
485
- "eval_rewards/chosen": 0.0014584256568923593,
486
- "eval_rewards/margins": -0.0007105680997483432,
487
- "eval_rewards/rejected": 0.0021689936984330416,
488
- "eval_runtime": 145.7574,
489
- "eval_samples_per_second": 2.353,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589034e-08,
496
- "logits/chosen": -1.9287267923355103,
497
- "logits/rejected": -1.9254738092422485,
498
- "logps/chosen": -31.579418182373047,
499
- "logps/rejected": -33.74348068237305,
500
- "loss": 0.6908,
501
- "rewards/accuracies": 0.625,
502
- "rewards/chosen": 0.0047865137457847595,
503
- "rewards/margins": 0.005052282474935055,
504
- "rewards/rejected": -0.0002657676232047379,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380912e-08,
510
- "logits/chosen": -1.9805068969726562,
511
- "logits/rejected": -1.9681942462921143,
512
- "logps/chosen": -34.57032012939453,
513
- "logps/rejected": -33.55123519897461,
514
- "loss": 0.6895,
515
- "rewards/accuracies": 0.48750001192092896,
516
- "rewards/chosen": 0.005150976125150919,
517
- "rewards/margins": 0.007629568222910166,
518
- "rewards/rejected": -0.0024785916320979595,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-08,
524
- "logits/chosen": -2.0162813663482666,
525
- "logits/rejected": -2.0148308277130127,
526
- "logps/chosen": -33.48984909057617,
527
- "logps/rejected": -32.48566818237305,
528
- "loss": 0.6931,
529
  "rewards/accuracies": 0.512499988079071,
530
- "rewards/chosen": -0.0008109404589049518,
531
- "rewards/margins": 0.0004063365049660206,
532
- "rewards/rejected": -0.001217277254909277,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.05793773749158e-08,
538
- "logits/chosen": -2.102883815765381,
539
- "logits/rejected": -2.087068796157837,
540
- "logps/chosen": -34.1587028503418,
541
- "logps/rejected": -33.081783294677734,
542
- "loss": 0.6949,
543
- "rewards/accuracies": 0.4749999940395355,
544
- "rewards/chosen": 0.0041370633989572525,
545
- "rewards/margins": -0.003113438840955496,
546
- "rewards/rejected": 0.007250501308590174,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.251801807404168e-08,
552
- "logits/chosen": -1.9753484725952148,
553
- "logits/rejected": -1.9743890762329102,
554
- "logps/chosen": -33.23331832885742,
555
- "logps/rejected": -32.45554733276367,
556
- "loss": 0.6895,
557
  "rewards/accuracies": 0.5,
558
- "rewards/chosen": 0.009755617938935757,
559
- "rewards/margins": 0.007835443131625652,
560
- "rewards/rejected": 0.0019201741088181734,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-09,
566
- "logits/chosen": -1.9319757223129272,
567
- "logits/rejected": -1.9423389434814453,
568
- "logps/chosen": -32.21473693847656,
569
- "logps/rejected": -35.285762786865234,
570
- "loss": 0.6947,
571
- "rewards/accuracies": 0.44999998807907104,
572
- "rewards/chosen": -0.0020089317113161087,
573
- "rewards/margins": -0.0026077935472130775,
574
- "rewards/rejected": 0.0005988621269352734,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050324e-09,
580
- "logits/chosen": -2.070089340209961,
581
- "logits/rejected": -2.0635385513305664,
582
- "logps/chosen": -33.64582061767578,
583
- "logps/rejected": -29.22507095336914,
584
- "loss": 0.6929,
585
- "rewards/accuracies": 0.4749999940395355,
586
- "rewards/chosen": 0.0012301013339310884,
587
- "rewards/margins": 0.0009590888512320817,
588
- "rewards/rejected": 0.00027101286104880273,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-10,
594
- "logits/chosen": -1.929181456565857,
595
- "logits/rejected": -1.9313499927520752,
596
- "logps/chosen": -34.237857818603516,
597
- "logps/rejected": -30.90460777282715,
598
- "loss": 0.69,
599
- "rewards/accuracies": 0.612500011920929,
600
- "rewards/chosen": 0.00023896321363281459,
601
- "rewards/margins": 0.006635263562202454,
602
- "rewards/rejected": -0.006396301090717316,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 0.6919509800997647,
610
- "train_runtime": 3253.2141,
611
  "train_samples_per_second": 0.946,
612
  "train_steps_per_second": 0.118
613
  }
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.2820512820512818e-07,
28
+ "logits/chosen": -1.866300106048584,
29
+ "logits/rejected": -1.870603084564209,
30
+ "logps/chosen": -36.973060607910156,
31
+ "logps/rejected": -33.65780258178711,
32
+ "loss": 0.6843,
33
+ "rewards/accuracies": 0.5138888955116272,
34
+ "rewards/chosen": 0.010061310604214668,
35
+ "rewards/margins": 0.018380172550678253,
36
+ "rewards/rejected": -0.008318860083818436,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.5641025641025636e-07,
42
+ "logits/chosen": -1.9976227283477783,
43
+ "logits/rejected": -2.0002670288085938,
44
+ "logps/chosen": -29.652048110961914,
45
+ "logps/rejected": -29.051151275634766,
46
+ "loss": 0.6969,
47
+ "rewards/accuracies": 0.375,
48
+ "rewards/chosen": -0.0029565312433987856,
49
+ "rewards/margins": -0.006644421722739935,
50
+ "rewards/rejected": 0.003687891410663724,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.8461538461538463e-07,
56
+ "logits/chosen": -1.9202871322631836,
57
+ "logits/rejected": -1.9175989627838135,
58
+ "logps/chosen": -31.403018951416016,
59
+ "logps/rejected": -33.22191619873047,
60
+ "loss": 0.6911,
61
+ "rewards/accuracies": 0.5625,
62
+ "rewards/chosen": 0.0039227609522640705,
63
+ "rewards/margins": 0.004958181641995907,
64
+ "rewards/rejected": -0.0010354205733165145,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438433e-07,
70
+ "logits/chosen": -2.016842842102051,
71
+ "logits/rejected": -2.008082151412964,
72
+ "logps/chosen": -32.58789825439453,
73
+ "logps/rejected": -32.494510650634766,
74
+ "loss": 0.6978,
75
+ "rewards/accuracies": 0.44999998807907104,
76
+ "rewards/chosen": -0.003327915444970131,
77
+ "rewards/margins": -0.008775560185313225,
78
+ "rewards/rejected": 0.005447645206004381,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542186e-07,
84
+ "logits/chosen": -1.8639650344848633,
85
+ "logits/rejected": -1.8531978130340576,
86
+ "logps/chosen": -33.55598831176758,
87
+ "logps/rejected": -35.437034606933594,
88
+ "loss": 0.6956,
89
+ "rewards/accuracies": 0.4375,
90
+ "rewards/chosen": 0.0004471995052881539,
91
+ "rewards/margins": -0.004145150538533926,
92
+ "rewards/rejected": 0.00459235068410635,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941118e-07,
98
+ "logits/chosen": -1.9451839923858643,
99
+ "logits/rejected": -1.94712233543396,
100
+ "logps/chosen": -32.599273681640625,
101
+ "logps/rejected": -33.205684661865234,
102
+ "loss": 0.6882,
103
+ "rewards/accuracies": 0.574999988079071,
104
+ "rewards/chosen": 0.000273245939752087,
105
+ "rewards/margins": 0.010566400364041328,
106
+ "rewards/rejected": -0.010293153114616871,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413548e-07,
112
+ "logits/chosen": -2.0796267986297607,
113
+ "logits/rejected": -2.0845961570739746,
114
+ "logps/chosen": -33.98247528076172,
115
+ "logps/rejected": -36.595970153808594,
116
+ "loss": 0.6887,
117
+ "rewards/accuracies": 0.5874999761581421,
118
+ "rewards/chosen": 0.0018027916084975004,
119
+ "rewards/margins": 0.009932766668498516,
120
+ "rewards/rejected": -0.008129975758492947,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-07,
126
+ "logits/chosen": -1.9422763586044312,
127
+ "logits/rejected": -1.9454386234283447,
128
+ "logps/chosen": -34.4161491394043,
129
+ "logps/rejected": -34.59540939331055,
130
+ "loss": 0.6915,
131
+ "rewards/accuracies": 0.512499988079071,
132
+ "rewards/chosen": 0.0034980489872395992,
133
+ "rewards/margins": 0.004399437457323074,
134
+ "rewards/rejected": -0.0009013883536681533,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.736716601303429e-07,
140
+ "logits/chosen": -1.950871467590332,
141
+ "logits/rejected": -1.9553781747817993,
142
+ "logps/chosen": -32.455718994140625,
143
+ "logps/rejected": -32.37923812866211,
144
+ "loss": 0.6888,
145
+ "rewards/accuracies": 0.5625,
146
+ "rewards/chosen": 0.003510843263939023,
147
+ "rewards/margins": 0.009610405191779137,
148
+ "rewards/rejected": -0.00609956169500947,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.62624545834521e-07,
154
+ "logits/chosen": -2.0493884086608887,
155
+ "logits/rejected": -2.0473878383636475,
156
+ "logps/chosen": -32.24263381958008,
157
+ "logps/rejected": -31.301403045654297,
158
+ "loss": 0.6886,
159
+ "rewards/accuracies": 0.4625000059604645,
160
+ "rewards/chosen": -0.0005389736033976078,
161
+ "rewards/margins": 0.010018542408943176,
162
+ "rewards/rejected": -0.010557514615356922,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.244236707687378,
168
+ "eval_logits/rejected": -2.2393476963043213,
169
+ "eval_logps/chosen": -34.027488708496094,
170
+ "eval_logps/rejected": -37.500125885009766,
171
+ "eval_loss": 0.6949825882911682,
172
+ "eval_rewards/accuracies": 0.48172760009765625,
173
+ "eval_rewards/chosen": 0.0021188759710639715,
174
+ "eval_rewards/margins": -0.0028304157312959433,
175
+ "eval_rewards/rejected": 0.0049492912366986275,
176
+ "eval_runtime": 146.0369,
177
  "eval_samples_per_second": 2.349,
178
+ "eval_steps_per_second": 0.294,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.4982572012636904e-07,
184
+ "logits/chosen": -2.005858898162842,
185
+ "logits/rejected": -2.0034613609313965,
186
+ "logps/chosen": -33.22675323486328,
187
+ "logps/rejected": -34.024375915527344,
188
+ "loss": 0.6918,
189
+ "rewards/accuracies": 0.44999998807907104,
190
+ "rewards/chosen": 0.0051184119656682014,
191
+ "rewards/margins": 0.0036206122022122145,
192
+ "rewards/rejected": 0.0014977991813793778,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777677e-07,
198
+ "logits/chosen": -2.0165371894836426,
199
+ "logits/rejected": -2.0081915855407715,
200
+ "logps/chosen": -32.43837356567383,
201
+ "logps/rejected": -32.1854362487793,
202
+ "loss": 0.689,
203
+ "rewards/accuracies": 0.512499988079071,
204
+ "rewards/chosen": 0.0019600242376327515,
205
+ "rewards/margins": 0.009139470756053925,
206
+ "rewards/rejected": -0.0071794455870985985,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.194082707715275e-07,
212
+ "logits/chosen": -2.046520709991455,
213
+ "logits/rejected": -2.0384721755981445,
214
+ "logps/chosen": -30.485082626342773,
215
+ "logps/rejected": -32.0345344543457,
216
+ "loss": 0.6976,
217
+ "rewards/accuracies": 0.38749998807907104,
218
+ "rewards/chosen": -0.002810357604175806,
219
+ "rewards/margins": -0.008352747187018394,
220
+ "rewards/rejected": 0.005542389117181301,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.020402418666621e-07,
226
+ "logits/chosen": -1.976843237876892,
227
+ "logits/rejected": -1.9871089458465576,
228
+ "logps/chosen": -31.4160099029541,
229
+ "logps/rejected": -32.535850524902344,
230
+ "loss": 0.6929,
231
+ "rewards/accuracies": 0.5249999761581421,
232
+ "rewards/chosen": 0.0010961029911413789,
233
+ "rewards/margins": 0.0013871133560314775,
234
+ "rewards/rejected": -0.0002910103357862681,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.8341962650351185e-07,
240
+ "logits/chosen": -1.890434980392456,
241
+ "logits/rejected": -1.8915313482284546,
242
+ "logps/chosen": -34.165489196777344,
243
+ "logps/rejected": -34.74839782714844,
244
+ "loss": 0.691,
245
+ "rewards/accuracies": 0.5375000238418579,
246
+ "rewards/chosen": 0.008160250261425972,
247
+ "rewards/margins": 0.005502746906131506,
248
+ "rewards/rejected": 0.0026575028896331787,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800572e-07,
254
+ "logits/chosen": -1.942751169204712,
255
+ "logits/rejected": -1.9392738342285156,
256
+ "logps/chosen": -36.15436553955078,
257
+ "logps/rejected": -32.734371185302734,
258
+ "loss": 0.6893,
259
+ "rewards/accuracies": 0.512499988079071,
260
+ "rewards/chosen": 0.005194104742258787,
261
+ "rewards/margins": 0.008680562488734722,
262
+ "rewards/rejected": -0.0034864575136452913,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.430433172111807e-07,
268
+ "logits/chosen": -2.042160749435425,
269
+ "logits/rejected": -2.0347483158111572,
270
+ "logps/chosen": -33.785484313964844,
271
+ "logps/rejected": -31.344274520874023,
272
+ "loss": 0.6928,
273
  "rewards/accuracies": 0.5249999761581421,
274
+ "rewards/chosen": 0.003714872058480978,
275
+ "rewards/margins": 0.0014427527785301208,
276
+ "rewards/rejected": 0.002272119279950857,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.216202642830543e-07,
282
+ "logits/chosen": -2.0473780632019043,
283
+ "logits/rejected": -2.0526490211486816,
284
+ "logps/chosen": -32.52016830444336,
285
+ "logps/rejected": -32.495323181152344,
286
+ "loss": 0.6883,
287
+ "rewards/accuracies": 0.574999988079071,
288
+ "rewards/chosen": 0.007385374512523413,
289
+ "rewards/margins": 0.010440578684210777,
290
+ "rewards/rejected": -0.0030552041716873646,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.9960716642946403e-07,
296
+ "logits/chosen": -2.0484580993652344,
297
+ "logits/rejected": -2.045682191848755,
298
+ "logps/chosen": -31.508682250976562,
299
+ "logps/rejected": -31.3348388671875,
300
+ "loss": 0.694,
301
+ "rewards/accuracies": 0.4749999940395355,
302
+ "rewards/chosen": -0.005918826907873154,
303
+ "rewards/margins": -0.0009324293350800872,
304
+ "rewards/rejected": -0.004986398387700319,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.771853789806683e-07,
310
+ "logits/chosen": -1.9187612533569336,
311
+ "logits/rejected": -1.9234310388565063,
312
+ "logps/chosen": -31.59661865234375,
313
+ "logps/rejected": -32.798248291015625,
314
+ "loss": 0.6909,
315
+ "rewards/accuracies": 0.574999988079071,
316
+ "rewards/chosen": 0.002899137092754245,
317
+ "rewards/margins": 0.00509423715993762,
318
+ "rewards/rejected": -0.002195100300014019,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.2439279556274414,
324
+ "eval_logits/rejected": -2.239055633544922,
325
+ "eval_logps/chosen": -34.03278350830078,
326
+ "eval_logps/rejected": -37.50389099121094,
327
+ "eval_loss": 0.6952112317085266,
328
+ "eval_rewards/accuracies": 0.5191029906272888,
329
+ "eval_rewards/chosen": 0.0005306191742420197,
330
+ "eval_rewards/margins": -0.0032888990826904774,
331
+ "eval_rewards/rejected": 0.0038195180241018534,
332
+ "eval_runtime": 145.8142,
333
+ "eval_samples_per_second": 2.352,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402e-07,
340
+ "logits/chosen": -2.0318386554718018,
341
+ "logits/rejected": -2.0425145626068115,
342
+ "logps/chosen": -31.961750030517578,
343
+ "logps/rejected": -33.86240005493164,
344
+ "loss": 0.6923,
345
+ "rewards/accuracies": 0.5249999761581421,
346
+ "rewards/chosen": 0.0028654958587139845,
347
+ "rewards/margins": 0.002774373395368457,
348
+ "rewards/rejected": 9.112273983191699e-05,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.318564697655179e-07,
354
+ "logits/chosen": -1.9253981113433838,
355
+ "logits/rejected": -1.9402506351470947,
356
+ "logps/chosen": -30.093517303466797,
357
+ "logps/rejected": -31.571548461914062,
358
+ "loss": 0.6889,
359
+ "rewards/accuracies": 0.574999988079071,
360
+ "rewards/chosen": 0.005405673757195473,
361
+ "rewards/margins": 0.009417025372385979,
362
+ "rewards/rejected": -0.004011353012174368,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.093227910899832e-07,
368
+ "logits/chosen": -1.9839330911636353,
369
+ "logits/rejected": -1.9879001379013062,
370
+ "logps/chosen": -33.4061279296875,
371
+ "logps/rejected": -31.564762115478516,
372
+ "loss": 0.6896,
373
+ "rewards/accuracies": 0.550000011920929,
374
+ "rewards/chosen": 0.0034024070482701063,
375
+ "rewards/margins": 0.008040683344006538,
376
+ "rewards/rejected": -0.004638276062905788,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279356e-07,
382
+ "logits/chosen": -1.983189344406128,
383
+ "logits/rejected": -1.9612159729003906,
384
+ "logps/chosen": -34.15366744995117,
385
+ "logps/rejected": -34.95070266723633,
386
+ "loss": 0.6937,
387
  "rewards/accuracies": 0.5,
388
+ "rewards/chosen": 2.4808850866975263e-05,
389
+ "rewards/margins": -1.6275793086606427e-06,
390
+ "rewards/rejected": 2.643577681737952e-05,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.654436768970182e-07,
396
+ "logits/chosen": -2.0243947505950928,
397
+ "logits/rejected": -2.021087169647217,
398
+ "logps/chosen": -32.91602325439453,
399
+ "logps/rejected": -36.221763610839844,
400
+ "loss": 0.6928,
401
+ "rewards/accuracies": 0.4375,
402
+ "rewards/chosen": 0.002715445589274168,
403
+ "rewards/margins": 0.001767707639373839,
404
+ "rewards/rejected": 0.0009477367857471108,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.444597403062196e-07,
410
+ "logits/chosen": -1.8911654949188232,
411
+ "logits/rejected": -1.8887317180633545,
412
+ "logps/chosen": -34.18123245239258,
413
+ "logps/rejected": -35.515174865722656,
414
+ "loss": 0.691,
415
  "rewards/accuracies": 0.5874999761581421,
416
+ "rewards/chosen": 0.0021498873829841614,
417
+ "rewards/margins": 0.005315869115293026,
418
+ "rewards/rejected": -0.0031659831292927265,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.2434529917578887e-07,
424
+ "logits/chosen": -1.875580072402954,
425
+ "logits/rejected": -1.8730493783950806,
426
+ "logps/chosen": -34.37896728515625,
427
+ "logps/rejected": -31.76068687438965,
428
+ "loss": 0.6925,
429
+ "rewards/accuracies": 0.5249999761581421,
430
+ "rewards/chosen": 0.005589136388152838,
431
+ "rewards/margins": 0.0023957251105457544,
432
+ "rewards/rejected": 0.0031934112776070833,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603521e-07,
438
+ "logits/chosen": -1.9798663854599,
439
+ "logits/rejected": -1.9692500829696655,
440
+ "logps/chosen": -35.32219696044922,
441
+ "logps/rejected": -31.857141494750977,
442
+ "loss": 0.6861,
443
+ "rewards/accuracies": 0.5625,
444
+ "rewards/chosen": 0.010332418605685234,
445
+ "rewards/margins": 0.015176350250840187,
446
+ "rewards/rejected": -0.004843929782509804,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071453e-08,
452
+ "logits/chosen": -2.076137065887451,
453
+ "logits/rejected": -2.061100721359253,
454
+ "logps/chosen": -30.907434463500977,
455
+ "logps/rejected": -32.638729095458984,
456
+ "loss": 0.693,
457
+ "rewards/accuracies": 0.4749999940395355,
458
+ "rewards/chosen": 0.00500696524977684,
459
+ "rewards/margins": 0.0010755129624158144,
460
+ "rewards/rejected": 0.0039314525201916695,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-08,
466
+ "logits/chosen": -1.947422981262207,
467
+ "logits/rejected": -1.9448877573013306,
468
+ "logps/chosen": -32.89393997192383,
469
+ "logps/rejected": -30.828500747680664,
470
+ "loss": 0.6869,
471
+ "rewards/accuracies": 0.5375000238418579,
472
+ "rewards/chosen": 0.009951007552444935,
473
+ "rewards/margins": 0.013458291999995708,
474
+ "rewards/rejected": -0.003507285611703992,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.244764566421509,
480
+ "eval_logits/rejected": -2.2398788928985596,
481
+ "eval_logps/chosen": -34.038692474365234,
482
+ "eval_logps/rejected": -37.512168884277344,
483
+ "eval_loss": 0.6949278712272644,
484
+ "eval_rewards/accuracies": 0.5161960124969482,
485
+ "eval_rewards/chosen": -0.0012418677797541022,
486
+ "eval_rewards/margins": -0.0025783225428313017,
487
+ "eval_rewards/rejected": 0.0013364545302465558,
488
+ "eval_runtime": 145.8034,
489
+ "eval_samples_per_second": 2.352,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589034e-08,
496
+ "logits/chosen": -1.9299108982086182,
497
+ "logits/rejected": -1.9266481399536133,
498
+ "logps/chosen": -31.572988510131836,
499
+ "logps/rejected": -33.727210998535156,
500
+ "loss": 0.6912,
501
+ "rewards/accuracies": 0.5249999761581421,
502
+ "rewards/chosen": 0.009108476340770721,
503
+ "rewards/margins": 0.004626707639545202,
504
+ "rewards/rejected": 0.0044817691668868065,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380912e-08,
510
+ "logits/chosen": -1.9814649820327759,
511
+ "logits/rejected": -1.9691375494003296,
512
+ "logps/chosen": -34.551979064941406,
513
+ "logps/rejected": -33.541648864746094,
514
+ "loss": 0.6866,
515
+ "rewards/accuracies": 0.550000011920929,
516
+ "rewards/chosen": 0.013229632750153542,
517
+ "rewards/margins": 0.014072793535888195,
518
+ "rewards/rejected": -0.0008431615424342453,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-08,
524
+ "logits/chosen": -2.017097234725952,
525
+ "logits/rejected": -2.0156359672546387,
526
+ "logps/chosen": -33.49595260620117,
527
+ "logps/rejected": -32.49913787841797,
528
+ "loss": 0.6922,
529
  "rewards/accuracies": 0.512499988079071,
530
+ "rewards/chosen": -0.003048144979402423,
531
+ "rewards/margins": 0.002819380722939968,
532
+ "rewards/rejected": -0.00586752500385046,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.05793773749158e-08,
538
+ "logits/chosen": -2.103210926055908,
539
+ "logits/rejected": -2.0874040126800537,
540
+ "logps/chosen": -34.16447830200195,
541
+ "logps/rejected": -33.07390594482422,
542
+ "loss": 0.6981,
543
+ "rewards/accuracies": 0.44999998807907104,
544
+ "rewards/chosen": 0.004473570734262466,
545
+ "rewards/margins": -0.008766504004597664,
546
+ "rewards/rejected": 0.01324007473886013,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.251801807404168e-08,
552
+ "logits/chosen": -1.9749380350112915,
553
+ "logits/rejected": -1.9740018844604492,
554
+ "logps/chosen": -33.25703048706055,
555
+ "logps/rejected": -32.47133255004883,
556
+ "loss": 0.689,
557
  "rewards/accuracies": 0.5,
558
+ "rewards/chosen": 0.00751916179433465,
559
+ "rewards/margins": 0.009374773129820824,
560
+ "rewards/rejected": -0.0018556114519014955,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-09,
566
+ "logits/chosen": -1.9304084777832031,
567
+ "logits/rejected": -1.9407765865325928,
568
+ "logps/chosen": -32.21118927001953,
569
+ "logps/rejected": -35.31554412841797,
570
+ "loss": 0.6905,
571
+ "rewards/accuracies": 0.550000011920929,
572
+ "rewards/chosen": -0.0019482700154185295,
573
+ "rewards/margins": 0.0060885134153068066,
574
+ "rewards/rejected": -0.008036783896386623,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050324e-09,
580
+ "logits/chosen": -2.06982421875,
581
+ "logits/rejected": -2.0632567405700684,
582
+ "logps/chosen": -33.641204833984375,
583
+ "logps/rejected": -29.2253475189209,
584
+ "loss": 0.6921,
585
+ "rewards/accuracies": 0.5625,
586
+ "rewards/chosen": 0.003229249268770218,
587
+ "rewards/margins": 0.0029065334238111973,
588
+ "rewards/rejected": 0.000322715932270512,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-10,
594
+ "logits/chosen": -1.929220199584961,
595
+ "logits/rejected": -1.9313905239105225,
596
+ "logps/chosen": -34.26762008666992,
597
+ "logps/rejected": -30.9031982421875,
598
+ "loss": 0.6934,
599
+ "rewards/accuracies": 0.5249999761581421,
600
+ "rewards/chosen": -0.00857237633317709,
601
+ "rewards/margins": 0.0005976069951429963,
602
+ "rewards/rejected": -0.009169982746243477,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.691356255791404,
610
+ "train_runtime": 3254.7157,
611
  "train_samples_per_second": 0.946,
612
  "train_steps_per_second": 0.118
613
  }
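A note for comparing the reward fields above: as these metrics are conventionally logged by TRL's DPO trainer, rewards/margins is rewards/chosen minus rewards/rejected, rewards/accuracies is the fraction of pairs whose chosen reward beats the rejected one, and the rewards themselves are beta-scaled policy-versus-reference log-probability differences. A quick consistency check against the final evaluation record (step 300) in the new file:

```python
# Check the relationship between the logged DPO reward metrics, using the final
# evaluation record (epoch 0.78, step 300) from the updated trainer_state.json.
eval_rewards_chosen = -0.0012418677797541022
eval_rewards_rejected = 0.0013364545302465558
eval_rewards_margins = -0.0025783225428313017

# Margins should equal chosen minus rejected up to float32 logging precision.
assert abs((eval_rewards_chosen - eval_rewards_rejected) - eval_rewards_margins) < 1e-9
print("margins == chosen - rejected, as expected")
```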