hugodk-sch committed on
Commit
8220ba4
1 Parent(s): 8d6d53d

Model save

Files changed (5)
  1. README.md +13 -16
  2. adapter_model.safetensors +1 -1
  3. all_results.json +3 -16
  4. train_results.json +3 -3
  5. trainer_state.json +373 -373
README.md CHANGED
@@ -1,13 +1,10 @@
  ---
  library_name: peft
  tags:
- - alignment-handbook
  - trl
  - dpo
  - generated_from_trainer
  base_model: NbAiLab/nb-gpt-j-6B-v2
- datasets:
- - hugodk-sch/aftonposten_title_prefs
  model-index:
  - name: aftonposten-6b-align-scan
    results: []
@@ -18,17 +15,17 @@ should probably proofread and complete it, then remove this comment. -->
 
  # aftonposten-6b-align-scan
 
- This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
+ This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.9658
- - Rewards/chosen: -0.0301
- - Rewards/rejected: -0.0652
- - Rewards/accuracies: 0.5278
- - Rewards/margins: 0.0352
- - Logps/rejected: -37.6098
- - Logps/chosen: -34.0775
- - Logits/rejected: -2.2182
- - Logits/chosen: -2.2231
+ - Loss: 0.6957
+ - Rewards/chosen: -0.0451
+ - Rewards/rejected: -0.0645
+ - Rewards/accuracies: 0.5399
+ - Rewards/margins: 0.0194
+ - Logps/rejected: -37.5973
+ - Logps/chosen: -34.0909
+ - Logits/rejected: -2.2246
+ - Logits/chosen: -2.2294
 
  ## Model description
 
@@ -63,9 +60,9 @@ The following hyperparameters were used during training:
 
  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
  |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
- | 0.8961 | 0.26 | 100 | 0.9767 | 0.0065 | -0.0170 | 0.5365 | 0.0235 | -37.5409 | -34.0252 | -2.2266 | -2.2315 |
- | 0.7699 | 0.52 | 200 | 0.9742 | -0.0141 | -0.0400 | 0.5303 | 0.0259 | -37.5737 | -34.0547 | -2.2234 | -2.2282 |
- | 0.6723 | 0.78 | 300 | 0.9761 | -0.0366 | -0.0616 | 0.5299 | 0.0250 | -37.6047 | -34.0868 | -2.2186 | -2.2234 |
+ | 0.6538 | 0.26 | 100 | 0.6955 | 0.0035 | -0.0085 | 0.5108 | 0.0120 | -37.5273 | -34.0302 | -2.2290 | -2.2339 |
+ | 0.6015 | 0.52 | 200 | 0.6956 | -0.0209 | -0.0393 | 0.5249 | 0.0183 | -37.5657 | -34.0607 | -2.2274 | -2.2322 |
+ | 0.5385 | 0.78 | 300 | 0.6957 | -0.0451 | -0.0645 | 0.5399 | 0.0194 | -37.5973 | -34.0909 | -2.2246 | -2.2294 |
 
 
  ### Framework versions
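
A minimal usage sketch (not part of the commit): it assumes the adapter saved in adapter_model.safetensors is published on the Hub as hugodk-sch/aftonposten-6b-align-scan (inferred from the model name and committer above) and loads on top of the base model NbAiLab/nb-gpt-j-6B-v2 named in the card. The prompt is an arbitrary illustrative example.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "NbAiLab/nb-gpt-j-6B-v2"                    # base model named in the card
adapter_id = "hugodk-sch/aftonposten-6b-align-scan"   # assumed Hub path for this adapter

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base, adapter_id)   # applies adapter_model.safetensors

inputs = tokenizer("Skriv en nyhetstittel om vinter-OL:", return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```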
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b67eba741e2020a4180a0fd9b6b098a84c42623f479ebff7d23f8e34d03ceef8
+ oid sha256:e9f75024fb68f4db40ab121a28260c03b972557b8a61cd854612af6257348d54
  size 176183216
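
The LFS pointer change above only swaps the object hash; since a Git LFS oid is the SHA-256 of the file contents, a downloaded adapter can be checked against the new pointer as sketched below (the local filename is hypothetical).

```python
import hashlib

# Verify a locally downloaded adapter file against the new LFS oid from this commit.
expected_oid = "e9f75024fb68f4db40ab121a28260c03b972557b8a61cd854612af6257348d54"

sha = hashlib.sha256()
with open("adapter_model.safetensors", "rb") as f:    # hypothetical local path
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

print(sha.hexdigest() == expected_oid)                # True if the download matches
```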
all_results.json CHANGED
@@ -1,21 +1,8 @@
  {
    "epoch": 1.0,
-   "eval_logits/chosen": -2.2230520248413086,
-   "eval_logits/rejected": -2.2182328701019287,
-   "eval_logps/chosen": -34.07753372192383,
-   "eval_logps/rejected": -37.60981750488281,
-   "eval_loss": 0.9658033847808838,
-   "eval_rewards/accuracies": 0.5278239250183105,
-   "eval_rewards/chosen": -0.03008819743990898,
-   "eval_rewards/margins": 0.03515118733048439,
-   "eval_rewards/rejected": -0.06523937731981277,
-   "eval_runtime": 145.6671,
-   "eval_samples": 343,
-   "eval_samples_per_second": 2.355,
-   "eval_steps_per_second": 0.295,
-   "train_loss": 0.8438688600218142,
-   "train_runtime": 3250.9917,
+   "train_loss": 0.6230236078237559,
+   "train_runtime": 3254.2307,
    "train_samples": 3079,
-   "train_samples_per_second": 0.947,
+   "train_samples_per_second": 0.946,
    "train_steps_per_second": 0.118
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
  {
    "epoch": 1.0,
-   "train_loss": 0.8438688600218142,
-   "train_runtime": 3250.9917,
+   "train_loss": 0.6230236078237559,
+   "train_runtime": 3254.2307,
    "train_samples": 3079,
-   "train_samples_per_second": 0.947,
+   "train_samples_per_second": 0.946,
    "train_steps_per_second": 0.118
  }
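
A small arithmetic check relating the updated train_results.json fields (no new data, just the ratios behind the reported throughput); the step count of 385 is taken from the trainer_state.json diff below.

```python
# Reproduce the throughput fields in train_results.json from runtime and counts.
train_runtime = 3254.2307   # seconds, from this commit
train_samples = 3079
total_steps = 385           # final "step" value in trainer_state.json

print(round(train_samples / train_runtime, 3))  # 0.946 -> "train_samples_per_second"
print(round(total_steps / train_runtime, 3))    # 0.118 -> "train_steps_per_second"
```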
trainer_state.json CHANGED
@@ -15,7 +15,7 @@
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
- "loss": 1.0,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
@@ -25,590 +25,590 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
- "logits/chosen": -1.8664777278900146,
29
- "logits/rejected": -1.8707849979400635,
30
- "logps/chosen": -36.99364471435547,
31
- "logps/rejected": -33.650604248046875,
32
- "loss": 0.9766,
33
- "rewards/accuracies": 0.5277777910232544,
34
- "rewards/chosen": 0.00906434003263712,
35
- "rewards/margins": 0.023435616865754128,
36
- "rewards/rejected": -0.014371277764439583,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
- "logits/chosen": -1.9982150793075562,
43
- "logits/rejected": -2.0008621215820312,
44
- "logps/chosen": -29.64394760131836,
45
- "logps/rejected": -29.04986000061035,
46
- "loss": 1.0107,
47
- "rewards/accuracies": 0.4375,
48
- "rewards/chosen": -0.0012267641723155975,
49
- "rewards/margins": -0.010734880343079567,
50
- "rewards/rejected": 0.00950811617076397,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
- "logits/chosen": -1.920768141746521,
57
- "logits/rejected": -1.9180870056152344,
58
- "logps/chosen": -31.416461944580078,
59
- "logps/rejected": -33.2098274230957,
60
- "loss": 1.0063,
61
  "rewards/accuracies": 0.512499988079071,
62
- "rewards/chosen": -0.000256747764069587,
63
- "rewards/margins": -0.0063001858070492744,
64
- "rewards/rejected": 0.00604343693703413,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
- "logits/chosen": -2.0177602767944336,
71
- "logits/rejected": -2.009014129638672,
72
- "logps/chosen": -32.56236267089844,
73
- "logps/rejected": -32.517822265625,
74
- "loss": 0.9863,
75
- "rewards/accuracies": 0.550000011920929,
76
- "rewards/chosen": 0.010110180824995041,
77
- "rewards/margins": 0.013717299327254295,
78
- "rewards/rejected": -0.003607118036597967,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
- "logits/chosen": -1.862694501876831,
85
- "logits/rejected": -1.8519262075424194,
86
- "logps/chosen": -33.541160583496094,
87
- "logps/rejected": -35.44048309326172,
88
- "loss": 0.9969,
89
- "rewards/accuracies": 0.5375000238418579,
90
- "rewards/chosen": 0.011423684656620026,
91
- "rewards/margins": 0.003121361369267106,
92
- "rewards/rejected": 0.00830232072621584,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
- "logits/chosen": -1.9417282342910767,
99
- "logits/rejected": -1.9436867237091064,
100
- "logps/chosen": -32.52958679199219,
101
- "logps/rejected": -33.216880798339844,
102
- "loss": 0.9296,
103
- "rewards/accuracies": 0.5874999761581421,
104
- "rewards/chosen": 0.04941769689321518,
105
- "rewards/margins": 0.08126799017190933,
106
- "rewards/rejected": -0.031850285828113556,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
- "logits/chosen": -2.0729386806488037,
113
- "logits/rejected": -2.0779125690460205,
114
- "logps/chosen": -33.99254608154297,
115
- "logps/rejected": -36.62586212158203,
116
- "loss": 0.9629,
117
- "rewards/accuracies": 0.550000011920929,
118
- "rewards/chosen": -0.0028400986921042204,
119
- "rewards/margins": 0.037055134773254395,
120
- "rewards/rejected": -0.03989524394273758,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
- "logits/chosen": -1.9335737228393555,
127
- "logits/rejected": -1.9366981983184814,
128
- "logps/chosen": -34.332157135009766,
129
- "logps/rejected": -34.641021728515625,
130
- "loss": 0.9026,
131
- "rewards/accuracies": 0.5625,
132
- "rewards/chosen": 0.06695752590894699,
133
- "rewards/margins": 0.10098665952682495,
134
- "rewards/rejected": -0.03402913734316826,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
- "logits/chosen": -1.9401309490203857,
141
- "logits/rejected": -1.9446433782577515,
142
- "logps/chosen": -32.37213897705078,
143
- "logps/rejected": -32.343849182128906,
144
- "loss": 0.9438,
145
- "rewards/accuracies": 0.637499988079071,
146
- "rewards/chosen": 0.06670050323009491,
147
- "rewards/margins": 0.05615914613008499,
148
- "rewards/rejected": 0.010541360825300217,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
- "logits/chosen": -2.037087917327881,
155
- "logits/rejected": -2.035101890563965,
156
- "logps/chosen": -32.13945388793945,
157
- "logps/rejected": -31.313283920288086,
158
- "loss": 0.8961,
159
- "rewards/accuracies": 0.6625000238418579,
160
- "rewards/chosen": 0.07096613943576813,
161
- "rewards/margins": 0.10391455888748169,
162
- "rewards/rejected": -0.032948415726423264,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.231482982635498,
168
- "eval_logits/rejected": -2.226637363433838,
169
- "eval_logps/chosen": -34.02524185180664,
170
- "eval_logps/rejected": -37.54085159301758,
171
- "eval_loss": 0.9766585230827332,
172
- "eval_rewards/accuracies": 0.5365448594093323,
173
- "eval_rewards/chosen": 0.006516099441796541,
174
- "eval_rewards/margins": 0.02347717247903347,
175
- "eval_rewards/rejected": -0.016961071640253067,
176
- "eval_runtime": 145.8279,
177
- "eval_samples_per_second": 2.352,
178
- "eval_steps_per_second": 0.295,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
- "logits/chosen": -1.9920060634613037,
185
- "logits/rejected": -1.9896419048309326,
186
- "logps/chosen": -33.146766662597656,
187
- "logps/rejected": -34.02008819580078,
188
- "loss": 0.9486,
189
- "rewards/accuracies": 0.5874999761581421,
190
- "rewards/chosen": 0.0679345652461052,
191
- "rewards/margins": 0.061437882483005524,
192
- "rewards/rejected": 0.006496679037809372,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
- "logits/chosen": -2.003952741622925,
199
- "logits/rejected": -1.9956319332122803,
200
- "logps/chosen": -32.33639144897461,
201
- "logps/rejected": -32.133079528808594,
202
- "loss": 0.9488,
203
- "rewards/accuracies": 0.612500011920929,
204
- "rewards/chosen": 0.07595954835414886,
205
- "rewards/margins": 0.05606143921613693,
206
- "rewards/rejected": 0.019898109138011932,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
- "logits/chosen": -2.0316150188446045,
213
- "logits/rejected": -2.0236544609069824,
214
- "logps/chosen": -30.298206329345703,
215
- "logps/rejected": -32.07080841064453,
216
- "loss": 0.8911,
217
- "rewards/accuracies": 0.574999988079071,
218
- "rewards/chosen": 0.12425784766674042,
219
- "rewards/margins": 0.1367165446281433,
220
- "rewards/rejected": -0.0124586820602417,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
- "logits/chosen": -1.962376356124878,
227
- "logits/rejected": -1.9726108312606812,
228
- "logps/chosen": -31.235275268554688,
229
- "logps/rejected": -32.56925964355469,
230
- "loss": 0.8524,
231
- "rewards/accuracies": 0.637499988079071,
232
- "rewards/chosen": 0.1290682703256607,
233
- "rewards/margins": 0.15313370525836945,
234
- "rewards/rejected": -0.024065453559160233,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
- "logits/chosen": -1.8732143640518188,
241
- "logits/rejected": -1.874371886253357,
242
- "logps/chosen": -33.8985481262207,
243
- "logps/rejected": -34.81908416748047,
244
- "loss": 0.786,
245
  "rewards/accuracies": 0.637499988079071,
246
- "rewards/chosen": 0.20590214431285858,
247
- "rewards/margins": 0.24918103218078613,
248
- "rewards/rejected": -0.04327889531850815,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
- "logits/chosen": -1.924538016319275,
255
- "logits/rejected": -1.921121597290039,
256
- "logps/chosen": -36.01353454589844,
257
- "logps/rejected": -32.723262786865234,
258
- "loss": 0.8894,
259
- "rewards/accuracies": 0.574999988079071,
260
- "rewards/chosen": 0.11069967597723007,
261
- "rewards/margins": 0.11105670034885406,
262
- "rewards/rejected": -0.0003570284752640873,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
- "logits/chosen": -2.0247209072113037,
269
- "logits/rejected": -2.017392635345459,
270
- "logps/chosen": -33.49879455566406,
271
- "logps/rejected": -31.44363784790039,
272
- "loss": 0.7434,
273
  "rewards/accuracies": 0.7124999761581421,
274
- "rewards/chosen": 0.2093469649553299,
275
- "rewards/margins": 0.2735980153083801,
276
- "rewards/rejected": -0.06425107270479202,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
- "logits/chosen": -2.031324863433838,
283
- "logits/rejected": -2.0365915298461914,
284
- "logps/chosen": -32.253074645996094,
285
- "logps/rejected": -32.45112609863281,
286
- "loss": 0.8312,
287
- "rewards/accuracies": 0.6499999761581421,
288
- "rewards/chosen": 0.20420043170452118,
289
- "rewards/margins": 0.18039169907569885,
290
- "rewards/rejected": 0.023808732628822327,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
- "logits/chosen": -2.0317559242248535,
297
- "logits/rejected": -2.0289719104766846,
298
- "logps/chosen": -31.279537200927734,
299
- "logps/rejected": -31.34115219116211,
300
- "loss": 0.8405,
301
- "rewards/accuracies": 0.612500011920929,
302
- "rewards/chosen": 0.14659160375595093,
303
- "rewards/margins": 0.16264860332012177,
304
- "rewards/rejected": -0.016057008877396584,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
- "logits/chosen": -1.9018064737319946,
311
- "logits/rejected": -1.9064457416534424,
312
- "logps/chosen": -31.301830291748047,
313
- "logps/rejected": -32.8339729309082,
314
- "loss": 0.7699,
315
- "rewards/accuracies": 0.737500011920929,
316
- "rewards/chosen": 0.21311891078948975,
317
- "rewards/margins": 0.2432461678981781,
318
- "rewards/rejected": -0.03012726828455925,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.2282192707061768,
324
- "eval_logits/rejected": -2.223379611968994,
325
- "eval_logps/chosen": -34.05467224121094,
326
- "eval_logps/rejected": -37.57374572753906,
327
- "eval_loss": 0.974229633808136,
328
- "eval_rewards/accuracies": 0.530315637588501,
329
- "eval_rewards/chosen": -0.01408342458307743,
330
- "eval_rewards/margins": 0.02590302750468254,
331
- "eval_rewards/rejected": -0.03998645395040512,
332
- "eval_runtime": 145.7826,
333
- "eval_samples_per_second": 2.353,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
- "logits/chosen": -2.014596462249756,
341
- "logits/rejected": -2.0252418518066406,
342
- "logps/chosen": -31.797557830810547,
343
- "logps/rejected": -33.982398986816406,
344
- "loss": 0.8139,
345
- "rewards/accuracies": 0.625,
346
- "rewards/chosen": 0.12162177264690399,
347
- "rewards/margins": 0.20541362464427948,
348
- "rewards/rejected": -0.08379185199737549,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
- "logits/chosen": -1.9068737030029297,
355
- "logits/rejected": -1.9216482639312744,
356
- "logps/chosen": -29.838830947875977,
357
- "logps/rejected": -31.62994956970215,
358
- "loss": 0.766,
359
- "rewards/accuracies": 0.699999988079071,
360
- "rewards/chosen": 0.1908968687057495,
361
- "rewards/margins": 0.24113738536834717,
362
- "rewards/rejected": -0.05024053901433945,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
- "logits/chosen": -1.9636850357055664,
369
- "logits/rejected": -1.967655897140503,
370
- "logps/chosen": -33.12433624267578,
371
- "logps/rejected": -31.637094497680664,
372
- "loss": 0.7833,
373
- "rewards/accuracies": 0.6499999761581421,
374
- "rewards/chosen": 0.20519034564495087,
375
- "rewards/margins": 0.2666449546813965,
376
- "rewards/rejected": -0.06145460531115532,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
- "logits/chosen": -1.9611847400665283,
383
- "logits/rejected": -1.9393657445907593,
384
- "logps/chosen": -33.867958068847656,
385
- "logps/rejected": -35.12390899658203,
386
- "loss": 0.737,
387
- "rewards/accuracies": 0.675000011920929,
388
- "rewards/chosen": 0.2000524252653122,
389
- "rewards/margins": 0.32123422622680664,
390
- "rewards/rejected": -0.12118180096149445,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
- "logits/chosen": -2.001960277557373,
397
- "logits/rejected": -1.9986454248428345,
398
- "logps/chosen": -32.73499298095703,
399
- "logps/rejected": -36.28093719482422,
400
- "loss": 0.8434,
401
- "rewards/accuracies": 0.625,
402
- "rewards/chosen": 0.13304933905601501,
403
- "rewards/margins": 0.17225751280784607,
404
- "rewards/rejected": -0.03920816630125046,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
- "logits/chosen": -1.8687576055526733,
411
- "logits/rejected": -1.866320013999939,
412
- "logps/chosen": -33.981781005859375,
413
- "logps/rejected": -35.54584503173828,
414
- "loss": 0.8296,
415
- "rewards/accuracies": 0.6499999761581421,
416
- "rewards/chosen": 0.14463281631469727,
417
- "rewards/margins": 0.1734902262687683,
418
- "rewards/rejected": -0.028857415542006493,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
- "logits/chosen": -1.853939414024353,
425
- "logits/rejected": -1.8515437841415405,
426
- "logps/chosen": -34.234046936035156,
427
- "logps/rejected": -31.837631225585938,
428
- "loss": 0.8487,
429
- "rewards/accuracies": 0.612500011920929,
430
- "rewards/chosen": 0.11448518931865692,
431
- "rewards/margins": 0.16089434921741486,
432
- "rewards/rejected": -0.04640916362404823,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
- "logits/chosen": -1.9570705890655518,
439
- "logits/rejected": -1.9465986490249634,
440
- "logps/chosen": -35.030006408691406,
441
- "logps/rejected": -31.88030433654785,
442
- "loss": 0.7553,
443
- "rewards/accuracies": 0.7250000238418579,
444
- "rewards/chosen": 0.22864773869514465,
445
- "rewards/margins": 0.2561652660369873,
446
- "rewards/rejected": -0.02751758135855198,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
- "logits/chosen": -2.0522685050964355,
453
- "logits/rejected": -2.0373730659484863,
454
- "logps/chosen": -30.7352352142334,
455
- "logps/rejected": -32.61699676513672,
456
- "loss": 0.9072,
457
  "rewards/accuracies": 0.6000000238418579,
458
- "rewards/chosen": 0.13222160935401917,
459
- "rewards/margins": 0.10783363878726959,
460
- "rewards/rejected": 0.024387964978814125,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
- "logits/chosen": -1.9228973388671875,
467
- "logits/rejected": -1.9203764200210571,
468
- "logps/chosen": -32.44710159301758,
469
- "logps/rejected": -30.934436798095703,
470
- "loss": 0.6723,
471
  "rewards/accuracies": 0.762499988079071,
472
- "rewards/chosen": 0.3360074460506439,
473
- "rewards/margins": 0.418344646692276,
474
- "rewards/rejected": -0.08233721554279327,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.2234153747558594,
480
- "eval_logits/rejected": -2.218602180480957,
481
- "eval_logps/chosen": -34.08680725097656,
482
- "eval_logps/rejected": -37.60466003417969,
483
- "eval_loss": 0.976102888584137,
484
- "eval_rewards/accuracies": 0.529900312423706,
485
- "eval_rewards/chosen": -0.036580219864845276,
486
- "eval_rewards/margins": 0.02504708059132099,
487
- "eval_rewards/rejected": -0.06162729859352112,
488
- "eval_runtime": 145.7665,
489
- "eval_samples_per_second": 2.353,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
- "logits/chosen": -1.9082481861114502,
497
- "logits/rejected": -1.9050118923187256,
498
- "logps/chosen": -31.349285125732422,
499
- "logps/rejected": -33.84658432006836,
500
- "loss": 0.7796,
501
- "rewards/accuracies": 0.7124999761581421,
502
- "rewards/chosen": 0.17784307897090912,
503
- "rewards/margins": 0.2509470283985138,
504
- "rewards/rejected": -0.07310393452644348,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
- "logits/chosen": -1.9580894708633423,
511
- "logits/rejected": -1.9458973407745361,
512
- "logps/chosen": -34.3031005859375,
513
- "logps/rejected": -33.67659378051758,
514
- "loss": 0.7302,
515
- "rewards/accuracies": 0.7250000238418579,
516
- "rewards/chosen": 0.2050826996564865,
517
- "rewards/margins": 0.3015114367008209,
518
- "rewards/rejected": -0.09642868489027023,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
- "logits/chosen": -1.9932842254638672,
525
- "logits/rejected": -1.9918495416641235,
526
- "logps/chosen": -33.17847442626953,
527
- "logps/rejected": -32.54157638549805,
528
- "loss": 0.7677,
529
- "rewards/accuracies": 0.7250000238418579,
530
- "rewards/chosen": 0.21512338519096375,
531
- "rewards/margins": 0.2585209906101227,
532
- "rewards/rejected": -0.04339758679270744,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
- "logits/chosen": -2.0800719261169434,
539
- "logits/rejected": -2.064396381378174,
540
- "logps/chosen": -33.80484390258789,
541
- "logps/rejected": -33.1123046875,
542
- "loss": 0.7636,
543
- "rewards/accuracies": 0.6625000238418579,
544
- "rewards/chosen": 0.2621825039386749,
545
- "rewards/margins": 0.25817227363586426,
546
- "rewards/rejected": 0.004010227043181658,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
- "logits/chosen": -1.9522559642791748,
553
- "logits/rejected": -1.951424241065979,
554
- "logps/chosen": -32.8499755859375,
555
- "logps/rejected": -32.56407165527344,
556
- "loss": 0.6881,
557
  "rewards/accuracies": 0.7124999761581421,
558
- "rewards/chosen": 0.3024839758872986,
559
- "rewards/margins": 0.3717316687107086,
560
- "rewards/rejected": -0.06924761831760406,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
- "logits/chosen": -1.9075695276260376,
567
- "logits/rejected": -1.917851209640503,
568
- "logps/chosen": -31.882221221923828,
569
- "logps/rejected": -35.31555938720703,
570
- "loss": 0.7689,
571
- "rewards/accuracies": 0.699999988079071,
572
- "rewards/chosen": 0.22572879493236542,
573
- "rewards/margins": 0.24449090659618378,
574
- "rewards/rejected": -0.018762132152915,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
- "logits/chosen": -2.0478641986846924,
581
- "logits/rejected": -2.041414737701416,
582
- "logps/chosen": -33.331912994384766,
583
- "logps/rejected": -29.259756088256836,
584
- "loss": 0.7658,
585
  "rewards/accuracies": 0.75,
586
- "rewards/chosen": 0.2240387201309204,
587
- "rewards/margins": 0.2473684549331665,
588
- "rewards/rejected": -0.02332974039018154,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
- "logits/chosen": -1.9081246852874756,
595
- "logits/rejected": -1.9103105068206787,
596
- "logps/chosen": -33.882568359375,
597
- "logps/rejected": -30.96805191040039,
598
- "loss": 0.741,
599
- "rewards/accuracies": 0.7250000238418579,
600
- "rewards/chosen": 0.24953576922416687,
601
- "rewards/margins": 0.3163323998451233,
602
- "rewards/rejected": -0.06679664552211761,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 0.8438688600218142,
610
- "train_runtime": 3250.9917,
611
- "train_samples_per_second": 0.947,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],
 
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
+ "loss": 0.6931,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.282051282051282e-06,
28
+ "logits/chosen": -1.8663169145584106,
29
+ "logits/rejected": -1.870638370513916,
30
+ "logps/chosen": -36.98221206665039,
31
+ "logps/rejected": -33.6473503112793,
32
+ "loss": 0.6788,
33
+ "rewards/accuracies": 0.5416666865348816,
34
+ "rewards/chosen": 0.01950961910188198,
35
+ "rewards/margins": 0.03332838416099548,
36
+ "rewards/rejected": -0.013818766921758652,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.564102564102564e-06,
42
+ "logits/chosen": -1.9978923797607422,
43
+ "logits/rejected": -2.0005345344543457,
44
+ "logps/chosen": -29.642324447631836,
45
+ "logps/rejected": -29.048343658447266,
46
+ "loss": 0.7013,
47
+ "rewards/accuracies": 0.4749999940395355,
48
+ "rewards/chosen": -0.00010492801811778918,
49
+ "rewards/margins": -0.012185259722173214,
50
+ "rewards/rejected": 0.012080332264304161,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.846153846153847e-06,
56
+ "logits/chosen": -1.9209339618682861,
57
+ "logits/rejected": -1.9182507991790771,
58
+ "logps/chosen": -31.401519775390625,
59
+ "logps/rejected": -33.22309875488281,
60
+ "loss": 0.6891,
61
  "rewards/accuracies": 0.512499988079071,
62
+ "rewards/chosen": 0.011660982854664326,
63
+ "rewards/margins": 0.015369392931461334,
64
+ "rewards/rejected": -0.003708411008119583,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438434e-06,
70
+ "logits/chosen": -2.0180399417877197,
71
+ "logits/rejected": -2.009289264678955,
72
+ "logps/chosen": -32.559410095214844,
73
+ "logps/rejected": -32.52582550048828,
74
+ "loss": 0.6849,
75
+ "rewards/accuracies": 0.574999988079071,
76
+ "rewards/chosen": 0.013918718323111534,
77
+ "rewards/margins": 0.02444135770201683,
78
+ "rewards/rejected": -0.010522643104195595,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542187e-06,
84
+ "logits/chosen": -1.8629716634750366,
85
+ "logits/rejected": -1.8522107601165771,
86
+ "logps/chosen": -33.554229736328125,
87
+ "logps/rejected": -35.44757080078125,
88
+ "loss": 0.6987,
89
+ "rewards/accuracies": 0.4749999940395355,
90
+ "rewards/chosen": 0.002600290346890688,
91
+ "rewards/margins": -0.0012185067171230912,
92
+ "rewards/rejected": 0.003818795783445239,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941119e-06,
98
+ "logits/chosen": -1.9408414363861084,
99
+ "logits/rejected": -1.9427950382232666,
100
+ "logps/chosen": -32.56097412109375,
101
+ "logps/rejected": -33.213417053222656,
102
+ "loss": 0.6725,
103
+ "rewards/accuracies": 0.612500011920929,
104
+ "rewards/chosen": 0.03136637061834335,
105
+ "rewards/margins": 0.06499636918306351,
106
+ "rewards/rejected": -0.03362999111413956,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413549e-06,
112
+ "logits/chosen": -2.072221517562866,
113
+ "logits/rejected": -2.077198028564453,
114
+ "logps/chosen": -33.974578857421875,
115
+ "logps/rejected": -36.629173278808594,
116
+ "loss": 0.6793,
117
+ "rewards/accuracies": 0.5249999761581421,
118
+ "rewards/chosen": 0.011124782264232635,
119
+ "rewards/margins": 0.059367585927248,
120
+ "rewards/rejected": -0.04824279993772507,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-06,
126
+ "logits/chosen": -1.9338233470916748,
127
+ "logits/rejected": -1.9369605779647827,
128
+ "logps/chosen": -34.30416488647461,
129
+ "logps/rejected": -34.634437561035156,
130
+ "loss": 0.6423,
131
+ "rewards/accuracies": 0.612500011920929,
132
+ "rewards/chosen": 0.09891629219055176,
133
+ "rewards/margins": 0.132537841796875,
134
+ "rewards/rejected": -0.03362155705690384,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.7367166013034295e-06,
140
+ "logits/chosen": -1.9418385028839111,
141
+ "logits/rejected": -1.9463545083999634,
142
+ "logps/chosen": -32.39947509765625,
143
+ "logps/rejected": -32.35419464111328,
144
+ "loss": 0.6785,
145
+ "rewards/accuracies": 0.6000000238418579,
146
+ "rewards/chosen": 0.05435952544212341,
147
+ "rewards/margins": 0.05058818310499191,
148
+ "rewards/rejected": 0.0037713423371315002,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.626245458345211e-06,
154
+ "logits/chosen": -2.039783000946045,
155
+ "logits/rejected": -2.037789821624756,
156
+ "logps/chosen": -32.164188385009766,
157
+ "logps/rejected": -31.309520721435547,
158
+ "loss": 0.6538,
159
+ "rewards/accuracies": 0.625,
160
+ "rewards/chosen": 0.061319977045059204,
161
+ "rewards/margins": 0.09596750140190125,
162
+ "rewards/rejected": -0.03464752808213234,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.2338831424713135,
168
+ "eval_logits/rejected": -2.229041814804077,
169
+ "eval_logps/chosen": -34.03020095825195,
170
+ "eval_logps/rejected": -37.52727127075195,
171
+ "eval_loss": 0.6954607963562012,
172
+ "eval_rewards/accuracies": 0.510797381401062,
173
+ "eval_rewards/chosen": 0.0034842013847082853,
174
+ "eval_rewards/margins": 0.012004716321825981,
175
+ "eval_rewards/rejected": -0.008520514704287052,
176
+ "eval_runtime": 146.115,
177
+ "eval_samples_per_second": 2.347,
178
+ "eval_steps_per_second": 0.294,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.498257201263691e-06,
184
+ "logits/chosen": -1.9946181774139404,
185
+ "logits/rejected": -1.992236852645874,
186
+ "logps/chosen": -33.117286682128906,
187
+ "logps/rejected": -34.00868225097656,
188
+ "loss": 0.6852,
189
+ "rewards/accuracies": 0.612500011920929,
190
+ "rewards/chosen": 0.10122231394052505,
191
+ "rewards/margins": 0.0846698135137558,
192
+ "rewards/rejected": 0.016552483662962914,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777678e-06,
198
+ "logits/chosen": -2.006875514984131,
199
+ "logits/rejected": -1.9985148906707764,
200
+ "logps/chosen": -32.336421966552734,
201
+ "logps/rejected": -32.137081146240234,
202
+ "loss": 0.675,
203
+ "rewards/accuracies": 0.5375000238418579,
204
+ "rewards/chosen": 0.08678452670574188,
205
+ "rewards/margins": 0.06724556535482407,
206
+ "rewards/rejected": 0.019538963213562965,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.1940827077152755e-06,
212
+ "logits/chosen": -2.0351502895355225,
213
+ "logits/rejected": -2.02717661857605,
214
+ "logps/chosen": -30.30923843383789,
215
+ "logps/rejected": -32.08501434326172,
216
+ "loss": 0.6398,
217
+ "rewards/accuracies": 0.6625000238418579,
218
+ "rewards/chosen": 0.13318516314029694,
219
+ "rewards/margins": 0.15878939628601074,
220
+ "rewards/rejected": -0.025604233145713806,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.0204024186666215e-06,
226
+ "logits/chosen": -1.966170072555542,
227
+ "logits/rejected": -1.9764087200164795,
228
+ "logps/chosen": -31.215194702148438,
229
+ "logps/rejected": -32.55674743652344,
230
+ "loss": 0.627,
231
+ "rewards/accuracies": 0.6000000238418579,
232
+ "rewards/chosen": 0.16357474029064178,
233
+ "rewards/margins": 0.18106886744499207,
234
+ "rewards/rejected": -0.017494117841124535,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.834196265035119e-06,
240
+ "logits/chosen": -1.8775428533554077,
241
+ "logits/rejected": -1.8786998987197876,
242
+ "logps/chosen": -33.92055130004883,
243
+ "logps/rejected": -34.77721405029297,
244
+ "loss": 0.6168,
245
  "rewards/accuracies": 0.637499988079071,
246
+ "rewards/chosen": 0.21771302819252014,
247
+ "rewards/margins": 0.23367898166179657,
248
+ "rewards/rejected": -0.015965968370437622,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800573e-06,
254
+ "logits/chosen": -1.9291296005249023,
255
+ "logits/rejected": -1.9257177114486694,
256
+ "logps/chosen": -36.01557922363281,
257
+ "logps/rejected": -32.72490692138672,
258
+ "loss": 0.6444,
259
+ "rewards/accuracies": 0.6625000238418579,
260
+ "rewards/chosen": 0.12488000094890594,
261
+ "rewards/margins": 0.12660440802574158,
262
+ "rewards/rejected": -0.0017244067275896668,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.4304331721118078e-06,
268
+ "logits/chosen": -2.0295331478118896,
269
+ "logits/rejected": -2.0221762657165527,
270
+ "logps/chosen": -33.48248291015625,
271
+ "logps/rejected": -31.408077239990234,
272
+ "loss": 0.5816,
273
  "rewards/accuracies": 0.7124999761581421,
274
+ "rewards/chosen": 0.25230517983436584,
275
+ "rewards/margins": 0.2972865700721741,
276
+ "rewards/rejected": -0.04498137906193733,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.2162026428305436e-06,
282
+ "logits/chosen": -2.036190986633301,
283
+ "logits/rejected": -2.0414373874664307,
284
+ "logps/chosen": -32.22594451904297,
285
+ "logps/rejected": -32.46149444580078,
286
+ "loss": 0.5993,
287
+ "rewards/accuracies": 0.637499988079071,
288
+ "rewards/chosen": 0.2550733685493469,
289
+ "rewards/margins": 0.2361568957567215,
290
+ "rewards/rejected": 0.018916476517915726,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.996071664294641e-06,
296
+ "logits/chosen": -2.0371222496032715,
297
+ "logits/rejected": -2.0343565940856934,
298
+ "logps/chosen": -31.28468894958496,
299
+ "logps/rejected": -31.336734771728516,
300
+ "loss": 0.6307,
301
+ "rewards/accuracies": 0.6000000238418579,
302
+ "rewards/chosen": 0.16341081261634827,
303
+ "rewards/margins": 0.17822694778442383,
304
+ "rewards/rejected": -0.014816122129559517,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.7718537898066833e-06,
310
+ "logits/chosen": -1.9067039489746094,
311
+ "logits/rejected": -1.91135573387146,
312
+ "logps/chosen": -31.312374114990234,
313
+ "logps/rejected": -32.82074737548828,
314
+ "loss": 0.6015,
315
+ "rewards/accuracies": 0.7124999761581421,
316
+ "rewards/chosen": 0.2351258099079132,
317
+ "rewards/margins": 0.25897616147994995,
318
+ "rewards/rejected": -0.0238503310829401,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.232191324234009,
324
+ "eval_logits/rejected": -2.22735333442688,
325
+ "eval_logps/chosen": -34.060691833496094,
326
+ "eval_logps/rejected": -37.56568908691406,
327
+ "eval_loss": 0.695566713809967,
328
+ "eval_rewards/accuracies": 0.52491694688797,
329
+ "eval_rewards/chosen": -0.020911961793899536,
330
+ "eval_rewards/margins": 0.01834380254149437,
331
+ "eval_rewards/rejected": -0.039255764335393906,
332
+ "eval_runtime": 145.8849,
333
+ "eval_samples_per_second": 2.351,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402006e-06,
340
+ "logits/chosen": -2.0197176933288574,
341
+ "logits/rejected": -2.0303761959075928,
342
+ "logps/chosen": -31.751026153564453,
343
+ "logps/rejected": -33.96234893798828,
344
+ "loss": 0.5969,
345
+ "rewards/accuracies": 0.6625000238418579,
346
+ "rewards/chosen": 0.17622438073158264,
347
+ "rewards/margins": 0.25594404339790344,
348
+ "rewards/rejected": -0.0797196701169014,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.3185646976551794e-06,
354
+ "logits/chosen": -1.9122480154037476,
355
+ "logits/rejected": -1.9270412921905518,
356
+ "logps/chosen": -29.86123275756836,
357
+ "logps/rejected": -31.612594604492188,
358
+ "loss": 0.6025,
359
+ "rewards/accuracies": 0.737500011920929,
360
+ "rewards/chosen": 0.20024582743644714,
361
+ "rewards/margins": 0.24377915263175964,
362
+ "rewards/rejected": -0.04353334754705429,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.0932279108998323e-06,
368
+ "logits/chosen": -1.9689687490463257,
369
+ "logits/rejected": -1.9729585647583008,
370
+ "logps/chosen": -33.11440658569336,
371
+ "logps/rejected": -31.650421142578125,
372
+ "loss": 0.5809,
373
+ "rewards/accuracies": 0.6875,
374
+ "rewards/chosen": 0.2424498349428177,
375
+ "rewards/margins": 0.3233444094657898,
376
+ "rewards/rejected": -0.08089461922645569,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279358e-06,
382
+ "logits/chosen": -1.9670231342315674,
383
+ "logits/rejected": -1.9451711177825928,
384
+ "logps/chosen": -33.830162048339844,
385
+ "logps/rejected": -35.1173095703125,
386
+ "loss": 0.558,
387
+ "rewards/accuracies": 0.737500011920929,
388
+ "rewards/chosen": 0.2588713765144348,
389
+ "rewards/margins": 0.3920826315879822,
390
+ "rewards/rejected": -0.13321125507354736,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.6544367689701824e-06,
396
+ "logits/chosen": -2.0084152221679688,
397
+ "logits/rejected": -2.005080223083496,
398
+ "logps/chosen": -32.70518493652344,
399
+ "logps/rejected": -36.280517578125,
400
+ "loss": 0.6104,
401
+ "rewards/accuracies": 0.6875,
402
+ "rewards/chosen": 0.1759072244167328,
403
+ "rewards/margins": 0.22038432955741882,
404
+ "rewards/rejected": -0.044477105140686035,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.4445974030621963e-06,
410
+ "logits/chosen": -1.8755052089691162,
411
+ "logits/rejected": -1.8730967044830322,
412
+ "logps/chosen": -33.984092712402344,
413
+ "logps/rejected": -35.538455963134766,
414
+ "loss": 0.622,
415
+ "rewards/accuracies": 0.75,
416
+ "rewards/chosen": 0.16344432532787323,
417
+ "rewards/margins": 0.19051328301429749,
418
+ "rewards/rejected": -0.02706894651055336,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.243452991757889e-06,
424
+ "logits/chosen": -1.8608484268188477,
425
+ "logits/rejected": -1.8584181070327759,
426
+ "logps/chosen": -34.17797088623047,
427
+ "logps/rejected": -31.830347061157227,
428
+ "loss": 0.6124,
429
+ "rewards/accuracies": 0.7124999761581421,
430
+ "rewards/chosen": 0.1757020801305771,
431
+ "rewards/margins": 0.22291450202465057,
432
+ "rewards/rejected": -0.047212425619363785,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603523e-06,
438
+ "logits/chosen": -1.9641139507293701,
439
+ "logits/rejected": -1.9535942077636719,
440
+ "logps/chosen": -35.01939010620117,
441
+ "logps/rejected": -31.871440887451172,
442
+ "loss": 0.5852,
443
+ "rewards/accuracies": 0.699999988079071,
444
+ "rewards/chosen": 0.26980119943618774,
445
+ "rewards/margins": 0.29416200518608093,
446
+ "rewards/rejected": -0.024360809475183487,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071455e-07,
452
+ "logits/chosen": -2.0593204498291016,
453
+ "logits/rejected": -2.0443997383117676,
454
+ "logps/chosen": -30.722980499267578,
455
+ "logps/rejected": -32.61235809326172,
456
+ "loss": 0.6599,
457
  "rewards/accuracies": 0.6000000238418579,
458
+ "rewards/chosen": 0.16091887652873993,
459
+ "rewards/margins": 0.12933868169784546,
460
+ "rewards/rejected": 0.031580209732055664,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-07,
466
+ "logits/chosen": -1.930450201034546,
467
+ "logits/rejected": -1.9279005527496338,
468
+ "logps/chosen": -32.415870666503906,
469
+ "logps/rejected": -30.882410049438477,
470
+ "loss": 0.5385,
471
  "rewards/accuracies": 0.762499988079071,
472
+ "rewards/chosen": 0.4089924395084381,
473
+ "rewards/margins": 0.4614754319190979,
474
+ "rewards/rejected": -0.05248301103711128,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.229433536529541,
480
+ "eval_logits/rejected": -2.22458553314209,
481
+ "eval_logps/chosen": -34.090904235839844,
482
+ "eval_logps/rejected": -37.59726333618164,
483
+ "eval_loss": 0.69569993019104,
484
+ "eval_rewards/accuracies": 0.5398671627044678,
485
+ "eval_rewards/chosen": -0.04508008435368538,
486
+ "eval_rewards/margins": 0.019436603412032127,
487
+ "eval_rewards/rejected": -0.06451668590307236,
488
+ "eval_runtime": 145.8403,
489
+ "eval_samples_per_second": 2.352,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589035e-07,
496
+ "logits/chosen": -1.9148633480072021,
497
+ "logits/rejected": -1.9115928411483765,
498
+ "logps/chosen": -31.324920654296875,
499
+ "logps/rejected": -33.81542205810547,
500
+ "loss": 0.5949,
501
+ "rewards/accuracies": 0.699999988079071,
502
+ "rewards/chosen": 0.22274336218833923,
503
+ "rewards/margins": 0.28135946393013,
504
+ "rewards/rejected": -0.05861610919237137,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380913e-07,
510
+ "logits/chosen": -1.9647932052612305,
511
+ "logits/rejected": -1.9525552988052368,
512
+ "logps/chosen": -34.34864044189453,
513
+ "logps/rejected": -33.66791915893555,
514
+ "loss": 0.5831,
515
+ "rewards/accuracies": 0.612500011920929,
516
+ "rewards/chosen": 0.19794727861881256,
517
+ "rewards/margins": 0.3012133836746216,
518
+ "rewards/rejected": -0.10326610505580902,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-07,
524
+ "logits/chosen": -2.0001468658447266,
525
+ "logits/rejected": -1.9986999034881592,
526
+ "logps/chosen": -33.18779373168945,
527
+ "logps/rejected": -32.54129409790039,
528
+ "loss": 0.5895,
529
+ "rewards/accuracies": 0.6875,
530
+ "rewards/chosen": 0.23840396106243134,
531
+ "rewards/margins": 0.28777408599853516,
532
+ "rewards/rejected": -0.04937009885907173,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.0579377374915805e-07,
538
+ "logits/chosen": -2.0865254402160645,
539
+ "logits/rejected": -2.0707924365997314,
540
+ "logps/chosen": -33.81252670288086,
541
+ "logps/rejected": -33.110015869140625,
542
+ "loss": 0.5883,
543
+ "rewards/accuracies": 0.7124999761581421,
544
+ "rewards/chosen": 0.2934878468513489,
545
+ "rewards/margins": 0.28707200288772583,
546
+ "rewards/rejected": 0.006415897514671087,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.2518018074041684e-07,
552
+ "logits/chosen": -1.9590953588485718,
553
+ "logits/rejected": -1.9582574367523193,
554
+ "logps/chosen": -32.849937438964844,
555
+ "logps/rejected": -32.53525161743164,
556
+ "loss": 0.5603,
557
  "rewards/accuracies": 0.7124999761581421,
558
+ "rewards/chosen": 0.34572547674179077,
559
+ "rewards/margins": 0.4018074870109558,
560
+ "rewards/rejected": -0.056082069873809814,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-08,
566
+ "logits/chosen": -1.914807677268982,
567
+ "logits/rejected": -1.9251015186309814,
568
+ "logps/chosen": -31.8874454498291,
569
+ "logps/rejected": -35.34430694580078,
570
+ "loss": 0.588,
571
+ "rewards/accuracies": 0.6875,
572
+ "rewards/chosen": 0.2537948489189148,
573
+ "rewards/margins": 0.2982342541217804,
574
+ "rewards/rejected": -0.0444394052028656,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050325e-08,
580
+ "logits/chosen": -2.054311513900757,
581
+ "logits/rejected": -2.047823429107666,
582
+ "logps/chosen": -33.35334396362305,
583
+ "logps/rejected": -29.280254364013672,
584
+ "loss": 0.5847,
585
  "rewards/accuracies": 0.75,
586
+ "rewards/chosen": 0.23889848589897156,
587
+ "rewards/margins": 0.2819606363773346,
588
+ "rewards/rejected": -0.04306213930249214,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-09,
594
+ "logits/chosen": -1.9139082431793213,
595
+ "logits/rejected": -1.9161239862442017,
596
+ "logps/chosen": -33.855018615722656,
597
+ "logps/rejected": -30.981037139892578,
598
+ "loss": 0.5466,
599
+ "rewards/accuracies": 0.762499988079071,
600
+ "rewards/chosen": 0.3072236478328705,
601
+ "rewards/margins": 0.3939489424228668,
602
+ "rewards/rejected": -0.08672530204057693,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.6230236078237559,
610
+ "train_runtime": 3254.2307,
611
+ "train_samples_per_second": 0.946,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],
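
One observation on the trainer_state.json diff above: the first logged loss changes from 1.0 to 0.6931, and 0.6931 is ln 2, which is exactly what the standard sigmoid DPO objective gives when the chosen and rejected rewards are both zero, as they are at step 0. A minimal sketch of that relationship, assuming the rewards/* fields are the beta-scaled log-probability differences that TRL's DPOTrainer reports:

```python
import math

def sigmoid_dpo_loss(reward_chosen: float, reward_rejected: float) -> float:
    """-log(sigmoid(reward margin)); the rewards already carry the beta scaling."""
    margin = reward_chosen - reward_rejected
    return -math.log(1.0 / (1.0 + math.exp(-margin)))

# At step 0 both rewards are 0.0 (first entry in the diff above), so the expected
# starting loss is -log(sigmoid(0)) = ln 2 ~= 0.6931, matching the new run's first value.
# (Later logged losses average per-example terms, so they cannot be reproduced exactly
# from the batch-averaged rewards shown in the log.)
print(round(sigmoid_dpo_loss(0.0, 0.0), 4))   # 0.6931
```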