hugodk-sch committed on
Commit
0d53d98
1 Parent(s): 53109e2

Model save

Files changed (5)
  1. README.md +13 -16
  2. adapter_model.safetensors +1 -1
  3. all_results.json +3 -16
  4. train_results.json +3 -3
  5. trainer_state.json +374 -374
README.md CHANGED
@@ -1,13 +1,10 @@
1
  ---
2
  library_name: peft
3
  tags:
4
- - alignment-handbook
5
  - trl
6
  - dpo
7
  - generated_from_trainer
8
  base_model: NbAiLab/nb-gpt-j-6B-v2
9
- datasets:
10
- - hugodk-sch/aftonposten_title_prefs
11
  model-index:
12
  - name: aftonposten-6b-align-scan
13
  results: []
@@ -18,17 +15,17 @@ should probably proofread and complete it, then remove this comment. -->
18
 
19
  # aftonposten-6b-align-scan
20
 
21
- This model is a fine-tuned version of [data/ap-gpt-j-6b-sft-qlora-04-08](https://huggingface.co/data/ap-gpt-j-6b-sft-qlora-04-08) on the hugodk-sch/aftonposten_title_prefs dataset.
22
  It achieves the following results on the evaluation set:
23
- - Loss: 0.7359
24
- - Rewards/chosen: 0.0075
25
- - Rewards/rejected: 0.0073
26
- - Rewards/accuracies: 0.5311
27
- - Rewards/margins: 0.0003
28
- - Logps/rejected: -37.5045
29
- - Logps/chosen: -34.0220
30
- - Logits/rejected: -2.2382
31
- - Logits/chosen: -2.2431
32
 
33
  ## Model description
34
 
@@ -63,9 +60,9 @@ The following hyperparameters were used during training:
63
 
64
  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
65
  |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
66
- | 0.7731 | 0.26 | 100 | 0.7157 | 0.0126 | 0.0053 | 0.5216 | 0.0073 | -37.5079 | -34.0136 | -2.2387 | -2.2436 |
67
- | 0.7072 | 0.52 | 200 | 0.7401 | 0.0040 | 0.0081 | 0.4963 | -0.0041 | -37.5031 | -34.0279 | -2.2387 | -2.2436 |
68
- | 0.6516 | 0.78 | 300 | 0.7443 | 0.0055 | 0.0094 | 0.4958 | -0.0039 | -37.5009 | -34.0254 | -2.2391 | -2.2439 |
69
 
70
 
71
  ### Framework versions
 
1
  ---
2
  library_name: peft
3
  tags:
 
4
  - trl
5
  - dpo
6
  - generated_from_trainer
7
  base_model: NbAiLab/nb-gpt-j-6B-v2
 
 
8
  model-index:
9
  - name: aftonposten-6b-align-scan
10
  results: []
 
15
 
16
  # aftonposten-6b-align-scan
17
 
18
+ This model is a fine-tuned version of [NbAiLab/nb-gpt-j-6B-v2](https://huggingface.co/NbAiLab/nb-gpt-j-6B-v2) on an unknown dataset.
19
  It achieves the following results on the evaluation set:
20
+ - Loss: 0.5652
21
+ - Rewards/chosen: 0.0047
22
+ - Rewards/rejected: 0.0129
23
+ - Rewards/accuracies: 0.4809
24
+ - Rewards/margins: -0.0083
25
+ - Logps/rejected: -37.4981
26
+ - Logps/chosen: -34.0279
27
+ - Logits/rejected: -2.2391
28
+ - Logits/chosen: -2.2439
29
 
30
  ## Model description
31
 
 
60
 
61
  | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
62
  |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
63
+ | 0.5589 | 0.26 | 100 | 0.5107 | 0.0178 | 0.0027 | 0.5573 | 0.0151 | -37.5127 | -34.0091 | -2.2393 | -2.2442 |
64
+ | 0.522 | 0.52 | 200 | 0.5423 | 0.0040 | 0.0035 | 0.4689 | 0.0005 | -37.5116 | -34.0288 | -2.2385 | -2.2433 |
65
+ | 0.4616 | 0.78 | 300 | 0.5652 | 0.0047 | 0.0129 | 0.4809 | -0.0083 | -37.4981 | -34.0279 | -2.2391 | -2.2439 |
66
 
67
 
68
  ### Framework versions
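For reference, a minimal usage sketch for the updated card: it loads the NbAiLab/nb-gpt-j-6B-v2 base model named above and applies this repository's PEFT adapter. The adapter repo id (`hugodk-sch/aftonposten-6b-align-scan`) is an assumption inferred from the author and model name; point it at the actual repo or a local checkout of this commit if it differs.

```python
# Sketch: load the base model and apply this PEFT (LoRA-style) adapter for inference.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "NbAiLab/nb-gpt-j-6B-v2"                    # base model named in the card
adapter_id = "hugodk-sch/aftonposten-6b-align-scan"   # assumed adapter repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16)  # fp16 assumed for a 6B model
model = PeftModel.from_pretrained(base, adapter_id)
model.eval()

inputs = tokenizer("Oslo: ", return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=30)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```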
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e41ff2836264cd148e707d27674ceb166bf309aad16e50617f9808693d4e8288
3
  size 176183216
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e6b2a60197aedd47c7b976865f1da979dedbbc97bd5ad68d342cc0acb779f92
3
  size 176183216
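adapter_model.safetensors is tracked as a Git LFS pointer, so only the oid/size lines above change in this commit. A minimal sketch, assuming the real safetensors file has been pulled locally, to check it against the new pointer:

```python
# Sketch: verify a locally downloaded adapter_model.safetensors against the
# Git LFS pointer recorded in this commit (oid sha256 + size).
import hashlib
import os

path = "adapter_model.safetensors"  # assumes the LFS object was pulled locally
expected_oid = "5e6b2a60197aedd47c7b976865f1da979dedbbc97bd5ad68d342cc0acb779f92"
expected_size = 176183216

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
print("adapter_model.safetensors matches the LFS pointer")
```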
all_results.json CHANGED
@@ -1,21 +1,8 @@
1
  {
2
  "epoch": 1.0,
3
- "eval_logits/chosen": -2.24311900138855,
4
- "eval_logits/rejected": -2.2382466793060303,
5
- "eval_logps/chosen": -34.021976470947266,
6
- "eval_logps/rejected": -37.50453567504883,
7
- "eval_loss": 0.7359318137168884,
8
- "eval_rewards/accuracies": 0.531146228313446,
9
- "eval_rewards/chosen": 0.0075458260253071785,
10
- "eval_rewards/margins": 0.0002943648141808808,
11
- "eval_rewards/rejected": 0.007251461502164602,
12
- "eval_runtime": 145.7677,
13
- "eval_samples": 343,
14
- "eval_samples_per_second": 2.353,
15
- "eval_steps_per_second": 0.295,
16
- "train_loss": 0.7160632399769572,
17
- "train_runtime": 3256.7737,
18
  "train_samples": 3079,
19
- "train_samples_per_second": 0.945,
20
  "train_steps_per_second": 0.118
21
  }
 
1
  {
2
  "epoch": 1.0,
3
+ "train_loss": 0.5317366457604743,
4
+ "train_runtime": 3254.062,
5
  "train_samples": 3079,
6
+ "train_samples_per_second": 0.946,
7
  "train_steps_per_second": 0.118
8
  }
train_results.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
  "epoch": 1.0,
3
- "train_loss": 0.7160632399769572,
4
- "train_runtime": 3256.7737,
5
  "train_samples": 3079,
6
- "train_samples_per_second": 0.945,
7
  "train_steps_per_second": 0.118
8
  }
 
1
  {
2
  "epoch": 1.0,
3
+ "train_loss": 0.5317366457604743,
4
+ "train_runtime": 3254.062,
5
  "train_samples": 3079,
6
+ "train_samples_per_second": 0.946,
7
  "train_steps_per_second": 0.118
8
  }
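The updated throughput fields are internally consistent; a quick arithmetic check against the totals recorded in this commit (385 optimizer steps per trainer_state.json):

```python
# Quick consistency check of the reported throughput numbers.
train_samples = 3079
train_runtime = 3254.062   # seconds
total_steps = 385          # final "step" in trainer_state.json

print(round(train_samples / train_runtime, 3))  # 0.946 samples/s, as reported
print(round(total_steps / train_runtime, 3))    # 0.118 steps/s, as reported
```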
trainer_state.json CHANGED
@@ -15,7 +15,7 @@
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
- "loss": 0.6944,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
@@ -25,590 +25,590 @@
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.2820512820512818e-07,
28
- "logits/chosen": -1.8663125038146973,
29
- "logits/rejected": -1.8706269264221191,
30
- "logps/chosen": -37.00217819213867,
31
- "logps/rejected": -33.66637420654297,
32
- "loss": 0.662,
33
- "rewards/accuracies": 0.4861111044883728,
34
- "rewards/chosen": 0.0026538786478340626,
35
- "rewards/margins": 0.024431757628917694,
36
- "rewards/rejected": -0.021777881309390068,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.5641025641025636e-07,
42
- "logits/chosen": -1.997361183166504,
43
- "logits/rejected": -2.0000081062316895,
44
- "logps/chosen": -29.635391235351562,
45
- "logps/rejected": -29.05219078063965,
46
- "loss": 0.7317,
47
  "rewards/accuracies": 0.5249999761581421,
48
- "rewards/chosen": 0.004081652499735355,
49
- "rewards/margins": -0.002670821500942111,
50
- "rewards/rejected": 0.006752474699169397,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.8461538461538463e-07,
56
- "logits/chosen": -1.9197700023651123,
57
- "logits/rejected": -1.917066216468811,
58
- "logps/chosen": -31.422348022460938,
59
- "logps/rejected": -33.22368240356445,
60
- "loss": 0.7319,
61
- "rewards/accuracies": 0.512499988079071,
62
- "rewards/chosen": -0.003753386437892914,
63
- "rewards/margins": -0.0006187178078107536,
64
- "rewards/rejected": -0.0031346683390438557,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438433e-07,
70
- "logits/chosen": -2.0170438289642334,
71
- "logits/rejected": -2.0082850456237793,
72
- "logps/chosen": -32.59685516357422,
73
- "logps/rejected": -32.49561309814453,
74
- "loss": 0.7894,
75
- "rewards/accuracies": 0.4000000059604645,
76
- "rewards/chosen": -0.012029164470732212,
77
- "rewards/margins": -0.022262731567025185,
78
- "rewards/rejected": 0.010233565233647823,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542186e-07,
84
- "logits/chosen": -1.8647209405899048,
85
- "logits/rejected": -1.853936791419983,
86
- "logps/chosen": -33.57290267944336,
87
- "logps/rejected": -35.4411735534668,
88
- "loss": 0.7695,
89
- "rewards/accuracies": 0.38749998807907104,
90
- "rewards/chosen": -0.009253564290702343,
91
- "rewards/margins": -0.015956666320562363,
92
- "rewards/rejected": 0.006703103426843882,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941118e-07,
98
- "logits/chosen": -1.9456017017364502,
99
- "logits/rejected": -1.9475319385528564,
100
- "logps/chosen": -32.578392028808594,
101
- "logps/rejected": -33.17626190185547,
102
- "loss": 0.6746,
103
- "rewards/accuracies": 0.6000000238418579,
104
- "rewards/chosen": 0.013076215982437134,
105
- "rewards/margins": 0.0160076767206192,
106
- "rewards/rejected": -0.00293146213516593,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413548e-07,
112
- "logits/chosen": -2.079268217086792,
113
- "logits/rejected": -2.084235668182373,
114
- "logps/chosen": -33.979801177978516,
115
- "logps/rejected": -36.58202362060547,
116
- "loss": 0.7098,
117
- "rewards/accuracies": 0.5249999761581421,
118
- "rewards/chosen": 0.005212540738284588,
119
- "rewards/margins": 0.013104838319122791,
120
- "rewards/rejected": -0.007892299443483353,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-07,
126
- "logits/chosen": -1.9421241283416748,
127
- "logits/rejected": -1.9452680349349976,
128
- "logps/chosen": -34.39490509033203,
129
- "logps/rejected": -34.577003479003906,
130
- "loss": 0.7084,
131
- "rewards/accuracies": 0.512499988079071,
132
- "rewards/chosen": 0.019745096564292908,
133
- "rewards/margins": 0.010502790100872517,
134
- "rewards/rejected": 0.009242306463420391,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.736716601303429e-07,
140
- "logits/chosen": -1.950796365737915,
141
- "logits/rejected": -1.9553086757659912,
142
- "logps/chosen": -32.45103073120117,
143
- "logps/rejected": -32.358036041259766,
144
- "loss": 0.7023,
145
- "rewards/accuracies": 0.550000011920929,
146
- "rewards/chosen": 0.009835213422775269,
147
- "rewards/margins": 0.009310262277722359,
148
- "rewards/rejected": 0.0005249513196758926,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.62624545834521e-07,
154
- "logits/chosen": -2.0491089820861816,
155
- "logits/rejected": -2.0471129417419434,
156
- "logps/chosen": -32.255008697509766,
157
- "logps/rejected": -31.249691009521484,
158
- "loss": 0.7731,
159
- "rewards/accuracies": 0.4375,
160
- "rewards/chosen": -0.008502892218530178,
161
- "rewards/margins": -0.018415190279483795,
162
- "rewards/rejected": 0.009912299923598766,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
- "eval_logits/chosen": -2.2435789108276367,
168
- "eval_logits/rejected": -2.2386937141418457,
169
- "eval_logps/chosen": -34.01361083984375,
170
- "eval_logps/rejected": -37.507869720458984,
171
- "eval_loss": 0.7156652212142944,
172
- "eval_rewards/accuracies": 0.5215947031974792,
173
- "eval_rewards/chosen": 0.012563038617372513,
174
- "eval_rewards/margins": 0.007312912028282881,
175
- "eval_rewards/rejected": 0.005250126589089632,
176
- "eval_runtime": 146.1753,
177
- "eval_samples_per_second": 2.346,
178
  "eval_steps_per_second": 0.294,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.4982572012636904e-07,
184
- "logits/chosen": -2.0054879188537598,
185
- "logits/rejected": -2.0030770301818848,
186
- "logps/chosen": -33.253334045410156,
187
- "logps/rejected": -34.042930603027344,
188
- "loss": 0.7285,
189
- "rewards/accuracies": 0.5,
190
- "rewards/chosen": -0.005710972007364035,
191
- "rewards/margins": 0.0024225921370089054,
192
- "rewards/rejected": -0.00813356600701809,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777677e-07,
198
- "logits/chosen": -2.0168392658233643,
199
- "logits/rejected": -2.008460760116577,
200
- "logps/chosen": -32.45271301269531,
201
- "logps/rejected": -32.185306549072266,
202
- "loss": 0.7003,
203
- "rewards/accuracies": 0.512499988079071,
204
- "rewards/chosen": -0.004686274565756321,
205
- "rewards/margins": 0.009594673290848732,
206
- "rewards/rejected": -0.014280945062637329,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.194082707715275e-07,
212
- "logits/chosen": -2.0460398197174072,
213
- "logits/rejected": -2.0379834175109863,
214
- "logps/chosen": -30.48455238342285,
215
- "logps/rejected": -32.02381134033203,
216
- "loss": 0.7899,
217
  "rewards/accuracies": 0.44999998807907104,
218
- "rewards/chosen": -0.005300796125084162,
219
- "rewards/margins": -0.022818461060523987,
220
- "rewards/rejected": 0.017517665401101112,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.020402418666621e-07,
226
- "logits/chosen": -1.9768435955047607,
227
- "logits/rejected": -1.9871145486831665,
228
- "logps/chosen": -31.402883529663086,
229
- "logps/rejected": -32.54676818847656,
230
- "loss": 0.6858,
231
- "rewards/accuracies": 0.5375000238418579,
232
- "rewards/chosen": 0.010066190734505653,
233
- "rewards/margins": 0.017196927219629288,
234
- "rewards/rejected": -0.007130734622478485,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.8341962650351185e-07,
240
- "logits/chosen": -1.8907657861709595,
241
- "logits/rejected": -1.891840934753418,
242
- "logps/chosen": -34.175559997558594,
243
- "logps/rejected": -34.763084411621094,
244
- "loss": 0.702,
245
- "rewards/accuracies": 0.5249999761581421,
246
- "rewards/chosen": 0.010278819128870964,
247
- "rewards/margins": 0.013775828294456005,
248
- "rewards/rejected": -0.003497010562568903,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800572e-07,
254
- "logits/chosen": -1.94234299659729,
255
- "logits/rejected": -1.9388611316680908,
256
- "logps/chosen": -36.15486526489258,
257
- "logps/rejected": -32.73242950439453,
258
- "loss": 0.6916,
259
  "rewards/accuracies": 0.5,
260
- "rewards/chosen": 0.010085567831993103,
261
- "rewards/margins": 0.015891926363110542,
262
- "rewards/rejected": -0.005806358531117439,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.430433172111807e-07,
268
- "logits/chosen": -2.0420398712158203,
269
- "logits/rejected": -2.0346436500549316,
270
- "logps/chosen": -33.77216720581055,
271
- "logps/rejected": -31.360408782958984,
272
- "loss": 0.6733,
273
- "rewards/accuracies": 0.6000000238418579,
274
- "rewards/chosen": 0.015415417030453682,
275
- "rewards/margins": 0.020550500601530075,
276
- "rewards/rejected": -0.005135083571076393,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.216202642830543e-07,
282
- "logits/chosen": -2.047333002090454,
283
- "logits/rejected": -2.052605628967285,
284
- "logps/chosen": -32.51974105834961,
285
- "logps/rejected": -32.49883270263672,
286
- "loss": 0.6574,
287
- "rewards/accuracies": 0.574999988079071,
288
- "rewards/chosen": 0.015028724446892738,
289
- "rewards/margins": 0.023243196308612823,
290
- "rewards/rejected": -0.008214469067752361,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.9960716642946403e-07,
296
- "logits/chosen": -2.0488243103027344,
297
- "logits/rejected": -2.046041488647461,
298
- "logps/chosen": -31.49191665649414,
299
- "logps/rejected": -31.331462860107422,
300
- "loss": 0.7128,
301
- "rewards/accuracies": 0.512499988079071,
302
- "rewards/chosen": -0.0017768737161532044,
303
- "rewards/margins": 0.006172674708068371,
304
- "rewards/rejected": -0.00794955063611269,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.771853789806683e-07,
310
- "logits/chosen": -1.9186776876449585,
311
- "logits/rejected": -1.9233558177947998,
312
- "logps/chosen": -31.598682403564453,
313
- "logps/rejected": -32.798866271972656,
314
- "loss": 0.7072,
315
- "rewards/accuracies": 0.512499988079071,
316
- "rewards/chosen": 0.004562787711620331,
317
- "rewards/margins": 0.009323189035058022,
318
- "rewards/rejected": -0.004760399926453829,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
- "eval_logits/chosen": -2.2435684204101562,
324
- "eval_logits/rejected": -2.2386868000030518,
325
- "eval_logps/chosen": -34.02791213989258,
326
- "eval_logps/rejected": -37.50313949584961,
327
- "eval_loss": 0.7401300072669983,
328
- "eval_rewards/accuracies": 0.49626249074935913,
329
- "eval_rewards/chosen": 0.003984179813414812,
330
- "eval_rewards/margins": -0.004102990496903658,
331
- "eval_rewards/rejected": 0.00808717031031847,
332
- "eval_runtime": 145.9773,
333
- "eval_samples_per_second": 2.35,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402e-07,
340
- "logits/chosen": -2.0318801403045654,
341
- "logits/rejected": -2.042569398880005,
342
- "logps/chosen": -31.933334350585938,
343
- "logps/rejected": -33.868324279785156,
344
- "loss": 0.6562,
345
- "rewards/accuracies": 0.512499988079071,
346
- "rewards/chosen": 0.022781556472182274,
347
- "rewards/margins": 0.026157278567552567,
348
- "rewards/rejected": -0.003375719068571925,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.318564697655179e-07,
354
- "logits/chosen": -1.9248113632202148,
355
- "logits/rejected": -1.9396663904190063,
356
- "logps/chosen": -30.091327667236328,
357
- "logps/rejected": -31.557119369506836,
358
- "loss": 0.6992,
359
- "rewards/accuracies": 0.550000011920929,
360
- "rewards/chosen": 0.012127770110964775,
361
- "rewards/margins": 0.011495334096252918,
362
- "rewards/rejected": 0.000632436596788466,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.093227910899832e-07,
368
- "logits/chosen": -1.9829256534576416,
369
- "logits/rejected": -1.9868942499160767,
370
- "logps/chosen": -33.38996505737305,
371
- "logps/rejected": -31.552440643310547,
372
- "loss": 0.6919,
373
- "rewards/accuracies": 0.5625,
374
- "rewards/chosen": 0.016500946134328842,
375
- "rewards/margins": 0.01838388293981552,
376
- "rewards/rejected": -0.001882936805486679,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279356e-07,
382
- "logits/chosen": -1.9827773571014404,
383
- "logits/rejected": -1.9607956409454346,
384
- "logps/chosen": -34.12545394897461,
385
- "logps/rejected": -34.96198272705078,
386
- "loss": 0.6654,
387
- "rewards/accuracies": 0.637499988079071,
388
- "rewards/chosen": 0.01697678305208683,
389
- "rewards/margins": 0.02368919551372528,
390
- "rewards/rejected": -0.0067124143242836,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.654436768970182e-07,
396
- "logits/chosen": -2.0244033336639404,
397
- "logits/rejected": -2.021089553833008,
398
- "logps/chosen": -32.914833068847656,
399
- "logps/rejected": -36.214698791503906,
400
- "loss": 0.7396,
401
- "rewards/accuracies": 0.4749999940395355,
402
- "rewards/chosen": 0.006139112636446953,
403
- "rewards/margins": 6.052106527931755e-06,
404
- "rewards/rejected": 0.006133060436695814,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.444597403062196e-07,
410
- "logits/chosen": -1.8913761377334595,
411
- "logits/rejected": -1.8889095783233643,
412
- "logps/chosen": -34.184478759765625,
413
- "logps/rejected": -35.503787994384766,
414
- "loss": 0.7346,
415
- "rewards/accuracies": 0.5,
416
- "rewards/chosen": 0.0023515145294368267,
417
- "rewards/margins": 0.0018522702157497406,
418
- "rewards/rejected": 0.0004992469912394881,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.2434529917578887e-07,
424
- "logits/chosen": -1.8754098415374756,
425
- "logits/rejected": -1.8728771209716797,
426
- "logps/chosen": -34.38546371459961,
427
- "logps/rejected": -31.757741928100586,
428
- "loss": 0.729,
429
  "rewards/accuracies": 0.5249999761581421,
430
- "rewards/chosen": 0.0072813271544873714,
431
- "rewards/margins": -0.0008724328363314271,
432
- "rewards/rejected": 0.008153757080435753,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603521e-07,
438
- "logits/chosen": -1.9793100357055664,
439
- "logits/rejected": -1.9686877727508545,
440
- "logps/chosen": -35.343257904052734,
441
- "logps/rejected": -31.828807830810547,
442
- "loss": 0.728,
443
- "rewards/accuracies": 0.48750001192092896,
444
- "rewards/chosen": 0.00802794098854065,
445
- "rewards/margins": 0.0007186919683590531,
446
- "rewards/rejected": 0.007309247739613056,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071453e-08,
452
- "logits/chosen": -2.0753531455993652,
453
- "logits/rejected": -2.0603315830230713,
454
- "logps/chosen": -30.929107666015625,
455
- "logps/rejected": -32.63794708251953,
456
- "loss": 0.7688,
457
  "rewards/accuracies": 0.4625000059604645,
458
- "rewards/chosen": -0.0029905380215495825,
459
- "rewards/margins": -0.0113234743475914,
460
- "rewards/rejected": 0.008332937955856323,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-08,
466
- "logits/chosen": -1.9466259479522705,
467
- "logits/rejected": -1.9440994262695312,
468
- "logps/chosen": -32.894100189208984,
469
- "logps/rejected": -30.831714630126953,
470
- "loss": 0.6516,
471
- "rewards/accuracies": 0.5375000238418579,
472
- "rewards/chosen": 0.019804339855909348,
473
- "rewards/margins": 0.028747867792844772,
474
- "rewards/rejected": -0.008943530730903149,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
- "eval_logits/chosen": -2.243934392929077,
480
- "eval_logits/rejected": -2.2390530109405518,
481
- "eval_logps/chosen": -34.02543640136719,
482
- "eval_logps/rejected": -37.50093460083008,
483
- "eval_loss": 0.7443073391914368,
484
- "eval_rewards/accuracies": 0.4958471655845642,
485
- "eval_rewards/chosen": 0.005467735230922699,
486
- "eval_rewards/margins": -0.0039433627389371395,
487
- "eval_rewards/rejected": 0.009411096572875977,
488
- "eval_runtime": 145.938,
489
- "eval_samples_per_second": 2.35,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589034e-08,
496
- "logits/chosen": -1.9287230968475342,
497
- "logits/rejected": -1.9254610538482666,
498
- "logps/chosen": -31.579919815063477,
499
- "logps/rejected": -33.720069885253906,
500
- "loss": 0.7291,
501
- "rewards/accuracies": 0.550000011920929,
502
- "rewards/chosen": 0.014056472107768059,
503
- "rewards/margins": 0.0008082970743998885,
504
- "rewards/rejected": 0.013248175382614136,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380912e-08,
510
- "logits/chosen": -1.9804229736328125,
511
- "logits/rejected": -1.9681212902069092,
512
- "logps/chosen": -34.572120666503906,
513
- "logps/rejected": -33.557823181152344,
514
- "loss": 0.6635,
515
- "rewards/accuracies": 0.5625,
516
- "rewards/chosen": 0.014373106881976128,
517
- "rewards/margins": 0.025763820856809616,
518
- "rewards/rejected": -0.01139071211218834,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-08,
524
- "logits/chosen": -2.0162224769592285,
525
- "logits/rejected": -2.014760971069336,
526
- "logps/chosen": -33.47685241699219,
527
- "logps/rejected": -32.48881530761719,
528
- "loss": 0.7084,
529
- "rewards/accuracies": 0.5375000238418579,
530
- "rewards/chosen": 0.005364089272916317,
531
- "rewards/margins": 0.010902222245931625,
532
- "rewards/rejected": -0.0055381315760314465,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.05793773749158e-08,
538
- "logits/chosen": -2.1029350757598877,
539
- "logits/rejected": -2.0871243476867676,
540
- "logps/chosen": -34.16062545776367,
541
- "logps/rejected": -33.069419860839844,
542
- "loss": 0.7783,
543
- "rewards/accuracies": 0.48750001192092896,
544
- "rewards/chosen": 0.011257719248533249,
545
- "rewards/margins": -0.017912957817316055,
546
- "rewards/rejected": 0.029170680791139603,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.251801807404168e-08,
552
- "logits/chosen": -1.9747432470321655,
553
- "logits/rejected": -1.9738028049468994,
554
- "logps/chosen": -33.259681701660156,
555
- "logps/rejected": -32.44579315185547,
556
- "loss": 0.7418,
557
- "rewards/accuracies": 0.512499988079071,
558
- "rewards/chosen": 0.013451650738716125,
559
- "rewards/margins": 0.001838967902585864,
560
- "rewards/rejected": 0.011612680740654469,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-09,
566
- "logits/chosen": -1.9304958581924438,
567
- "logits/rejected": -1.9408457279205322,
568
- "logps/chosen": -32.20914840698242,
569
- "logps/rejected": -35.29241180419922,
570
- "loss": 0.739,
571
- "rewards/accuracies": 0.5375000238418579,
572
- "rewards/chosen": -0.002673571230843663,
573
- "rewards/margins": -0.0004814237472601235,
574
- "rewards/rejected": -0.0021921484731137753,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050324e-09,
580
- "logits/chosen": -2.0693862438201904,
581
- "logits/rejected": -2.0628304481506348,
582
- "logps/chosen": -33.65240478515625,
583
- "logps/rejected": -29.189916610717773,
584
- "loss": 0.7913,
585
- "rewards/accuracies": 0.4375,
586
- "rewards/chosen": -0.0002606172056403011,
587
- "rewards/margins": -0.022166749462485313,
588
- "rewards/rejected": 0.021906131878495216,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-10,
594
- "logits/chosen": -1.9290335178375244,
595
- "logits/rejected": -1.931208848953247,
596
- "logps/chosen": -34.243492126464844,
597
- "logps/rejected": -30.905447006225586,
598
- "loss": 0.6779,
599
- "rewards/accuracies": 0.550000011920929,
600
- "rewards/chosen": -0.0026660130824893713,
601
- "rewards/margins": 0.01702696830034256,
602
- "rewards/rejected": -0.019692981615662575,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
- "train_loss": 0.7160632399769572,
610
- "train_runtime": 3256.7737,
611
- "train_samples_per_second": 0.945,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],
 
15
  "logits/rejected": -1.7377450466156006,
16
  "logps/chosen": -29.553977966308594,
17
  "logps/rejected": -42.813133239746094,
18
+ "loss": 0.5102,
19
  "rewards/accuracies": 0.0,
20
  "rewards/chosen": 0.0,
21
  "rewards/margins": 0.0,
 
25
  {
26
  "epoch": 0.03,
27
  "learning_rate": 1.2820512820512818e-07,
28
+ "logits/chosen": -1.8663597106933594,
29
+ "logits/rejected": -1.8706719875335693,
30
+ "logps/chosen": -36.990386962890625,
31
+ "logps/rejected": -33.658267974853516,
32
+ "loss": 0.4771,
33
+ "rewards/accuracies": 0.5416666865348816,
34
+ "rewards/chosen": 0.011347133666276932,
35
+ "rewards/margins": 0.031080076470971107,
36
+ "rewards/rejected": -0.019732946529984474,
37
  "step": 10
38
  },
39
  {
40
  "epoch": 0.05,
41
  "learning_rate": 2.5641025641025636e-07,
42
+ "logits/chosen": -1.9978214502334595,
43
+ "logits/rejected": -2.000457286834717,
44
+ "logps/chosen": -29.62484359741211,
45
+ "logps/rejected": -29.059850692749023,
46
+ "loss": 0.5261,
47
  "rewards/accuracies": 0.5249999761581421,
48
+ "rewards/chosen": 0.012144992128014565,
49
+ "rewards/margins": 0.009631244465708733,
50
+ "rewards/rejected": 0.0025137457996606827,
51
  "step": 20
52
  },
53
  {
54
  "epoch": 0.08,
55
  "learning_rate": 3.8461538461538463e-07,
56
+ "logits/chosen": -1.9197276830673218,
57
+ "logits/rejected": -1.9170366525650024,
58
+ "logps/chosen": -31.404308319091797,
59
+ "logps/rejected": -33.229034423828125,
60
+ "loss": 0.5076,
61
+ "rewards/accuracies": 0.574999988079071,
62
+ "rewards/chosen": 0.008251860737800598,
63
+ "rewards/margins": 0.015654325485229492,
64
+ "rewards/rejected": -0.007402463350445032,
65
  "step": 30
66
  },
67
  {
68
  "epoch": 0.1,
69
  "learning_rate": 4.999896948438433e-07,
70
+ "logits/chosen": -2.0169289112091064,
71
+ "logits/rejected": -2.008176803588867,
72
+ "logps/chosen": -32.55079650878906,
73
+ "logps/rejected": -32.502708435058594,
74
+ "loss": 0.535,
75
+ "rewards/accuracies": 0.512499988079071,
76
+ "rewards/chosen": 0.018207356333732605,
77
+ "rewards/margins": 0.011235545389354229,
78
+ "rewards/rejected": 0.006971807684749365,
79
  "step": 40
80
  },
81
  {
82
  "epoch": 0.13,
83
  "learning_rate": 4.987541037542186e-07,
84
+ "logits/chosen": -1.86395263671875,
85
+ "logits/rejected": -1.8531732559204102,
86
+ "logps/chosen": -33.559814453125,
87
+ "logps/rejected": -35.43522262573242,
88
+ "loss": 0.5718,
89
+ "rewards/accuracies": 0.4625000059604645,
90
+ "rewards/chosen": -0.0016335565596818924,
91
+ "rewards/margins": -0.013616559095680714,
92
+ "rewards/rejected": 0.011983001604676247,
93
  "step": 50
94
  },
95
  {
96
  "epoch": 0.16,
97
  "learning_rate": 4.954691471941118e-07,
98
+ "logits/chosen": -1.9451929330825806,
99
+ "logits/rejected": -1.9471458196640015,
100
+ "logps/chosen": -32.59247589111328,
101
+ "logps/rejected": -33.19312286376953,
102
+ "loss": 0.4982,
103
+ "rewards/accuracies": 0.574999988079071,
104
+ "rewards/chosen": 0.005393369123339653,
105
+ "rewards/margins": 0.020615221932530403,
106
+ "rewards/rejected": -0.015221851877868176,
107
  "step": 60
108
  },
109
  {
110
  "epoch": 0.18,
111
  "learning_rate": 4.901618883413548e-07,
112
+ "logits/chosen": -2.0797886848449707,
113
+ "logits/rejected": -2.08477783203125,
114
+ "logps/chosen": -33.99976348876953,
115
+ "logps/rejected": -36.57415771484375,
116
+ "loss": 0.575,
117
+ "rewards/accuracies": 0.44999998807907104,
118
+ "rewards/chosen": -0.007893012836575508,
119
+ "rewards/margins": -0.004194633569568396,
120
+ "rewards/rejected": -0.0036983792670071125,
121
  "step": 70
122
  },
123
  {
124
  "epoch": 0.21,
125
  "learning_rate": 4.828760511501322e-07,
126
+ "logits/chosen": -1.9419677257537842,
127
+ "logits/rejected": -1.94512939453125,
128
+ "logps/chosen": -34.39982986450195,
129
+ "logps/rejected": -34.590721130371094,
130
+ "loss": 0.5441,
131
+ "rewards/accuracies": 0.5625,
132
+ "rewards/chosen": 0.019587691873311996,
133
+ "rewards/margins": 0.01840476132929325,
134
+ "rewards/rejected": 0.0011829293798655272,
135
  "step": 80
136
  },
137
  {
138
  "epoch": 0.23,
139
  "learning_rate": 4.736716601303429e-07,
140
+ "logits/chosen": -1.9507859945297241,
141
+ "logits/rejected": -1.9552862644195557,
142
+ "logps/chosen": -32.467891693115234,
143
+ "logps/rejected": -32.358272552490234,
144
+ "loss": 0.5391,
145
+ "rewards/accuracies": 0.5375000238418579,
146
+ "rewards/chosen": -0.00033024101867340505,
147
+ "rewards/margins": -0.0007759220898151398,
148
+ "rewards/rejected": 0.00044568348675966263,
149
  "step": 90
150
  },
151
  {
152
  "epoch": 0.26,
153
  "learning_rate": 4.62624545834521e-07,
154
+ "logits/chosen": -2.049070358276367,
155
+ "logits/rejected": -2.047074794769287,
156
+ "logps/chosen": -32.25985336303711,
157
+ "logps/rejected": -31.274398803710938,
158
+ "loss": 0.5589,
159
+ "rewards/accuracies": 0.5,
160
+ "rewards/chosen": -0.013313899748027325,
161
+ "rewards/margins": -0.007583809085190296,
162
+ "rewards/rejected": -0.005730087868869305,
163
  "step": 100
164
  },
165
  {
166
  "epoch": 0.26,
167
+ "eval_logits/chosen": -2.244201421737671,
168
+ "eval_logits/rejected": -2.239315986633301,
169
+ "eval_logps/chosen": -34.009124755859375,
170
+ "eval_logps/rejected": -37.5127067565918,
171
+ "eval_loss": 0.5106843709945679,
172
+ "eval_rewards/accuracies": 0.5573089718818665,
173
+ "eval_rewards/chosen": 0.017796913161873817,
174
+ "eval_rewards/margins": 0.015057443641126156,
175
+ "eval_rewards/rejected": 0.0027394662611186504,
176
+ "eval_runtime": 146.0293,
177
+ "eval_samples_per_second": 2.349,
178
  "eval_steps_per_second": 0.294,
179
  "step": 100
180
  },
181
  {
182
  "epoch": 0.29,
183
  "learning_rate": 4.4982572012636904e-07,
184
+ "logits/chosen": -2.005356550216675,
185
+ "logits/rejected": -2.0029516220092773,
186
+ "logps/chosen": -33.2292366027832,
187
+ "logps/rejected": -34.054996490478516,
188
+ "loss": 0.4957,
189
+ "rewards/accuracies": 0.5874999761581421,
190
+ "rewards/chosen": 0.01020804513245821,
191
+ "rewards/margins": 0.028141701593995094,
192
+ "rewards/rejected": -0.01793365553021431,
193
  "step": 110
194
  },
195
  {
196
  "epoch": 0.31,
197
  "learning_rate": 4.353806263777677e-07,
198
+ "logits/chosen": -2.0166029930114746,
199
+ "logits/rejected": -2.0082242488861084,
200
+ "logps/chosen": -32.45597457885742,
201
+ "logps/rejected": -32.18632507324219,
202
+ "loss": 0.5282,
203
+ "rewards/accuracies": 0.550000011920929,
204
+ "rewards/chosen": -0.007746054325252771,
205
+ "rewards/margins": 0.009627602994441986,
206
+ "rewards/rejected": -0.01737365685403347,
207
  "step": 120
208
  },
209
  {
210
  "epoch": 0.34,
211
  "learning_rate": 4.194082707715275e-07,
212
+ "logits/chosen": -2.046288251876831,
213
+ "logits/rejected": -2.038238525390625,
214
+ "logps/chosen": -30.492712020874023,
215
+ "logps/rejected": -32.042259216308594,
216
+ "loss": 0.5777,
217
  "rewards/accuracies": 0.44999998807907104,
218
+ "rewards/chosen": -0.011898026801645756,
219
+ "rewards/margins": -0.01942119374871254,
220
+ "rewards/rejected": 0.007523166947066784,
221
  "step": 130
222
  },
223
  {
224
  "epoch": 0.36,
225
  "learning_rate": 4.020402418666621e-07,
226
+ "logits/chosen": -1.976900339126587,
227
+ "logits/rejected": -1.9871864318847656,
228
+ "logps/chosen": -31.39472007751465,
229
+ "logps/rejected": -32.55016326904297,
230
+ "loss": 0.4864,
231
+ "rewards/accuracies": 0.5874999761581421,
232
+ "rewards/chosen": 0.01745815947651863,
233
+ "rewards/margins": 0.028155237436294556,
234
+ "rewards/rejected": -0.01069707702845335,
235
  "step": 140
236
  },
237
  {
238
  "epoch": 0.39,
239
  "learning_rate": 3.8341962650351185e-07,
240
+ "logits/chosen": -1.8908016681671143,
241
+ "logits/rejected": -1.8918870687484741,
242
+ "logps/chosen": -34.199378967285156,
243
+ "logps/rejected": -34.751861572265625,
244
+ "loss": 0.5621,
245
+ "rewards/accuracies": 0.4625000059604645,
246
+ "rewards/chosen": -0.004678909666836262,
247
+ "rewards/margins": -0.008454290218651295,
248
+ "rewards/rejected": 0.0037753782235085964,
249
  "step": 150
250
  },
251
  {
252
  "epoch": 0.42,
253
  "learning_rate": 3.636998309800572e-07,
254
+ "logits/chosen": -1.9425878524780273,
255
+ "logits/rejected": -1.9391052722930908,
256
+ "logps/chosen": -36.136619567871094,
257
+ "logps/rejected": -32.712371826171875,
258
+ "loss": 0.5148,
259
  "rewards/accuracies": 0.5,
260
+ "rewards/chosen": 0.02454109489917755,
261
+ "rewards/margins": 0.01727622002363205,
262
+ "rewards/rejected": 0.007264876272529364,
263
  "step": 160
264
  },
265
  {
266
  "epoch": 0.44,
267
  "learning_rate": 3.430433172111807e-07,
268
+ "logits/chosen": -2.042212963104248,
269
+ "logits/rejected": -2.0348353385925293,
270
+ "logps/chosen": -33.78378677368164,
271
+ "logps/rejected": -31.365230560302734,
272
+ "loss": 0.5133,
273
+ "rewards/accuracies": 0.5249999761581421,
274
+ "rewards/chosen": 0.009854594245553017,
275
+ "rewards/margins": 0.01922372169792652,
276
+ "rewards/rejected": -0.009369125589728355,
277
  "step": 170
278
  },
279
  {
280
  "epoch": 0.47,
281
  "learning_rate": 3.216202642830543e-07,
282
+ "logits/chosen": -2.047489881515503,
283
+ "logits/rejected": -2.0527498722076416,
284
+ "logps/chosen": -32.54710388183594,
285
+ "logps/rejected": -32.50310134887695,
286
+ "loss": 0.5261,
287
+ "rewards/accuracies": 0.4625000059604645,
288
+ "rewards/chosen": -0.001622129580937326,
289
+ "rewards/margins": 0.010947163216769695,
290
+ "rewards/rejected": -0.01256929337978363,
291
  "step": 180
292
  },
293
  {
294
  "epoch": 0.49,
295
  "learning_rate": 2.9960716642946403e-07,
296
+ "logits/chosen": -2.0485682487487793,
297
+ "logits/rejected": -2.04580020904541,
298
+ "logps/chosen": -31.48910903930664,
299
+ "logps/rejected": -31.318958282470703,
300
+ "loss": 0.5482,
301
+ "rewards/accuracies": 0.550000011920929,
302
+ "rewards/chosen": -0.00010873023711610585,
303
+ "rewards/margins": 0.0004080161452293396,
304
+ "rewards/rejected": -0.000516746542416513,
305
  "step": 190
306
  },
307
  {
308
  "epoch": 0.52,
309
  "learning_rate": 2.771853789806683e-07,
310
+ "logits/chosen": -1.9184706211090088,
311
+ "logits/rejected": -1.9231430292129517,
312
+ "logps/chosen": -31.597286224365234,
313
+ "logps/rejected": -32.80516815185547,
314
+ "loss": 0.522,
315
+ "rewards/accuracies": 0.5,
316
+ "rewards/chosen": 0.006296842359006405,
317
+ "rewards/margins": 0.016262350603938103,
318
+ "rewards/rejected": -0.009965506382286549,
319
  "step": 200
320
  },
321
  {
322
  "epoch": 0.52,
323
+ "eval_logits/chosen": -2.2433319091796875,
324
+ "eval_logits/rejected": -2.238459825515747,
325
+ "eval_logps/chosen": -34.028812408447266,
326
+ "eval_logps/rejected": -37.51158905029297,
327
+ "eval_loss": 0.5422906875610352,
328
+ "eval_rewards/accuracies": 0.46885380148887634,
329
+ "eval_rewards/chosen": 0.00401716772466898,
330
+ "eval_rewards/margins": 0.0004931418807245791,
331
+ "eval_rewards/rejected": 0.0035240259021520615,
332
+ "eval_runtime": 145.7818,
333
+ "eval_samples_per_second": 2.353,
334
  "eval_steps_per_second": 0.295,
335
  "step": 200
336
  },
337
  {
338
  "epoch": 0.55,
339
  "learning_rate": 2.5453962426402e-07,
340
+ "logits/chosen": -2.0319957733154297,
341
+ "logits/rejected": -2.0426838397979736,
342
+ "logps/chosen": -31.943634033203125,
343
+ "logps/rejected": -33.902008056640625,
344
+ "loss": 0.4594,
345
+ "rewards/accuracies": 0.612500011920929,
346
+ "rewards/chosen": 0.01936788484454155,
347
+ "rewards/margins": 0.046881090849637985,
348
+ "rewards/rejected": -0.027513209730386734,
349
  "step": 210
350
  },
351
  {
352
  "epoch": 0.57,
353
  "learning_rate": 2.318564697655179e-07,
354
+ "logits/chosen": -1.9252302646636963,
355
+ "logits/rejected": -1.9400790929794312,
356
+ "logps/chosen": -30.092998504638672,
357
+ "logps/rejected": -31.55881690979004,
358
+ "loss": 0.5269,
359
+ "rewards/accuracies": 0.5625,
360
+ "rewards/chosen": 0.012976284138858318,
361
+ "rewards/margins": 0.013424187898635864,
362
+ "rewards/rejected": -0.0004478988121263683,
363
  "step": 220
364
  },
365
  {
366
  "epoch": 0.6,
367
  "learning_rate": 2.093227910899832e-07,
368
+ "logits/chosen": -1.983025312423706,
369
+ "logits/rejected": -1.9869884252548218,
370
+ "logps/chosen": -33.37274932861328,
371
+ "logps/rejected": -31.550548553466797,
372
+ "loss": 0.496,
373
+ "rewards/accuracies": 0.512499988079071,
374
+ "rewards/chosen": 0.03130333498120308,
375
+ "rewards/margins": 0.03217558190226555,
376
+ "rewards/rejected": -0.0008722454076632857,
377
  "step": 230
378
  },
379
  {
380
  "epoch": 0.62,
381
  "learning_rate": 1.8712423238279356e-07,
382
+ "logits/chosen": -1.983493447303772,
383
+ "logits/rejected": -1.9615182876586914,
384
+ "logps/chosen": -34.13421630859375,
385
+ "logps/rejected": -34.97159957885742,
386
+ "loss": 0.4915,
387
+ "rewards/accuracies": 0.512499988079071,
388
+ "rewards/chosen": 0.01367081981152296,
389
+ "rewards/margins": 0.02823723293840885,
390
+ "rewards/rejected": -0.014566412195563316,
391
  "step": 240
392
  },
393
  {
394
  "epoch": 0.65,
395
  "learning_rate": 1.654436768970182e-07,
396
+ "logits/chosen": -2.02471661567688,
397
+ "logits/rejected": -2.0214104652404785,
398
+ "logps/chosen": -32.94930648803711,
399
+ "logps/rejected": -36.21381759643555,
400
+ "loss": 0.6084,
401
+ "rewards/accuracies": 0.4000000059604645,
402
+ "rewards/chosen": -0.016968127340078354,
403
+ "rewards/margins": -0.02474220283329487,
404
+ "rewards/rejected": 0.007774075958877802,
405
  "step": 250
406
  },
407
  {
408
  "epoch": 0.68,
409
  "learning_rate": 1.444597403062196e-07,
410
+ "logits/chosen": -1.8912674188613892,
411
+ "logits/rejected": -1.8888145685195923,
412
+ "logps/chosen": -34.208099365234375,
413
+ "logps/rejected": -35.50531005859375,
414
+ "loss": 0.5753,
415
+ "rewards/accuracies": 0.44999998807907104,
416
+ "rewards/chosen": -0.013791380450129509,
417
+ "rewards/margins": -0.013306483626365662,
418
+ "rewards/rejected": -0.00048489533946849406,
419
  "step": 260
420
  },
421
  {
422
  "epoch": 0.7,
423
  "learning_rate": 1.2434529917578887e-07,
424
+ "logits/chosen": -1.8766494989395142,
425
+ "logits/rejected": -1.8741207122802734,
426
+ "logps/chosen": -34.38150405883789,
427
+ "logps/rejected": -31.744686126708984,
428
+ "loss": 0.5631,
429
  "rewards/accuracies": 0.5249999761581421,
430
+ "rewards/chosen": 0.011268051341176033,
431
+ "rewards/margins": -0.007382377982139587,
432
+ "rewards/rejected": 0.018650425598025322,
433
  "step": 270
434
  },
435
  {
436
  "epoch": 0.73,
437
  "learning_rate": 1.0526606671603521e-07,
438
+ "logits/chosen": -1.9800630807876587,
439
+ "logits/rejected": -1.9694359302520752,
440
+ "logps/chosen": -35.32728958129883,
441
+ "logps/rejected": -31.843835830688477,
442
+ "loss": 0.4967,
443
+ "rewards/accuracies": 0.550000011920929,
444
+ "rewards/chosen": 0.02054680697619915,
445
+ "rewards/margins": 0.022537903860211372,
446
+ "rewards/rejected": -0.001991095719859004,
447
  "step": 280
448
  },
449
  {
450
  "epoch": 0.75,
451
  "learning_rate": 8.737922755071453e-08,
452
+ "logits/chosen": -2.0754363536834717,
453
+ "logits/rejected": -2.060412645339966,
454
+ "logps/chosen": -30.902868270874023,
455
+ "logps/rejected": -32.63262939453125,
456
+ "loss": 0.5486,
457
  "rewards/accuracies": 0.4625000059604645,
458
+ "rewards/chosen": 0.014878431335091591,
459
+ "rewards/margins": 0.0014350058045238256,
460
+ "rewards/rejected": 0.013443423435091972,
461
  "step": 290
462
  },
463
  {
464
  "epoch": 0.78,
465
  "learning_rate": 7.08321427484816e-08,
466
+ "logits/chosen": -1.9468863010406494,
467
+ "logits/rejected": -1.9443439245224,
468
+ "logps/chosen": -32.88282775878906,
469
+ "logps/rejected": -30.835247039794922,
470
+ "loss": 0.4616,
471
+ "rewards/accuracies": 0.637499988079071,
472
+ "rewards/chosen": 0.03099890425801277,
473
+ "rewards/margins": 0.04390609636902809,
474
+ "rewards/rejected": -0.012907189317047596,
475
  "step": 300
476
  },
477
  {
478
  "epoch": 0.78,
479
+ "eval_logits/chosen": -2.243941307067871,
480
+ "eval_logits/rejected": -2.2390596866607666,
481
+ "eval_logps/chosen": -34.027870178222656,
482
+ "eval_logps/rejected": -37.49814224243164,
483
+ "eval_loss": 0.5651828050613403,
484
+ "eval_rewards/accuracies": 0.4808970093727112,
485
+ "eval_rewards/chosen": 0.004676156677305698,
486
+ "eval_rewards/margins": -0.008257162757217884,
487
+ "eval_rewards/rejected": 0.012933320365846157,
488
+ "eval_runtime": 145.8956,
489
+ "eval_samples_per_second": 2.351,
490
  "eval_steps_per_second": 0.295,
491
  "step": 300
492
  },
493
  {
494
  "epoch": 0.81,
495
  "learning_rate": 5.576113578589034e-08,
496
+ "logits/chosen": -1.928396224975586,
497
+ "logits/rejected": -1.9251337051391602,
498
+ "logps/chosen": -31.597158432006836,
499
+ "logps/rejected": -33.751304626464844,
500
+ "loss": 0.5251,
501
+ "rewards/accuracies": 0.512499988079071,
502
+ "rewards/chosen": 0.004333490040153265,
503
+ "rewards/margins": 0.01073872484266758,
504
+ "rewards/rejected": -0.006405232939869165,
505
  "step": 310
506
  },
507
  {
508
  "epoch": 0.83,
509
  "learning_rate": 4.229036944380912e-08,
510
+ "logits/chosen": -1.9806029796600342,
511
+ "logits/rejected": -1.9683090448379517,
512
+ "logps/chosen": -34.563812255859375,
513
+ "logps/rejected": -33.55340576171875,
514
+ "loss": 0.4803,
515
+ "rewards/accuracies": 0.5874999761581421,
516
+ "rewards/chosen": 0.022582050412893295,
517
+ "rewards/margins": 0.03277861326932907,
518
+ "rewards/rejected": -0.010196560993790627,
519
  "step": 320
520
  },
521
  {
522
  "epoch": 0.86,
523
  "learning_rate": 3.053082288996112e-08,
524
+ "logits/chosen": -2.0163512229919434,
525
+ "logits/rejected": -2.0149030685424805,
526
+ "logps/chosen": -33.46595764160156,
527
+ "logps/rejected": -32.46282196044922,
528
+ "loss": 0.5443,
529
+ "rewards/accuracies": 0.48750001192092896,
530
+ "rewards/chosen": 0.013885289430618286,
531
+ "rewards/margins": 0.0021549216471612453,
532
+ "rewards/rejected": 0.011730367317795753,
533
  "step": 330
534
  },
535
  {
536
  "epoch": 0.88,
537
  "learning_rate": 2.05793773749158e-08,
538
+ "logits/chosen": -2.1030476093292236,
539
+ "logits/rejected": -2.0872480869293213,
540
+ "logps/chosen": -34.173221588134766,
541
+ "logps/rejected": -33.08686447143555,
542
+ "loss": 0.5853,
543
+ "rewards/accuracies": 0.5,
544
+ "rewards/chosen": 0.004321468528360128,
545
+ "rewards/margins": -0.01749541237950325,
546
+ "rewards/rejected": 0.021816883236169815,
547
  "step": 340
548
  },
549
  {
550
  "epoch": 0.91,
551
  "learning_rate": 1.251801807404168e-08,
552
+ "logits/chosen": -1.9748103618621826,
553
+ "logits/rejected": -1.9738647937774658,
554
+ "logps/chosen": -33.254066467285156,
555
+ "logps/rejected": -32.4422721862793,
556
+ "loss": 0.5603,
557
+ "rewards/accuracies": 0.38749998807907104,
558
+ "rewards/chosen": 0.019621744751930237,
559
+ "rewards/margins": 0.0036095953546464443,
560
+ "rewards/rejected": 0.016012147068977356,
561
  "step": 350
562
  },
563
  {
564
  "epoch": 0.94,
565
  "learning_rate": 6.41315865106129e-09,
566
+ "logits/chosen": -1.9305530786514282,
567
+ "logits/rejected": -1.9409068822860718,
568
+ "logps/chosen": -32.2257080078125,
569
+ "logps/rejected": -35.29142379760742,
570
+ "loss": 0.5729,
571
+ "rewards/accuracies": 0.512499988079071,
572
+ "rewards/chosen": -0.0147119564935565,
573
+ "rewards/margins": -0.012842650525271893,
574
+ "rewards/rejected": -0.0018693048041313887,
575
  "step": 360
576
  },
577
  {
578
  "epoch": 0.96,
579
  "learning_rate": 2.3150941078050324e-09,
580
+ "logits/chosen": -2.069432497024536,
581
+ "logits/rejected": -2.0628814697265625,
582
+ "logps/chosen": -33.65316390991211,
583
+ "logps/rejected": -29.218481063842773,
584
+ "loss": 0.5602,
585
+ "rewards/accuracies": 0.48750001192092896,
586
+ "rewards/chosen": -0.000836326158605516,
587
+ "rewards/margins": -0.006397420074790716,
588
+ "rewards/rejected": 0.005561096128076315,
589
  "step": 370
590
  },
591
  {
592
  "epoch": 0.99,
593
  "learning_rate": 2.575864278703266e-10,
594
+ "logits/chosen": -1.9289767742156982,
595
+ "logits/rejected": -1.9311527013778687,
596
+ "logps/chosen": -34.243995666503906,
597
+ "logps/rejected": -30.892364501953125,
598
+ "loss": 0.5254,
599
+ "rewards/accuracies": 0.5249999761581421,
600
+ "rewards/chosen": -0.0034601963125169277,
601
+ "rewards/margins": 0.010354455560445786,
602
+ "rewards/rejected": -0.013814652338624,
603
  "step": 380
604
  },
605
  {
606
  "epoch": 1.0,
607
  "step": 385,
608
  "total_flos": 0.0,
609
+ "train_loss": 0.5317366457604743,
610
+ "train_runtime": 3254.062,
611
+ "train_samples_per_second": 0.946,
612
  "train_steps_per_second": 0.118
613
  }
614
  ],
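The periodic evaluation records in the log_history above are what the README results table summarizes. A minimal sketch, assuming the committed trainer_state.json is available locally, to pull those rows back out:

```python
# Sketch: extract the evaluation entries (those carrying "eval_loss") from
# trainer_state.json to reproduce the README results table.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(entry["step"], entry["eval_loss"],
              entry["eval_rewards/accuracies"], entry["eval_rewards/margins"])
```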