{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.549186676994578,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.061967467079783116,
      "grad_norm": 0.31789204478263855,
      "learning_rate": 0.0001,
      "logits/chosen": -2.037724018096924,
      "logits/rejected": -1.4958155155181885,
      "logps/chosen": -366.8481750488281,
      "logps/rejected": -216.5625,
      "loss": 0.7398,
      "rewards/accuracies": 0.596875011920929,
      "rewards/chosen": 0.47608914971351624,
      "rewards/margins": 0.7042624950408936,
      "rewards/rejected": -0.2281733751296997,
      "step": 20
    },
    {
      "epoch": 0.12393493415956623,
      "grad_norm": 0.6692605018615723,
      "learning_rate": 0.0002,
      "logits/chosen": -2.0875039100646973,
      "logits/rejected": -1.591808795928955,
      "logps/chosen": -343.7860412597656,
      "logps/rejected": -225.52999877929688,
      "loss": 0.1009,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": 1.8180320262908936,
      "rewards/margins": 4.439352989196777,
      "rewards/rejected": -2.6213204860687256,
      "step": 40
    },
    {
      "epoch": 0.18590240123934934,
      "grad_norm": 0.05950424447655678,
      "learning_rate": 0.0003,
      "logits/chosen": -2.024808168411255,
      "logits/rejected": -1.550957441329956,
      "logps/chosen": -337.0711364746094,
      "logps/rejected": -244.19100952148438,
      "loss": 0.0377,
      "rewards/accuracies": 0.9906250238418579,
      "rewards/chosen": 1.4306976795196533,
      "rewards/margins": 6.377615928649902,
      "rewards/rejected": -4.94691801071167,
      "step": 60
    },
    {
      "epoch": 0.24786986831913246,
      "grad_norm": 0.23692739009857178,
      "learning_rate": 0.0004,
      "logits/chosen": -1.8400609493255615,
      "logits/rejected": -1.322597861289978,
      "logps/chosen": -346.5595397949219,
      "logps/rejected": -269.07080078125,
      "loss": 0.0183,
      "rewards/accuracies": 0.996874988079071,
      "rewards/chosen": 0.668520987033844,
      "rewards/margins": 8.514095306396484,
      "rewards/rejected": -7.845574378967285,
      "step": 80
    },
    {
      "epoch": 0.30983733539891556,
      "grad_norm": 0.14662492275238037,
      "learning_rate": 0.0005,
      "logits/chosen": -1.7353684902191162,
      "logits/rejected": -1.286170244216919,
      "logps/chosen": -343.4979248046875,
      "logps/rejected": -293.38458251953125,
      "loss": 0.0154,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -1.1620686054229736,
      "rewards/margins": 9.110270500183105,
      "rewards/rejected": -10.272339820861816,
      "step": 100
    },
    {
      "epoch": 0.3718048024786987,
      "grad_norm": 0.061400506645441055,
      "learning_rate": 0.0004969220851487844,
      "logits/chosen": -1.9946537017822266,
      "logits/rejected": -1.5800933837890625,
      "logps/chosen": -379.28070068359375,
      "logps/rejected": -327.15374755859375,
      "loss": 0.0258,
      "rewards/accuracies": 0.9906250238418579,
      "rewards/chosen": -1.6450515985488892,
      "rewards/margins": 9.554536819458008,
      "rewards/rejected": -11.19958782196045,
      "step": 120
    },
    {
      "epoch": 0.4337722695584818,
      "grad_norm": 0.017976779490709305,
      "learning_rate": 0.0004877641290737884,
      "logits/chosen": -2.088599443435669,
      "logits/rejected": -1.5488828420639038,
      "logps/chosen": -364.15057373046875,
      "logps/rejected": -305.5300598144531,
      "loss": 0.0109,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -0.5407935380935669,
      "rewards/margins": 9.945794105529785,
      "rewards/rejected": -10.486587524414062,
      "step": 140
    },
    {
      "epoch": 0.4957397366382649,
      "grad_norm": 0.0334528349339962,
      "learning_rate": 0.00047275163104709196,
      "logits/chosen": -1.9635769128799438,
      "logits/rejected": -1.3838129043579102,
      "logps/chosen": -364.9557189941406,
      "logps/rejected": -315.8921813964844,
      "loss": 0.01,
      "rewards/accuracies": 0.996874988079071,
      "rewards/chosen": -1.1100990772247314,
      "rewards/margins": 10.210677146911621,
      "rewards/rejected": -11.320775985717773,
      "step": 160
    },
    {
      "epoch": 0.557707203718048,
      "grad_norm": 0.18209697306156158,
      "learning_rate": 0.0004522542485937369,
      "logits/chosen": -1.9845554828643799,
      "logits/rejected": -1.381089448928833,
      "logps/chosen": -362.4132080078125,
      "logps/rejected": -319.27923583984375,
      "loss": 0.0136,
      "rewards/accuracies": 0.996874988079071,
      "rewards/chosen": -1.4699629545211792,
      "rewards/margins": 10.683820724487305,
      "rewards/rejected": -12.153783798217773,
      "step": 180
    },
    {
      "epoch": 0.6196746707978311,
      "grad_norm": 0.4615195095539093,
      "learning_rate": 0.00042677669529663686,
      "logits/chosen": -1.8291380405426025,
      "logits/rejected": -1.2410638332366943,
      "logps/chosen": -388.328125,
      "logps/rejected": -355.81683349609375,
      "loss": 0.0155,
      "rewards/accuracies": 0.9906250238418579,
      "rewards/chosen": -2.9048476219177246,
      "rewards/margins": 11.032526969909668,
      "rewards/rejected": -13.93737506866455,
      "step": 200
    },
    {
      "epoch": 0.6816421378776143,
      "grad_norm": 0.008454305119812489,
      "learning_rate": 0.0003969463130731183,
      "logits/chosen": -1.880886435508728,
      "logits/rejected": -1.2066755294799805,
      "logps/chosen": -373.3675231933594,
      "logps/rejected": -329.75762939453125,
      "loss": 0.0088,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.4875344038009644,
      "rewards/margins": 11.527753829956055,
      "rewards/rejected": -13.015289306640625,
      "step": 220
    },
    {
      "epoch": 0.7436096049573974,
      "grad_norm": 0.0020042492542415857,
      "learning_rate": 0.00036349762493488667,
      "logits/chosen": -1.827252984046936,
      "logits/rejected": -1.122102975845337,
      "logps/chosen": -353.72039794921875,
      "logps/rejected": -332.6769104003906,
      "loss": 0.0046,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.0817521810531616,
      "rewards/margins": 12.253175735473633,
      "rewards/rejected": -13.334927558898926,
      "step": 240
    },
    {
      "epoch": 0.8055770720371804,
      "grad_norm": 0.021263889968395233,
      "learning_rate": 0.00032725424859373687,
      "logits/chosen": -1.9819982051849365,
      "logits/rejected": -1.4159529209136963,
      "logps/chosen": -357.13189697265625,
      "logps/rejected": -341.510986328125,
      "loss": 0.0116,
      "rewards/accuracies": 0.984375,
      "rewards/chosen": -0.6232878565788269,
      "rewards/margins": 11.093478202819824,
      "rewards/rejected": -11.716766357421875,
      "step": 260
    },
    {
      "epoch": 0.8675445391169636,
      "grad_norm": 0.002243664814159274,
      "learning_rate": 0.00028910861626005774,
      "logits/chosen": -1.9268354177474976,
      "logits/rejected": -1.3426536321640015,
      "logps/chosen": -355.0523681640625,
      "logps/rejected": -325.38604736328125,
      "loss": 0.0083,
      "rewards/accuracies": 0.996874988079071,
      "rewards/chosen": -0.8373309969902039,
      "rewards/margins": 11.25169563293457,
      "rewards/rejected": -12.089027404785156,
      "step": 280
    },
    {
      "epoch": 0.9295120061967467,
      "grad_norm": 0.004915986675769091,
      "learning_rate": 0.00025,
      "logits/chosen": -1.7971941232681274,
      "logits/rejected": -1.1745736598968506,
      "logps/chosen": -369.9619140625,
      "logps/rejected": -328.6756286621094,
      "loss": 0.0072,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.5830962657928467,
      "rewards/margins": 11.627036094665527,
      "rewards/rejected": -13.210131645202637,
      "step": 300
    },
    {
      "epoch": 0.9914794732765299,
      "grad_norm": 0.012169785797595978,
      "learning_rate": 0.00021089138373994224,
      "logits/chosen": -1.7294784784317017,
      "logits/rejected": -1.1395697593688965,
      "logps/chosen": -374.56390380859375,
      "logps/rejected": -331.4615478515625,
      "loss": 0.0052,
      "rewards/accuracies": 0.996874988079071,
      "rewards/chosen": -1.92715322971344,
      "rewards/margins": 11.8142671585083,
      "rewards/rejected": -13.741419792175293,
      "step": 320
    },
    {
      "epoch": 1.053446940356313,
      "grad_norm": 0.0052313837222754955,
      "learning_rate": 0.00017274575140626317,
      "logits/chosen": -1.749742865562439,
      "logits/rejected": -1.1283290386199951,
      "logps/chosen": -393.4547119140625,
      "logps/rejected": -364.1820983886719,
      "loss": 0.0063,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.6951110363006592,
      "rewards/margins": 12.771199226379395,
      "rewards/rejected": -14.466310501098633,
      "step": 340
    },
    {
      "epoch": 1.115414407436096,
      "grad_norm": 0.0015018833801150322,
      "learning_rate": 0.00013650237506511331,
      "logits/chosen": -1.7724621295928955,
      "logits/rejected": -1.0941975116729736,
      "logps/chosen": -374.0537414550781,
      "logps/rejected": -344.38726806640625,
      "loss": 0.0031,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -2.0093319416046143,
      "rewards/margins": 13.258508682250977,
      "rewards/rejected": -15.267840385437012,
      "step": 360
    },
    {
      "epoch": 1.1773818745158793,
      "grad_norm": 0.0007948160055093467,
      "learning_rate": 0.00010305368692688174,
      "logits/chosen": -1.8663429021835327,
      "logits/rejected": -1.2882705926895142,
      "logps/chosen": -377.31768798828125,
      "logps/rejected": -347.67315673828125,
      "loss": 0.0032,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.45194411277771,
      "rewards/margins": 12.697643280029297,
      "rewards/rejected": -14.149587631225586,
      "step": 380
    },
    {
      "epoch": 1.2393493415956622,
      "grad_norm": 0.0005872417241334915,
      "learning_rate": 7.322330470336314e-05,
      "logits/chosen": -1.8675537109375,
      "logits/rejected": -1.2785203456878662,
      "logps/chosen": -373.61773681640625,
      "logps/rejected": -339.9030456542969,
      "loss": 0.0039,
      "rewards/accuracies": 0.996874988079071,
      "rewards/chosen": -1.034354329109192,
      "rewards/margins": 12.89478874206543,
      "rewards/rejected": -13.929142951965332,
      "step": 400
    },
    {
      "epoch": 1.3013168086754454,
      "grad_norm": 0.01801152527332306,
      "learning_rate": 4.7745751406263163e-05,
      "logits/chosen": -1.8331152200698853,
      "logits/rejected": -1.235565423965454,
      "logps/chosen": -366.6528625488281,
      "logps/rejected": -348.25146484375,
      "loss": 0.0031,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.2253501415252686,
      "rewards/margins": 13.49341869354248,
      "rewards/rejected": -14.718768119812012,
      "step": 420
    },
    {
      "epoch": 1.3632842757552286,
      "grad_norm": 0.004230986814945936,
      "learning_rate": 2.7248368952908055e-05,
      "logits/chosen": -1.8243134021759033,
      "logits/rejected": -1.223825216293335,
      "logps/chosen": -374.45404052734375,
      "logps/rejected": -363.57244873046875,
      "loss": 0.0009,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.9178287982940674,
      "rewards/margins": 13.485855102539062,
      "rewards/rejected": -15.403681755065918,
      "step": 440
    },
    {
      "epoch": 1.4252517428350115,
      "grad_norm": 0.0037465775385499,
      "learning_rate": 1.2235870926211617e-05,
      "logits/chosen": -1.8060734272003174,
      "logits/rejected": -1.2161097526550293,
      "logps/chosen": -367.4268798828125,
      "logps/rejected": -350.69482421875,
      "loss": 0.0019,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.0722858905792236,
      "rewards/margins": 13.695907592773438,
      "rewards/rejected": -14.768194198608398,
      "step": 460
    },
    {
      "epoch": 1.4872192099147947,
      "grad_norm": 0.004549082834273577,
      "learning_rate": 3.077914851215585e-06,
      "logits/chosen": -1.79794180393219,
      "logits/rejected": -1.14779794216156,
      "logps/chosen": -384.23504638671875,
      "logps/rejected": -348.41314697265625,
      "loss": 0.0052,
      "rewards/accuracies": 0.9906250238418579,
      "rewards/chosen": -1.522377371788025,
      "rewards/margins": 13.347444534301758,
      "rewards/rejected": -14.869821548461914,
      "step": 480
    },
    {
      "epoch": 1.549186676994578,
      "grad_norm": 0.0164234209805727,
      "learning_rate": 0.0,
      "logits/chosen": -1.8267322778701782,
      "logits/rejected": -1.2138524055480957,
      "logps/chosen": -366.42333984375,
      "logps/rejected": -364.5099182128906,
      "loss": 0.0051,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -1.8385913372039795,
      "rewards/margins": 13.76579761505127,
      "rewards/rejected": -15.604388236999512,
      "step": 500
    }
  ],
  "logging_steps": 20,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}