ramdhanfirdaus committed
Commit 517418a
1 Parent(s): 7c4ef21

Training in progress, step 2600, checkpoint

last-checkpoint/README.md CHANGED
@@ -216,11 +216,4 @@ The following `bitsandbytes` quantization config was used during training:
  ### Framework versions
 
 
- - PEFT 0.6.0.dev0
- ## Training procedure
-
-
- ### Framework versions
-
-
  - PEFT 0.6.0.dev0
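
Note: the README edit above only removes a duplicated "Training procedure / Framework versions" block; the checkpoint remains a PEFT 0.6.0.dev0 adapter. As a rough, non-authoritative sketch (not part of this commit), a checkpoint directory like `last-checkpoint/` can typically be loaded as follows; the base model is taken from the stored adapter config, and the 4-bit flag is only an assumption mirroring the `bitsandbytes` config the README mentions:

```python
# Sketch only: assumes the adapter config in last-checkpoint/ points at a
# reachable base model, and that 4-bit bitsandbytes loading is wanted.
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint_dir = "last-checkpoint"

peft_config = PeftConfig.from_pretrained(checkpoint_dir)
base_model = AutoModelForCausalLM.from_pretrained(
    peft_config.base_model_name_or_path,
    load_in_4bit=True,   # assumption: mirrors the bitsandbytes config in the README
    device_map="auto",
)
model = PeftModel.from_pretrained(base_model, checkpoint_dir)  # loads adapter_model.bin
tokenizer = AutoTokenizer.from_pretrained(checkpoint_dir)
```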
last-checkpoint/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6dbab27f1afd2c2b68d5d308eb910d4e7396f285a2441571ba4fa59cae956c44
+ oid sha256:cb4c8b423eac64f9169d308445226ebc768e3a2bffe7ba714d9a896e0f8305b9
  size 50349441
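
The binary files in this commit are Git LFS pointers: the repo stores only the `oid sha256:` digest and byte `size` of each object, so an unchanged `size` (50349441 bytes) with a new oid means the adapter weights changed while their serialized layout did not. A small, generic sketch for checking a downloaded object against its pointer (standard library only; the path just follows this repo's layout):

```python
# Sketch: recompute the SHA-256 digest of a downloaded LFS object and compare
# it with the "oid sha256:..." line of the pointer file shown above.
import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

print(lfs_sha256("last-checkpoint/adapter_model.bin"))
```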
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2b5c29c227d0a66980592f446e0e2ab4e76d3b4d83b7bf69c8a265756b0811cd
- size 100691721
+ oid sha256:06434133f6ab7488599636ea48c2cf564242377b300073af5ce35587b031c9f4
+ size 100693001
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:91ce772f9b2a32aa1626e6ccbc135efad31ff1869adac26764a35a5c73bb1216
+ oid sha256:ec8a6a85c74c84b1931c98dfb99024e5154ce0f3a099bd81af66da693b03813f
  size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:58c098aa199ff890a6444b804a3fbc89c9468b8e4fb7f53db1b50396e4ed0c2d
+ oid sha256:d7c5d3b3c2a8d9b140a1a8519d056f5ffffcb2bb6ee7238329a9e6866488adf2
  size 627
last-checkpoint/special_tokens_map.json CHANGED
@@ -1,24 +1,6 @@
  {
- "bos_token": {
- "content": "<|endoftext|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
- "eos_token": {
- "content": "<|endoftext|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- },
+ "bos_token": "<|endoftext|>",
+ "eos_token": "<|endoftext|>",
  "pad_token": "<|endoftext|>",
- "unk_token": {
- "content": "<|endoftext|>",
- "lstrip": false,
- "normalized": false,
- "rstrip": false,
- "single_word": false
- }
+ "unk_token": "<|endoftext|>"
  }
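
This change collapses the expanded `AddedToken`-style entries into plain strings; both spellings resolve to the same `<|endoftext|>` token once the tokenizer is loaded. A quick, assumed check using the standard `transformers` loading path (nothing here is specific to this commit):

```python
# Sketch: all four special tokens point at GPT-2's single <|endoftext|> token,
# whether the JSON stored them as plain strings or as AddedToken dicts.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("last-checkpoint")
print(tok.special_tokens_map)
# expected: bos/eos/unk/pad all equal to "<|endoftext|>"
assert tok.bos_token_id == tok.eos_token_id == tok.unk_token_id == tok.pad_token_id
```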
last-checkpoint/tokenizer_config.json CHANGED
@@ -13,12 +13,8 @@
  "bos_token": "<|endoftext|>",
  "clean_up_tokenization_spaces": true,
  "eos_token": "<|endoftext|>",
- "max_length": 512,
  "model_max_length": 1024,
  "pad_token": "<|endoftext|>",
- "stride": 0,
  "tokenizer_class": "GPT2Tokenizer",
- "truncation_side": "right",
- "truncation_strategy": "longest_first",
  "unk_token": "<|endoftext|>"
  }
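
The removed keys (`max_length`, `stride`, `truncation_side`, `truncation_strategy`) describe call-time truncation behaviour rather than the tokenizer itself, so dropping them from the saved config simply leaves truncation up to the caller. A hedged illustration, reusing the old values purely as an example:

```python
# Sketch: pass truncation settings per call; the numbers mirror the removed
# config entries and are illustrative, not mandated by this checkpoint.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("last-checkpoint")
enc = tok(
    "example training text",
    truncation=True,   # corresponds to the removed "longest_first" strategy
    max_length=512,    # corresponds to the removed "max_length": 512
)
print(len(enc["input_ids"]))
```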
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 1.411597490310669,
- "best_model_checkpoint": "./outputs/checkpoint-2300",
- "epoch": 1.6757741347905282,
+ "best_metric": 1.3754903078079224,
+ "best_model_checkpoint": "./outputs/checkpoint-2600",
+ "epoch": 1.8943533697632058,
  "eval_steps": 100,
- "global_step": 2300,
+ "global_step": 2600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -151,191 +151,233 @@
  {
  "epoch": 0.8,
  "learning_rate": 0.0002,
- "loss": 1.6461,
+ "loss": 1.6268,
  "step": 1100
  },
  {
  "epoch": 0.8,
- "eval_loss": 1.6240431070327759,
- "eval_runtime": 293.5989,
- "eval_samples_per_second": 21.369,
- "eval_steps_per_second": 2.674,
+ "eval_loss": 1.6125303506851196,
+ "eval_runtime": 144.0648,
+ "eval_samples_per_second": 43.55,
+ "eval_steps_per_second": 5.449,
  "step": 1100
  },
  {
  "epoch": 0.87,
  "learning_rate": 0.0002,
- "loss": 1.5992,
+ "loss": 1.5911,
  "step": 1200
  },
  {
  "epoch": 0.87,
- "eval_loss": 1.5974311828613281,
- "eval_runtime": 291.7,
- "eval_samples_per_second": 21.508,
- "eval_steps_per_second": 2.691,
+ "eval_loss": 1.5925209522247314,
+ "eval_runtime": 144.042,
+ "eval_samples_per_second": 43.557,
+ "eval_steps_per_second": 5.45,
  "step": 1200
  },
  {
  "epoch": 0.95,
  "learning_rate": 0.0002,
- "loss": 1.6021,
+ "loss": 1.5986,
  "step": 1300
  },
  {
  "epoch": 0.95,
- "eval_loss": 1.5751127004623413,
- "eval_runtime": 289.9524,
- "eval_samples_per_second": 21.638,
- "eval_steps_per_second": 2.707,
+ "eval_loss": 1.571681261062622,
+ "eval_runtime": 144.1746,
+ "eval_samples_per_second": 43.517,
+ "eval_steps_per_second": 5.445,
  "step": 1300
  },
  {
  "epoch": 1.02,
  "learning_rate": 0.0002,
- "loss": 1.5538,
+ "loss": 1.5514,
  "step": 1400
  },
  {
  "epoch": 1.02,
- "eval_loss": 1.5539450645446777,
- "eval_runtime": 287.8748,
- "eval_samples_per_second": 21.794,
- "eval_steps_per_second": 2.727,
+ "eval_loss": 1.5524405241012573,
+ "eval_runtime": 183.3697,
+ "eval_samples_per_second": 34.215,
+ "eval_steps_per_second": 4.281,
  "step": 1400
  },
  {
  "epoch": 1.09,
  "learning_rate": 0.0002,
- "loss": 1.5249,
+ "loss": 1.5235,
  "step": 1500
  },
  {
  "epoch": 1.09,
- "eval_loss": 1.5348094701766968,
- "eval_runtime": 287.891,
- "eval_samples_per_second": 21.793,
- "eval_steps_per_second": 2.727,
+ "eval_loss": 1.534788966178894,
+ "eval_runtime": 317.3733,
+ "eval_samples_per_second": 19.769,
+ "eval_steps_per_second": 2.473,
  "step": 1500
  },
  {
  "epoch": 1.17,
  "learning_rate": 0.0002,
- "loss": 1.506,
+ "loss": 1.5051,
  "step": 1600
  },
  {
  "epoch": 1.17,
- "eval_loss": 1.515953540802002,
- "eval_runtime": 289.836,
- "eval_samples_per_second": 21.647,
- "eval_steps_per_second": 2.708,
+ "eval_loss": 1.517040491104126,
+ "eval_runtime": 315.5897,
+ "eval_samples_per_second": 19.88,
+ "eval_steps_per_second": 2.487,
  "step": 1600
  },
  {
  "epoch": 1.24,
  "learning_rate": 0.0002,
- "loss": 1.5042,
+ "loss": 1.5036,
  "step": 1700
  },
  {
  "epoch": 1.24,
- "eval_loss": 1.4988901615142822,
- "eval_runtime": 291.5471,
- "eval_samples_per_second": 21.52,
- "eval_steps_per_second": 2.693,
+ "eval_loss": 1.500235915184021,
+ "eval_runtime": 314.1201,
+ "eval_samples_per_second": 19.973,
+ "eval_steps_per_second": 2.499,
  "step": 1700
  },
  {
  "epoch": 1.31,
  "learning_rate": 0.0002,
- "loss": 1.4762,
+ "loss": 1.4767,
  "step": 1800
  },
  {
  "epoch": 1.31,
- "eval_loss": 1.4844294786453247,
- "eval_runtime": 293.6668,
- "eval_samples_per_second": 21.364,
- "eval_steps_per_second": 2.673,
+ "eval_loss": 1.4854458570480347,
+ "eval_runtime": 313.3904,
+ "eval_samples_per_second": 20.02,
+ "eval_steps_per_second": 2.505,
  "step": 1800
  },
  {
  "epoch": 1.38,
  "learning_rate": 0.0002,
- "loss": 1.4652,
+ "loss": 1.4665,
  "step": 1900
  },
  {
  "epoch": 1.38,
- "eval_loss": 1.4694663286209106,
- "eval_runtime": 295.5867,
- "eval_samples_per_second": 21.226,
- "eval_steps_per_second": 2.656,
+ "eval_loss": 1.4697930812835693,
+ "eval_runtime": 314.584,
+ "eval_samples_per_second": 19.944,
+ "eval_steps_per_second": 2.495,
  "step": 1900
  },
  {
  "epoch": 1.46,
  "learning_rate": 0.0002,
- "loss": 1.4481,
+ "loss": 1.4498,
  "step": 2000
  },
  {
  "epoch": 1.46,
- "eval_loss": 1.4534634351730347,
- "eval_runtime": 296.7451,
- "eval_samples_per_second": 21.143,
- "eval_steps_per_second": 2.645,
+ "eval_loss": 1.456108808517456,
+ "eval_runtime": 316.2748,
+ "eval_samples_per_second": 19.837,
+ "eval_steps_per_second": 2.482,
  "step": 2000
  },
  {
  "epoch": 1.53,
  "learning_rate": 0.0002,
- "loss": 1.4335,
+ "loss": 1.4358,
  "step": 2100
  },
  {
  "epoch": 1.53,
- "eval_loss": 1.4383305311203003,
- "eval_runtime": 294.659,
- "eval_samples_per_second": 21.292,
- "eval_steps_per_second": 2.664,
+ "eval_loss": 1.4408069849014282,
+ "eval_runtime": 317.862,
+ "eval_samples_per_second": 19.738,
+ "eval_steps_per_second": 2.47,
  "step": 2100
  },
  {
  "epoch": 1.6,
  "learning_rate": 0.0002,
- "loss": 1.4075,
+ "loss": 1.4099,
  "step": 2200
  },
  {
  "epoch": 1.6,
- "eval_loss": 1.4232139587402344,
- "eval_runtime": 292.737,
- "eval_samples_per_second": 21.432,
- "eval_steps_per_second": 2.682,
+ "eval_loss": 1.4274669885635376,
+ "eval_runtime": 319.6918,
+ "eval_samples_per_second": 19.625,
+ "eval_steps_per_second": 2.455,
  "step": 2200
  },
  {
  "epoch": 1.68,
  "learning_rate": 0.0002,
- "loss": 1.4059,
+ "loss": 1.409,
  "step": 2300
  },
  {
  "epoch": 1.68,
- "eval_loss": 1.411597490310669,
- "eval_runtime": 290.6759,
- "eval_samples_per_second": 21.584,
- "eval_steps_per_second": 2.701,
+ "eval_loss": 1.4145855903625488,
+ "eval_runtime": 320.1441,
+ "eval_samples_per_second": 19.597,
+ "eval_steps_per_second": 2.452,
  "step": 2300
+ },
+ {
+ "epoch": 1.75,
+ "learning_rate": 0.0002,
+ "loss": 1.4082,
+ "step": 2400
+ },
+ {
+ "epoch": 1.75,
+ "eval_loss": 1.4013197422027588,
+ "eval_runtime": 318.5235,
+ "eval_samples_per_second": 19.697,
+ "eval_steps_per_second": 2.464,
+ "step": 2400
+ },
+ {
+ "epoch": 1.82,
+ "learning_rate": 0.0002,
+ "loss": 1.3797,
+ "step": 2500
+ },
+ {
+ "epoch": 1.82,
+ "eval_loss": 1.388313889503479,
+ "eval_runtime": 316.5542,
+ "eval_samples_per_second": 19.82,
+ "eval_steps_per_second": 2.48,
+ "step": 2500
+ },
+ {
+ "epoch": 1.89,
+ "learning_rate": 0.0002,
+ "loss": 1.37,
+ "step": 2600
+ },
+ {
+ "epoch": 1.89,
+ "eval_loss": 1.3754903078079224,
+ "eval_runtime": 314.9023,
+ "eval_samples_per_second": 19.924,
+ "eval_steps_per_second": 2.493,
+ "step": 2600
  }
  ],
  "logging_steps": 100,
  "max_steps": 4116,
  "num_train_epochs": 3,
  "save_steps": 100,
- "total_flos": 1.3711720075820237e+17,
+ "total_flos": 1.5498375085790822e+17,
  "trial_name": null,
  "trial_params": null
  }
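
`trainer_state.json` is what the `Trainer` consults when resuming: this update advances `global_step`, `epoch`, `best_metric` and `best_model_checkpoint` from step 2300 to 2600 and appends the three new log/eval entries. A minimal resume sketch, assuming the model, datasets and `TrainingArguments` are rebuilt exactly as in the original run (none of that setup is part of this commit):

```python
# Sketch: only the resume call is the point; every argument to this function is
# assumed to be reconstructed from the original training script.
from transformers import Trainer, TrainingArguments

def resume(model, train_dataset, eval_dataset, training_args: TrainingArguments):
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
    )
    # Restores global_step (2600) plus the optimizer, scheduler and RNG state
    # from this directory and continues toward max_steps = 4116.
    trainer.train(resume_from_checkpoint="last-checkpoint")
```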
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e1ab4a714067324690a64db56d021644d50462360424c147b3e9df6b69650fa0
+ oid sha256:435ef416a520c327b44f1a335ae059bdb8b9a978d39dfecd5bff01684de2670c
  size 4155
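
`training_args.bin` is the pickled `TrainingArguments` object that `Trainer` saves alongside each checkpoint; it can be inspected directly, assuming a compatible `transformers` version is importable for unpickling:

```python
# Sketch: load the pickled TrainingArguments. Newer torch releases default to
# weights-only loading, hence the explicit weights_only=False; on older torch
# the plain torch.load(path) call is enough.
import torch

training_args = torch.load("last-checkpoint/training_args.bin", weights_only=False)
print(training_args.learning_rate, training_args.max_steps)
```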