tiagoblima committed on
Commit 226ebe1
1 Parent(s): da435b9

End of training

Files changed (5)
  1. README.md +4 -2
  2. all_results.json +13 -0
  3. eval_results.json +8 -0
  4. train_results.json +8 -0
  5. trainer_state.json +548 -0
README.md CHANGED
@@ -3,6 +3,8 @@ license: mit
 base_model: unicamp-dl/ptt5-base-t5-vocab
 tags:
 - generated_from_trainer
+datasets:
+- tiagoblima/qg_squad_v1_pt
 model-index:
 - name: t5_base-qg-aap-nopeft
   results: []
@@ -13,9 +15,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # t5_base-qg-aap-nopeft
 
-This model is a fine-tuned version of [unicamp-dl/ptt5-base-t5-vocab](https://huggingface.co/unicamp-dl/ptt5-base-t5-vocab) on an unknown dataset.
+This model is a fine-tuned version of [unicamp-dl/ptt5-base-t5-vocab](https://huggingface.co/unicamp-dl/ptt5-base-t5-vocab) on the tiagoblima/qg_squad_v1_pt dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.0950
+- Loss: 1.0926
 
 ## Model description
 
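With the dataset now declared in the card, the checkpoint can be used directly for Portuguese question generation. The sketch below shows one way to load it with `transformers`; the Hub id `tiagoblima/t5_base-qg-aap-nopeft` is inferred from this repository, and the `"answer: ... context: ..."` prompt format is an assumption, since the card does not document the exact input template used during fine-tuning.

```python
# Minimal usage sketch (assumptions noted below) for the fine-tuned checkpoint.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Assumed Hub id, inferred from this repository's owner and model name.
model_id = "tiagoblima/t5_base-qg-aap-nopeft"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# Hypothetical prompt format; the model card does not specify the template
# actually used when fine-tuning on tiagoblima/qg_squad_v1_pt.
text = (
    "answer: Dom Pedro I "
    "context: Dom Pedro I proclamou a independência do Brasil em 1822."
)

inputs = tokenizer(text, return_tensors="pt", truncation=True)
outputs = model.generate(**inputs, max_new_tokens=64, num_beams=4)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```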
all_results.json ADDED
@@ -0,0 +1,13 @@
+{
+    "epoch": 5.0,
+    "eval_loss": 1.0926330089569092,
+    "eval_runtime": 202.1767,
+    "eval_samples": 7223,
+    "eval_samples_per_second": 35.726,
+    "eval_steps_per_second": 8.933,
+    "train_loss": 0.9537819692403964,
+    "train_runtime": 26727.1522,
+    "train_samples": 51704,
+    "train_samples_per_second": 9.673,
+    "train_steps_per_second": 0.302
+}
eval_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 5.0,
+    "eval_loss": 1.0926330089569092,
+    "eval_runtime": 202.1767,
+    "eval_samples": 7223,
+    "eval_samples_per_second": 35.726,
+    "eval_steps_per_second": 8.933
+}
train_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 5.0,
+    "train_loss": 0.9537819692403964,
+    "train_runtime": 26727.1522,
+    "train_samples": 51704,
+    "train_samples_per_second": 9.673,
+    "train_steps_per_second": 0.302
+}
trainer_state.json ADDED
@@ -0,0 +1,548 @@
+{
+  "best_metric": 1.0926330089569092,
+  "best_model_checkpoint": "/temp/t5_base-qg-aap-nopeft/checkpoint-6464",
+  "epoch": 5.0,
+  "eval_steps": 500,
+  "global_step": 8080,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.06,
+      "learning_rate": 9.876237623762377e-05,
+      "loss": 1.9841,
+      "step": 100
+    },
+    {
+      "epoch": 0.12,
+      "learning_rate": 9.752475247524753e-05,
+      "loss": 1.2569,
+      "step": 200
+    },
+    {
+      "epoch": 0.19,
+      "learning_rate": 9.628712871287129e-05,
+      "loss": 1.2054,
+      "step": 300
+    },
+    {
+      "epoch": 0.25,
+      "learning_rate": 9.504950495049505e-05,
+      "loss": 1.1704,
+      "step": 400
+    },
+    {
+      "epoch": 0.31,
+      "learning_rate": 9.381188118811881e-05,
+      "loss": 1.1504,
+      "step": 500
+    },
+    {
+      "epoch": 0.37,
+      "learning_rate": 9.257425742574259e-05,
+      "loss": 1.14,
+      "step": 600
+    },
+    {
+      "epoch": 0.43,
+      "learning_rate": 9.133663366336635e-05,
+      "loss": 1.1303,
+      "step": 700
+    },
+    {
+      "epoch": 0.5,
+      "learning_rate": 9.009900990099011e-05,
+      "loss": 1.1022,
+      "step": 800
+    },
+    {
+      "epoch": 0.56,
+      "learning_rate": 8.886138613861387e-05,
+      "loss": 1.0985,
+      "step": 900
+    },
+    {
+      "epoch": 0.62,
+      "learning_rate": 8.762376237623763e-05,
+      "loss": 1.0944,
+      "step": 1000
+    },
+    {
+      "epoch": 0.68,
+      "learning_rate": 8.638613861386139e-05,
+      "loss": 1.0762,
+      "step": 1100
+    },
+    {
+      "epoch": 0.74,
+      "learning_rate": 8.514851485148515e-05,
+      "loss": 1.0784,
+      "step": 1200
+    },
+    {
+      "epoch": 0.8,
+      "learning_rate": 8.391089108910891e-05,
+      "loss": 1.076,
+      "step": 1300
+    },
+    {
+      "epoch": 0.87,
+      "learning_rate": 8.267326732673268e-05,
+      "loss": 1.0672,
+      "step": 1400
+    },
+    {
+      "epoch": 0.93,
+      "learning_rate": 8.143564356435644e-05,
+      "loss": 1.0673,
+      "step": 1500
+    },
+    {
+      "epoch": 0.99,
+      "learning_rate": 8.019801980198021e-05,
+      "loss": 1.0561,
+      "step": 1600
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 1.129577875137329,
+      "eval_runtime": 203.5144,
+      "eval_samples_per_second": 35.491,
+      "eval_steps_per_second": 8.874,
+      "step": 1616
+    },
+    {
+      "epoch": 1.05,
+      "learning_rate": 7.896039603960397e-05,
+      "loss": 0.989,
+      "step": 1700
+    },
+    {
+      "epoch": 1.11,
+      "learning_rate": 7.772277227722773e-05,
+      "loss": 0.9881,
+      "step": 1800
+    },
+    {
+      "epoch": 1.18,
+      "learning_rate": 7.64851485148515e-05,
+      "loss": 0.987,
+      "step": 1900
+    },
+    {
+      "epoch": 1.24,
+      "learning_rate": 7.524752475247526e-05,
+      "loss": 0.9849,
+      "step": 2000
+    },
+    {
+      "epoch": 1.3,
+      "learning_rate": 7.400990099009902e-05,
+      "loss": 0.9807,
+      "step": 2100
+    },
+    {
+      "epoch": 1.36,
+      "learning_rate": 7.277227722772278e-05,
+      "loss": 0.9784,
+      "step": 2200
+    },
+    {
+      "epoch": 1.42,
+      "learning_rate": 7.153465346534654e-05,
+      "loss": 0.9868,
+      "step": 2300
+    },
+    {
+      "epoch": 1.49,
+      "learning_rate": 7.02970297029703e-05,
+      "loss": 0.9792,
+      "step": 2400
+    },
+    {
+      "epoch": 1.55,
+      "learning_rate": 6.905940594059406e-05,
+      "loss": 0.988,
+      "step": 2500
+    },
+    {
+      "epoch": 1.61,
+      "learning_rate": 6.782178217821783e-05,
+      "loss": 0.9806,
+      "step": 2600
+    },
+    {
+      "epoch": 1.67,
+      "learning_rate": 6.658415841584159e-05,
+      "loss": 0.9612,
+      "step": 2700
+    },
+    {
+      "epoch": 1.73,
+      "learning_rate": 6.534653465346535e-05,
+      "loss": 0.9632,
+      "step": 2800
+    },
+    {
+      "epoch": 1.79,
+      "learning_rate": 6.410891089108911e-05,
+      "loss": 0.9861,
+      "step": 2900
+    },
+    {
+      "epoch": 1.86,
+      "learning_rate": 6.287128712871287e-05,
+      "loss": 0.968,
+      "step": 3000
+    },
+    {
+      "epoch": 1.92,
+      "learning_rate": 6.163366336633663e-05,
+      "loss": 0.9727,
+      "step": 3100
+    },
+    {
+      "epoch": 1.98,
+      "learning_rate": 6.03960396039604e-05,
+      "loss": 0.9695,
+      "step": 3200
+    },
+    {
+      "epoch": 2.0,
+      "eval_loss": 1.1012357473373413,
+      "eval_runtime": 203.1507,
+      "eval_samples_per_second": 35.555,
+      "eval_steps_per_second": 8.89,
+      "step": 3232
+    },
+    {
+      "epoch": 2.04,
+      "learning_rate": 5.915841584158416e-05,
+      "loss": 0.9333,
+      "step": 3300
+    },
+    {
+      "epoch": 2.1,
+      "learning_rate": 5.792079207920792e-05,
+      "loss": 0.9114,
+      "step": 3400
+    },
+    {
+      "epoch": 2.17,
+      "learning_rate": 5.668316831683168e-05,
+      "loss": 0.9173,
+      "step": 3500
+    },
+    {
+      "epoch": 2.23,
+      "learning_rate": 5.544554455445545e-05,
+      "loss": 0.9131,
+      "step": 3600
+    },
+    {
+      "epoch": 2.29,
+      "learning_rate": 5.420792079207921e-05,
+      "loss": 0.9064,
+      "step": 3700
+    },
+    {
+      "epoch": 2.35,
+      "learning_rate": 5.2970297029702974e-05,
+      "loss": 0.9113,
+      "step": 3800
+    },
+    {
+      "epoch": 2.41,
+      "learning_rate": 5.1732673267326735e-05,
+      "loss": 0.8984,
+      "step": 3900
+    },
+    {
+      "epoch": 2.48,
+      "learning_rate": 5.0495049504950497e-05,
+      "loss": 0.9149,
+      "step": 4000
+    },
+    {
+      "epoch": 2.54,
+      "learning_rate": 4.925742574257426e-05,
+      "loss": 0.9041,
+      "step": 4100
+    },
+    {
+      "epoch": 2.6,
+      "learning_rate": 4.801980198019802e-05,
+      "loss": 0.9137,
+      "step": 4200
+    },
+    {
+      "epoch": 2.66,
+      "learning_rate": 4.678217821782179e-05,
+      "loss": 0.9117,
+      "step": 4300
+    },
+    {
+      "epoch": 2.72,
+      "learning_rate": 4.554455445544555e-05,
+      "loss": 0.9024,
+      "step": 4400
+    },
+    {
+      "epoch": 2.78,
+      "learning_rate": 4.430693069306931e-05,
+      "loss": 0.914,
+      "step": 4500
+    },
+    {
+      "epoch": 2.85,
+      "learning_rate": 4.306930693069307e-05,
+      "loss": 0.9295,
+      "step": 4600
+    },
+    {
+      "epoch": 2.91,
+      "learning_rate": 4.183168316831683e-05,
+      "loss": 0.9088,
+      "step": 4700
+    },
+    {
+      "epoch": 2.97,
+      "learning_rate": 4.05940594059406e-05,
+      "loss": 0.9111,
+      "step": 4800
+    },
+    {
+      "epoch": 3.0,
+      "eval_loss": 1.0933234691619873,
+      "eval_runtime": 203.1992,
+      "eval_samples_per_second": 35.546,
+      "eval_steps_per_second": 8.888,
+      "step": 4848
+    },
+    {
+      "epoch": 3.03,
+      "learning_rate": 3.935643564356436e-05,
+      "loss": 0.8905,
+      "step": 4900
+    },
+    {
+      "epoch": 3.09,
+      "learning_rate": 3.811881188118812e-05,
+      "loss": 0.8628,
+      "step": 5000
+    },
+    {
+      "epoch": 3.16,
+      "learning_rate": 3.6881188118811884e-05,
+      "loss": 0.8712,
+      "step": 5100
+    },
+    {
+      "epoch": 3.22,
+      "learning_rate": 3.5643564356435645e-05,
+      "loss": 0.8545,
+      "step": 5200
+    },
+    {
+      "epoch": 3.28,
+      "learning_rate": 3.440594059405941e-05,
+      "loss": 0.8793,
+      "step": 5300
+    },
+    {
+      "epoch": 3.34,
+      "learning_rate": 3.3168316831683175e-05,
+      "loss": 0.8677,
+      "step": 5400
+    },
+    {
+      "epoch": 3.4,
+      "learning_rate": 3.1930693069306936e-05,
+      "loss": 0.8716,
+      "step": 5500
+    },
+    {
+      "epoch": 3.47,
+      "learning_rate": 3.06930693069307e-05,
+      "loss": 0.8708,
+      "step": 5600
+    },
+    {
+      "epoch": 3.53,
+      "learning_rate": 2.9455445544554455e-05,
+      "loss": 0.8782,
+      "step": 5700
+    },
+    {
+      "epoch": 3.59,
+      "learning_rate": 2.8217821782178216e-05,
+      "loss": 0.8651,
+      "step": 5800
+    },
+    {
+      "epoch": 3.65,
+      "learning_rate": 2.6980198019801985e-05,
+      "loss": 0.8597,
+      "step": 5900
+    },
+    {
+      "epoch": 3.71,
+      "learning_rate": 2.5742574257425746e-05,
+      "loss": 0.8516,
+      "step": 6000
+    },
+    {
+      "epoch": 3.77,
+      "learning_rate": 2.4504950495049507e-05,
+      "loss": 0.8759,
+      "step": 6100
+    },
+    {
+      "epoch": 3.84,
+      "learning_rate": 2.326732673267327e-05,
+      "loss": 0.8631,
+      "step": 6200
+    },
+    {
+      "epoch": 3.9,
+      "learning_rate": 2.202970297029703e-05,
+      "loss": 0.8636,
+      "step": 6300
+    },
+    {
+      "epoch": 3.96,
+      "learning_rate": 2.079207920792079e-05,
+      "loss": 0.8691,
+      "step": 6400
+    },
+    {
+      "epoch": 4.0,
+      "eval_loss": 1.0926330089569092,
+      "eval_runtime": 203.0632,
+      "eval_samples_per_second": 35.57,
+      "eval_steps_per_second": 8.894,
+      "step": 6464
+    },
+    {
+      "epoch": 4.02,
+      "learning_rate": 1.9554455445544556e-05,
+      "loss": 0.8573,
+      "step": 6500
+    },
+    {
+      "epoch": 4.08,
+      "learning_rate": 1.8316831683168317e-05,
+      "loss": 0.8536,
+      "step": 6600
+    },
+    {
+      "epoch": 4.15,
+      "learning_rate": 1.707920792079208e-05,
+      "loss": 0.8329,
+      "step": 6700
+    },
+    {
+      "epoch": 4.21,
+      "learning_rate": 1.5841584158415843e-05,
+      "loss": 0.8407,
+      "step": 6800
+    },
+    {
+      "epoch": 4.27,
+      "learning_rate": 1.4603960396039604e-05,
+      "loss": 0.8525,
+      "step": 6900
+    },
+    {
+      "epoch": 4.33,
+      "learning_rate": 1.3366336633663367e-05,
+      "loss": 0.8473,
+      "step": 7000
+    },
+    {
+      "epoch": 4.39,
+      "learning_rate": 1.2128712871287128e-05,
+      "loss": 0.825,
+      "step": 7100
+    },
+    {
+      "epoch": 4.46,
+      "learning_rate": 1.0891089108910891e-05,
+      "loss": 0.834,
+      "step": 7200
+    },
+    {
+      "epoch": 4.52,
+      "learning_rate": 9.653465346534654e-06,
+      "loss": 0.8397,
+      "step": 7300
+    },
+    {
+      "epoch": 4.58,
+      "learning_rate": 8.415841584158417e-06,
+      "loss": 0.8436,
+      "step": 7400
+    },
+    {
+      "epoch": 4.64,
+      "learning_rate": 7.178217821782178e-06,
+      "loss": 0.8353,
+      "step": 7500
+    },
+    {
+      "epoch": 4.7,
+      "learning_rate": 5.940594059405941e-06,
+      "loss": 0.8332,
+      "step": 7600
+    },
+    {
+      "epoch": 4.76,
+      "learning_rate": 4.702970297029704e-06,
+      "loss": 0.8402,
+      "step": 7700
+    },
+    {
+      "epoch": 4.83,
+      "learning_rate": 3.4653465346534657e-06,
+      "loss": 0.8526,
+      "step": 7800
+    },
+    {
+      "epoch": 4.89,
+      "learning_rate": 2.227722772277228e-06,
+      "loss": 0.827,
+      "step": 7900
+    },
+    {
+      "epoch": 4.95,
+      "learning_rate": 9.900990099009902e-07,
+      "loss": 0.8543,
+      "step": 8000
+    },
+    {
+      "epoch": 5.0,
+      "eval_loss": 1.0950443744659424,
+      "eval_runtime": 202.8265,
+      "eval_samples_per_second": 35.612,
+      "eval_steps_per_second": 8.904,
+      "step": 8080
+    },
+    {
+      "epoch": 5.0,
+      "step": 8080,
+      "total_flos": 1.574277938675712e+17,
+      "train_loss": 0.9537819692403964,
+      "train_runtime": 26727.1522,
+      "train_samples_per_second": 9.673,
+      "train_steps_per_second": 0.302
+    }
+  ],
+  "logging_steps": 100,
+  "max_steps": 8080,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "total_flos": 1.574277938675712e+17,
+  "trial_name": null,
+  "trial_params": null
+}
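The trainer_state.json added above explains the loss reported in the updated README: evaluation loss bottoms out at 1.0926 at epoch 4 (step 6464, recorded as best_model_checkpoint), then ticks up to 1.0950 at epoch 5. A small script like the sketch below, which assumes a local copy of the file, recovers those headline numbers from the log history.

```python
# Sketch: summarize the trainer_state.json added in this commit.
# Assumes the file has been downloaded locally as "trainer_state.json".
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print("best checkpoint:", state["best_model_checkpoint"])
print("best eval_loss:", state["best_metric"])

# Evaluation entries in log_history are the ones that carry "eval_loss".
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"epoch {entry['epoch']}: eval_loss {entry['eval_loss']:.4f} at step {entry['step']}")
```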