DewiBrynJones committed on
Commit
01d3eb9
1 Parent(s): bc41011

Upload folder using huggingface_hub

README.md CHANGED
@@ -15,12 +15,12 @@ model-index:
15
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
16
  should probably proofread and complete it, then remove this comment. -->
17
 
18
- # wav2vec2-btb-cv-ft-btb-cy
19
 
20
  This model is a fine-tuned version of [DewiBrynJones/wav2vec2-xlsr-53-ft-btb-cv-cy](https://huggingface.co/DewiBrynJones/wav2vec2-xlsr-53-ft-btb-cv-cy) on an unknown dataset.
21
  It achieves the following results on the evaluation set:
22
- - Loss: 0.4418
23
- - Wer: 0.3371
24
 
25
  ## Model description
26
 
@@ -40,30 +40,69 @@ More information needed
40
 
41
  The following hyperparameters were used during training:
42
  - learning_rate: 0.0003
43
- - train_batch_size: 16
44
  - eval_batch_size: 64
45
  - seed: 42
46
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
47
  - lr_scheduler_type: linear
48
- - lr_scheduler_warmup_steps: 200
49
- - training_steps: 2200
50
  - mixed_precision_training: Native AMP
51
 
52
  ### Training results
53
 
54
- | Training Loss | Epoch | Step | Validation Loss | Wer |
55
- |:-------------:|:------:|:----:|:---------------:|:------:|
56
- | No log | 0.1139 | 200 | 0.7154 | 0.4784 |
57
- | No log | 0.2278 | 400 | 0.6105 | 0.4456 |
58
- | 1.3805 | 0.3417 | 600 | 0.5903 | 0.4618 |
59
- | 1.3805 | 0.4556 | 800 | 0.5655 | 0.4232 |
60
- | 0.6886 | 0.5695 | 1000 | 0.5267 | 0.3967 |
61
- | 0.6886 | 0.6834 | 1200 | 0.5074 | 0.3808 |
62
- | 0.6886 | 0.7973 | 1400 | 0.4814 | 0.3667 |
63
- | 0.6336 | 0.9112 | 1600 | 0.4642 | 0.3621 |
64
- | 0.6336 | 1.0251 | 1800 | 0.4544 | 0.3472 |
65
- | 0.5411 | 1.1390 | 2000 | 0.4474 | 0.3406 |
66
- | 0.5411 | 1.2528 | 2200 | 0.4418 | 0.3371 |
67
 
68
 
69
  ### Framework versions
 
15
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
16
  should probably proofread and complete it, then remove this comment. -->
17
 
18
+ # wav2vec2-btb-cv-ft-btb-cy-cand
19
 
20
  This model is a fine-tuned version of [DewiBrynJones/wav2vec2-xlsr-53-ft-btb-cv-cy](https://huggingface.co/DewiBrynJones/wav2vec2-xlsr-53-ft-btb-cv-cy) on an unknown dataset.
21
  It achieves the following results on the evaluation set:
22
+ - Loss: 0.4345
23
+ - Wer: 0.3308
24
 
25
  ## Model description
26
 
 
40
 
41
  The following hyperparameters were used during training:
42
  - learning_rate: 0.0003
43
+ - train_batch_size: 4
44
  - eval_batch_size: 64
45
  - seed: 42
46
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
47
  - lr_scheduler_type: linear
48
+ - lr_scheduler_warmup_steps: 1000
49
+ - training_steps: 10000
50
  - mixed_precision_training: Native AMP
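For reference, the hyperparameters above map onto `transformers.TrainingArguments` roughly as follows; this is a minimal sketch, and the output directory, the evaluation/logging cadence and the absence of gradient accumulation are assumptions inferred from the rest of this card rather than values read from the actual run.

```python
from transformers import TrainingArguments

# Minimal sketch of the configuration implied by the hyperparameter list above.
# output_dir is assumed; eval/logging/save steps are inferred from the tables below.
training_args = TrainingArguments(
    output_dir="wav2vec2-btb-cv-ft-btb-cy-cand",  # assumed
    learning_rate=3e-4,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=64,
    seed=42,
    lr_scheduler_type="linear",
    warmup_steps=1000,
    max_steps=10000,
    fp16=True,              # "Native AMP" mixed-precision training
    eval_strategy="steps",  # the results table reports evaluation every 200 steps
    eval_steps=200,
    logging_steps=500,
    save_steps=500,
)
```

The Adam betas and epsilon listed above are the `TrainingArguments` defaults, so they are not set explicitly here.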
51
 
52
  ### Training results
53
 
54
+ | Training Loss | Epoch | Step | Validation Loss | Wer |
55
+ |:-------------:|:------:|:-----:|:---------------:|:------:|
56
+ | No log | 0.0285 | 200 | 1.2522 | 0.6292 |
57
+ | No log | 0.0570 | 400 | 0.6599 | 0.4544 |
58
+ | 2.2791 | 0.0854 | 600 | 0.6629 | 0.4395 |
59
+ | 2.2791 | 0.1139 | 800 | 0.7910 | 0.5453 |
60
+ | 0.8206 | 0.1424 | 1000 | 0.7758 | 0.5701 |
61
+ | 0.8206 | 0.1709 | 1200 | 0.8025 | 0.5783 |
62
+ | 0.8206 | 0.1994 | 1400 | 0.7715 | 0.5211 |
63
+ | 0.9068 | 0.2279 | 1600 | 0.7349 | 0.5128 |
64
+ | 0.9068 | 0.2563 | 1800 | 0.7258 | 0.5152 |
65
+ | 0.8679 | 0.2848 | 2000 | 0.7084 | 0.5216 |
66
+ | 0.8679 | 0.3133 | 2200 | 0.6904 | 0.5014 |
67
+ | 0.8679 | 0.3418 | 2400 | 0.6993 | 0.5178 |
68
+ | 0.8577 | 0.3703 | 2600 | 0.6746 | 0.4867 |
69
+ | 0.8577 | 0.3987 | 2800 | 0.6622 | 0.4963 |
70
+ | 0.7995 | 0.4272 | 3000 | 0.6793 | 0.4935 |
71
+ | 0.7995 | 0.4557 | 3200 | 0.6368 | 0.4701 |
72
+ | 0.7995 | 0.4842 | 3400 | 0.6363 | 0.4781 |
73
+ | 0.8141 | 0.5127 | 3600 | 0.6217 | 0.4656 |
74
+ | 0.8141 | 0.5412 | 3800 | 0.6418 | 0.4940 |
75
+ | 0.7953 | 0.5696 | 4000 | 0.6018 | 0.4542 |
76
+ | 0.7953 | 0.5981 | 4200 | 0.5962 | 0.4580 |
77
+ | 0.7953 | 0.6266 | 4400 | 0.5883 | 0.4459 |
78
+ | 0.7596 | 0.6551 | 4600 | 0.5788 | 0.4325 |
79
+ | 0.7596 | 0.6836 | 4800 | 0.5709 | 0.4412 |
80
+ | 0.7533 | 0.7120 | 5000 | 0.5595 | 0.4352 |
81
+ | 0.7533 | 0.7405 | 5200 | 0.5546 | 0.4232 |
82
+ | 0.7533 | 0.7690 | 5400 | 0.5545 | 0.4244 |
83
+ | 0.7591 | 0.7975 | 5600 | 0.5443 | 0.4076 |
84
+ | 0.7591 | 0.8260 | 5800 | 0.5341 | 0.4146 |
85
+ | 0.6621 | 0.8545 | 6000 | 0.5104 | 0.3955 |
86
+ | 0.6621 | 0.8829 | 6200 | 0.5139 | 0.4011 |
87
+ | 0.6621 | 0.9114 | 6400 | 0.5044 | 0.3804 |
88
+ | 0.6705 | 0.9399 | 6600 | 0.4999 | 0.3896 |
89
+ | 0.6705 | 0.9684 | 6800 | 0.5097 | 0.4053 |
90
+ | 0.6665 | 0.9969 | 7000 | 0.4925 | 0.3785 |
91
+ | 0.6665 | 1.0253 | 7200 | 0.4896 | 0.3689 |
92
+ | 0.6665 | 1.0538 | 7400 | 0.4749 | 0.3687 |
93
+ | 0.5826 | 1.0823 | 7600 | 0.4684 | 0.3628 |
94
+ | 0.5826 | 1.1108 | 7800 | 0.4729 | 0.3585 |
95
+ | 0.5836 | 1.1393 | 8000 | 0.4641 | 0.3553 |
96
+ | 0.5836 | 1.1678 | 8200 | 0.4575 | 0.3530 |
97
+ | 0.5836 | 1.1962 | 8400 | 0.4585 | 0.3486 |
98
+ | 0.5199 | 1.2247 | 8600 | 0.4549 | 0.3451 |
99
+ | 0.5199 | 1.2532 | 8800 | 0.4521 | 0.3408 |
100
+ | 0.5268 | 1.2817 | 9000 | 0.4425 | 0.3395 |
101
+ | 0.5268 | 1.3102 | 9200 | 0.4407 | 0.3362 |
102
+ | 0.5268 | 1.3386 | 9400 | 0.4383 | 0.3340 |
103
+ | 0.5013 | 1.3671 | 9600 | 0.4357 | 0.3325 |
104
+ | 0.5013 | 1.3956 | 9800 | 0.4350 | 0.3317 |
105
+ | 0.5095 | 1.4241 | 10000 | 0.4345 | 0.3308 |
106
 
107
 
108
  ### Framework versions
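As a usage illustration (not part of the auto-generated card), a checkpoint like this can be loaded for CTC inference with the standard wav2vec2 classes. The repo id below is inferred from the card title and the author's namespace, and the audio path is a placeholder.

```python
import torch
import librosa
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# Assumed repo id, inferred from the card title; adjust if the model lives elsewhere.
model_id = "DewiBrynJones/wav2vec2-btb-cv-ft-btb-cy-cand"

processor = Wav2Vec2Processor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)
model.eval()

# wav2vec2 expects 16 kHz mono input; the file name here is a placeholder.
speech, _ = librosa.load("sample_cy.wav", sr=16000)

inputs = processor(speech, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits

pred_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(pred_ids)[0])
```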
all_results.json CHANGED
@@ -1,15 +1,15 @@
1
  {
2
- "epoch": 1.2528473804100229,
3
- "eval_loss": 0.44181880354881287,
4
- "eval_runtime": 185.557,
5
  "eval_samples": 7022,
6
- "eval_samples_per_second": 37.843,
7
- "eval_steps_per_second": 0.593,
8
- "eval_wer": 0.3371424015732338,
9
- "total_flos": 4.0903507086063503e+18,
10
- "train_loss": 0.7801296546242454,
11
- "train_runtime": 3176.8931,
12
  "train_samples": 28086,
13
- "train_samples_per_second": 11.08,
14
- "train_steps_per_second": 0.693
15
  }
 
1
  {
2
+ "epoch": 1.4240956992309883,
3
+ "eval_loss": 0.43451622128486633,
4
+ "eval_runtime": 181.7486,
5
  "eval_samples": 7022,
6
+ "eval_samples_per_second": 38.636,
7
+ "eval_steps_per_second": 0.605,
8
+ "eval_wer": 0.3308175766353526,
9
+ "total_flos": 4.5974516642218747e+18,
10
+ "train_loss": 0.7817989181518554,
11
+ "train_runtime": 11412.7197,
12
  "train_samples": 28086,
13
+ "train_samples_per_second": 3.505,
14
+ "train_steps_per_second": 0.876
15
  }
eval_results.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
- "epoch": 1.2528473804100229,
3
- "eval_loss": 0.44181880354881287,
4
- "eval_runtime": 185.557,
5
  "eval_samples": 7022,
6
- "eval_samples_per_second": 37.843,
7
- "eval_steps_per_second": 0.593,
8
- "eval_wer": 0.3371424015732338
9
  }
 
1
  {
2
+ "epoch": 1.4240956992309883,
3
+ "eval_loss": 0.43451622128486633,
4
+ "eval_runtime": 181.7486,
5
  "eval_samples": 7022,
6
+ "eval_samples_per_second": 38.636,
7
+ "eval_steps_per_second": 0.605,
8
+ "eval_wer": 0.3308175766353526
9
  }
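The `eval_wer` values in these result files are word error rates. A minimal sketch of how such a score is typically computed with the `evaluate` library, using made-up sentences rather than anything from the evaluation set:

```python
import evaluate

# WER = (substitutions + insertions + deletions) / number of reference words
wer_metric = evaluate.load("wer")

references = ["mae hi'n braf heddiw"]   # illustrative reference transcript
predictions = ["mae hi braf heddiw"]    # illustrative model output

print(wer_metric.compute(references=references, predictions=predictions))
# -> 0.25 (one substitution out of four reference words)
```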
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:af23faf18214b297d4ccbedad970a31a008a8ec99c3f508bdcf37dd8d3503459
3
  size 1261975580
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0906239970b1fb662f4f0199c1cb5f14ad33bd3182b007b4301bba6fb9a447a5
3
  size 1261975580
runs/Sep04_11-00-26_1edf4a34bc24/events.out.tfevents.1725444439.1edf4a34bc24.689.0 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dd685bcabb2ffac72aeb7d4bcde83ea6e04f6b8e0ee3fdb9eee49ae3919c88e2
3
+ size 27173
runs/Sep04_11-00-26_1edf4a34bc24/events.out.tfevents.1725456098.1edf4a34bc24.689.1 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a061b6ac14c2ac69ab079065661452dd9c80494b3fb57ae921fa45dc635bb337
3
+ size 406
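The two `runs/` entries added in this commit are TensorBoard event files stored as LFS pointers. A rough sketch of reading the logged scalars with the TensorBoard Python API, assuming the files have been pulled locally and that the Trainer logged a tag such as `eval/wer`:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Point the accumulator at the run directory added in this commit.
acc = EventAccumulator("runs/Sep04_11-00-26_1edf4a34bc24")
acc.Reload()

# List the available scalar tags, then read one series (the tag name is an assumption).
print(acc.Tags()["scalars"])
for event in acc.Scalars("eval/wer"):
    print(event.step, event.value)
```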
train_results.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
- "epoch": 1.2528473804100229,
3
- "total_flos": 4.0903507086063503e+18,
4
- "train_loss": 0.7801296546242454,
5
- "train_runtime": 3176.8931,
6
  "train_samples": 28086,
7
- "train_samples_per_second": 11.08,
8
- "train_steps_per_second": 0.693
9
  }
 
1
  {
2
+ "epoch": 1.4240956992309883,
3
+ "total_flos": 4.5974516642218747e+18,
4
+ "train_loss": 0.7817989181518554,
5
+ "train_runtime": 11412.7197,
6
  "train_samples": 28086,
7
+ "train_samples_per_second": 3.505,
8
+ "train_steps_per_second": 0.876
9
  }
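The reported epoch of about 1.42 is consistent with the other fields in `train_results.json`: with a train batch size of 4 and no gradient accumulation (an assumption), one epoch over 28086 samples takes 7022 optimizer steps, so 10000 steps land partway through the second epoch. A quick check:

```python
import math

# Consistency check for the reported "epoch" value (assumes no gradient accumulation).
train_samples = 28_086
train_batch_size = 4
global_step = 10_000

steps_per_epoch = math.ceil(train_samples / train_batch_size)  # 7022
print(global_step / steps_per_epoch)  # 1.4240956992309883, as reported above
```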
trainer_state.json CHANGED
@@ -1,152 +1,615 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 1.2528473804100229,
5
  "eval_steps": 200,
6
- "global_step": 2200,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
- "epoch": 0.11389521640091116,
13
- "eval_loss": 0.715434730052948,
14
- "eval_runtime": 184.644,
15
- "eval_samples_per_second": 38.03,
16
- "eval_steps_per_second": 0.596,
17
- "eval_wer": 0.47842782922972665,
18
  "step": 200
19
  },
20
  {
21
- "epoch": 0.22779043280182232,
22
- "eval_loss": 0.6105435490608215,
23
- "eval_runtime": 184.1422,
24
- "eval_samples_per_second": 38.134,
25
- "eval_steps_per_second": 0.597,
26
- "eval_wer": 0.4455679719369112,
27
  "step": 400
28
  },
29
  {
30
- "epoch": 0.2847380410022779,
31
- "grad_norm": 5.275528907775879,
32
- "learning_rate": 0.00025545,
33
- "loss": 1.3805,
34
  "step": 500
35
  },
36
  {
37
- "epoch": 0.3416856492027335,
38
- "eval_loss": 0.5903473496437073,
39
- "eval_runtime": 182.8986,
40
- "eval_samples_per_second": 38.393,
41
- "eval_steps_per_second": 0.601,
42
- "eval_wer": 0.4618450949388113,
43
  "step": 600
44
  },
45
  {
46
- "epoch": 0.45558086560364464,
47
- "eval_loss": 0.5655257701873779,
48
- "eval_runtime": 185.0172,
49
- "eval_samples_per_second": 37.953,
50
- "eval_steps_per_second": 0.595,
51
- "eval_wer": 0.42319191060205424,
52
  "step": 800
53
  },
54
  {
55
- "epoch": 0.5694760820045558,
56
- "grad_norm": 3.021517753601074,
57
- "learning_rate": 0.00018059999999999997,
58
- "loss": 0.6886,
59
  "step": 1000
60
  },
61
  {
62
- "epoch": 0.5694760820045558,
63
- "eval_loss": 0.5266719460487366,
64
- "eval_runtime": 185.4642,
65
- "eval_samples_per_second": 37.862,
66
- "eval_steps_per_second": 0.593,
67
- "eval_wer": 0.3966834531418169,
68
  "step": 1000
69
  },
70
  {
71
- "epoch": 0.683371298405467,
72
- "eval_loss": 0.5073907375335693,
73
- "eval_runtime": 188.0873,
74
- "eval_samples_per_second": 37.334,
75
- "eval_steps_per_second": 0.585,
76
- "eval_wer": 0.3808315284550685,
77
  "step": 1200
78
  },
79
  {
80
- "epoch": 0.7972665148063781,
81
- "eval_loss": 0.4813552498817444,
82
- "eval_runtime": 184.9452,
83
- "eval_samples_per_second": 37.968,
84
- "eval_steps_per_second": 0.595,
85
- "eval_wer": 0.36672025937097225,
86
  "step": 1400
87
  },
88
  {
89
- "epoch": 0.8542141230068337,
90
- "grad_norm": 2.263779640197754,
91
- "learning_rate": 0.00010619999999999998,
92
- "loss": 0.6336,
93
  "step": 1500
94
  },
95
  {
96
- "epoch": 0.9111617312072893,
97
- "eval_loss": 0.4641895592212677,
98
- "eval_runtime": 186.4328,
99
- "eval_samples_per_second": 37.665,
100
- "eval_steps_per_second": 0.59,
101
- "eval_wer": 0.36212280258839474,
102
  "step": 1600
103
  },
104
  {
105
- "epoch": 1.0250569476082005,
106
- "eval_loss": 0.454357385635376,
107
- "eval_runtime": 184.8022,
108
- "eval_samples_per_second": 37.997,
109
- "eval_steps_per_second": 0.595,
110
- "eval_wer": 0.3471877117686921,
111
  "step": 1800
112
  },
113
  {
114
- "epoch": 1.1389521640091116,
115
- "grad_norm": 2.1839754581451416,
116
- "learning_rate": 3.149999999999999e-05,
117
- "loss": 0.5411,
118
  "step": 2000
119
  },
120
  {
121
- "epoch": 1.1389521640091116,
122
- "eval_loss": 0.44735047221183777,
123
- "eval_runtime": 185.5024,
124
- "eval_samples_per_second": 37.854,
125
- "eval_steps_per_second": 0.593,
126
- "eval_wer": 0.34058385043649264,
127
  "step": 2000
128
  },
129
  {
130
- "epoch": 1.2528473804100229,
131
- "eval_loss": 0.44181880354881287,
132
- "eval_runtime": 187.1446,
133
- "eval_samples_per_second": 37.522,
134
- "eval_steps_per_second": 0.588,
135
- "eval_wer": 0.3371424015732338,
136
  "step": 2200
137
  },
138
  {
139
- "epoch": 1.2528473804100229,
140
- "step": 2200,
141
- "total_flos": 4.0903507086063503e+18,
142
- "train_loss": 0.7801296546242454,
143
- "train_runtime": 3176.8931,
144
- "train_samples_per_second": 11.08,
145
- "train_steps_per_second": 0.693
146
  }
147
  ],
148
  "logging_steps": 500,
149
- "max_steps": 2200,
150
  "num_input_tokens_seen": 0,
151
  "num_train_epochs": 2,
152
  "save_steps": 500,
@@ -162,8 +625,8 @@
162
  "attributes": {}
163
  }
164
  },
165
- "total_flos": 4.0903507086063503e+18,
166
- "train_batch_size": 16,
167
  "trial_name": null,
168
  "trial_params": null
169
  }
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 1.4240956992309883,
5
  "eval_steps": 200,
6
+ "global_step": 10000,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
+ "epoch": 0.028481913984619765,
13
+ "eval_loss": 1.2521600723266602,
14
+ "eval_runtime": 184.7297,
15
+ "eval_samples_per_second": 38.012,
16
+ "eval_steps_per_second": 0.595,
17
+ "eval_wer": 0.6291606319509959,
18
  "step": 200
19
  },
20
  {
21
+ "epoch": 0.05696382796923953,
22
+ "eval_loss": 0.6599467396736145,
23
+ "eval_runtime": 185.0187,
24
+ "eval_samples_per_second": 37.953,
25
+ "eval_steps_per_second": 0.595,
26
+ "eval_wer": 0.45444398676570247,
27
  "step": 400
28
  },
29
  {
30
+ "epoch": 0.07120478496154942,
31
+ "grad_norm": 18.865299224853516,
32
+ "learning_rate": 0.00014879999999999998,
33
+ "loss": 2.2791,
34
  "step": 500
35
  },
36
  {
37
+ "epoch": 0.0854457419538593,
38
+ "eval_loss": 0.6628636717796326,
39
+ "eval_runtime": 185.7673,
40
+ "eval_samples_per_second": 37.8,
41
+ "eval_steps_per_second": 0.592,
42
+ "eval_wer": 0.4394557461566059,
43
  "step": 600
44
  },
45
  {
46
+ "epoch": 0.11392765593847906,
47
+ "eval_loss": 0.7910040020942688,
48
+ "eval_runtime": 186.4058,
49
+ "eval_samples_per_second": 37.671,
50
+ "eval_steps_per_second": 0.59,
51
+ "eval_wer": 0.5453035517346763,
52
  "step": 800
53
  },
54
  {
55
+ "epoch": 0.14240956992309883,
56
+ "grad_norm": 3.8627092838287354,
57
+ "learning_rate": 0.0002988,
58
+ "loss": 0.8206,
59
  "step": 1000
60
  },
61
  {
62
+ "epoch": 0.14240956992309883,
63
+ "eval_loss": 0.7757941484451294,
64
+ "eval_runtime": 186.8087,
65
+ "eval_samples_per_second": 37.589,
66
+ "eval_steps_per_second": 0.589,
67
+ "eval_wer": 0.5701245033816553,
68
  "step": 1000
69
  },
70
  {
71
+ "epoch": 0.1708914839077186,
72
+ "eval_loss": 0.802534818649292,
73
+ "eval_runtime": 187.4308,
74
+ "eval_samples_per_second": 37.464,
75
+ "eval_steps_per_second": 0.587,
76
+ "eval_wer": 0.5782564211589312,
77
  "step": 1200
78
  },
79
  {
80
+ "epoch": 0.19937339789233838,
81
+ "eval_loss": 0.7715001106262207,
82
+ "eval_runtime": 187.8412,
83
+ "eval_samples_per_second": 37.383,
84
+ "eval_steps_per_second": 0.586,
85
+ "eval_wer": 0.5211336850077731,
86
  "step": 1400
87
  },
88
  {
89
+ "epoch": 0.21361435488464825,
90
+ "grad_norm": 11.042049407958984,
91
+ "learning_rate": 0.00028346666666666665,
92
+ "loss": 0.9068,
93
  "step": 1500
94
  },
95
  {
96
+ "epoch": 0.22785531187695812,
97
+ "eval_loss": 0.7349154949188232,
98
+ "eval_runtime": 191.6788,
99
+ "eval_samples_per_second": 36.634,
100
+ "eval_steps_per_second": 0.574,
101
+ "eval_wer": 0.512775880625573,
102
  "step": 1600
103
  },
104
  {
105
+ "epoch": 0.2563372258615779,
106
+ "eval_loss": 0.7257962226867676,
107
+ "eval_runtime": 189.501,
108
+ "eval_samples_per_second": 37.055,
109
+ "eval_steps_per_second": 0.58,
110
+ "eval_wer": 0.5152473458323922,
111
  "step": 1800
112
  },
113
  {
114
+ "epoch": 0.28481913984619767,
115
+ "grad_norm": 6.190296649932861,
116
+ "learning_rate": 0.0002668,
117
+ "loss": 0.8679,
118
  "step": 2000
119
  },
120
  {
121
+ "epoch": 0.28481913984619767,
122
+ "eval_loss": 0.7084089517593384,
123
+ "eval_runtime": 188.267,
124
+ "eval_samples_per_second": 37.298,
125
+ "eval_steps_per_second": 0.584,
126
+ "eval_wer": 0.5216386080070158,
127
  "step": 2000
128
  },
129
  {
130
+ "epoch": 0.3133010538308174,
131
+ "eval_loss": 0.6904259324073792,
132
+ "eval_runtime": 188.556,
133
+ "eval_samples_per_second": 37.241,
134
+ "eval_steps_per_second": 0.583,
135
+ "eval_wer": 0.5014151131426142,
136
  "step": 2200
137
  },
138
  {
139
+ "epoch": 0.3417829678154372,
140
+ "eval_loss": 0.6992842555046082,
141
+ "eval_runtime": 189.0868,
142
+ "eval_samples_per_second": 37.136,
143
+ "eval_steps_per_second": 0.582,
144
+ "eval_wer": 0.5177586733812567,
145
+ "step": 2400
146
+ },
147
+ {
148
+ "epoch": 0.3560239248077471,
149
+ "grad_norm": 4.8257222175598145,
150
+ "learning_rate": 0.0002501333333333333,
151
+ "loss": 0.8577,
152
+ "step": 2500
153
+ },
154
+ {
155
+ "epoch": 0.37026488180005696,
156
+ "eval_loss": 0.6746060848236084,
157
+ "eval_runtime": 190.1492,
158
+ "eval_samples_per_second": 36.929,
159
+ "eval_steps_per_second": 0.578,
160
+ "eval_wer": 0.48673248382253287,
161
+ "step": 2600
162
+ },
163
+ {
164
+ "epoch": 0.39874679578467676,
165
+ "eval_loss": 0.6621994972229004,
166
+ "eval_runtime": 189.6459,
167
+ "eval_samples_per_second": 37.027,
168
+ "eval_steps_per_second": 0.58,
169
+ "eval_wer": 0.4962595835714001,
170
+ "step": 2800
171
+ },
172
+ {
173
+ "epoch": 0.4272287097692965,
174
+ "grad_norm": 3.6695899963378906,
175
+ "learning_rate": 0.00023346666666666666,
176
+ "loss": 0.7995,
177
+ "step": 3000
178
+ },
179
+ {
180
+ "epoch": 0.4272287097692965,
181
+ "eval_loss": 0.6793097853660583,
182
+ "eval_runtime": 188.7722,
183
+ "eval_samples_per_second": 37.198,
184
+ "eval_steps_per_second": 0.583,
185
+ "eval_wer": 0.49348250707556574,
186
+ "step": 3000
187
+ },
188
+ {
189
+ "epoch": 0.45571062375391624,
190
+ "eval_loss": 0.6368467211723328,
191
+ "eval_runtime": 188.0679,
192
+ "eval_samples_per_second": 37.338,
193
+ "eval_steps_per_second": 0.585,
194
+ "eval_wer": 0.47005673740017806,
195
+ "step": 3200
196
+ },
197
+ {
198
+ "epoch": 0.48419253773853604,
199
+ "eval_loss": 0.6363435387611389,
200
+ "eval_runtime": 188.2666,
201
+ "eval_samples_per_second": 37.298,
202
+ "eval_steps_per_second": 0.584,
203
+ "eval_wer": 0.478055780703969,
204
+ "step": 3400
205
+ },
206
+ {
207
+ "epoch": 0.4984334947308459,
208
+ "grad_norm": 3.4502739906311035,
209
+ "learning_rate": 0.0002168333333333333,
210
+ "loss": 0.8141,
211
+ "step": 3500
212
+ },
213
+ {
214
+ "epoch": 0.5126744517231558,
215
+ "eval_loss": 0.6217373609542847,
216
+ "eval_runtime": 187.6755,
217
+ "eval_samples_per_second": 37.416,
218
+ "eval_steps_per_second": 0.586,
219
+ "eval_wer": 0.46555229274904,
220
+ "step": 3600
221
+ },
222
+ {
223
+ "epoch": 0.5411563657077756,
224
+ "eval_loss": 0.641762912273407,
225
+ "eval_runtime": 186.9231,
226
+ "eval_samples_per_second": 37.566,
227
+ "eval_steps_per_second": 0.588,
228
+ "eval_wer": 0.4940140049695053,
229
+ "step": 3800
230
+ },
231
+ {
232
+ "epoch": 0.5696382796923953,
233
+ "grad_norm": 5.877405643463135,
234
+ "learning_rate": 0.00020016666666666666,
235
+ "loss": 0.7953,
236
+ "step": 4000
237
+ },
238
+ {
239
+ "epoch": 0.5696382796923953,
240
+ "eval_loss": 0.6017736196517944,
241
+ "eval_runtime": 182.787,
242
+ "eval_samples_per_second": 38.416,
243
+ "eval_steps_per_second": 0.602,
244
+ "eval_wer": 0.4542313876081266,
245
+ "step": 4000
246
+ },
247
+ {
248
+ "epoch": 0.5981201936770151,
249
+ "eval_loss": 0.5962206721305847,
250
+ "eval_runtime": 183.0007,
251
+ "eval_samples_per_second": 38.371,
252
+ "eval_steps_per_second": 0.601,
253
+ "eval_wer": 0.4580315975497947,
254
+ "step": 4200
255
+ },
256
+ {
257
+ "epoch": 0.6266021076616348,
258
+ "eval_loss": 0.5883399844169617,
259
+ "eval_runtime": 182.7298,
260
+ "eval_samples_per_second": 38.428,
261
+ "eval_steps_per_second": 0.602,
262
+ "eval_wer": 0.44590015812062345,
263
+ "step": 4400
264
+ },
265
+ {
266
+ "epoch": 0.6408430646539447,
267
+ "grad_norm": 3.615546226501465,
268
+ "learning_rate": 0.0001835333333333333,
269
+ "loss": 0.7596,
270
+ "step": 4500
271
+ },
272
+ {
273
+ "epoch": 0.6550840216462547,
274
+ "eval_loss": 0.578825056552887,
275
+ "eval_runtime": 183.3674,
276
+ "eval_samples_per_second": 38.295,
277
+ "eval_steps_per_second": 0.6,
278
+ "eval_wer": 0.43253298608804264,
279
+ "step": 4600
280
+ },
281
+ {
282
+ "epoch": 0.6835659356308744,
283
+ "eval_loss": 0.5708740949630737,
284
+ "eval_runtime": 182.6951,
285
+ "eval_samples_per_second": 38.436,
286
+ "eval_steps_per_second": 0.602,
287
+ "eval_wer": 0.4412362641013035,
288
+ "step": 4800
289
+ },
290
+ {
291
+ "epoch": 0.7120478496154942,
292
+ "grad_norm": 4.345168590545654,
293
+ "learning_rate": 0.0001669,
294
+ "loss": 0.7533,
295
+ "step": 5000
296
+ },
297
+ {
298
+ "epoch": 0.7120478496154942,
299
+ "eval_loss": 0.5594890117645264,
300
+ "eval_runtime": 182.5857,
301
+ "eval_samples_per_second": 38.459,
302
+ "eval_steps_per_second": 0.602,
303
+ "eval_wer": 0.4352170504524376,
304
+ "step": 5000
305
+ },
306
+ {
307
+ "epoch": 0.7405297636001139,
308
+ "eval_loss": 0.5545539259910583,
309
+ "eval_runtime": 182.2233,
310
+ "eval_samples_per_second": 38.535,
311
+ "eval_steps_per_second": 0.604,
312
+ "eval_wer": 0.4231786231547057,
313
+ "step": 5200
314
+ },
315
+ {
316
+ "epoch": 0.7690116775847337,
317
+ "eval_loss": 0.5545418858528137,
318
+ "eval_runtime": 182.2691,
319
+ "eval_samples_per_second": 38.525,
320
+ "eval_steps_per_second": 0.604,
321
+ "eval_wer": 0.4244276432054638,
322
+ "step": 5400
323
+ },
324
+ {
325
+ "epoch": 0.7832526345770435,
326
+ "grad_norm": 9.471431732177734,
327
+ "learning_rate": 0.00015026666666666667,
328
+ "loss": 0.7591,
329
+ "step": 5500
330
+ },
331
+ {
332
+ "epoch": 0.7974935915693535,
333
+ "eval_loss": 0.5442594885826111,
334
+ "eval_runtime": 182.3947,
335
+ "eval_samples_per_second": 38.499,
336
+ "eval_steps_per_second": 0.603,
337
+ "eval_wer": 0.4076455972043211,
338
+ "step": 5600
339
+ },
340
+ {
341
+ "epoch": 0.8259755055539733,
342
+ "eval_loss": 0.5341240763664246,
343
+ "eval_runtime": 182.0603,
344
+ "eval_samples_per_second": 38.57,
345
+ "eval_steps_per_second": 0.604,
346
+ "eval_wer": 0.41462150706227824,
347
+ "step": 5800
348
+ },
349
+ {
350
+ "epoch": 0.854457419538593,
351
+ "grad_norm": 4.406210422515869,
352
+ "learning_rate": 0.00013363333333333332,
353
+ "loss": 0.6621,
354
+ "step": 6000
355
+ },
356
+ {
357
+ "epoch": 0.854457419538593,
358
+ "eval_loss": 0.5104002952575684,
359
+ "eval_runtime": 181.8706,
360
+ "eval_samples_per_second": 38.61,
361
+ "eval_steps_per_second": 0.605,
362
+ "eval_wer": 0.3955141577751498,
363
+ "step": 6000
364
+ },
365
+ {
366
+ "epoch": 0.8829393335232127,
367
+ "eval_loss": 0.5139421820640564,
368
+ "eval_runtime": 181.902,
369
+ "eval_samples_per_second": 38.603,
370
+ "eval_steps_per_second": 0.605,
371
+ "eval_wer": 0.40112146055621256,
372
+ "step": 6200
373
+ },
374
+ {
375
+ "epoch": 0.9114212475078325,
376
+ "eval_loss": 0.5044221878051758,
377
+ "eval_runtime": 181.9538,
378
+ "eval_samples_per_second": 38.592,
379
+ "eval_steps_per_second": 0.605,
380
+ "eval_wer": 0.38039304269256835,
381
+ "step": 6400
382
+ },
383
+ {
384
+ "epoch": 0.9256622045001424,
385
+ "grad_norm": 8.09687328338623,
386
+ "learning_rate": 0.000117,
387
+ "loss": 0.6705,
388
+ "step": 6500
389
+ },
390
+ {
391
+ "epoch": 0.9399031614924523,
392
+ "eval_loss": 0.49985769391059875,
393
+ "eval_runtime": 182.1414,
394
+ "eval_samples_per_second": 38.552,
395
+ "eval_steps_per_second": 0.604,
396
+ "eval_wer": 0.3896012437050718,
397
+ "step": 6600
398
+ },
399
+ {
400
+ "epoch": 0.9683850754770721,
401
+ "eval_loss": 0.5097447037696838,
402
+ "eval_runtime": 181.5418,
403
+ "eval_samples_per_second": 38.68,
404
+ "eval_steps_per_second": 0.606,
405
+ "eval_wer": 0.4052804315762899,
406
+ "step": 6800
407
+ },
408
+ {
409
+ "epoch": 0.9968669894616918,
410
+ "grad_norm": 4.639442443847656,
411
+ "learning_rate": 0.00010033333333333332,
412
+ "loss": 0.6665,
413
+ "step": 7000
414
+ },
415
+ {
416
+ "epoch": 0.9968669894616918,
417
+ "eval_loss": 0.49253013730049133,
418
+ "eval_runtime": 181.6405,
419
+ "eval_samples_per_second": 38.659,
420
+ "eval_steps_per_second": 0.606,
421
+ "eval_wer": 0.3784796502743858,
422
+ "step": 7000
423
+ },
424
+ {
425
+ "epoch": 1.0253489034463117,
426
+ "eval_loss": 0.4896470010280609,
427
+ "eval_runtime": 181.3934,
428
+ "eval_samples_per_second": 38.711,
429
+ "eval_steps_per_second": 0.606,
430
+ "eval_wer": 0.3688728258414276,
431
+ "step": 7200
432
+ },
433
+ {
434
+ "epoch": 1.0538308174309314,
435
+ "eval_loss": 0.47494611144065857,
436
+ "eval_runtime": 181.7386,
437
+ "eval_samples_per_second": 38.638,
438
+ "eval_steps_per_second": 0.605,
439
+ "eval_wer": 0.3687399513679427,
440
+ "step": 7400
441
+ },
442
+ {
443
+ "epoch": 1.0680717744232413,
444
+ "grad_norm": 0.6623511910438538,
445
+ "learning_rate": 8.366666666666666e-05,
446
+ "loss": 0.5826,
447
+ "step": 7500
448
+ },
449
+ {
450
+ "epoch": 1.0823127314155512,
451
+ "eval_loss": 0.4684299826622009,
452
+ "eval_runtime": 182.4026,
453
+ "eval_samples_per_second": 38.497,
454
+ "eval_steps_per_second": 0.603,
455
+ "eval_wer": 0.3628004624031677,
456
+ "step": 7600
457
+ },
458
+ {
459
+ "epoch": 1.110794645400171,
460
+ "eval_loss": 0.47290024161338806,
461
+ "eval_runtime": 182.1043,
462
+ "eval_samples_per_second": 38.56,
463
+ "eval_steps_per_second": 0.604,
464
+ "eval_wer": 0.358495329462257,
465
+ "step": 7800
466
+ },
467
+ {
468
+ "epoch": 1.1392765593847907,
469
+ "grad_norm": 2.393817186355591,
470
+ "learning_rate": 6.699999999999999e-05,
471
+ "loss": 0.5836,
472
+ "step": 8000
473
+ },
474
+ {
475
+ "epoch": 1.1392765593847907,
476
+ "eval_loss": 0.46409761905670166,
477
+ "eval_runtime": 181.7327,
478
+ "eval_samples_per_second": 38.639,
479
+ "eval_steps_per_second": 0.605,
480
+ "eval_wer": 0.3553196295459679,
481
+ "step": 8000
482
+ },
483
+ {
484
+ "epoch": 1.1677584733694104,
485
+ "eval_loss": 0.45749881863594055,
486
+ "eval_runtime": 181.5866,
487
+ "eval_samples_per_second": 38.67,
488
+ "eval_steps_per_second": 0.606,
489
+ "eval_wer": 0.3529810388126337,
490
+ "step": 8200
491
+ },
492
+ {
493
+ "epoch": 1.1962403873540302,
494
+ "eval_loss": 0.45851147174835205,
495
+ "eval_runtime": 181.5801,
496
+ "eval_samples_per_second": 38.672,
497
+ "eval_steps_per_second": 0.606,
498
+ "eval_wer": 0.3485563188455866,
499
+ "step": 8400
500
+ },
501
+ {
502
+ "epoch": 1.21048134434634,
503
+ "grad_norm": 1.9676859378814697,
504
+ "learning_rate": 5.033333333333333e-05,
505
+ "loss": 0.5199,
506
+ "step": 8500
507
+ },
508
+ {
509
+ "epoch": 1.22472230133865,
510
+ "eval_loss": 0.4548875391483307,
511
+ "eval_runtime": 182.6274,
512
+ "eval_samples_per_second": 38.45,
513
+ "eval_steps_per_second": 0.602,
514
+ "eval_wer": 0.3450750076402822,
515
+ "step": 8600
516
+ },
517
+ {
518
+ "epoch": 1.2532042153232696,
519
+ "eval_loss": 0.4520675539970398,
520
+ "eval_runtime": 182.8881,
521
+ "eval_samples_per_second": 38.395,
522
+ "eval_steps_per_second": 0.601,
523
+ "eval_wer": 0.34082302448876545,
524
+ "step": 8800
525
+ },
526
+ {
527
+ "epoch": 1.2816861293078894,
528
+ "grad_norm": 1.1400251388549805,
529
+ "learning_rate": 3.373333333333333e-05,
530
+ "loss": 0.5268,
531
+ "step": 9000
532
+ },
533
+ {
534
+ "epoch": 1.2816861293078894,
535
+ "eval_loss": 0.44252264499664307,
536
+ "eval_runtime": 182.3349,
537
+ "eval_samples_per_second": 38.512,
538
+ "eval_steps_per_second": 0.603,
539
+ "eval_wer": 0.33950756720126496,
540
+ "step": 9000
541
+ },
542
+ {
543
+ "epoch": 1.3101680432925091,
544
+ "eval_loss": 0.44072064757347107,
545
+ "eval_runtime": 184.1579,
546
+ "eval_samples_per_second": 38.13,
547
+ "eval_steps_per_second": 0.597,
548
+ "eval_wer": 0.3361857053641425,
549
+ "step": 9200
550
+ },
551
+ {
552
+ "epoch": 1.338649957277129,
553
+ "eval_loss": 0.4383063018321991,
554
+ "eval_runtime": 181.6966,
555
+ "eval_samples_per_second": 38.647,
556
+ "eval_steps_per_second": 0.605,
557
+ "eval_wer": 0.33397998910429316,
558
+ "step": 9400
559
+ },
560
+ {
561
+ "epoch": 1.352890914269439,
562
+ "grad_norm": 1.0755033493041992,
563
+ "learning_rate": 1.71e-05,
564
+ "loss": 0.5013,
565
+ "step": 9500
566
+ },
567
+ {
568
+ "epoch": 1.3671318712617488,
569
+ "eval_loss": 0.4356846809387207,
570
+ "eval_runtime": 183.1225,
571
+ "eval_samples_per_second": 38.346,
572
+ "eval_steps_per_second": 0.601,
573
+ "eval_wer": 0.33253165734330775,
574
+ "step": 9600
575
+ },
576
+ {
577
+ "epoch": 1.3956137852463686,
578
+ "eval_loss": 0.43495818972587585,
579
+ "eval_runtime": 182.2639,
580
+ "eval_samples_per_second": 38.527,
581
+ "eval_steps_per_second": 0.604,
582
+ "eval_wer": 0.3316812607130044,
583
+ "step": 9800
584
+ },
585
+ {
586
+ "epoch": 1.4240956992309883,
587
+ "grad_norm": 1.6312005519866943,
588
+ "learning_rate": 4.666666666666666e-07,
589
+ "loss": 0.5095,
590
+ "step": 10000
591
+ },
592
+ {
593
+ "epoch": 1.4240956992309883,
594
+ "eval_loss": 0.43451622128486633,
595
+ "eval_runtime": 182.2078,
596
+ "eval_samples_per_second": 38.538,
597
+ "eval_steps_per_second": 0.604,
598
+ "eval_wer": 0.3308175766353526,
599
+ "step": 10000
600
+ },
601
+ {
602
+ "epoch": 1.4240956992309883,
603
+ "step": 10000,
604
+ "total_flos": 4.5974516642218747e+18,
605
+ "train_loss": 0.7817989181518554,
606
+ "train_runtime": 11412.7197,
607
+ "train_samples_per_second": 3.505,
608
+ "train_steps_per_second": 0.876
609
  }
610
  ],
611
  "logging_steps": 500,
612
+ "max_steps": 10000,
613
  "num_input_tokens_seen": 0,
614
  "num_train_epochs": 2,
615
  "save_steps": 500,
 
625
  "attributes": {}
626
  }
627
  },
628
+ "total_flos": 4.5974516642218747e+18,
629
+ "train_batch_size": 4,
630
  "trial_name": null,
631
  "trial_params": null
632
  }
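`trainer_state.json` keeps the full `log_history` shown above, mixing training-loss entries with evaluation entries. A small sketch for pulling the evaluation curve (step vs. `eval_wer`) out of a local copy of the file:

```python
import json

# Read the Trainer state written alongside the checkpoint.
with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries carry "eval_wer"; pure training-loss entries do not.
eval_curve = [(e["step"], e["eval_wer"]) for e in state["log_history"] if "eval_wer" in e]

for step, wer in eval_curve:
    print(f"step {step:>5}: WER {wer:.4f}")
```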
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e78efd8ce7c5e3eff78ed4536df73f36e19bf0e82b2b6461e91b2b5ff66f7412
3
  size 5240
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:acb452a91c703d1a26455b8858033cfe497708ca59e32a5e19ba4ea803c125e5
3
  size 5240