End of training

Files changed:
- README.md +25 -2
- adapter.eng.safetensors +1 -1
- all_results.json +9 -9
- eval_results.json +5 -5
- train_results.json +4 -4
- trainer_state.json +9 -9
README.md CHANGED
@@ -1,13 +1,33 @@
 ---
+language:
+- hi
 license: apache-2.0
 base_model: facebook/wav2vec2-large-xlsr-53
 tags:
+- automatic-speech-recognition
+- mozilla-foundation/common_voice_15_0
+- mms
 - generated_from_trainer
 datasets:
 - common_voice_15_0
+metrics:
+- wer
 model-index:
 - name: Output
-  results: []
+  results:
+  - task:
+      name: Automatic Speech Recognition
+      type: automatic-speech-recognition
+    dataset:
+      name: MOZILLA-FOUNDATION/COMMON_VOICE_15_0 - HI
+      type: common_voice_15_0
+      config: hi
+      split: validation
+      args: 'Config: hi, Training split: train, Eval split: validation'
+    metrics:
+    - name: Wer
+      type: wer
+      value: 1.0016248153618907
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -15,7 +35,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # Output
 
-This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the
+This model is a fine-tuned version of [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on the MOZILLA-FOUNDATION/COMMON_VOICE_15_0 - HI dataset.
+It achieves the following results on the evaluation set:
+- Loss: 20.2289
+- Wer: 1.0016
 
 ## Model description
 
adapter.eng.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:9ebd4b5a652b53ee5ee6bc8c051c5e01ddf466dc4cc7dc21182cfaaf911023d5
 size 3918936
all_results.json CHANGED
@@ -1,15 +1,15 @@
 {
     "epoch": 1.0,
-    "eval_loss": 20.
-    "eval_runtime":
+    "eval_loss": 20.228919982910156,
+    "eval_runtime": 119.2704,
     "eval_samples": 2416,
-    "eval_samples_per_second":
-    "eval_steps_per_second":
-    "eval_wer": 1.
+    "eval_samples_per_second": 20.256,
+    "eval_steps_per_second": 2.532,
+    "eval_wer": 1.0016248153618907,
     "total_flos": 6.105598680744346e+17,
-    "train_loss": 18.
-    "train_runtime":
+    "train_loss": 18.507504377693966,
+    "train_runtime": 329.6848,
     "train_samples": 4630,
-    "train_samples_per_second":
-    "train_steps_per_second": 0.
+    "train_samples_per_second": 14.044,
+    "train_steps_per_second": 0.44
 }
eval_results.json CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 1.0,
-    "eval_loss": 20.
-    "eval_runtime":
+    "eval_loss": 20.228919982910156,
+    "eval_runtime": 119.2704,
     "eval_samples": 2416,
-    "eval_samples_per_second":
-    "eval_steps_per_second":
-    "eval_wer": 1.
+    "eval_samples_per_second": 20.256,
+    "eval_steps_per_second": 2.532,
+    "eval_wer": 1.0016248153618907
 }
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 1.0,
     "total_flos": 6.105598680744346e+17,
-    "train_loss": 18.
-    "train_runtime":
+    "train_loss": 18.507504377693966,
+    "train_runtime": 329.6848,
     "train_samples": 4630,
-    "train_samples_per_second":
-    "train_steps_per_second": 0.
+    "train_samples_per_second": 14.044,
+    "train_steps_per_second": 0.44
 }
trainer_state.json CHANGED
@@ -10,21 +10,21 @@
   "log_history": [
     {
       "epoch": 0.6896551724137931,
-      "eval_loss": 21.
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second":
-      "eval_wer": 1.
+      "eval_loss": 21.916179656982422,
+      "eval_runtime": 84.3206,
+      "eval_samples_per_second": 28.653,
+      "eval_steps_per_second": 3.582,
+      "eval_wer": 1.0002954209748893,
       "step": 100
     },
    {
       "epoch": 1.0,
       "step": 145,
       "total_flos": 6.105598680744346e+17,
-      "train_loss": 18.
-      "train_runtime":
-      "train_samples_per_second":
-      "train_steps_per_second": 0.
+      "train_loss": 18.507504377693966,
+      "train_runtime": 329.6848,
+      "train_samples_per_second": 14.044,
+      "train_steps_per_second": 0.44
     }
   ],
   "logging_steps": 500,
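
For reference, a minimal sketch of transcribing audio with the checkpoint this commit finalizes, assuming the standard 🤗 Transformers CTC inference path for the wav2vec2 base model. The repo id, the `AutoProcessor`/`Wav2Vec2ForCTC` class choice, and the stand-in audio are illustrative assumptions, not part of the commit:

```python
# Minimal sketch, not part of this commit: transcribe 16 kHz audio with the
# fine-tuned CTC checkpoint. "username/Output" is a placeholder repo id.
import numpy as np
import torch
from transformers import AutoProcessor, Wav2Vec2ForCTC

repo_id = "username/Output"  # hypothetical; substitute the actual repo
processor = AutoProcessor.from_pretrained(repo_id)
model = Wav2Vec2ForCTC.from_pretrained(repo_id)

speech = np.zeros(16_000, dtype=np.float32)  # 1 s of silence as stand-in audio
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits      # shape: (batch, time, vocab)
pred_ids = torch.argmax(logits, dim=-1)  # greedy CTC decoding
print(processor.batch_decode(pred_ids))
```

Note that the card above reports a WER of roughly 1.0 after this single epoch, so transcriptions from this checkpoint are not yet expected to be accurate.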