marinone94 committed on
Commit • 990bfc1
1 Parent(s): d84f4ed
Training in progress, step 500
Browse files
- .ipynb_checkpoints/run-checkpoint.sh +1 -1
- .ipynb_checkpoints/run-dummy-sv-gpu-checkpoint.sh +0 -34
- added_tokens.json +1 -1
- all_results.json +0 -14
- checkpoint-10/trainer_state.json +0 -15
- {checkpoint-10 → checkpoint-500}/config.json +40 -29
- {checkpoint-10 → checkpoint-500}/optimizer.pt +2 -2
- {checkpoint-10 → checkpoint-500}/preprocessor_config.json +2 -2
- {checkpoint-10 → checkpoint-500}/pytorch_model.bin +2 -2
- {checkpoint-10 → checkpoint-500}/rng_state.pth +2 -2
- {checkpoint-10 → checkpoint-500}/scaler.pt +1 -1
- {checkpoint-10 → checkpoint-500}/scheduler.pt +1 -1
- checkpoint-500/trainer_state.json +55 -0
- {checkpoint-10 → checkpoint-500}/training_args.bin +1 -1
- config.json +40 -29
- eval_results.json +0 -9
- preprocessor_config.json +2 -2
- pytorch_model.bin +2 -2
- run-dummy-sv-gpu.sh +0 -34
- run.sh +1 -1
- train_results.json +0 -8
- trainer_state.json +0 -25
- training_args.bin +1 -1
- vocab.json +1 -1
.ipynb_checkpoints/run-checkpoint.sh
CHANGED
@@ -4,7 +4,7 @@ python run_speech_recognition_ctc.py \
     --dataset_config_name="sv-SE" \
     --output_dir="./" \
     --overwrite_output_dir \
-    --num_train_epochs="
+    --num_train_epochs="5" \
     --per_device_train_batch_size="8" \
     --per_device_eval_batch_size="8" \
     --gradient_accumulation_steps="4" \

.ipynb_checkpoints/run-dummy-sv-gpu-checkpoint.sh
DELETED
@@ -1,34 +0,0 @@
-python run_speech_recognition_ctc.py \
-    --dataset_name="mozilla-foundation/common_voice_7_0" \
-    --model_name_or_path="hf-test/xls-r-dummy" \
-    --dataset_config_name="ab" \
-    --output_dir="./" \
-    --overwrite_output_dir \
-    --max_steps="10" \
-    --per_device_train_batch_size="8" \
-    --per_device_eval_batch_size="8" \
-    --gradient_accumulation_steps="4" \
-    --learning_rate="7.5e-3" \
-    --warmup_steps="2000" \
-    --length_column_name="input_length" \
-    --evaluation_strategy="steps" \
-    --text_column_name="sentence" \
-    --chars_to_ignore , ? . ! \- \; \: \" “ % ‘ ” � — ’ … – \
-    --save_steps="5" \
-    --eval_steps="5" \
-    --logging_steps="1" \
-    --layerdrop="0.0" \
-    --activation_dropout="0.1" \
-    --save_total_limit="3" \
-    --freeze_feature_encoder \
-    --feat_proj_dropout="0.0" \
-    --mask_time_prob="0.75" \
-    --mask_time_length="10" \
-    --mask_feature_prob="0.25" \
-    --mask_feature_length="64" \
-    --gradient_checkpointing \
-    --use_auth_token \
-    --fp16 \
-    --group_by_length \
-    --do_train --do_eval \
-    --push_to_hub

added_tokens.json
CHANGED
@@ -1 +1 @@
-{"<s>":
+{"<s>": 35, "</s>": 36}

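
For context, a minimal sketch (an assumption about the usual transformers mechanism, not this repository's exact script) of why <s> and </s> end up in added_tokens.json with ids 35 and 36: the character vocabulary in vocab.json occupies ids 0-34, and the BOS/EOS special tokens are appended on top of it.

# Hedged sketch: the 35-entry character vocab (ids 0-34) is loaded first, and the
# BOS/EOS tokens, absent from vocab.json, are registered as added tokens.
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer(
    "vocab.json",                  # the character vocabulary shown at the end of this commit
    unk_token="[UNK]",
    pad_token="[PAD]",
    word_delimiter_token="|",
)
# <s> and </s> get the next free ids, matching the {"<s>": 35, "</s>": 36} above.
tokenizer.add_tokens(["<s>", "</s>"], special_tokens=True)
print(tokenizer.convert_tokens_to_ids(["<s>", "</s>"]))  # expected: [35, 36]
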
all_results.json
DELETED
@@ -1,14 +0,0 @@
-{
-  "epoch": 0.03,
-  "eval_loss": 156.8788604736328,
-  "eval_runtime": 10.8705,
-  "eval_samples": 301,
-  "eval_samples_per_second": 27.69,
-  "eval_steps_per_second": 3.496,
-  "eval_wer": 1.3456221198156681,
-  "train_loss": 94.46571044921875,
-  "train_runtime": 13.57,
-  "train_samples": 704,
-  "train_samples_per_second": 1.474,
-  "train_steps_per_second": 0.737
-}

checkpoint-10/trainer_state.json
DELETED
@@ -1,15 +0,0 @@
-{
-  "best_metric": null,
-  "best_model_checkpoint": null,
-  "epoch": 0.028409090909090908,
-  "global_step": 10,
-  "is_hyper_param_search": false,
-  "is_local_process_zero": true,
-  "is_world_process_zero": true,
-  "log_history": [],
-  "max_steps": 10,
-  "num_train_epochs": 1,
-  "total_flos": 508182128640.0,
-  "trial_name": null,
-  "trial_params": null
-}

{checkpoint-10 → checkpoint-500}/config.json
RENAMED
@@ -1,6 +1,6 @@
 {
-  "_name_or_path": "
-  "activation_dropout": 0.
+  "_name_or_path": "facebook/wav2vec2-xls-r-300m",
+  "activation_dropout": 0.1,
   "adapter_kernel_size": 3,
   "adapter_stride": 2,
   "add_adapter": false,
@@ -11,23 +11,35 @@
   "attention_dropout": 0.0,
   "bos_token_id": 1,
   "classifier_proj_size": 256,
-  "codevector_dim":
+  "codevector_dim": 768,
   "contrastive_logits_temperature": 0.1,
-  "conv_bias":
+  "conv_bias": true,
   "conv_dim": [
-
-
-
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512
   ],
   "conv_kernel": [
-
-
-
+    10,
+    3,
+    3,
+    3,
+    3,
+    2,
+    2
   ],
   "conv_stride": [
-
-
-
+    5,
+    2,
+    2,
+    2,
+    2,
+    2,
+    2
   ],
   "ctc_loss_reduction": "mean",
   "ctc_zero_infinity": false,
@@ -42,31 +54,30 @@
   "final_dropout": 0.0,
   "hidden_act": "gelu",
   "hidden_dropout": 0.0,
-  "
-  "hidden_size": 16,
+  "hidden_size": 1024,
   "initializer_range": 0.02,
-  "intermediate_size":
+  "intermediate_size": 4096,
   "layer_norm_eps": 1e-05,
   "layerdrop": 0.0,
-  "mask_feature_length":
+  "mask_feature_length": 64,
   "mask_feature_min_masks": 0,
-  "mask_feature_prob": 0.
+  "mask_feature_prob": 0.25,
   "mask_time_length": 10,
   "mask_time_min_masks": 2,
-  "mask_time_prob": 0.
+  "mask_time_prob": 0.75,
   "model_type": "wav2vec2",
   "num_adapter_layers": 3,
-  "num_attention_heads":
+  "num_attention_heads": 16,
   "num_codevector_groups": 2,
   "num_codevectors_per_group": 320,
-  "num_conv_pos_embedding_groups":
-  "num_conv_pos_embeddings":
-  "num_feat_extract_layers":
-  "num_hidden_layers":
-  "num_negatives":
-  "output_hidden_size":
-  "pad_token_id":
-  "proj_codevector_dim":
+  "num_conv_pos_embedding_groups": 16,
+  "num_conv_pos_embeddings": 128,
+  "num_feat_extract_layers": 7,
+  "num_hidden_layers": 24,
+  "num_negatives": 100,
+  "output_hidden_size": 1024,
+  "pad_token_id": 34,
+  "proj_codevector_dim": 768,
   "tdnn_dilation": [
     1,
     2,
@@ -91,6 +102,6 @@
   "torch_dtype": "float32",
   "transformers_version": "4.16.0.dev0",
   "use_weighted_layer_sum": false,
-  "vocab_size":
+  "vocab_size": 37,
   "xvector_output_dim": 512
 }

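
With config.json, pytorch_model.bin and the optimizer, scheduler, RNG and scaler state now saved under checkpoint-500, the intermediate checkpoint can be loaded on its own. A minimal sketch, assuming a local clone of this repository with the repo root as the working directory:

# Minimal sketch: load the intermediate checkpoint saved at step 500 for
# inspection or inference (paths assume the repo root is the working directory).
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model = Wav2Vec2ForCTC.from_pretrained("./checkpoint-500")
processor = Wav2Vec2Processor.from_pretrained("./")  # tokenizer + feature extractor live at the repo root
print(model.config.vocab_size, model.config.hidden_size)  # 37, 1024 per the config diff above
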
{checkpoint-10 → checkpoint-500}/optimizer.pt
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:dab21b247ec51e4dee7afa84fedd77b13635e30b256cb9caa89e1dfc094123c9
+size 2490362385

{checkpoint-10 → checkpoint-500}/preprocessor_config.json
RENAMED
@@ -3,7 +3,7 @@
   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
   "feature_size": 1,
   "padding_side": "right",
-  "padding_value": 0
-  "return_attention_mask":
+  "padding_value": 0,
+  "return_attention_mask": true,
   "sampling_rate": 16000
 }

{checkpoint-10 → checkpoint-500}/pytorch_model.bin
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:401051ca4bb075cafe2856a328c8f6a5e3e87fa0c13df1de35ee6e7873004e73
+size 1262075377

{checkpoint-10 → checkpoint-500}/rng_state.pth
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:32b82ac75b07e68dc3bc90e76f55a339f64dce724d87a9ae3c69ee46df441867
+size 14503

{checkpoint-10 → checkpoint-500}/scaler.pt
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:0c8bceadebe118ff459b01a775a8e9b38a6b8302c162d022f78d3646163e6486
 size 559

{checkpoint-10 → checkpoint-500}/scheduler.pt
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:3757aee7e707dcd124ce47058f7058fcb392f4c693c82774d9b2b5dcccf35b49
 size 623

checkpoint-500/trainer_state.json
ADDED
@@ -0,0 +1,55 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 1.4525018129079044,
+  "global_step": 500,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.29,
+      "learning_rate": 7.151470588235293e-05,
+      "loss": 5.1135,
+      "step": 100
+    },
+    {
+      "epoch": 0.58,
+      "learning_rate": 6.710294117647058e-05,
+      "loss": 3.0957,
+      "step": 200
+    },
+    {
+      "epoch": 0.87,
+      "learning_rate": 6.269117647058824e-05,
+      "loss": 3.0078,
+      "step": 300
+    },
+    {
+      "epoch": 1.16,
+      "learning_rate": 5.827941176470588e-05,
+      "loss": 2.9785,
+      "step": 400
+    },
+    {
+      "epoch": 1.45,
+      "learning_rate": 5.3867647058823525e-05,
+      "loss": 2.9069,
+      "step": 500
+    },
+    {
+      "epoch": 1.45,
+      "eval_loss": 2.9046826362609863,
+      "eval_runtime": 128.7688,
+      "eval_samples_per_second": 35.878,
+      "eval_steps_per_second": 4.489,
+      "eval_wer": 1.0,
+      "step": 500
+    }
+  ],
+  "max_steps": 1720,
+  "num_train_epochs": 5,
+  "total_flos": 1.4827194756605722e+18,
+  "trial_name": null,
+  "trial_params": null
+}

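
The log_history above already records the loss curve and the first WER evaluation. A small sketch for reading those numbers back out of the saved state:

# Read the trainer state saved with checkpoint-500 and print the logged metrics.
import json

with open("checkpoint-500/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>4}: train loss {entry['loss']:.4f}")
    if "eval_wer" in entry:
        print(f"step {entry['step']:>4}: eval loss {entry['eval_loss']:.4f}, WER {entry['eval_wer']:.2f}")
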
{checkpoint-10 → checkpoint-500}/training_args.bin
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:1208f22bb7e06e1e9a51692db0520fbddfc3640941d51dfe45ba3188ada2ecbf
 size 2991

config.json
CHANGED
@@ -1,6 +1,6 @@
 {
-  "_name_or_path": "
-  "activation_dropout": 0.
+  "_name_or_path": "facebook/wav2vec2-xls-r-300m",
+  "activation_dropout": 0.1,
   "adapter_kernel_size": 3,
   "adapter_stride": 2,
   "add_adapter": false,
@@ -11,23 +11,35 @@
   "attention_dropout": 0.0,
   "bos_token_id": 1,
   "classifier_proj_size": 256,
-  "codevector_dim":
+  "codevector_dim": 768,
   "contrastive_logits_temperature": 0.1,
-  "conv_bias":
+  "conv_bias": true,
   "conv_dim": [
-
-
-
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512
   ],
   "conv_kernel": [
-
-
-
+    10,
+    3,
+    3,
+    3,
+    3,
+    2,
+    2
   ],
   "conv_stride": [
-
-
-
+    5,
+    2,
+    2,
+    2,
+    2,
+    2,
+    2
   ],
   "ctc_loss_reduction": "mean",
   "ctc_zero_infinity": false,
@@ -42,31 +54,30 @@
   "final_dropout": 0.0,
   "hidden_act": "gelu",
   "hidden_dropout": 0.0,
-  "
-  "hidden_size": 16,
+  "hidden_size": 1024,
   "initializer_range": 0.02,
-  "intermediate_size":
+  "intermediate_size": 4096,
   "layer_norm_eps": 1e-05,
   "layerdrop": 0.0,
-  "mask_feature_length":
+  "mask_feature_length": 64,
   "mask_feature_min_masks": 0,
-  "mask_feature_prob": 0.
+  "mask_feature_prob": 0.25,
   "mask_time_length": 10,
   "mask_time_min_masks": 2,
-  "mask_time_prob": 0.
+  "mask_time_prob": 0.75,
   "model_type": "wav2vec2",
   "num_adapter_layers": 3,
-  "num_attention_heads":
+  "num_attention_heads": 16,
   "num_codevector_groups": 2,
   "num_codevectors_per_group": 320,
-  "num_conv_pos_embedding_groups":
-  "num_conv_pos_embeddings":
-  "num_feat_extract_layers":
-  "num_hidden_layers":
-  "num_negatives":
-  "output_hidden_size":
-  "pad_token_id":
-  "proj_codevector_dim":
+  "num_conv_pos_embedding_groups": 16,
+  "num_conv_pos_embeddings": 128,
+  "num_feat_extract_layers": 7,
+  "num_hidden_layers": 24,
+  "num_negatives": 100,
+  "output_hidden_size": 1024,
+  "pad_token_id": 34,
+  "proj_codevector_dim": 768,
   "tdnn_dilation": [
     1,
     2,
@@ -91,6 +102,6 @@
   "torch_dtype": "float32",
   "transformers_version": "4.16.0.dev0",
   "use_weighted_layer_sum": false,
-  "vocab_size":
+  "vocab_size": 37,
   "xvector_output_dim": 512
 }

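
The updated config.json is the facebook/wav2vec2-xls-r-300m configuration with the CTC fine-tuning overrides visible in this diff (dropout, SpecAugment masking, pad token and vocabulary size). A hedged sketch of reconstructing an equivalent config from the base checkpoint:

# Hedged sketch: rebuild a config equivalent to the one above from the base
# XLS-R 300M model, overriding only the fields changed in this diff.
from transformers import Wav2Vec2Config

config = Wav2Vec2Config.from_pretrained(
    "facebook/wav2vec2-xls-r-300m",
    activation_dropout=0.1,
    mask_time_prob=0.75,
    mask_feature_prob=0.25,
    mask_feature_length=64,
    pad_token_id=34,
    vocab_size=37,
)
print(config.hidden_size, config.num_hidden_layers)  # 1024, 24 -- inherited from the base model
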
eval_results.json
DELETED
@@ -1,9 +0,0 @@
-{
-  "epoch": 0.03,
-  "eval_loss": 156.8788604736328,
-  "eval_runtime": 10.8705,
-  "eval_samples": 301,
-  "eval_samples_per_second": 27.69,
-  "eval_steps_per_second": 3.496,
-  "eval_wer": 1.3456221198156681
-}

preprocessor_config.json
CHANGED
@@ -3,7 +3,7 @@
   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
   "feature_size": 1,
   "padding_side": "right",
-  "padding_value": 0
-  "return_attention_mask":
+  "padding_value": 0,
+  "return_attention_mask": true,
   "sampling_rate": 16000
 }

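
A quick sketch for verifying the updated flags after this change; the path assumes a local clone of the repository:

# Sketch: the updated preprocessor_config.json now requests attention masks,
# which XLS-R-style feature extractors rely on for correct batched inference.
from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./")  # repo root
print(feature_extractor.return_attention_mask, feature_extractor.sampling_rate)  # True 16000
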
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:401051ca4bb075cafe2856a328c8f6a5e3e87fa0c13df1de35ee6e7873004e73
+size 1262075377

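
pytorch_model.bin, like the other .bin/.pt entries in this commit, is stored as a Git LFS pointer (oid and size) rather than the weights themselves. A hedged sketch of fetching the actual ~1.26 GB file with huggingface_hub; the repo id is a placeholder, since the repository name is not shown on this page:

# Hedged sketch: resolve the LFS pointer above to the real weight file.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="marinone94/<model-repo>",   # placeholder, substitute the actual repository
    filename="pytorch_model.bin",
)
print(path)
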
run-dummy-sv-gpu.sh
DELETED
@@ -1,34 +0,0 @@
-python run_speech_recognition_ctc.py \
-    --dataset_name="mozilla-foundation/common_voice_7_0" \
-    --model_name_or_path="hf-test/xls-r-dummy" \
-    --dataset_config_name="ab" \
-    --output_dir="./" \
-    --overwrite_output_dir \
-    --max_steps="10" \
-    --per_device_train_batch_size="8" \
-    --per_device_eval_batch_size="8" \
-    --gradient_accumulation_steps="4" \
-    --learning_rate="7.5e-3" \
-    --warmup_steps="2000" \
-    --length_column_name="input_length" \
-    --evaluation_strategy="steps" \
-    --text_column_name="sentence" \
-    --chars_to_ignore , ? . ! \- \; \: \" “ % ‘ ” � — ’ … – \
-    --save_steps="5" \
-    --eval_steps="5" \
-    --logging_steps="1" \
-    --layerdrop="0.0" \
-    --activation_dropout="0.1" \
-    --save_total_limit="3" \
-    --freeze_feature_encoder \
-    --feat_proj_dropout="0.0" \
-    --mask_time_prob="0.75" \
-    --mask_time_length="10" \
-    --mask_feature_prob="0.25" \
-    --mask_feature_length="64" \
-    --gradient_checkpointing \
-    --use_auth_token \
-    --fp16 \
-    --group_by_length \
-    --do_train --do_eval \
-    --push_to_hub

run.sh
CHANGED
@@ -4,7 +4,7 @@ python run_speech_recognition_ctc.py \
     --dataset_config_name="sv-SE" \
     --output_dir="./" \
     --overwrite_output_dir \
-    --num_train_epochs="
+    --num_train_epochs="5" \
     --per_device_train_batch_size="8" \
     --per_device_eval_batch_size="8" \
     --gradient_accumulation_steps="4" \

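
The bump to --num_train_epochs="5" lines up with the new trainer state: max_steps of 1720 over 5 epochs means about 344 optimizer steps per epoch (per-device batch 8 with gradient accumulation 4), so global step 500 should sit near epoch 1.45, which is what checkpoint-500/trainer_state.json records. A quick cross-check:

# Cross-check between run.sh and checkpoint-500/trainer_state.json, using only
# numbers that appear in this commit.
max_steps = 1720
num_train_epochs = 5            # the new value set in run.sh
global_step = 500

steps_per_epoch = max_steps / num_train_epochs   # 344.0
print(global_step / steps_per_epoch)             # ~1.45, vs. 1.4525 logged at step 500
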
train_results.json
DELETED
@@ -1,8 +0,0 @@
-{
-  "epoch": 0.03,
-  "train_loss": 94.46571044921875,
-  "train_runtime": 13.57,
-  "train_samples": 704,
-  "train_samples_per_second": 1.474,
-  "train_steps_per_second": 0.737
-}

trainer_state.json
DELETED
@@ -1,25 +0,0 @@
-{
-  "best_metric": null,
-  "best_model_checkpoint": null,
-  "epoch": 0.028409090909090908,
-  "global_step": 10,
-  "is_hyper_param_search": false,
-  "is_local_process_zero": true,
-  "is_world_process_zero": true,
-  "log_history": [
-    {
-      "epoch": 0.03,
-      "step": 10,
-      "total_flos": 508182128640.0,
-      "train_loss": 94.46571044921875,
-      "train_runtime": 13.57,
-      "train_samples_per_second": 1.474,
-      "train_steps_per_second": 0.737
-    }
-  ],
-  "max_steps": 10,
-  "num_train_epochs": 1,
-  "total_flos": 508182128640.0,
-  "trial_name": null,
-  "trial_params": null
-}

training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:1208f22bb7e06e1e9a51692db0520fbddfc3640941d51dfe45ba3188ada2ecbf
 size 2991

vocab.json
CHANGED
@@ -1 +1 @@
-{"
+{"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6, "g": 7, "h": 8, "i": 9, "j": 10, "k": 11, "l": 12, "m": 13, "n": 14, "o": 15, "p": 16, "q": 17, "r": 18, "s": 19, "t": 20, "u": 21, "v": 22, "w": 23, "x": 24, "y": 25, "z": 26, "ä": 27, "å": 28, "é": 29, "ô": 30, "ö": 31, "ü": 32, "|": 0, "[UNK]": 33, "[PAD]": 34}

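
A hedged sketch (not this repository's exact preprocessing code) of how a character-level CTC vocabulary of this shape is typically built: collect the unique characters of the cleaned transcripts, give id 0 to the word delimiter "|" in place of the space, and append [UNK] and [PAD]:

# Hedged sketch of building a vocab.json like the one above from training text.
import json

def build_vocab(texts):
    chars = sorted(set("".join(texts)))        # the space, if present, sorts first
    vocab = {c: i for i, c in enumerate(chars)}
    if " " in vocab:
        vocab["|"] = vocab.pop(" ")            # "|" replaces the space as word delimiter
    vocab["[UNK]"] = len(vocab)
    vocab["[PAD]"] = len(vocab)
    return vocab

vocab = build_vocab(["en liten katt sover", "hej på dig"])
with open("vocab_demo.json", "w") as f:        # demo filename, not the repo's vocab.json
    json.dump(vocab, f, ensure_ascii=False)
print(vocab)
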