Akjava committed on
Commit 0834983
1 Parent(s): ec6b8da

Upload folder using huggingface_hub

runs/2024-09-18_10-22-35/.hydra/config.yaml ADDED
@@ -0,0 +1,129 @@
+ task_name: train
+ run_name: ja005
+ tags:
+ - ja005
+ train: true
+ test: true
+ ckpt_path: datas/ja005/last.ckpt
+ seed: 1234
+ data:
+   _target_: matcha.data.text_mel_datamodule.TextMelDataModule
+   name: ja005
+   train_filelist_path: datas/ja005/train.cleaned.txt
+   valid_filelist_path: datas/ja005/valid.cleaned.txt
+   batch_size: 80
+   num_workers: 1
+   pin_memory: true
+   cleaners:
+   - basic_cleaners2
+   add_blank: true
+   n_spks: 1
+   n_fft: 1024
+   n_feats: 80
+   sample_rate: 22050
+   hop_length: 256
+   win_length: 1024
+   f_min: 0
+   f_max: 8000
+   data_statistics:
+     mel_mean: -5.925878047943115
+     mel_std: 2.230491876602173
+   seed: 3000
+   load_durations: false
+ model:
+   _target_: matcha.models.matcha_tts.MatchaTTS
+   n_vocab: 178
+   n_spks: ${data.n_spks}
+   spk_emb_dim: 64
+   n_feats: 80
+   data_statistics: ${data.data_statistics}
+   out_size: null
+   prior_loss: true
+   use_precomputed_durations: ${data.load_durations}
+   encoder:
+     encoder_type: RoPE Encoder
+     encoder_params:
+       n_feats: ${model.n_feats}
+       n_channels: 192
+       filter_channels: 768
+       filter_channels_dp: 256
+       n_heads: 2
+       n_layers: 6
+       kernel_size: 3
+       p_dropout: 0.1
+       spk_emb_dim: 64
+       n_spks: 1
+       prenet: true
+     duration_predictor_params:
+       filter_channels_dp: ${model.encoder.encoder_params.filter_channels_dp}
+       kernel_size: 3
+       p_dropout: ${model.encoder.encoder_params.p_dropout}
+   decoder:
+     channels:
+     - 256
+     - 256
+     dropout: 0.05
+     attention_head_dim: 64
+     n_blocks: 1
+     num_mid_blocks: 2
+     num_heads: 2
+     act_fn: snakebeta
+   cfm:
+     name: CFM
+     solver: euler
+     sigma_min: 0.0001
+   optimizer:
+     _target_: torch.optim.Adam
+     _partial_: true
+     lr: 0.0001
+     weight_decay: 0.0
+ callbacks:
+   model_checkpoint:
+     _target_: lightning.pytorch.callbacks.ModelCheckpoint
+     dirpath: ${paths.output_dir}/checkpoints
+     filename: checkpoint_{epoch:04d}
+     monitor: epoch
+     verbose: false
+     save_last: true
+     save_top_k: 25
+     mode: max
+     auto_insert_metric_name: true
+     save_weights_only: false
+     every_n_train_steps: null
+     train_time_interval: null
+     every_n_epochs: 25
+     save_on_train_epoch_end: null
+   model_summary:
+     _target_: lightning.pytorch.callbacks.RichModelSummary
+     max_depth: 3
+   rich_progress_bar:
+     _target_: lightning.pytorch.callbacks.RichProgressBar
+ logger:
+   tensorboard:
+     _target_: lightning.pytorch.loggers.tensorboard.TensorBoardLogger
+     save_dir: ${paths.output_dir}/tensorboard/
+     name: null
+     log_graph: false
+     default_hp_metric: true
+     prefix: ''
+ trainer:
+   _target_: lightning.pytorch.trainer.Trainer
+   default_root_dir: ${paths.output_dir}
+   max_epochs: -1
+   accelerator: gpu
+   devices:
+   - 0
+   precision: 16-mixed
+   check_val_every_n_epoch: 1
+   deterministic: false
+   gradient_clip_val: 5.0
+ paths:
+   root_dir: ${oc.env:PROJECT_ROOT}
+   data_dir: ${paths.root_dir}/data/
+   log_dir: ${paths.root_dir}/logs/
+   output_dir: ${hydra:runtime.output_dir}
+   work_dir: ${hydra:runtime.cwd}
+ extras:
+   ignore_warnings: false
+   enforce_tags: true
+   print_config: true
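
Note (not part of this commit): the composed config above can be rebuilt outside a training run with Hydra's compose API. The sketch below is illustrative only; it assumes the Matcha-TTS-Japanese repository is checked out with its configs/ directory, that the matcha package is importable, and that the experiment=ja005 config group recorded in overrides.yaml exists there.

import os
from hydra import compose, initialize_config_dir
from hydra.utils import instantiate
from omegaconf import OmegaConf

repo = "/notebooks/Matcha-TTS-Japanese"  # path taken from hydra.yaml below; adjust locally
os.environ.setdefault("PROJECT_ROOT", repo)  # paths.root_dir reads ${oc.env:PROJECT_ROOT}

with initialize_config_dir(config_dir=f"{repo}/configs", version_base="1.3"):
    cfg = compose(config_name="train.yaml", overrides=["experiment=ja005"])

# paths.output_dir is ${hydra:runtime.output_dir} and only resolves inside a real
# Hydra run, so avoid resolving it here; model/data do not reference it.
print(OmegaConf.to_yaml(cfg.model, resolve=False))
datamodule = instantiate(cfg.data)   # -> matcha.data.text_mel_datamodule.TextMelDataModule
model = instantiate(cfg.model)       # -> matcha.models.matcha_tts.MatchaTTS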
runs/2024-09-18_10-22-35/.hydra/hydra.yaml ADDED
@@ -0,0 +1,182 @@
+ hydra:
+   run:
+     dir: ${paths.log_dir}/${task_name}/${run_name}/runs/${now:%Y-%m-%d}_${now:%H-%M-%S}
+   sweep:
+     dir: ${paths.log_dir}/${task_name}/${run_name}/multiruns/${now:%Y-%m-%d}_${now:%H-%M-%S}
+     subdir: ${hydra.job.num}
+   launcher:
+     _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+   sweeper:
+     _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+     max_batch_size: null
+     params: null
+   help:
+     app_name: ${hydra.job.name}
+     header: '${hydra.help.app_name} is powered by Hydra.
+
+       '
+     footer: 'Powered by Hydra (https://hydra.cc)
+
+       Use --hydra-help to view Hydra specific help
+
+       '
+     template: '${hydra.help.header}
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (group=option)
+
+
+       $APP_CONFIG_GROUPS
+
+
+       == Config ==
+
+       Override anything in the config (foo.bar=value)
+
+
+       $CONFIG
+
+
+       ${hydra.help.footer}
+
+       '
+   hydra_help:
+     template: 'Hydra (${hydra.runtime.version})
+
+       See https://hydra.cc for more info.
+
+
+       == Flags ==
+
+       $FLAGS_HELP
+
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+       to command line)
+
+
+       $HYDRA_CONFIG_GROUPS
+
+
+       Use ''--cfg hydra'' to Show the Hydra config.
+
+       '
+     hydra_help: ???
+   hydra_logging:
+     version: 1
+     formatters:
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+     root:
+       level: INFO
+       handlers:
+       - console
+     disable_existing_loggers: false
+   job_logging:
+     version: 1
+     formatters:
+       simple:
+         format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+           - %(message)s'
+         log_colors:
+           DEBUG: purple
+           INFO: green
+           WARNING: yellow
+           ERROR: red
+           CRITICAL: red
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+       file:
+         class: logging.FileHandler
+         formatter: simple
+         filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+     root:
+       level: INFO
+       handlers:
+       - console
+       - file
+     disable_existing_loggers: false
+   env: {}
+   mode: RUN
+   searchpath: []
+   callbacks: {}
+   output_subdir: .hydra
+   overrides:
+     hydra:
+     - hydra.mode=RUN
+     task:
+     - experiment=ja005
+   job:
+     name: train
+     chdir: null
+     override_dirname: experiment=ja005
+     id: ???
+     num: ???
+     config_name: train.yaml
+     env_set: {}
+     env_copy: []
+     config:
+       override_dirname:
+         kv_sep: '='
+         item_sep: ','
+         exclude_keys: []
+   runtime:
+     version: 1.3.2
+     version_base: '1.3'
+     cwd: /notebooks/Matcha-TTS-Japanese
+     config_sources:
+     - path: hydra.conf
+       schema: pkg
+       provider: hydra
+     - path: /notebooks/Matcha-TTS-Japanese/configs
+       schema: file
+       provider: main
+     - path: hydra_plugins.hydra_colorlog.conf
+       schema: pkg
+       provider: hydra-colorlog
+     - path: ''
+       schema: structured
+       provider: schema
+     output_dir: /notebooks/Matcha-TTS-Japanese/logs/train/ja005/runs/2024-09-18_10-22-35
+     choices:
+       debug: null
+       local: default
+       hparams_search: null
+       experiment: ja005
+       hydra: default
+       extras: default
+       paths: default
+       trainer: default
+       logger: tensorboard
+       callbacks: default
+       model: matcha
+       model/optimizer: adam.yaml
+       model/cfm: default.yaml
+       model/decoder: default.yaml
+       model/encoder: default.yaml
+       data: ja005.yaml
+       hydra/env: default
+       hydra/callbacks: null
+       hydra/job_logging: colorlog
+       hydra/hydra_logging: colorlog
+       hydra/hydra_help: default
+       hydra/help: default
+       hydra/sweeper: basic
+       hydra/launcher: basic
+       hydra/output: default
+   verbose: false
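
Note (not part of this commit): Hydra applies the job_logging section above essentially by passing it to logging.config.dictConfig, which is why the run directory contains train.log with the "simple" format. A minimal stand-alone sketch of that setup, with the colorlog formatter swapped for the plain one so it needs no extra dependency and the log path hard-coded instead of ${hydra.runtime.output_dir}/${hydra.job.name}.log:

import logging
import logging.config

job_logging = {
    "version": 1,
    "formatters": {
        "simple": {"format": "[%(asctime)s][%(name)s][%(levelname)s] - %(message)s"},
    },
    "handlers": {
        "console": {"class": "logging.StreamHandler", "formatter": "simple",
                    "stream": "ext://sys.stdout"},
        "file": {"class": "logging.FileHandler", "formatter": "simple",
                 "filename": "train.log"},  # placeholder path for the sketch
    },
    "root": {"level": "INFO", "handlers": ["console", "file"]},
    "disable_existing_loggers": False,
}
logging.config.dictConfig(job_logging)
logging.getLogger("matcha.utils.utils").info("logging configured")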
runs/2024-09-18_10-22-35/.hydra/overrides.yaml ADDED
@@ -0,0 +1 @@
+ - experiment=ja005
runs/2024-09-18_10-22-35/checkpoints/checkpoint_epoch=3124.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8490edaecaa932404327be3ded53e902e88ef7ca387fafdd04773c7bc940b4e
+ size 218839242
runs/2024-09-18_10-22-35/checkpoints/checkpoint_epoch=3149.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f95b19822350482e2c17ecc77d61c259832e6138e954c831b34f49c923624be
+ size 218839625
runs/2024-09-18_10-22-35/checkpoints/checkpoint_epoch=3174.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2532212efd6bd5f6bc1933a4f945bed47f68a0ec312248baa9b76083c220a7d8
+ size 218840008
runs/2024-09-18_10-22-35/checkpoints/last.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2532212efd6bd5f6bc1933a4f945bed47f68a0ec312248baa9b76083c220a7d8
+ size 218840008
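
Note (not part of this commit): the .ckpt entries above are Git LFS pointers, so the real ~219 MB checkpoint files must be fetched first (for example with git lfs pull, or via huggingface_hub). A loading sketch follows; the repo_id is a guess based on the author and folder name, and it assumes matcha.models.matcha_tts.MatchaTTS is a LightningModule that saved its hyperparameters, as the Matcha-TTS code normally does.

from huggingface_hub import hf_hub_download
from matcha.models.matcha_tts import MatchaTTS

# repo_id is illustrative only; use the repository this commit actually belongs to.
ckpt_path = hf_hub_download(
    repo_id="Akjava/Matcha-TTS-Japanese",
    filename="runs/2024-09-18_10-22-35/checkpoints/last.ckpt",
)
model = MatchaTTS.load_from_checkpoint(ckpt_path, map_location="cpu")
model.eval()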
runs/2024-09-18_10-22-35/config_tree.log ADDED
@@ -0,0 +1,145 @@
+ CONFIG
+ ├── data
+ │ └── _target_: matcha.data.text_mel_datamodule.TextMelDataModule
+ │ name: ja005
+ │ train_filelist_path: datas/ja005/train.cleaned.txt
+ │ valid_filelist_path: datas/ja005/valid.cleaned.txt
+ │ batch_size: 80
+ │ num_workers: 1
+ │ pin_memory: true
+ │ cleaners:
+ │ - basic_cleaners2
+ │ add_blank: true
+ │ n_spks: 1
+ │ n_fft: 1024
+ │ n_feats: 80
+ │ sample_rate: 22050
+ │ hop_length: 256
+ │ win_length: 1024
+ │ f_min: 0
+ │ f_max: 8000
+ │ data_statistics:
+ │ mel_mean: -5.925878047943115
+ │ mel_std: 2.230491876602173
+ │ seed: 3000
+ │ load_durations: false
+
+ ├── model
+ │ └── _target_: matcha.models.matcha_tts.MatchaTTS
+ │ n_vocab: 178
+ │ n_spks: 1
+ │ spk_emb_dim: 64
+ │ n_feats: 80
+ │ data_statistics:
+ │ mel_mean: -5.925878047943115
+ │ mel_std: 2.230491876602173
+ │ out_size: null
+ │ prior_loss: true
+ │ use_precomputed_durations: false
+ │ encoder:
+ │ encoder_type: RoPE Encoder
+ │ encoder_params:
+ │ n_feats: 80
+ │ n_channels: 192
+ │ filter_channels: 768
+ │ filter_channels_dp: 256
+ │ n_heads: 2
+ │ n_layers: 6
+ │ kernel_size: 3
+ │ p_dropout: 0.1
+ │ spk_emb_dim: 64
+ │ n_spks: 1
+ │ prenet: true
+ │ duration_predictor_params:
+ │ filter_channels_dp: 256
+ │ kernel_size: 3
+ │ p_dropout: 0.1
+ │ decoder:
+ │ channels:
+ │ - 256
+ │ - 256
+ │ dropout: 0.05
+ │ attention_head_dim: 64
+ │ n_blocks: 1
+ │ num_mid_blocks: 2
+ │ num_heads: 2
+ │ act_fn: snakebeta
+ │ cfm:
+ │ name: CFM
+ │ solver: euler
+ │ sigma_min: 0.0001
+ │ optimizer:
+ │ _target_: torch.optim.Adam
+ │ _partial_: true
+ │ lr: 0.0001
+ │ weight_decay: 0.0
+
+ ├── callbacks
+ │ └── model_checkpoint:
+ │ _target_: lightning.pytorch.callbacks.ModelCheckpoint
+ │ dirpath: /notebooks/Matcha-TTS-Japanese/logs/train/ja005/runs/2024-09-18_10-22-35/checkpoints
+ │ filename: checkpoint_{epoch:04d}
+ │ monitor: epoch
+ │ verbose: false
+ │ save_last: true
+ │ save_top_k: 25
+ │ mode: max
+ │ auto_insert_metric_name: true
+ │ save_weights_only: false
+ │ every_n_train_steps: null
+ │ train_time_interval: null
+ │ every_n_epochs: 25
+ │ save_on_train_epoch_end: null
+ │ model_summary:
+ │ _target_: lightning.pytorch.callbacks.RichModelSummary
+ │ max_depth: 3
+ │ rich_progress_bar:
+ │ _target_: lightning.pytorch.callbacks.RichProgressBar
+
+ ├── logger
+ │ └── tensorboard:
+ │ _target_: lightning.pytorch.loggers.tensorboard.TensorBoardLogger
+ │ save_dir: /notebooks/Matcha-TTS-Japanese/logs/train/ja005/runs/2024-09-18_10-22-35/tensorboard/
+ │ name: null
+ │ log_graph: false
+ │ default_hp_metric: true
+ │ prefix: ''
+
+ ├── trainer
+ │ └── _target_: lightning.pytorch.trainer.Trainer
+ │ default_root_dir: /notebooks/Matcha-TTS-Japanese/logs/train/ja005/runs/2024-09-18_10-22-35
+ │ max_epochs: -1
+ │ accelerator: gpu
+ │ devices:
+ │ - 0
+ │ precision: 16-mixed
+ │ check_val_every_n_epoch: 1
+ │ deterministic: false
+ │ gradient_clip_val: 5.0
+
+ ├── paths
+ │ └── root_dir: /notebooks/Matcha-TTS-Japanese
+ │ data_dir: /notebooks/Matcha-TTS-Japanese/data/
+ │ log_dir: /notebooks/Matcha-TTS-Japanese/logs/
+ │ output_dir: /notebooks/Matcha-TTS-Japanese/logs/train/ja005/runs/2024-09-18_10-22-35
+ │ work_dir: /notebooks/Matcha-TTS-Japanese
+
+ ├── extras
+ │ └── ignore_warnings: false
+ │ enforce_tags: true
+ │ print_config: true
+
+ ├── task_name
+ │ └── train
+ ├── run_name
+ │ └── ja005
+ ├── tags
+ │ └── ['ja005']
+ ├── train
+ │ └── True
+ ├── test
+ │ └── True
+ ├── ckpt_path
+ │ └── datas/ja005/last.ckpt
+ └── seed
+ └── 1234
runs/2024-09-18_10-22-35/tags.log ADDED
@@ -0,0 +1 @@
+ ['ja005']
runs/2024-09-18_10-22-35/tensorboard/version_0/events.out.tfevents.1726654957.nd457983zv.2243.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6d121c97a124b3443eb643916afd422990e0c7d6f56e68867f1c94532834d90
+ size 18545929
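
Note (not part of this commit): the TensorBoard event file above is also an LFS pointer (~18 MB). Once it is fetched, the logged training curves can be read back without launching TensorBoard, as in this sketch; the scalar tag names are whatever the run logged, so list them first.

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator(
    "runs/2024-09-18_10-22-35/tensorboard/version_0/"
    "events.out.tfevents.1726654957.nd457983zv.2243.0"
)
ea.Reload()
for tag in ea.Tags()["scalars"]:      # discover what was logged
    last = ea.Scalars(tag)[-1]        # ScalarEvent(wall_time, step, value)
    print(tag, last.step, last.value)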
runs/2024-09-18_10-22-35/tensorboard/version_0/hparams.yaml ADDED
@@ -0,0 +1,115 @@
+ model:
+   _target_: matcha.models.matcha_tts.MatchaTTS
+   n_vocab: 178
+   n_spks: ${data.n_spks}
+   spk_emb_dim: 64
+   n_feats: 80
+   data_statistics: ${data.data_statistics}
+   out_size: null
+   prior_loss: true
+   use_precomputed_durations: ${data.load_durations}
+   encoder:
+     encoder_type: RoPE Encoder
+     encoder_params:
+       n_feats: ${model.n_feats}
+       n_channels: 192
+       filter_channels: 768
+       filter_channels_dp: 256
+       n_heads: 2
+       n_layers: 6
+       kernel_size: 3
+       p_dropout: 0.1
+       spk_emb_dim: 64
+       n_spks: 1
+       prenet: true
+     duration_predictor_params:
+       filter_channels_dp: ${model.encoder.encoder_params.filter_channels_dp}
+       kernel_size: 3
+       p_dropout: ${model.encoder.encoder_params.p_dropout}
+   decoder:
+     channels:
+     - 256
+     - 256
+     dropout: 0.05
+     attention_head_dim: 64
+     n_blocks: 1
+     num_mid_blocks: 2
+     num_heads: 2
+     act_fn: snakebeta
+   cfm:
+     name: CFM
+     solver: euler
+     sigma_min: 0.0001
+   optimizer:
+     _target_: torch.optim.Adam
+     _partial_: true
+     lr: 0.0001
+     weight_decay: 0.0
+ model/params/total: 18204193
+ model/params/trainable: 18204193
+ model/params/non_trainable: 0
+ data:
+   _target_: matcha.data.text_mel_datamodule.TextMelDataModule
+   name: ja005
+   train_filelist_path: datas/ja005/train.cleaned.txt
+   valid_filelist_path: datas/ja005/valid.cleaned.txt
+   batch_size: 80
+   num_workers: 1
+   pin_memory: true
+   cleaners:
+   - basic_cleaners2
+   add_blank: true
+   n_spks: 1
+   n_fft: 1024
+   n_feats: 80
+   sample_rate: 22050
+   hop_length: 256
+   win_length: 1024
+   f_min: 0
+   f_max: 8000
+   data_statistics:
+     mel_mean: -5.925878047943115
+     mel_std: 2.230491876602173
+   seed: 3000
+   load_durations: false
+ trainer:
+   _target_: lightning.pytorch.trainer.Trainer
+   default_root_dir: ${paths.output_dir}
+   max_epochs: -1
+   accelerator: gpu
+   devices:
+   - 0
+   precision: 16-mixed
+   check_val_every_n_epoch: 1
+   deterministic: false
+   gradient_clip_val: 5.0
+ callbacks:
+   model_checkpoint:
+     _target_: lightning.pytorch.callbacks.ModelCheckpoint
+     dirpath: ${paths.output_dir}/checkpoints
+     filename: checkpoint_{epoch:04d}
+     monitor: epoch
+     verbose: false
+     save_last: true
+     save_top_k: 25
+     mode: max
+     auto_insert_metric_name: true
+     save_weights_only: false
+     every_n_train_steps: null
+     train_time_interval: null
+     every_n_epochs: 25
+     save_on_train_epoch_end: null
+   model_summary:
+     _target_: lightning.pytorch.callbacks.RichModelSummary
+     max_depth: 3
+   rich_progress_bar:
+     _target_: lightning.pytorch.callbacks.RichProgressBar
+ extras:
+   ignore_warnings: false
+   enforce_tags: true
+   print_config: true
+ task_name: train
+ tags:
+ - ja005
+ ckpt_path: datas/ja005/last.ckpt
+ seed: 1234
runs/2024-09-18_10-22-35/train.log ADDED
@@ -0,0 +1,13 @@
+ [2024-09-18 10:22:35,928][matcha.utils.utils][INFO] - Enforcing tags! <cfg.extras.enforce_tags=True>
+ [2024-09-18 10:22:35,934][matcha.utils.utils][INFO] - Printing config tree with Rich! <cfg.extras.print_config=True>
+ [2024-09-18 10:22:36,013][__main__][INFO] - Instantiating datamodule <matcha.data.text_mel_datamodule.TextMelDataModule>
+ [2024-09-18 10:22:36,710][__main__][INFO] - Instantiating model <matcha.models.matcha_tts.MatchaTTS>
+ [2024-09-18 10:22:37,195][__main__][INFO] - Instantiating callbacks...
+ [2024-09-18 10:22:37,196][matcha.utils.instantiators][INFO] - Instantiating callback <lightning.pytorch.callbacks.ModelCheckpoint>
+ [2024-09-18 10:22:37,200][matcha.utils.instantiators][INFO] - Instantiating callback <lightning.pytorch.callbacks.RichModelSummary>
+ [2024-09-18 10:22:37,201][matcha.utils.instantiators][INFO] - Instantiating callback <lightning.pytorch.callbacks.RichProgressBar>
+ [2024-09-18 10:22:37,201][__main__][INFO] - Instantiating loggers...
+ [2024-09-18 10:22:37,202][matcha.utils.instantiators][INFO] - Instantiating logger <lightning.pytorch.loggers.tensorboard.TensorBoardLogger>
+ [2024-09-18 10:22:37,206][__main__][INFO] - Instantiating trainer <lightning.pytorch.trainer.Trainer>
+ [2024-09-18 10:22:37,248][__main__][INFO] - Logging hyperparameters!
+ [2024-09-18 10:22:37,310][__main__][INFO] - Starting training!
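
Note (not part of this commit): the log above records the usual lightning-hydra-template sequence (datamodule, model, callbacks, loggers, trainer, then fit), which matcha/train.py drives via helpers in matcha.utils.instantiators. A rough equivalent with hydra.utils.instantiate, reusing the cfg composed in the sketch after config.yaml above, might look like this; it is a sketch, not the repository's code.

from hydra.utils import instantiate

# ${hydra:runtime.output_dir} only resolves inside a real Hydra app, so pin the
# output dir to a literal path before instantiating anything that references it.
cfg.paths.output_dir = "logs/manual-run"

datamodule = instantiate(cfg.data)
model = instantiate(cfg.model)
callbacks = [instantiate(cb) for cb in cfg.callbacks.values()]
loggers = [instantiate(lg) for lg in cfg.logger.values()]
trainer = instantiate(cfg.trainer, callbacks=callbacks, logger=loggers)

# ckpt_path is datas/ja005/last.ckpt in this run's config; omit it to train from scratch.
trainer.fit(model=model, datamodule=datamodule, ckpt_path=cfg.ckpt_path)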