Simonlob committed on
Commit
6aa5e9b
1 Parent(s): 31bead5
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .env.example +6 -0
  2. .gitattributes +0 -35
  3. .gitignore +163 -0
  4. .pre-commit-config.yaml +59 -0
  5. .project-root +2 -0
  6. .pylintrc +525 -0
  7. Create_dataset/__init__.py +1 -0
  8. Create_dataset/cr_dataset_script.py +97 -0
  9. LICENSE +21 -0
  10. MANIFEST.in +14 -0
  11. Makefile +42 -0
  12. README.md +70 -13
  13. app.py +320 -0
  14. configs/__init__.py +1 -0
  15. configs/callbacks/default.yaml +5 -0
  16. configs/callbacks/model_checkpoint.yaml +17 -0
  17. configs/callbacks/model_summary.yaml +5 -0
  18. configs/callbacks/none.yaml +0 -0
  19. configs/callbacks/rich_progress_bar.yaml +4 -0
  20. configs/data/akylai.yaml +21 -0
  21. configs/data/hi-fi_en-US_female.yaml +14 -0
  22. configs/data/ljspeech.yaml +22 -0
  23. configs/data/vctk.yaml +14 -0
  24. configs/debug/default.yaml +35 -0
  25. configs/debug/fdr.yaml +9 -0
  26. configs/debug/limit.yaml +12 -0
  27. configs/debug/overfit.yaml +13 -0
  28. configs/debug/profiler.yaml +15 -0
  29. configs/eval.yaml +18 -0
  30. configs/experiment/akylai.yaml +14 -0
  31. configs/experiment/hifi_dataset_piper_phonemizer.yaml +14 -0
  32. configs/experiment/ljspeech.yaml +14 -0
  33. configs/experiment/ljspeech_min_memory.yaml +18 -0
  34. configs/experiment/multispeaker.yaml +14 -0
  35. configs/extras/default.yaml +8 -0
  36. configs/hparams_search/mnist_optuna.yaml +52 -0
  37. configs/hydra/default.yaml +19 -0
  38. configs/local/.gitkeep +0 -0
  39. configs/logger/aim.yaml +28 -0
  40. configs/logger/comet.yaml +12 -0
  41. configs/logger/csv.yaml +7 -0
  42. configs/logger/many_loggers.yaml +9 -0
  43. configs/logger/mlflow.yaml +12 -0
  44. configs/logger/neptune.yaml +9 -0
  45. configs/logger/tensorboard.yaml +10 -0
  46. configs/logger/wandb.yaml +16 -0
  47. configs/model/cfm/default.yaml +3 -0
  48. configs/model/decoder/default.yaml +7 -0
  49. configs/model/encoder/default.yaml +18 -0
  50. configs/model/matcha.yaml +15 -0
.env.example ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # example of file for storing private and user specific environment variables, like keys or system paths
2
+ # rename it to ".env" (excluded from version control by default)
3
+ # .env is loaded by train.py automatically
4
+ # hydra allows you to reference variables in .yaml configs with special syntax: ${oc.env:MY_VAR}
5
+
6
+ MY_VAR="/home/user/my/system/path"
.gitattributes DELETED
@@ -1,35 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
.gitignore ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ pip-wheel-metadata/
24
+ share/python-wheels/
25
+ *.egg-info/
26
+ .installed.cfg
27
+ *.egg
28
+ MANIFEST
29
+
30
+ # PyInstaller
31
+ # Usually these files are written by a python script from a template
32
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
33
+ *.manifest
34
+ *.spec
35
+
36
+ # Installer logs
37
+ pip-log.txt
38
+ pip-delete-this-directory.txt
39
+
40
+ # Unit test / coverage reports
41
+ htmlcov/
42
+ .tox/
43
+ .nox/
44
+ .coverage
45
+ .coverage.*
46
+ .cache
47
+ nosetests.xml
48
+ coverage.xml
49
+ *.cover
50
+ *.py,cover
51
+ .hypothesis/
52
+ .pytest_cache/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ target/
76
+
77
+ # Jupyter Notebook
78
+ .ipynb_checkpoints
79
+
80
+ # IPython
81
+ profile_default/
82
+ ipython_config.py
83
+
84
+ # pyenv
85
+ .python-version
86
+
87
+ # pipenv
88
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
90
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
91
+ # install all needed dependencies.
92
+ #Pipfile.lock
93
+
94
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95
+ __pypackages__/
96
+
97
+ # Celery stuff
98
+ celerybeat-schedule
99
+ celerybeat.pid
100
+
101
+ # SageMath parsed files
102
+ *.sage.py
103
+
104
+ # Environments
105
+ .venv
106
+ env/
107
+ venv/
108
+ ENV/
109
+ env.bak/
110
+ venv.bak/
111
+
112
+ # Spyder project settings
113
+ .spyderproject
114
+ .spyproject
115
+
116
+ # Rope project settings
117
+ .ropeproject
118
+
119
+ # mkdocs documentation
120
+ /site
121
+
122
+ # mypy
123
+ .mypy_cache/
124
+ .dmypy.json
125
+ dmypy.json
126
+
127
+ # Pyre type checker
128
+ .pyre/
129
+
130
+ ### VisualStudioCode
131
+ .vscode/*
132
+ !.vscode/settings.json
133
+ !.vscode/tasks.json
134
+ !.vscode/launch.json
135
+ !.vscode/extensions.json
136
+ *.code-workspace
137
+ **/.vscode
138
+
139
+ # JetBrains
140
+ .idea/
141
+
142
+ # Data & Models
143
+ *.h5
144
+ *.tar
145
+ *.tar.gz
146
+
147
+ # Lightning-Hydra-Template
148
+ configs/local/default.yaml
149
+ /data/
150
+ /logs/
151
+ .env
152
+
153
+ # Aim logging
154
+ .aim
155
+
156
+ # Cython complied files
157
+ matcha/utils/monotonic_align/core.c
158
+
159
+ # Ignoring hifigan checkpoint
160
+ generator_v1
161
+ g_02500000
162
+ gradio_cached_examples/
163
+ synth_output/
.pre-commit-config.yaml ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ default_language_version:
2
+ python: python3.10
3
+
4
+ repos:
5
+ - repo: https://github.com/pre-commit/pre-commit-hooks
6
+ rev: v4.5.0
7
+ hooks:
8
+ # list of supported hooks: https://pre-commit.com/hooks.html
9
+ - id: trailing-whitespace
10
+ - id: end-of-file-fixer
11
+ # - id: check-docstring-first
12
+ - id: check-yaml
13
+ - id: debug-statements
14
+ - id: detect-private-key
15
+ - id: check-toml
16
+ - id: check-case-conflict
17
+ - id: check-added-large-files
18
+
19
+ # python code formatting
20
+ - repo: https://github.com/psf/black
21
+ rev: 23.12.1
22
+ hooks:
23
+ - id: black
24
+ args: [--line-length, "120"]
25
+
26
+ # python import sorting
27
+ - repo: https://github.com/PyCQA/isort
28
+ rev: 5.13.2
29
+ hooks:
30
+ - id: isort
31
+ args: ["--profile", "black", "--filter-files"]
32
+
33
+ # python upgrading syntax to newer version
34
+ - repo: https://github.com/asottile/pyupgrade
35
+ rev: v3.15.0
36
+ hooks:
37
+ - id: pyupgrade
38
+ args: [--py38-plus]
39
+
40
+ # python check (PEP8), programming errors and code complexity
41
+ - repo: https://github.com/PyCQA/flake8
42
+ rev: 7.0.0
43
+ hooks:
44
+ - id: flake8
45
+ args:
46
+ [
47
+ "--max-line-length", "120",
48
+ "--extend-ignore",
49
+ "E203,E402,E501,F401,F841,RST2,RST301",
50
+ "--exclude",
51
+ "logs/*,data/*,matcha/hifigan/*",
52
+ ]
53
+ additional_dependencies: [flake8-rst-docstrings==0.3.0]
54
+
55
+ # pylint
56
+ - repo: https://github.com/pycqa/pylint
57
+ rev: v3.0.3
58
+ hooks:
59
+ - id: pylint
.project-root ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # this file is required for inferring the project root directory
2
+ # do not delete
.pylintrc ADDED
@@ -0,0 +1,525 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [MASTER]
2
+
3
+ # A comma-separated list of package or module names from where C extensions may
4
+ # be loaded. Extensions are loading into the active Python interpreter and may
5
+ # run arbitrary code.
6
+ extension-pkg-whitelist=
7
+
8
+ # Add files or directories to the blacklist. They should be base names, not
9
+ # paths.
10
+ ignore=CVS
11
+
12
+ # Add files or directories matching the regex patterns to the blacklist. The
13
+ # regex matches against base names, not paths.
14
+ ignore-patterns=
15
+
16
+ # Python code to execute, usually for sys.path manipulation such as
17
+ # pygtk.require().
18
+ #init-hook=
19
+
20
+ # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
21
+ # number of processors available to use.
22
+ jobs=1
23
+
24
+ # Control the amount of potential inferred values when inferring a single
25
+ # object. This can help the performance when dealing with large functions or
26
+ # complex, nested conditions.
27
+ limit-inference-results=100
28
+
29
+ # List of plugins (as comma separated values of python modules names) to load,
30
+ # usually to register additional checkers.
31
+ load-plugins=
32
+
33
+ # Pickle collected data for later comparisons.
34
+ persistent=yes
35
+
36
+ # Specify a configuration file.
37
+ #rcfile=
38
+
39
+ # When enabled, pylint would attempt to guess common misconfiguration and emit
40
+ # user-friendly hints instead of false-positive error messages.
41
+ suggestion-mode=yes
42
+
43
+ # Allow loading of arbitrary C extensions. Extensions are imported into the
44
+ # active Python interpreter and may run arbitrary code.
45
+ unsafe-load-any-extension=no
46
+
47
+
48
+ [MESSAGES CONTROL]
49
+
50
+ # Only show warnings with the listed confidence levels. Leave empty to show
51
+ # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED.
52
+ confidence=
53
+
54
+ # Disable the message, report, category or checker with the given id(s). You
55
+ # can either give multiple identifiers separated by comma (,) or put this
56
+ # option multiple times (only on the command line, not in the configuration
57
+ # file where it should appear only once). You can also use "--disable=all" to
58
+ # disable everything first and then reenable specific checks. For example, if
59
+ # you want to run only the similarities checker, you can use "--disable=all
60
+ # --enable=similarities". If you want to run only the classes checker, but have
61
+ # no Warning level messages displayed, use "--disable=all --enable=classes
62
+ # --disable=W".
63
+ disable=missing-docstring,
64
+ too-many-public-methods,
65
+ too-many-lines,
66
+ bare-except,
67
+ ## for avoiding weird p3.6 CI linter error
68
+ ## TODO: see later if we can remove this
69
+ assigning-non-slot,
70
+ unsupported-assignment-operation,
71
+ ## end
72
+ line-too-long,
73
+ fixme,
74
+ wrong-import-order,
75
+ ungrouped-imports,
76
+ wrong-import-position,
77
+ import-error,
78
+ invalid-name,
79
+ too-many-instance-attributes,
80
+ arguments-differ,
81
+ arguments-renamed,
82
+ no-name-in-module,
83
+ no-member,
84
+ unsubscriptable-object,
85
+ raw-checker-failed,
86
+ bad-inline-option,
87
+ locally-disabled,
88
+ file-ignored,
89
+ suppressed-message,
90
+ useless-suppression,
91
+ deprecated-pragma,
92
+ use-symbolic-message-instead,
93
+ useless-object-inheritance,
94
+ too-few-public-methods,
95
+ too-many-branches,
96
+ too-many-arguments,
97
+ too-many-locals,
98
+ too-many-statements,
99
+ duplicate-code,
100
+ not-callable,
101
+ import-outside-toplevel,
102
+ logging-fstring-interpolation,
103
+ logging-not-lazy,
104
+ unused-argument,
105
+ no-else-return,
106
+ chained-comparison,
107
+ redefined-outer-name
108
+
109
+ # Enable the message, report, category or checker with the given id(s). You can
110
+ # either give multiple identifier separated by comma (,) or put this option
111
+ # multiple time (only on the command line, not in the configuration file where
112
+ # it should appear only once). See also the "--disable" option for examples.
113
+ enable=c-extension-no-member
114
+
115
+
116
+ [REPORTS]
117
+
118
+ # Python expression which should return a note less than 10 (10 is the highest
119
+ # note). You have access to the variables errors warning, statement which
120
+ # respectively contain the number of errors / warnings messages and the total
121
+ # number of statements analyzed. This is used by the global evaluation report
122
+ # (RP0004).
123
+ evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
124
+
125
+ # Template used to display messages. This is a python new-style format string
126
+ # used to format the message information. See doc for all details.
127
+ #msg-template=
128
+
129
+ # Set the output format. Available formats are text, parseable, colorized, json
130
+ # and msvs (visual studio). You can also give a reporter class, e.g.
131
+ # mypackage.mymodule.MyReporterClass.
132
+ output-format=text
133
+
134
+ # Tells whether to display a full report or only the messages.
135
+ reports=no
136
+
137
+ # Activate the evaluation score.
138
+ score=yes
139
+
140
+
141
+ [REFACTORING]
142
+
143
+ # Maximum number of nested blocks for function / method body
144
+ max-nested-blocks=5
145
+
146
+ # Complete name of functions that never returns. When checking for
147
+ # inconsistent-return-statements if a never returning function is called then
148
+ # it will be considered as an explicit return statement and no message will be
149
+ # printed.
150
+ never-returning-functions=sys.exit
151
+
152
+
153
+ [LOGGING]
154
+
155
+ # Format style used to check logging format string. `old` means using %
156
+ # formatting, while `new` is for `{}` formatting.
157
+ logging-format-style=old
158
+
159
+ # Logging modules to check that the string format arguments are in logging
160
+ # function parameter format.
161
+ logging-modules=logging
162
+
163
+
164
+ [SPELLING]
165
+
166
+ # Limits count of emitted suggestions for spelling mistakes.
167
+ max-spelling-suggestions=4
168
+
169
+ # Spelling dictionary name. Available dictionaries: none. To make it work,
171
+ # install the python-enchant package.
171
+ spelling-dict=
172
+
173
+ # List of comma separated words that should not be checked.
174
+ spelling-ignore-words=
175
+
176
+ # A path to a file that contains private dictionary; one word per line.
177
+ spelling-private-dict-file=
178
+
179
+ # Tells whether to store unknown words to indicated private dictionary in
180
+ # --spelling-private-dict-file option instead of raising a message.
181
+ spelling-store-unknown-words=no
182
+
183
+
184
+ [MISCELLANEOUS]
185
+
186
+ # List of note tags to take in consideration, separated by a comma.
187
+ notes=FIXME,
188
+ XXX,
189
+ TODO
190
+
191
+
192
+ [TYPECHECK]
193
+
194
+ # List of decorators that produce context managers, such as
195
+ # contextlib.contextmanager. Add to this list to register other decorators that
196
+ # produce valid context managers.
197
+ contextmanager-decorators=contextlib.contextmanager
198
+
199
+ # List of members which are set dynamically and missed by pylint inference
200
+ # system, and so shouldn't trigger E1101 when accessed. Python regular
201
+ # expressions are accepted.
202
+ generated-members=numpy.*,torch.*
203
+
204
+ # Tells whether missing members accessed in mixin class should be ignored. A
205
+ # mixin class is detected if its name ends with "mixin" (case insensitive).
206
+ ignore-mixin-members=yes
207
+
208
+ # Tells whether to warn about missing members when the owner of the attribute
209
+ # is inferred to be None.
210
+ ignore-none=yes
211
+
212
+ # This flag controls whether pylint should warn about no-member and similar
213
+ # checks whenever an opaque object is returned when inferring. The inference
214
+ # can return multiple potential results while evaluating a Python object, but
215
+ # some branches might not be evaluated, which results in partial inference. In
216
+ # that case, it might be useful to still emit no-member and other checks for
217
+ # the rest of the inferred objects.
218
+ ignore-on-opaque-inference=yes
219
+
220
+ # List of class names for which member attributes should not be checked (useful
221
+ # for classes with dynamically set attributes). This supports the use of
222
+ # qualified names.
223
+ ignored-classes=optparse.Values,thread._local,_thread._local
224
+
225
+ # List of module names for which member attributes should not be checked
226
+ # (useful for modules/projects where namespaces are manipulated during runtime
227
+ # and thus existing member attributes cannot be deduced by static analysis. It
228
+ # supports qualified module names, as well as Unix pattern matching.
229
+ ignored-modules=
230
+
231
+ # Show a hint with possible names when a member name was not found. The aspect
232
+ # of finding the hint is based on edit distance.
233
+ missing-member-hint=yes
234
+
235
+ # The minimum edit distance a name should have in order to be considered a
236
+ # similar match for a missing member name.
237
+ missing-member-hint-distance=1
238
+
239
+ # The total number of similar names that should be taken in consideration when
240
+ # showing a hint for a missing member.
241
+ missing-member-max-choices=1
242
+
243
+
244
+ [VARIABLES]
245
+
246
+ # List of additional names supposed to be defined in builtins. Remember that
247
+ # you should avoid defining new builtins when possible.
248
+ additional-builtins=
249
+
250
+ # Tells whether unused global variables should be treated as a violation.
251
+ allow-global-unused-variables=yes
252
+
253
+ # List of strings which can identify a callback function by name. A callback
254
+ # name must start or end with one of those strings.
255
+ callbacks=cb_,
256
+ _cb
257
+
258
+ # A regular expression matching the name of dummy variables (i.e. expected to
259
+ # not be used).
260
+ dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
261
+
262
+ # Argument names that match this expression will be ignored. Default to name
263
+ # with leading underscore.
264
+ ignored-argument-names=_.*|^ignored_|^unused_
265
+
266
+ # Tells whether we should check for unused import in __init__ files.
267
+ init-import=no
268
+
269
+ # List of qualified module names which can have objects that can redefine
270
+ # builtins.
271
+ redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
272
+
273
+
274
+ [FORMAT]
275
+
276
+ # Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
277
+ expected-line-ending-format=
278
+
279
+ # Regexp for a line that is allowed to be longer than the limit.
280
+ ignore-long-lines=^\s*(# )?<?https?://\S+>?$
281
+
282
+ # Number of spaces of indent required inside a hanging or continued line.
283
+ indent-after-paren=4
284
+
285
+ # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
286
+ # tab).
287
+ indent-string=' '
288
+
289
+ # Maximum number of characters on a single line.
290
+ max-line-length=120
291
+
292
+ # Maximum number of lines in a module.
293
+ max-module-lines=1000
294
+
295
+ # Allow the body of a class to be on the same line as the declaration if body
296
+ # contains single statement.
297
+ single-line-class-stmt=no
298
+
299
+ # Allow the body of an if to be on the same line as the test if there is no
300
+ # else.
301
+ single-line-if-stmt=no
302
+
303
+
304
+ [SIMILARITIES]
305
+
306
+ # Ignore comments when computing similarities.
307
+ ignore-comments=yes
308
+
309
+ # Ignore docstrings when computing similarities.
310
+ ignore-docstrings=yes
311
+
312
+ # Ignore imports when computing similarities.
313
+ ignore-imports=no
314
+
315
+ # Minimum lines number of a similarity.
316
+ min-similarity-lines=4
317
+
318
+
319
+ [BASIC]
320
+
321
+ # Naming style matching correct argument names.
322
+ argument-naming-style=snake_case
323
+
324
+ # Regular expression matching correct argument names. Overrides argument-
325
+ # naming-style.
326
+ argument-rgx=[a-z_][a-z0-9_]{0,30}$
327
+
328
+ # Naming style matching correct attribute names.
329
+ attr-naming-style=snake_case
330
+
331
+ # Regular expression matching correct attribute names. Overrides attr-naming-
332
+ # style.
333
+ #attr-rgx=
334
+
335
+ # Bad variable names which should always be refused, separated by a comma.
336
+ bad-names=
337
+
338
+ # Naming style matching correct class attribute names.
339
+ class-attribute-naming-style=any
340
+
341
+ # Regular expression matching correct class attribute names. Overrides class-
342
+ # attribute-naming-style.
343
+ #class-attribute-rgx=
344
+
345
+ # Naming style matching correct class names.
346
+ class-naming-style=PascalCase
347
+
348
+ # Regular expression matching correct class names. Overrides class-naming-
349
+ # style.
350
+ #class-rgx=
351
+
352
+ # Naming style matching correct constant names.
353
+ const-naming-style=UPPER_CASE
354
+
355
+ # Regular expression matching correct constant names. Overrides const-naming-
356
+ # style.
357
+ #const-rgx=
358
+
359
+ # Minimum line length for functions/classes that require docstrings, shorter
360
+ # ones are exempt.
361
+ docstring-min-length=-1
362
+
363
+ # Naming style matching correct function names.
364
+ function-naming-style=snake_case
365
+
366
+ # Regular expression matching correct function names. Overrides function-
367
+ # naming-style.
368
+ #function-rgx=
369
+
370
+ # Good variable names which should always be accepted, separated by a comma.
371
+ good-names=i,
372
+ j,
373
+ k,
374
+ x,
375
+ ex,
376
+ Run,
377
+ _
378
+
379
+ # Include a hint for the correct naming format with invalid-name.
380
+ include-naming-hint=no
381
+
382
+ # Naming style matching correct inline iteration names.
383
+ inlinevar-naming-style=any
384
+
385
+ # Regular expression matching correct inline iteration names. Overrides
386
+ # inlinevar-naming-style.
387
+ #inlinevar-rgx=
388
+
389
+ # Naming style matching correct method names.
390
+ method-naming-style=snake_case
391
+
392
+ # Regular expression matching correct method names. Overrides method-naming-
393
+ # style.
394
+ #method-rgx=
395
+
396
+ # Naming style matching correct module names.
397
+ module-naming-style=snake_case
398
+
399
+ # Regular expression matching correct module names. Overrides module-naming-
400
+ # style.
401
+ #module-rgx=
402
+
403
+ # Colon-delimited sets of names that determine each other's naming style when
404
+ # the name regexes allow several styles.
405
+ name-group=
406
+
407
+ # Regular expression which should only match function or class names that do
408
+ # not require a docstring.
409
+ no-docstring-rgx=^_
410
+
411
+ # List of decorators that produce properties, such as abc.abstractproperty. Add
412
+ # to this list to register other decorators that produce valid properties.
413
+ # These decorators are taken in consideration only for invalid-name.
414
+ property-classes=abc.abstractproperty
415
+
416
+ # Naming style matching correct variable names.
417
+ variable-naming-style=snake_case
418
+
419
+ # Regular expression matching correct variable names. Overrides variable-
420
+ # naming-style.
421
+ variable-rgx=[a-z_][a-z0-9_]{0,30}$
422
+
423
+
424
+ [STRING]
425
+
426
+ # This flag controls whether the implicit-str-concat-in-sequence should
427
+ # generate a warning on implicit string concatenation in sequences defined over
428
+ # several lines.
429
+ check-str-concat-over-line-jumps=no
430
+
431
+
432
+ [IMPORTS]
433
+
434
+ # Allow wildcard imports from modules that define __all__.
435
+ allow-wildcard-with-all=no
436
+
437
+ # Analyse import fallback blocks. This can be used to support both Python 2 and
438
+ # 3 compatible code, which means that the block might have code that exists
439
+ # only in one or another interpreter, leading to false positives when analysed.
440
+ analyse-fallback-blocks=no
441
+
442
+ # Deprecated modules which should not be used, separated by a comma.
443
+ deprecated-modules=optparse,tkinter.tix
444
+
445
+ # Create a graph of external dependencies in the given file (report RP0402 must
446
+ # not be disabled).
447
+ ext-import-graph=
448
+
449
+ # Create a graph of every (i.e. internal and external) dependencies in the
450
+ # given file (report RP0402 must not be disabled).
451
+ import-graph=
452
+
453
+ # Create a graph of internal dependencies in the given file (report RP0402 must
454
+ # not be disabled).
455
+ int-import-graph=
456
+
457
+ # Force import order to recognize a module as part of the standard
458
+ # compatibility libraries.
459
+ known-standard-library=
460
+
461
+ # Force import order to recognize a module as part of a third party library.
462
+ known-third-party=enchant
463
+
464
+
465
+ [CLASSES]
466
+
467
+ # List of method names used to declare (i.e. assign) instance attributes.
468
+ defining-attr-methods=__init__,
469
+ __new__,
470
+ setUp
471
+
472
+ # List of member names, which should be excluded from the protected access
473
+ # warning.
474
+ exclude-protected=_asdict,
475
+ _fields,
476
+ _replace,
477
+ _source,
478
+ _make
479
+
480
+ # List of valid names for the first argument in a class method.
481
+ valid-classmethod-first-arg=cls
482
+
483
+ # List of valid names for the first argument in a metaclass class method.
484
+ valid-metaclass-classmethod-first-arg=cls
485
+
486
+
487
+ [DESIGN]
488
+
489
+ # Maximum number of arguments for function / method.
490
+ max-args=5
491
+
492
+ # Maximum number of attributes for a class (see R0902).
493
+ max-attributes=7
494
+
495
+ # Maximum number of boolean expressions in an if statement.
496
+ max-bool-expr=5
497
+
498
+ # Maximum number of branch for function / method body.
499
+ max-branches=12
500
+
501
+ # Maximum number of locals for function / method body.
502
+ max-locals=15
503
+
504
+ # Maximum number of parents for a class (see R0901).
505
+ max-parents=15
506
+
507
+ # Maximum number of public methods for a class (see R0904).
508
+ max-public-methods=20
509
+
510
+ # Maximum number of return / yield for function / method body.
511
+ max-returns=6
512
+
513
+ # Maximum number of statements in function / method body.
514
+ max-statements=50
515
+
516
+ # Minimum number of public methods for a class (see R0903).
517
+ min-public-methods=2
518
+
519
+
520
+ [EXCEPTIONS]
521
+
522
+ # Exceptions that will emit a warning when being caught. Defaults to
523
+ # "BaseException, Exception".
524
+ overgeneral-exceptions=builtins.BaseException,
525
+ builtins.Exception
Create_dataset/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+
Create_dataset/cr_dataset_script.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import numpy as np
3
+ from datasets import load_dataset
4
+ from datasets import Dataset, DatasetDict
5
+ from IPython.display import Audio
6
+ import scipy
7
+ import librosa
8
+ from tqdm import tqdm
9
+ import re
10
+ import os
11
+
12
+
13
def load_audio(audio_dict: dict) -> None:
    """Resample one dataset audio entry to 22.05 kHz and write it as 16-bit PCM WAV.

    Args:
        audio_dict: mapping with keys 'array' (float samples in [-1, 1]),
            'sampling_rate' (source rate in Hz) and 'path' (output file name,
            resolved relative to the current working directory).
    """
    # Local import: a bare top-level `import scipy` does not guarantee the
    # `scipy.io.wavfile` submodule is loaded (AttributeError on recent SciPy).
    from scipy.io import wavfile

    target_sr = 22050
    audio_resampled = librosa.resample(
        np.array(audio_dict['array']),
        orig_sr=audio_dict['sampling_rate'],
        target_sr=target_sr,
    )
    # Clip before scaling: resampling can overshoot [-1, 1], and out-of-range
    # values would wrap around when cast to int16.
    pcm = (np.clip(audio_resampled, -1.0, 1.0) * 32767).astype(np.int16)
    wavfile.write(audio_dict['path'], rate=target_sr, data=pcm)
21
+
22
def remove_outer_quotes_regex(sen: str) -> str:
    """Strip one pair of surrounding quote characters (single or double,
    possibly mismatched) from *sen*; return it unchanged otherwise."""
    outer_quotes = re.compile(r'^["\'](.*)["\']$')
    return outer_quotes.sub(r'\1', sen)
24
+
25
def main() -> None:
    """Download the Kany TTS dataset, resample every clip to 22.05 kHz WAV
    and write pipe-separated `path|text` filelists for the train/test splits.

    Side effects: creates ./kany_dataset/wavs/, fills it with WAV files, and
    writes kany_filelist_train.txt / kany_filelist_test.txt into kany_dataset/.
    """
    os.mkdir('kany_dataset')
    os.chdir('kany_dataset')
    os.mkdir('wavs')
    # load_audio() writes each file to a path relative to the CWD, so stay
    # inside wavs/ while converting.
    os.chdir('wavs')

    art = """
    /\\_/\\
   ( o.o )
    > ^ <

    V O I C E
    """
    print(art)

    print('--- LOADING DATASET ---')
    dataset_kany = load_dataset("Simonlob/Kany_dataset_mk4")

    # --- train split ---
    print()
    print('--- CONVERTING AND SAVING THE TRAIN DATASET ---')  # typo fixed: was "CONVERTIND"
    num_shards = 20
    path = []
    text = []

    with tqdm(total=len(dataset_kany['train']), leave=False) as pbar:
        # Sharding keeps the per-iteration memory footprint bounded while the
        # single progress bar still tracks the whole split.
        for ind in range(num_shards):
            dataset_shard = dataset_kany['train'].shard(num_shards=num_shards, index=ind)
            for row in dataset_shard:
                load_audio(row['audio'])
                path.append(row['audio']['path'])
                text.append(row['raw_transcription'])
                pbar.update(1)

    absolute_path = os.path.abspath('../')
    os.chdir(absolute_path)

    # Renamed from `dir`, which shadowed the builtin of the same name.
    wav_dir = f'{absolute_path}/wavs/'
    df = pd.DataFrame({'path': path, 'text': text})
    df.text = df.text.map(remove_outer_quotes_regex)
    df.path = wav_dir + df.path
    df.to_csv('kany_filelist_train.txt', sep='|', header=None, index=False)

    # --- test split ---
    os.chdir(wav_dir)
    path = []
    text = []
    print()
    print('--- CONVERTING AND SAVING THE TEST DATASET ---')  # typo fixed: was "CONVERTIND"
    with tqdm(total=len(dataset_kany['test']), leave=False) as pbar2:
        # Iterate the split directly: wrapping it in a second tqdm() while also
        # driving pbar2 produced a duplicate progress bar in the original.
        for row in dataset_kany['test']:
            load_audio(row['audio'])
            path.append(row['audio']['path'])
            text.append(row['raw_transcription'])
            pbar2.update(1)

    os.chdir(absolute_path)
    df = pd.DataFrame({'path': path, 'text': text})
    df.text = df.text.map(remove_outer_quotes_regex)
    df.path = wav_dir + df.path
    df.to_csv('kany_filelist_test.txt', sep='|', header=None, index=False)
    print()
    print('--- THE DATASET IS READY ---')
    print(f'Dir of data is "{absolute_path}"')

    # Return to the directory the script was started from.
    absolute_path_home = os.path.abspath('../')
    os.chdir(absolute_path_home)
94
+
95
+
96
# Run the dataset-building pipeline only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    main()
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2023 Shivam Mehta
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
MANIFEST.in ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ include README.md
2
+ include LICENSE.txt
3
+ include requirements.*.txt
4
+ include *.cff
5
+ include requirements.txt
6
+ include matcha/VERSION
7
+ recursive-include matcha *.json
8
+ recursive-include matcha *.html
9
+ recursive-include matcha *.png
10
+ recursive-include matcha *.md
11
+ recursive-include matcha *.py
12
+ recursive-include matcha *.pyx
13
+ recursive-exclude tests *
14
+ prune tests*
Makefile ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ help: ## Show help
3
+ @grep -E '^[.a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
4
+
5
+ clean: ## Clean autogenerated files
6
+ rm -rf dist
7
+ find . -type f -name "*.DS_Store" -ls -delete
8
+ find . | grep -E "(__pycache__|\.pyc|\.pyo)" | xargs rm -rf
9
+ find . | grep -E ".pytest_cache" | xargs rm -rf
10
+ find . | grep -E ".ipynb_checkpoints" | xargs rm -rf
11
+ rm -f .coverage
12
+
13
+ clean-logs: ## Clean logs
14
+ rm -rf logs/**
15
+
16
+ create-package: ## Create wheel and tar gz
17
+ rm -rf dist/
18
+ python setup.py bdist_wheel --plat-name=manylinux1_x86_64
19
+ python setup.py sdist
20
+ python -m twine upload dist/* --verbose --skip-existing
21
+
22
+ format: ## Run pre-commit hooks
23
+ pre-commit run -a
24
+
25
+ sync: ## Merge changes from main branch to your current branch
26
+ git pull
27
+ git pull origin main
28
+
29
+ test: ## Run not slow tests
30
+ pytest -k "not slow"
31
+
32
+ test-full: ## Run all tests
33
+ pytest
34
+
35
+ train-ljspeech: ## Train the model
36
+ python matcha/train.py experiment=ljspeech
37
+
38
+ train-ljspeech-min: ## Train the model with minimum memory
39
+ python matcha/train.py experiment=ljspeech_min_memory
40
+
41
+ start_app: ## Start the app
42
+ python matcha/app.py
README.md CHANGED
@@ -1,13 +1,70 @@
1
- ---
2
- title: Akylai Tts Mini
3
- emoji: 📈
4
- colorFrom: gray
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 4.26.0
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <div align="center">
2
+
3
+
4
+
5
+ # 🍵 Matcha-TTS: A fast TTS architecture with conditional flow matching
6
+
7
+ ### [Shivam Mehta](https://www.kth.se/profile/smehta), [Ruibo Tu](https://www.kth.se/profile/ruibo), [Jonas Beskow](https://www.kth.se/profile/beskow), [Éva Székely](https://www.kth.se/profile/szekely), and [Gustav Eje Henter](https://people.kth.se/~ghe/)
8
+
9
+ [![python](https://img.shields.io/badge/-Python_3.10-blue?logo=python&logoColor=white)](https://www.python.org/downloads/release/python-3100/)
10
+ [![pytorch](https://img.shields.io/badge/PyTorch_2.0+-ee4c2c?logo=pytorch&logoColor=white)](https://pytorch.org/get-started/locally/)
11
+ [![lightning](https://img.shields.io/badge/-Lightning_2.0+-792ee5?logo=pytorchlightning&logoColor=white)](https://pytorchlightning.ai/)
12
+ [![hydra](https://img.shields.io/badge/Config-Hydra_1.3-89b8cd)](https://hydra.cc/)
13
+ [![black](https://img.shields.io/badge/Code%20Style-Black-black.svg?labelColor=gray)](https://black.readthedocs.io/en/stable/)
14
+ [![isort](https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336)](https://pycqa.github.io/isort/)
15
+
16
+ <p style="text-align: center;">
17
+ <img src="https://shivammehta25.github.io/Matcha-TTS/images/logo.png" height="128"/>
18
+ </p>
19
+
20
+ </div>
21
+
22
+
23
+
24
+ # Matcha-TTS for Kyrgyz language
25
+
26
+ ## Train with Kany Dataset
27
+
28
+ The training dataset has 7016 samples and 13 hours of speech. All settings for training have already been made.
29
+
30
+ ## Process by Terminal
31
+
32
+ * **Load this repo and connect to HF**
33
+
34
+ ```
35
+ git clone https://github.com/simonlobgromov/Matcha-TTS
36
+ cd Matcha-TTS
37
+ pip install -e .
38
+ ```
39
+ !!!The environment will be restarted!!!
40
+
41
+ Install this:
42
+
43
+ ```
44
+ apt-get install espeak-ng
45
+ ```
46
+ Connect to HF
47
+
48
+ ```
49
+ git config --global credential.helper store
50
+ huggingface-cli login
51
+ ```
52
+
53
+ * **Load the Data**
54
+
55
+ ```
56
+ create-dataset
57
+
58
+ # If you see a cat, then everything is fine!
59
+ ```
60
+
61
+ * **Train**
62
+
63
+ ```
64
+ python matcha/train.py experiment=akylai
65
+ ```
66
+
67
+ * **Checkpoints**
68
+
69
+ Checkpoints will be saved in `./Matcha-TTS/logs/train/akylai/runs/<DATE>_<TIME>/checkpoints`. Download them, or keep only the last few checkpoints.
70
+
app.py ADDED
@@ -0,0 +1,320 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import tempfile
2
+ from argparse import Namespace
3
+ from pathlib import Path
4
+
5
+ import gradio as gr
6
+ import soundfile as sf
7
+ import torch
8
+
9
+ from matcha.cli import (
10
+ MATCHA_URLS,
11
+ VOCODER_URLS,
12
+ assert_model_downloaded,
13
+ get_device,
14
+ load_matcha,
15
+ load_vocoder,
16
+ process_text,
17
+ to_waveform,
18
+ )
19
+ from matcha.utils.utils import get_user_data_dir, plot_tensor
20
+
21
+ LOCATION = Path(get_user_data_dir())
22
+
23
+ args = Namespace(
24
+ cpu=False,
25
+ model="matcha_vctk",
26
+ vocoder="hifigan_univ_v1",
27
+ spk=0,
28
+ )
29
+
30
+ CURRENTLY_LOADED_MODEL = args.model
31
+
32
+
33
def MATCHA_TTS_LOC(x):
    """Return the local filesystem path for the Matcha-TTS checkpoint named *x*."""
    return LOCATION / (x + ".ckpt")
35
+
36
+
37
def VOCODER_LOC(x):
    """Return the local filesystem path for the vocoder weights named *x*."""
    return LOCATION / str(x)
39
+
40
+
41
+ LOGO_URL = "https://shivammehta25.github.io/Matcha-TTS/images/logo.png"
42
+ RADIO_OPTIONS = {
43
+ "Multi Speaker (VCTK)": {
44
+ "model": "matcha_vctk",
45
+ "vocoder": "hifigan_univ_v1",
46
+ },
47
+ "Single Speaker (LJ Speech)": {
48
+ "model": "akyl_ai",
49
+ "vocoder": "hifigan_T2_v1",
50
+ },
51
+ }
52
+
53
+ # Ensure all the required models are downloaded
54
+ assert_model_downloaded(MATCHA_TTS_LOC("akyl_ai"), MATCHA_URLS["akyl_ai"])
55
+ assert_model_downloaded(VOCODER_LOC("hifigan_T2_v1"), VOCODER_URLS["hifigan_T2_v1"])
56
+ assert_model_downloaded(MATCHA_TTS_LOC("matcha_vctk"), MATCHA_URLS["matcha_vctk"])
57
+ assert_model_downloaded(VOCODER_LOC("hifigan_univ_v1"), VOCODER_URLS["hifigan_univ_v1"])
58
+
59
+ device = get_device(args)
60
+
61
+ # Load default model
62
+ model = load_matcha(args.model, MATCHA_TTS_LOC(args.model), device)
63
+ vocoder, denoiser = load_vocoder(args.vocoder, VOCODER_LOC(args.vocoder), device)
64
+
65
+
66
def load_model(model_name, vocoder_name):
    """Load an acoustic model plus its matching vocoder/denoiser onto `device`.

    Returns a ``(model, vocoder, denoiser)`` triple ready for synthesis.
    """
    acoustic = load_matcha(model_name, MATCHA_TTS_LOC(model_name), device)
    voc, den = load_vocoder(vocoder_name, VOCODER_LOC(vocoder_name), device)
    return acoustic, voc, den
70
+
71
+
72
def load_model_ui(model_type, textbox):
    """Swap the globally loaded model when the user changes the radio selection.

    Returns Gradio updates for: the textbox, the synth button, the speaker
    slider, the two example rows, and the length-scale slider.
    """
    model_name, vocoder_name = RADIO_OPTIONS[model_type]["model"], RADIO_OPTIONS[model_type]["vocoder"]

    global model, vocoder, denoiser, CURRENTLY_LOADED_MODEL  # pylint: disable=global-statement
    if CURRENTLY_LOADED_MODEL != model_name:
        model, vocoder, denoiser = load_model(model_name, vocoder_name)
        CURRENTLY_LOADED_MODEL = model_name

    # BUG FIX: the original compared only against "matcha_ljspeech", but the
    # single-speaker radio option was changed to the "akyl_ai" model, so the
    # single-speaker branch was unreachable and akyl_ai wrongly showed the
    # speaker slider and multi-speaker examples.
    if model_name in ("matcha_ljspeech", "akyl_ai"):
        # Single-speaker model: hide the speaker slider and show LJ examples.
        spk_slider = gr.update(visible=False, value=-1)
        single_speaker_examples = gr.update(visible=True)
        multi_speaker_examples = gr.update(visible=False)
        length_scale = gr.update(value=0.95)
    else:
        # Multi-speaker model: expose the speaker slider and VCTK examples.
        spk_slider = gr.update(visible=True, value=0)
        single_speaker_examples = gr.update(visible=False)
        multi_speaker_examples = gr.update(visible=True)
        length_scale = gr.update(value=0.85)

    return (
        textbox,
        gr.update(interactive=True),
        spk_slider,
        single_speaker_examples,
        multi_speaker_examples,
        length_scale,
    )
99
+
100
+
101
@torch.inference_mode()
def process_text_gradio(text):
    """Phonemise *text* and return (display phonemes, encoded tensor, lengths)."""
    processed = process_text(1, text, device)
    # Every other entry of x_phones is the separator token; keep the symbols only.
    phones_for_display = processed["x_phones"][1::2]
    return phones_for_display, processed["x"], processed["x_lengths"]
105
+
106
+
107
@torch.inference_mode()
def synthesise_mel(text, text_length, n_timesteps, temperature, length_scale, spk):
    """Run the acoustic model + vocoder and return (wav file path, mel plot).

    A negative *spk* means "no speaker conditioning" (single-speaker model).
    """
    speaker = None if spk < 0 else torch.tensor([spk], device=device, dtype=torch.long)
    output = model.synthesise(
        text,
        text_length,
        n_timesteps=n_timesteps,
        temperature=temperature,
        spks=speaker,
        length_scale=length_scale,
    )
    output["waveform"] = to_waveform(output["mel"], vocoder, denoiser)
    # delete=False: Gradio reads the file after this context manager closes it.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as wav_file:
        sf.write(wav_file.name, output["waveform"], 22050, "PCM_24")

    return wav_file.name, plot_tensor(output["mel"].squeeze().cpu().numpy())
123
+
124
+
125
def multispeaker_example_cacher(text, n_timesteps, mel_temp, length_scale, spk):
    """Example handler that first ensures the VCTK multi-speaker model is loaded."""
    global CURRENTLY_LOADED_MODEL, model, vocoder, denoiser  # pylint: disable=global-statement
    if CURRENTLY_LOADED_MODEL != "matcha_vctk":
        model, vocoder, denoiser = load_model("matcha_vctk", "hifigan_univ_v1")
        CURRENTLY_LOADED_MODEL = "matcha_vctk"

    phones, encoded, lengths = process_text_gradio(text)
    audio_path, mel_plot = synthesise_mel(encoded, lengths, n_timesteps, mel_temp, length_scale, spk)
    return phones, audio_path, mel_plot
135
+
136
+
137
def ljspeech_example_cacher(text, n_timesteps, mel_temp, length_scale, spk=-1):
    """Example handler that first ensures the single-speaker "akyl_ai" model is loaded."""
    global CURRENTLY_LOADED_MODEL, model, vocoder, denoiser  # pylint: disable=global-statement
    if CURRENTLY_LOADED_MODEL != "akyl_ai":
        model, vocoder, denoiser = load_model("akyl_ai", "hifigan_T2_v1")
        CURRENTLY_LOADED_MODEL = "akyl_ai"

    phones, encoded, lengths = process_text_gradio(text)
    audio_path, mel_plot = synthesise_mel(encoded, lengths, n_timesteps, mel_temp, length_scale, spk)
    return phones, audio_path, mel_plot
147
+
148
+
149
def main():
    """Build and launch the Gradio demo UI for Matcha-TTS."""
    description = """# 🍵 Matcha-TTS: A fast TTS architecture with conditional flow matching
    ### [Shivam Mehta](https://www.kth.se/profile/smehta), [Ruibo Tu](https://www.kth.se/profile/ruibo), [Jonas Beskow](https://www.kth.se/profile/beskow), [Éva Székely](https://www.kth.se/profile/szekely), and [Gustav Eje Henter](https://people.kth.se/~ghe/)
    We propose 🍵 Matcha-TTS, a new approach to non-autoregressive neural TTS, that uses conditional flow matching (similar to rectified flows) to speed up ODE-based speech synthesis. Our method:


    * Is probabilistic
    * Has compact memory footprint
    * Sounds highly natural
    * Is very fast to synthesise from


    Check out our [demo page](https://shivammehta25.github.io/Matcha-TTS). Read our [arXiv preprint for more details](https://arxiv.org/abs/2309.03199).
    Code is available in our [GitHub repository](https://github.com/shivammehta25/Matcha-TTS), along with pre-trained models.

    Cached examples are available at the bottom of the page.
    """

    with gr.Blocks(title="🍵 Matcha-TTS: A fast TTS architecture with conditional flow matching") as demo:
        # Hidden state carrying the encoder output between the two click steps.
        processed_text = gr.State(value=None)
        processed_text_len = gr.State(value=None)

        with gr.Box():
            with gr.Row():
                gr.Markdown(description, scale=3)
                with gr.Column():
                    gr.Image(LOGO_URL, label="Matcha-TTS logo", height=50, width=50, scale=1, show_label=False)
                    html = '<br><iframe width="560" height="315" src="https://www.youtube.com/embed/xmvJkz3bqw0?si=jN7ILyDsbPwJCGoa" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>'
                    gr.HTML(html)

        with gr.Box():
            radio_options = list(RADIO_OPTIONS.keys())
            model_type = gr.Radio(
                radio_options, value=radio_options[0], label="Choose a Model", interactive=True, container=False
            )

            with gr.Row():
                gr.Markdown("# Text Input")
            with gr.Row():
                text = gr.Textbox(value="", lines=2, label="Text to synthesise", scale=3)
                spk_slider = gr.Slider(
                    minimum=0, maximum=107, step=1, value=args.spk, label="Speaker ID", interactive=True, scale=1
                )

            with gr.Row():
                gr.Markdown("### Hyper parameters")
            with gr.Row():
                n_timesteps = gr.Slider(
                    label="Number of ODE steps",
                    minimum=1,
                    maximum=100,
                    step=1,
                    value=10,
                    interactive=True,
                )
                length_scale = gr.Slider(
                    label="Length scale (Speaking rate)",
                    minimum=0.5,
                    maximum=1.5,
                    step=0.05,
                    value=1.0,
                    interactive=True,
                )
                mel_temp = gr.Slider(
                    label="Sampling temperature",
                    minimum=0.00,
                    maximum=2.001,
                    step=0.16675,
                    value=0.667,
                    interactive=True,
                )

            synth_btn = gr.Button("Synthesise")

        with gr.Box():
            with gr.Row():
                gr.Markdown("### Phonetised text")
                phonetised_text = gr.Textbox(interactive=False, scale=10, label="Phonetised text")

        with gr.Box():
            with gr.Row():
                mel_spectrogram = gr.Image(interactive=False, label="mel spectrogram")

                # with gr.Row():
                audio = gr.Audio(interactive=False, label="Audio")

        with gr.Row(visible=False) as example_row_lj_speech:
            examples = gr.Examples(  # pylint: disable=unused-variable
                examples=[
                    [
                        "We propose Matcha-TTS, a new approach to non-autoregressive neural TTS, that uses conditional flow matching (similar to rectified flows) to speed up O D E-based speech synthesis.",
                        50,
                        0.677,
                        0.95,
                    ],
                    [
                        "The Secret Service believed that it was very doubtful that any President would ride regularly in a vehicle with a fixed top, even though transparent.",
                        2,
                        0.677,
                        0.95,
                    ],
                    [
                        "The Secret Service believed that it was very doubtful that any President would ride regularly in a vehicle with a fixed top, even though transparent.",
                        4,
                        0.677,
                        0.95,
                    ],
                    [
                        "The Secret Service believed that it was very doubtful that any President would ride regularly in a vehicle with a fixed top, even though transparent.",
                        10,
                        0.677,
                        0.95,
                    ],
                ],
                fn=ljspeech_example_cacher,
                inputs=[text, n_timesteps, mel_temp, length_scale],
                outputs=[phonetised_text, audio, mel_spectrogram],
                cache_examples=True,
            )

        with gr.Row() as example_row_multispeaker:
            multi_speaker_examples = gr.Examples(  # pylint: disable=unused-variable
                examples=[
                    [
                        "Hello everyone! I am speaker 0 and I am here to tell you that Matcha-TTS is amazing!",
                        10,
                        0.677,
                        0.85,
                        0,
                    ],
                    [
                        "Hello everyone! I am speaker 16 and I am here to tell you that Matcha-TTS is amazing!",
                        10,
                        0.677,
                        0.85,
                        16,
                    ],
                    # BUG FIX: the original list ended with a stray empty
                    # example (`[]`), which does not match the five declared
                    # inputs below and breaks example caching — removed.
                ],
                fn=multispeaker_example_cacher,
                inputs=[text, n_timesteps, mel_temp, length_scale, spk_slider],
                outputs=[phonetised_text, audio, mel_spectrogram],
                cache_examples=True,
                label="Multi Speaker Examples",
            )

        # Disable the button while a model swap is in flight, then re-enable
        # it (and reconfigure the UI) via load_model_ui.
        model_type.change(lambda x: gr.update(interactive=False), inputs=[synth_btn], outputs=[synth_btn]).then(
            load_model_ui,
            inputs=[model_type, text],
            outputs=[text, synth_btn, spk_slider, example_row_lj_speech, example_row_multispeaker, length_scale],
        )

        # Two-step synthesis: phonemise first, then vocode from the cached state.
        synth_btn.click(
            fn=process_text_gradio,
            inputs=[
                text,
            ],
            outputs=[phonetised_text, processed_text, processed_text_len],
            api_name="matcha_tts",
            queue=True,
        ).then(
            fn=synthesise_mel,
            inputs=[processed_text, processed_text_len, n_timesteps, mel_temp, length_scale, spk_slider],
            outputs=[audio, mel_spectrogram],
        )

    demo.queue().launch(share=True)
317
+
318
+
319
+ if __name__ == "__main__":
320
+ main()
configs/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # this file is needed here to include configs when building project as a package
configs/callbacks/default.yaml ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ defaults:
2
+ - model_checkpoint.yaml
3
+ - model_summary.yaml
4
+ - rich_progress_bar.yaml
5
+ - _self_
configs/callbacks/model_checkpoint.yaml ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.ModelCheckpoint.html
2
+
3
+ model_checkpoint:
4
+ _target_: lightning.pytorch.callbacks.ModelCheckpoint
5
+ dirpath: ${paths.output_dir}/checkpoints # directory to save the model file
6
+ filename: checkpoint_{epoch:03d} # checkpoint filename
7
+ monitor: epoch # name of the logged metric which determines when model is improving
8
+ verbose: False # verbosity mode
9
+ save_last: true # additionally always save an exact copy of the last checkpoint to a file last.ckpt
10
+ save_top_k: 5 # save k best models (determined by above metric)
11
+ mode: "max" # "max" means higher metric value is better, can be also "min"
12
+ auto_insert_metric_name: True # when True, the checkpoints filenames will contain the metric name
13
+ save_weights_only: False # if True, then only the model’s weights will be saved
14
+ every_n_train_steps: null # number of training steps between checkpoints
15
+ train_time_interval: null # checkpoints are monitored at the specified time interval
16
+ every_n_epochs: 10 # number of epochs between checkpoints
17
+ save_on_train_epoch_end: null # whether to run checkpointing at the end of the training epoch or the end of validation
configs/callbacks/model_summary.yaml ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.RichModelSummary.html
2
+
3
+ model_summary:
4
+ _target_: lightning.pytorch.callbacks.RichModelSummary
5
+ max_depth: 3 # the maximum depth of layer nesting that the summary will include
configs/callbacks/none.yaml ADDED
File without changes
configs/callbacks/rich_progress_bar.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # https://lightning.ai/docs/pytorch/latest/api/lightning.pytorch.callbacks.RichProgressBar.html
2
+
3
+ rich_progress_bar:
4
+ _target_: lightning.pytorch.callbacks.RichProgressBar
configs/data/akylai.yaml ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ _target_: matcha.data.text_mel_datamodule.TextMelDataModule
2
+ name: akylai
3
+ train_filelist_path: ./kany_dataset/kany_filelist_train.txt
4
+ valid_filelist_path: ./kany_dataset/kany_filelist_test.txt
5
+ batch_size: 32
6
+ num_workers: 20
7
+ pin_memory: True
8
+ cleaners: [kyrgyz_cleaners]
9
+ add_blank: True
10
+ n_spks: 1
11
+ n_fft: 1024
12
+ n_feats: 80
13
+ sample_rate: 22050
14
+ hop_length: 256
15
+ win_length: 1024
16
+ f_min: 0
17
+ f_max: 8000
18
+ data_statistics: # NOTE: copied from the LJSpeech statistics — recompute for the Kany (akylai) dataset if needed
19
+ mel_mean: -5.6814561
20
+ mel_std: 2.7337122
21
+ seed: ${seed}
configs/data/hi-fi_en-US_female.yaml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - ljspeech
3
+ - _self_
4
+
5
+ # Dataset URL: https://ast-astrec.nict.go.jp/en/release/hi-fi-captain/
6
+ _target_: matcha.data.text_mel_datamodule.TextMelDataModule
7
+ name: hi-fi_en-US_female
8
+ train_filelist_path: data/filelists/hi-fi-captain-en-us-female_train.txt
9
+ valid_filelist_path: data/filelists/hi-fi-captain-en-us-female_val.txt
10
+ batch_size: 32
11
+ cleaners: [english_cleaners_piper]
12
+ data_statistics: # Computed for this dataset
13
+ mel_mean: -6.38385
14
+ mel_std: 2.541796
configs/data/ljspeech.yaml ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ _target_: matcha.data.text_mel_datamodule.TextMelDataModule
2
+ name: ljspeech
3
+ train_filelist_path: /content/kany_dataset/kany_filelist_train.txt
4
+ valid_filelist_path: /content/kany_dataset/kany_filelist_test.txt
5
+ batch_size: 16
6
+ num_workers: 20
7
+ pin_memory: True
8
+ cleaners: [kyrgyz_cleaners]
9
+ add_blank: True
10
+ n_spks: 1
11
+ n_fft: 1024
12
+ n_feats: 80
13
+ sample_rate: 22050
14
+ hop_length: 256
15
+ win_length: 1024
16
+ f_min: 0
17
+ f_max: 8000
18
+ data_statistics: # Computed for ljspeech dataset
19
+ mel_mean: -5.68145561
20
+ mel_std: 2.7337122
21
+ seed: ${seed}
22
+
configs/data/vctk.yaml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - ljspeech
3
+ - _self_
4
+
5
+ _target_: matcha.data.text_mel_datamodule.TextMelDataModule
6
+ name: vctk
7
+ train_filelist_path: data/filelists/vctk_audio_sid_text_train_filelist.txt
8
+ valid_filelist_path: data/filelists/vctk_audio_sid_text_val_filelist.txt
9
+ batch_size: 32
10
+ add_blank: True
11
+ n_spks: 109
12
+ data_statistics: # Computed for vctk dataset
13
+ mel_mean: -6.630575
14
+ mel_std: 2.482914
configs/debug/default.yaml ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+
3
+ # default debugging setup, runs 1 full epoch
4
+ # other debugging configs can inherit from this one
5
+
6
+ # overwrite task name so debugging logs are stored in separate folder
7
+ task_name: "debug"
8
+
9
+ # disable callbacks and loggers during debugging
10
+ # callbacks: null
11
+ # logger: null
12
+
13
+ extras:
14
+ ignore_warnings: False
15
+ enforce_tags: False
16
+
17
+ # sets level of all command line loggers to 'DEBUG'
18
+ # https://hydra.cc/docs/tutorials/basic/running_your_app/logging/
19
+ hydra:
20
+ job_logging:
21
+ root:
22
+ level: DEBUG
23
+
24
+ # use this to also set hydra loggers to 'DEBUG'
25
+ # verbose: True
26
+
27
+ trainer:
28
+ max_epochs: 1
29
+ accelerator: cpu # debuggers don't like gpus
30
+ devices: 1 # debuggers don't like multiprocessing
31
+ detect_anomaly: true # raise exception if NaN or +/-inf is detected in any tensor
32
+
33
+ data:
34
+ num_workers: 0 # debuggers don't like multiprocessing
35
+ pin_memory: False # disable gpu memory pin
configs/debug/fdr.yaml ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+
3
+ # runs 1 train, 1 validation and 1 test step
4
+
5
+ defaults:
6
+ - default
7
+
8
+ trainer:
9
+ fast_dev_run: true
configs/debug/limit.yaml ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+
3
+ # uses only 1% of the training data and 5% of validation/test data
4
+
5
+ defaults:
6
+ - default
7
+
8
+ trainer:
9
+ max_epochs: 3
10
+ limit_train_batches: 0.01
11
+ limit_val_batches: 0.05
12
+ limit_test_batches: 0.05
configs/debug/overfit.yaml ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+
3
+ # overfits to 3 batches
4
+
5
+ defaults:
6
+ - default
7
+
8
+ trainer:
9
+ max_epochs: 20
10
+ overfit_batches: 3
11
+
12
+ # model ckpt and early stopping need to be disabled during overfitting
13
+ callbacks: null
configs/debug/profiler.yaml ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+
3
+ # runs with execution time profiling
4
+
5
+ defaults:
6
+ - default
7
+
8
+ trainer:
9
+ max_epochs: 1
10
+ # profiler: "simple"
11
+ profiler: "advanced"
12
+ # profiler: "pytorch"
13
+ accelerator: gpu
14
+
15
+ limit_train_batches: 0.02
configs/eval.yaml ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+
3
+ defaults:
4
+ - _self_
5
+ - data: akylai # choose datamodule with `test_dataloader()` for evaluation
6
+ - model: matcha
7
+ - logger: null
8
+ - trainer: default
9
+ - paths: default
10
+ - extras: default
11
+ - hydra: default
12
+
13
+ task_name: "eval"
14
+
15
+ tags: ["dev"]
16
+
17
+ # passing checkpoint path is necessary for evaluation
18
+ ckpt_path: ???
configs/experiment/akylai.yaml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+
3
+ # to execute this experiment run:
4
+ # python train.py experiment=multispeaker
5
+
6
+ defaults:
7
+ - override /data: akylai.yaml
8
+
9
+ # all parameters below will be merged with parameters from default configurations set above
10
+ # this allows you to overwrite only specified parameters
11
+
12
+ tags: ["akylai"]
13
+
14
+ run_name: akylai
configs/experiment/hifi_dataset_piper_phonemizer.yaml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+
3
+ # to execute this experiment run:
4
+ # python train.py experiment=multispeaker
5
+
6
+ defaults:
7
+ - override /data: hi-fi_en-US_female.yaml
8
+
9
+ # all parameters below will be merged with parameters from default configurations set above
10
+ # this allows you to overwrite only specified parameters
11
+
12
+ tags: ["hi-fi", "single_speaker", "piper_phonemizer", "en_US", "female"]
13
+
14
+ run_name: hi-fi_en-US_female_piper_phonemizer
configs/experiment/ljspeech.yaml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+
3
+ # to execute this experiment run:
4
+ # python train.py experiment=multispeaker
5
+
6
+ defaults:
7
+ - override /data: ljspeech.yaml
8
+
9
+ # all parameters below will be merged with parameters from default configurations set above
10
+ # this allows you to overwrite only specified parameters
11
+
12
+ tags: ["ljspeech"]
13
+
14
+ run_name: ljspeech
configs/experiment/ljspeech_min_memory.yaml ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+
3
+ # to execute this experiment run:
4
+ # python train.py experiment=multispeaker
5
+
6
+ defaults:
7
+ - override /data: ljspeech.yaml
8
+
9
+ # all parameters below will be merged with parameters from default configurations set above
10
+ # this allows you to overwrite only specified parameters
11
+
12
+ tags: ["ljspeech"]
13
+
14
+ run_name: ljspeech_min
15
+
16
+
17
+ model:
18
+ out_size: 172
configs/experiment/multispeaker.yaml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+
3
+ # to execute this experiment run:
4
+ # python train.py experiment=multispeaker
5
+
6
+ defaults:
7
+ - override /data: vctk.yaml
8
+
9
+ # all parameters below will be merged with parameters from default configurations set above
10
+ # this allows you to overwrite only specified parameters
11
+
12
+ tags: ["multispeaker"]
13
+
14
+ run_name: multispeaker
configs/extras/default.yaml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # disable python warnings if they annoy you
2
+ ignore_warnings: False
3
+
4
+ # ask user for tags if none are provided in the config
5
+ enforce_tags: True
6
+
7
+ # pretty print config tree at the start of the run using Rich library
8
+ print_config: True
configs/hparams_search/mnist_optuna.yaml ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+
3
+ # example hyperparameter optimization of some experiment with Optuna:
4
+ # python train.py -m hparams_search=mnist_optuna experiment=example
5
+
6
+ defaults:
7
+ - override /hydra/sweeper: optuna
8
+
9
+ # choose metric which will be optimized by Optuna
10
+ # make sure this is the correct name of some metric logged in lightning module!
11
+ optimized_metric: "val/acc_best"
12
+
13
+ # here we define Optuna hyperparameter search
14
+ # it optimizes for value returned from function with @hydra.main decorator
15
+ # docs: https://hydra.cc/docs/next/plugins/optuna_sweeper
16
+ hydra:
17
+ mode: "MULTIRUN" # set hydra to multirun by default if this config is attached
18
+
19
+ sweeper:
20
+ _target_: hydra_plugins.hydra_optuna_sweeper.optuna_sweeper.OptunaSweeper
21
+
22
+ # storage URL to persist optimization results
23
+ # for example, you can use SQLite if you set 'sqlite:///example.db'
24
+ storage: null
25
+
26
+ # name of the study to persist optimization results
27
+ study_name: null
28
+
29
+ # number of parallel workers
30
+ n_jobs: 1
31
+
32
+ # 'minimize' or 'maximize' the objective
33
+ direction: maximize
34
+
35
+ # total number of runs that will be executed
36
+ n_trials: 20
37
+
38
+ # choose Optuna hyperparameter sampler
39
+ # you can choose bayesian sampler (tpe), random search (without optimization), grid sampler, and others
40
+ # docs: https://optuna.readthedocs.io/en/stable/reference/samplers.html
41
+ sampler:
42
+ _target_: optuna.samplers.TPESampler
43
+ seed: 1234
44
+ n_startup_trials: 10 # number of random sampling runs before optimization starts
45
+
46
+ # define hyperparameter search space
47
+ params:
48
+ model.optimizer.lr: interval(0.0001, 0.1)
49
+ data.batch_size: choice(32, 64, 128, 256)
50
+ model.net.lin1_size: choice(64, 128, 256)
51
+ model.net.lin2_size: choice(64, 128, 256)
52
+ model.net.lin3_size: choice(32, 64, 128, 256)
configs/hydra/default.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # https://hydra.cc/docs/configure_hydra/intro/
2
+
3
+ # enable color logging
4
+ defaults:
5
+ - override hydra_logging: colorlog
6
+ - override job_logging: colorlog
7
+
8
+ # output directory, generated dynamically on each run
9
+ run:
10
+ dir: ${paths.log_dir}/${task_name}/${run_name}/runs/${now:%Y-%m-%d}_${now:%H-%M-%S}
11
+ sweep:
12
+ dir: ${paths.log_dir}/${task_name}/${run_name}/multiruns/${now:%Y-%m-%d}_${now:%H-%M-%S}
13
+ subdir: ${hydra.job.num}
14
+
15
+ job_logging:
16
+ handlers:
17
+ file:
18
+ # Incorporates fix from https://github.com/facebookresearch/hydra/pull/2242
19
+ filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
configs/local/.gitkeep ADDED
File without changes
configs/logger/aim.yaml ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # https://aimstack.io/
2
+
3
+ # example usage in lightning module:
4
+ # https://github.com/aimhubio/aim/blob/main/examples/pytorch_lightning_track.py
5
+
6
+ # open the Aim UI with the following command (run in the folder containing the `.aim` folder):
7
+ # `aim up`
8
+
9
+ aim:
10
+ _target_: aim.pytorch_lightning.AimLogger
11
+ repo: ${paths.root_dir} # .aim folder will be created here
12
+ # repo: "aim://ip_address:port" # can instead provide IP address pointing to Aim remote tracking server which manages the repo, see https://aimstack.readthedocs.io/en/latest/using/remote_tracking.html#
13
+
14
+ # aim allows to group runs under experiment name
15
+ experiment: null # any string, set to "default" if not specified
16
+
17
+ train_metric_prefix: "train/"
18
+ val_metric_prefix: "val/"
19
+ test_metric_prefix: "test/"
20
+
21
+ # sets the tracking interval in seconds for system usage metrics (CPU, GPU, memory, etc.)
22
+ system_tracking_interval: 10 # set to null to disable system metrics tracking
23
+
24
+ # enable/disable logging of system params such as installed packages, git info, env vars, etc.
25
+ log_system_params: true
26
+
27
+ # enable/disable tracking console logs (default value is true)
28
+ capture_terminal_logs: false # set to false to avoid infinite console log loop issue https://github.com/aimhubio/aim/issues/2550
configs/logger/comet.yaml ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # https://www.comet.ml
2
+
3
+ comet:
4
+ _target_: lightning.pytorch.loggers.comet.CometLogger
5
+ api_key: ${oc.env:COMET_API_TOKEN} # api key is loaded from environment variable
6
+ save_dir: "${paths.output_dir}"
7
+ project_name: "lightning-hydra-template"
8
+ rest_api_key: null
9
+ # experiment_name: ""
10
+ experiment_key: null # set to resume experiment
11
+ offline: False
12
+ prefix: ""
configs/logger/csv.yaml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # CSV logger built into Lightning
2
+
3
+ csv:
4
+ _target_: lightning.pytorch.loggers.csv_logs.CSVLogger
5
+ save_dir: "${paths.output_dir}"
6
+ name: "csv/"
7
+ prefix: ""
configs/logger/many_loggers.yaml ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # train with many loggers at once
2
+
3
+ defaults:
4
+ # - comet
5
+ - csv
6
+ # - mlflow
7
+ # - neptune
8
+ - tensorboard
9
+ - wandb
configs/logger/mlflow.yaml ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # https://mlflow.org
2
+
3
+ mlflow:
4
+ _target_: lightning.pytorch.loggers.mlflow.MLFlowLogger
5
+ # experiment_name: ""
6
+ # run_name: ""
7
+ tracking_uri: ${paths.log_dir}/mlflow/mlruns # run `mlflow ui` command inside the `logs/mlflow/` dir to open the UI
8
+ tags: null
9
+ # save_dir: "./mlruns"
10
+ prefix: ""
11
+ artifact_location: null
12
+ # run_id: ""
configs/logger/neptune.yaml ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # https://neptune.ai
2
+
3
+ neptune:
4
+ _target_: lightning.pytorch.loggers.neptune.NeptuneLogger
5
+ api_key: ${oc.env:NEPTUNE_API_TOKEN} # api key is loaded from environment variable
6
+ project: username/lightning-hydra-template
7
+ # name: ""
8
+ log_model_checkpoints: True
9
+ prefix: ""
configs/logger/tensorboard.yaml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # https://www.tensorflow.org/tensorboard/
2
+
3
+ tensorboard:
4
+ _target_: lightning.pytorch.loggers.tensorboard.TensorBoardLogger
5
+ save_dir: "${paths.output_dir}/tensorboard/"
6
+ name: null
7
+ log_graph: False
8
+ default_hp_metric: True
9
+ prefix: ""
10
+ # version: ""
configs/logger/wandb.yaml ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # https://wandb.ai
2
+
3
+ wandb:
4
+ _target_: lightning.pytorch.loggers.wandb.WandbLogger
5
+ # name: "" # name of the run (normally generated by wandb)
6
+ save_dir: "${paths.output_dir}"
7
+ offline: False
8
+ id: null # pass correct id to resume experiment!
9
+ anonymous: null # enable anonymous logging
10
+ project: "lightning-hydra-template"
11
+ log_model: False # upload lightning ckpts
12
+ prefix: "" # a string to put at the beginning of metric keys
13
+ # entity: "" # set to name of your wandb team
14
+ group: ""
15
+ tags: []
16
+ job_type: ""
configs/model/cfm/default.yaml ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ name: CFM
2
+ solver: euler
3
+ sigma_min: 1e-4
configs/model/decoder/default.yaml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ channels: [256, 256]
2
+ dropout: 0.05
3
+ attention_head_dim: 64
4
+ n_blocks: 1
5
+ num_mid_blocks: 2
6
+ num_heads: 2
7
+ act_fn: snakebeta
configs/model/encoder/default.yaml ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ encoder_type: RoPE Encoder
2
+ encoder_params:
3
+ n_feats: ${model.n_feats}
4
+ n_channels: 192
5
+ filter_channels: 768
6
+ filter_channels_dp: 256
7
+ n_heads: 2
8
+ n_layers: 6
9
+ kernel_size: 3
10
+ p_dropout: 0.1
11
+ spk_emb_dim: 64
12
+ n_spks: 1
13
+ prenet: true
14
+
15
+ duration_predictor_params:
16
+ filter_channels_dp: ${model.encoder.encoder_params.filter_channels_dp}
17
+ kernel_size: 3
18
+ p_dropout: ${model.encoder.encoder_params.p_dropout}
configs/model/matcha.yaml ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - _self_
3
+ - encoder: default.yaml
4
+ - decoder: default.yaml
5
+ - cfm: default.yaml
6
+ - optimizer: adam.yaml
7
+
8
+ _target_: matcha.models.matcha_tts.MatchaTTS
9
+ n_vocab: 178
10
+ n_spks: ${data.n_spks}
11
+ spk_emb_dim: 64
12
+ n_feats: 80
13
+ data_statistics: ${data.data_statistics}
14
+ out_size: null # Must be divisible by 4
15
+ prior_loss: true